From 470f667f16aff463bb670fc79791df53cab88b44 Mon Sep 17 00:00:00 2001 From: Vladislav Zhurba Date: Mon, 7 Oct 2024 11:19:29 -0700 Subject: [PATCH] Patch 12.6.1 - Resolve issue 90 - Apply the new module layout as per issue 75 while maintaining backwards compatibility --- .gitignore | 48 +- MANIFEST.in | 1 - cuda/LICENSE | 48 + cuda/MANIFEST.in | 4 + cuda/README.md | 76 + cuda/{ => cuda}/__init__.pxd | 0 cuda/cuda/__init__.py | 10 + .../bindings/__init__.pxd} | 0 cuda/{ => cuda/bindings}/__init__.py | 0 .../bindings/_bindings}/__init__.py | 0 .../bindings/_bindings/cydriver.pxd.in} | 2 +- .../bindings/_bindings/cydriver.pyx.in} | 4 +- .../bindings/_bindings/cynvrtc.pxd.in} | 2 +- .../bindings/_bindings/cynvrtc.pyx.in} | 2 +- .../bindings/_bindings}/loader.cpp | 0 .../bindings/_bindings}/loader.h | 0 .../bindings/_bindings}/loader.pxd | 0 .../bindings/_lib}/__init__.py | 0 .../bindings/_lib/cyruntime}/__init__.py | 0 .../bindings/_lib/cyruntime/cyruntime.pxd.in} | 4 +- .../bindings/_lib/cyruntime/cyruntime.pyx.in} | 786 +- .../bindings/_lib/cyruntime}/utils.pxd.in | 72 +- .../bindings/_lib/cyruntime}/utils.pyx.in | 1996 +- cuda/{ => cuda/bindings}/_lib/dlfcn.pxd | 0 .../{ => cuda/bindings}/_lib/param_packer.cpp | 0 cuda/{ => cuda/bindings}/_lib/param_packer.h | 0 .../{ => cuda/bindings}/_lib/param_packer.pxd | 0 cuda/{ => cuda/bindings}/_lib/utils.pxd.in | 40 +- cuda/{ => cuda/bindings}/_lib/utils.pyx.in | 262 +- cuda/{ => cuda/bindings}/_version.py | 0 .../bindings/benchmarks}/__init__.py | 0 .../{ => cuda/bindings}/benchmarks/kernels.py | 0 .../bindings}/benchmarks/perf_test_utils.py | 0 .../bindings}/benchmarks/test_cupy.py | 0 .../benchmarks/test_launch_latency.py | 0 .../bindings}/benchmarks/test_numba.py | 0 .../benchmarks/test_pointer_attributes.py | 0 .../bindings/cydriver.pxd.in} | 0 .../bindings/cydriver.pyx.in} | 914 +- .../bindings/cynvrtc.pxd.in} | 0 .../bindings/cynvrtc.pyx.in} | 44 +- .../bindings/cyruntime.pxd.in} | 0 .../bindings/cyruntime.pyx.in} | 6 +- .../bindings/driver.pxd.in} | 610 +- .../bindings/driver.pyx.in} | 10430 ++-- .../0_Introduction/clock_nvrtc_test.py | 4 +- .../simpleCubemapTexture_test.py | 4 +- .../0_Introduction/simpleP2P_test.py | 4 +- .../0_Introduction/simpleZeroCopy_test.py | 6 +- .../0_Introduction/systemWideAtomics_test.py | 4 +- .../0_Introduction/vectorAddDrv_test.py | 4 +- .../0_Introduction/vectorAddMMAP_test.py | 4 +- .../streamOrderedAllocation_test.py | 6 +- .../globalToShmemAsyncCopy_test.py | 6 +- .../3_CUDA_Features/simpleCudaGraphs_test.py | 4 +- .../conjugateGradientMultiBlockCG_test.py | 4 +- .../cuda/bindings/examples}/__init__.py | 0 .../cuda/bindings/examples}/common/common.py | 2 +- .../bindings/examples}/common/helper_cuda.py | 2 +- .../examples}/common/helper_string.py | 0 .../examples}/extra/isoFDModelling_test.py | 4 +- .../examples}/extra/jit_program_test.py | 0 .../examples}/extra/numba_emm_plugin.py | 0 .../cuda/bindings/examples}/setup.cfg | 0 cuda/{ => cuda/bindings}/nvrtc.pxd.in | 8 +- cuda/{ => cuda/bindings}/nvrtc.pyx.in | 210 +- .../bindings/runtime.pxd.in} | 444 +- .../bindings/runtime.pyx.in} | 7064 +-- cuda/cuda/bindings/tests/__init__.py | 0 cuda/{ => cuda/bindings}/tests/test_ccuda.pyx | 4 +- .../bindings}/tests/test_ccudart.pyx | 2 + cuda/{ => cuda/bindings}/tests/test_cuda.py | 0 cuda/{ => cuda/bindings}/tests/test_cudart.py | 0 cuda/{ => cuda/bindings}/tests/test_cython.py | 6 +- .../bindings}/tests/test_interoperability.py | 0 .../tests/test_interoperability_cython.pyx | 3 +
.../bindings}/tests/test_kernelParams.py | 0 cuda/{ => cuda/bindings}/tests/test_nvrtc.py | 0 cuda/cuda/ccuda.pxd | 7 + cuda/cuda/ccuda.pyx | 7 + cuda/cuda/ccudart.pxd | 7 + cuda/cuda/ccudart.pyx | 7 + cuda/cuda/cnvrtc.pxd | 7 + cuda/cuda/cnvrtc.pyx | 7 + cuda/cuda/cuda.pyx | 14 + cuda/cuda/cudart.pyx | 14 + cuda/cuda/nvrtc.pyx | 14 + pyproject.toml => cuda/pyproject.toml | 4 +- pytest.ini => cuda/pytest.ini | 0 requirements.txt => cuda/requirements.txt | 0 setup.py => cuda/setup.py | 89 +- docs/.buildinfo | 2 +- docs/_sources/api.rst.txt | 4 +- docs/_sources/module/cuda.rst.txt | 6792 --- docs/_sources/module/cudart.rst.txt | 5274 --- docs/_sources/module/driver.rst.txt | 6792 +++ docs/_sources/module/nvrtc.rst.txt | 72 +- docs/_sources/module/runtime.rst.txt | 5274 +++ docs/_sources/overview.md.txt | 52 +- docs/_sources/release.md.txt | 2 + docs/_sources/release/11.8.4-notes.md.txt | 32 + docs/_sources/release/12.6.1-notes.md.txt | 33 + docs/_static/documentation_options.js | 2 +- docs/api.html | 142 +- docs/conduct.html | 12 +- docs/contribute.html | 12 +- docs/genindex.html | 9476 ++-- docs/index.html | 18 +- docs/install.html | 12 +- docs/module/cuda.html | 39420 --------------- docs/module/cudart.html | 23594 --------- docs/module/driver.html | 39422 ++++++++++++++++ docs/module/nvrtc.html | 368 +- docs/module/runtime.html | 23596 +++++++++ docs/motivation.html | 12 +- docs/objects.inv | Bin 30796 -> 31172 bytes docs/overview.html | 64 +- docs/release.html | 34 +- docs/release/11.4.0-notes.html | 12 +- docs/release/11.5.0-notes.html | 12 +- docs/release/11.6.0-notes.html | 12 +- docs/release/11.6.1-notes.html | 12 +- docs/release/11.7.0-notes.html | 12 +- docs/release/11.7.1-notes.html | 12 +- docs/release/11.8.0-notes.html | 12 +- docs/release/11.8.1-notes.html | 12 +- docs/release/11.8.2-notes.html | 12 +- docs/release/11.8.3-notes.html | 18 +- docs/release/11.8.4-notes.html | 358 + docs/release/12.0.0-notes.html | 18 +- docs/release/12.1.0-notes.html | 12 +- docs/release/12.2.0-notes.html | 12 +- docs/release/12.2.1-notes.html | 12 +- docs/release/12.3.0-notes.html | 12 +- docs/release/12.4.0-notes.html | 12 +- docs/release/12.5.0-notes.html | 12 +- docs/release/12.6.0-notes.html | 18 +- docs/release/12.6.1-notes.html | 359 + docs/search.html | 12 +- docs/searchindex.js | 2 +- docs_src/source/api.rst | 4 +- docs_src/source/conf.py | 2 +- docs_src/source/module/cuda.rst | 6792 --- docs_src/source/module/cudart.rst | 5274 --- docs_src/source/module/driver.rst | 6792 +++ docs_src/source/module/nvrtc.rst | 72 +- docs_src/source/module/runtime.rst | 5274 +++ docs_src/source/overview.md | 52 +- docs_src/source/release.md | 2 + docs_src/source/release/11.8.4-notes.md | 32 + docs_src/source/release/12.6.1-notes.md | 33 + 151 files changed, 105159 insertions(+), 103985 deletions(-) delete mode 100644 MANIFEST.in create mode 100644 cuda/LICENSE create mode 100644 cuda/MANIFEST.in create mode 100644 cuda/README.md rename cuda/{ => cuda}/__init__.pxd (100%) create mode 100644 cuda/cuda/__init__.py rename cuda/{_cuda/__init__.py => cuda/bindings/__init__.pxd} (100%) rename cuda/{ => cuda/bindings}/__init__.py (100%) rename cuda/{_lib => cuda/bindings/_bindings}/__init__.py (100%) rename cuda/{_cuda/ccuda.pxd.in => cuda/bindings/_bindings/cydriver.pxd.in} (99%) rename cuda/{_cuda/ccuda.pyx.in => cuda/bindings/_bindings/cydriver.pyx.in} (99%) rename cuda/{_cuda/cnvrtc.pxd.in => cuda/bindings/_bindings/cynvrtc.pxd.in} (99%) rename cuda/{_cuda/cnvrtc.pyx.in => 
cuda/bindings/_bindings/cynvrtc.pyx.in} (99%) rename cuda/{_cuda => cuda/bindings/_bindings}/loader.cpp (100%) rename cuda/{_cuda => cuda/bindings/_bindings}/loader.h (100%) rename cuda/{_cuda => cuda/bindings/_bindings}/loader.pxd (100%) rename cuda/{_lib/ccudart => cuda/bindings/_lib}/__init__.py (100%) rename cuda/{benchmarks => cuda/bindings/_lib/cyruntime}/__init__.py (100%) rename cuda/{_lib/ccudart/ccudart.pxd.in => cuda/bindings/_lib/cyruntime/cyruntime.pxd.in} (99%) rename cuda/{_lib/ccudart/ccudart.pyx.in => cuda/bindings/_lib/cyruntime/cyruntime.pyx.in} (79%) rename cuda/{_lib/ccudart => cuda/bindings/_lib/cyruntime}/utils.pxd.in (70%) rename cuda/{_lib/ccudart => cuda/bindings/_lib/cyruntime}/utils.pyx.in (52%) rename cuda/{ => cuda/bindings}/_lib/dlfcn.pxd (100%) rename cuda/{ => cuda/bindings}/_lib/param_packer.cpp (100%) rename cuda/{ => cuda/bindings}/_lib/param_packer.h (100%) rename cuda/{ => cuda/bindings}/_lib/param_packer.pxd (100%) rename cuda/{ => cuda/bindings}/_lib/utils.pxd.in (74%) rename cuda/{ => cuda/bindings}/_lib/utils.pyx.in (69%) rename cuda/{ => cuda/bindings}/_version.py (100%) rename cuda/{tests => cuda/bindings/benchmarks}/__init__.py (100%) rename cuda/{ => cuda/bindings}/benchmarks/kernels.py (100%) rename cuda/{ => cuda/bindings}/benchmarks/perf_test_utils.py (100%) rename cuda/{ => cuda/bindings}/benchmarks/test_cupy.py (100%) rename cuda/{ => cuda/bindings}/benchmarks/test_launch_latency.py (100%) rename cuda/{ => cuda/bindings}/benchmarks/test_numba.py (100%) rename cuda/{ => cuda/bindings}/benchmarks/test_pointer_attributes.py (100%) rename cuda/{ccuda.pxd.in => cuda/bindings/cydriver.pxd.in} (100%) rename cuda/{ccuda.pyx.in => cuda/bindings/cydriver.pyx.in} (73%) rename cuda/{cnvrtc.pxd.in => cuda/bindings/cynvrtc.pxd.in} (100%) rename cuda/{cnvrtc.pyx.in => cuda/bindings/cynvrtc.pyx.in} (76%) rename cuda/{ccudart.pxd.in => cuda/bindings/cyruntime.pxd.in} (100%) rename cuda/{ccudart.pyx.in => cuda/bindings/cyruntime.pyx.in} (99%) rename cuda/{cuda.pxd.in => cuda/bindings/driver.pxd.in} (92%) rename cuda/{cuda.pyx.in => cuda/bindings/driver.pyx.in} (82%) rename {examples => cuda/cuda/bindings/examples}/0_Introduction/clock_nvrtc_test.py (95%) rename {examples => cuda/cuda/bindings/examples}/0_Introduction/simpleCubemapTexture_test.py (98%) rename {examples => cuda/cuda/bindings/examples}/0_Introduction/simpleP2P_test.py (98%) rename {examples => cuda/cuda/bindings/examples}/0_Introduction/simpleZeroCopy_test.py (96%) rename {examples => cuda/cuda/bindings/examples}/0_Introduction/systemWideAtomics_test.py (97%) rename {examples => cuda/cuda/bindings/examples}/0_Introduction/vectorAddDrv_test.py (96%) rename {examples => cuda/cuda/bindings/examples}/0_Introduction/vectorAddMMAP_test.py (98%) rename {examples => cuda/cuda/bindings/examples}/2_Concepts_and_Techniques/streamOrderedAllocation_test.py (97%) rename {examples => cuda/cuda/bindings/examples}/3_CUDA_Features/globalToShmemAsyncCopy_test.py (99%) rename {examples => cuda/cuda/bindings/examples}/3_CUDA_Features/simpleCudaGraphs_test.py (99%) rename {examples => cuda/cuda/bindings/examples}/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py (98%) rename {examples => cuda/cuda/bindings/examples}/__init__.py (100%) rename {examples => cuda/cuda/bindings/examples}/common/common.py (97%) rename {examples => cuda/cuda/bindings/examples}/common/helper_cuda.py (94%) rename {examples => cuda/cuda/bindings/examples}/common/helper_string.py (100%) rename {examples => 
cuda/cuda/bindings/examples}/extra/isoFDModelling_test.py (99%) rename {examples => cuda/cuda/bindings/examples}/extra/jit_program_test.py (100%) rename {examples => cuda/cuda/bindings/examples}/extra/numba_emm_plugin.py (100%) rename {examples => cuda/cuda/bindings/examples}/setup.cfg (100%) rename cuda/{ => cuda/bindings}/nvrtc.pxd.in (83%) rename cuda/{ => cuda/bindings}/nvrtc.pyx.in (80%) rename cuda/{cudart.pxd.in => cuda/bindings/runtime.pxd.in} (89%) rename cuda/{cudart.pyx.in => cuda/bindings/runtime.pyx.in} (80%) create mode 100644 cuda/cuda/bindings/tests/__init__.py rename cuda/{ => cuda/bindings}/tests/test_ccuda.pyx (91%) rename cuda/{ => cuda/bindings}/tests/test_ccudart.pyx (95%) rename cuda/{ => cuda/bindings}/tests/test_cuda.py (100%) rename cuda/{ => cuda/bindings}/tests/test_cudart.py (100%) rename cuda/{ => cuda/bindings}/tests/test_cython.py (85%) rename cuda/{ => cuda/bindings}/tests/test_interoperability.py (100%) rename cuda/{ => cuda/bindings}/tests/test_interoperability_cython.pyx (98%) rename cuda/{ => cuda/bindings}/tests/test_kernelParams.py (100%) rename cuda/{ => cuda/bindings}/tests/test_nvrtc.py (100%) create mode 100644 cuda/cuda/ccuda.pxd create mode 100644 cuda/cuda/ccuda.pyx create mode 100644 cuda/cuda/ccudart.pxd create mode 100644 cuda/cuda/ccudart.pyx create mode 100644 cuda/cuda/cnvrtc.pxd create mode 100644 cuda/cuda/cnvrtc.pyx create mode 100644 cuda/cuda/cuda.pyx create mode 100644 cuda/cuda/cudart.pyx create mode 100644 cuda/cuda/nvrtc.pyx rename pyproject.toml => cuda/pyproject.toml (94%) rename pytest.ini => cuda/pytest.ini (100%) rename requirements.txt => cuda/requirements.txt (100%) rename setup.py => cuda/setup.py (78%) delete mode 100644 docs/_sources/module/cuda.rst.txt delete mode 100644 docs/_sources/module/cudart.rst.txt create mode 100644 docs/_sources/module/driver.rst.txt create mode 100644 docs/_sources/module/runtime.rst.txt create mode 100644 docs/_sources/release/11.8.4-notes.md.txt create mode 100644 docs/_sources/release/12.6.1-notes.md.txt delete mode 100644 docs/module/cuda.html delete mode 100644 docs/module/cudart.html create mode 100644 docs/module/driver.html create mode 100644 docs/module/runtime.html create mode 100644 docs/release/11.8.4-notes.html create mode 100644 docs/release/12.6.1-notes.html delete mode 100644 docs_src/source/module/cuda.rst delete mode 100644 docs_src/source/module/cudart.rst create mode 100644 docs_src/source/module/driver.rst create mode 100644 docs_src/source/module/runtime.rst create mode 100644 docs_src/source/release/11.8.4-notes.md create mode 100644 docs_src/source/release/12.6.1-notes.md diff --git a/.gitignore b/.gitignore index cb11a1bb..c45648a5 100644 --- a/.gitignore +++ b/.gitignore @@ -11,32 +11,32 @@ __pycache__/ .pytest_cache/ .benchmarks/ *.cpp -!cuda/_lib/param_packer.cpp -!cuda/_cuda/loader.cpp +!cuda/cuda/bindings/_lib/param_packer.cpp +!cuda/cuda/bindings/_bindings/loader.cpp # CUDA Python specific (auto-generated) -cuda/_cuda/ccuda.pxd -cuda/_cuda/ccuda.pyx -cuda/_cuda/cnvrtc.pxd -cuda/_cuda/cnvrtc.pyx -cuda/_lib/ccudart/ccudart.pxd -cuda/_lib/ccudart/ccudart.pyx -cuda/_lib/ccudart/utils.pxd -cuda/_lib/ccudart/utils.pyx -cuda/_lib/utils.pxd -cuda/_lib/utils.pyx -cuda/ccuda.pxd -cuda/ccuda.pyx -cuda/ccudart.pxd -cuda/ccudart.pyx -cuda/cnvrtc.pxd -cuda/cnvrtc.pyx -cuda/cuda.pxd -cuda/cuda.pyx -cuda/cudart.pxd -cuda/cudart.pyx -cuda/nvrtc.pxd -cuda/nvrtc.pyx +cuda/cuda/bindings/_bindings/cydriver.pxd +cuda/cuda/bindings/_bindings/cydriver.pyx 
+cuda/cuda/bindings/_bindings/cynvrtc.pxd +cuda/cuda/bindings/_bindings/cynvrtc.pyx +cuda/cuda/bindings/_lib/cyruntime/cyruntime.pxd +cuda/cuda/bindings/_lib/cyruntime/cyruntime.pyx +cuda/cuda/bindings/_lib/cyruntime/utils.pxd +cuda/cuda/bindings/_lib/cyruntime/utils.pyx +cuda/cuda/bindings/_lib/utils.pxd +cuda/cuda/bindings/_lib/utils.pyx +cuda/cuda/bindings/cydriver.pxd +cuda/cuda/bindings/cydriver.pyx +cuda/cuda/bindings/cyruntime.pxd +cuda/cuda/bindings/cyruntime.pyx +cuda/cuda/bindings/cynvrtc.pxd +cuda/cuda/bindings/cynvrtc.pyx +cuda/cuda/bindings/driver.pxd +cuda/cuda/bindings/driver.pyx +cuda/cuda/bindings/runtime.pxd +cuda/cuda/bindings/runtime.pyx +cuda/cuda/bindings/nvrtc.pxd +cuda/cuda/bindings/nvrtc.pyx # Distribution / packaging .Python diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 4e93de45..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -include cuda/_version.py diff --git a/cuda/LICENSE b/cuda/LICENSE new file mode 100644 index 00000000..b7d042fc --- /dev/null +++ b/cuda/LICENSE @@ -0,0 +1,48 @@ +NVIDIA SOFTWARE LICENSE + +This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA CUDA Python software and materials provided hereunder ("SOFTWARE"). + +This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users. + +You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + +1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license. + +2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant: +a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA's intellectual property rights. +b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE. + +3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows: +a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs. +b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE. +c. You may not modify or create derivative works of any portion of the SOFTWARE. +d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE. +e. 
You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. +f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. +g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney's fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms. + +4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems. + +5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE. + +6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + +7. FEEDBACK. You may, but don't have to, provide to NVIDIA any Feedback. "Feedback" means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice. + +8. NO WARRANTIES. 
THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED. + +9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA'S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + +10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA's sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you. + +11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + +12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA's permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. + +13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. 
Department of Treasury's Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE. + +14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is "commercial items" consisting of "commercial computer software" and "commercial computer software documentation" provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051. + +15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party. + +(v. May 12, 2021) diff --git a/cuda/MANIFEST.in b/cuda/MANIFEST.in new file mode 100644 index 00000000..ef9def92 --- /dev/null +++ b/cuda/MANIFEST.in @@ -0,0 +1,4 @@ +recursive-include cuda/ *.pyx *.pxd +# at least with setuptools 75.0.0 this folder was erroneously added +# to the payload, causing file copying to the build environment to fail +exclude cuda/bindings diff --git a/cuda/README.md b/cuda/README.md new file mode 100644 index 00000000..1cbafb56 --- /dev/null +++ b/cuda/README.md @@ -0,0 +1,76 @@ +# CUDA-Python + +CUDA Python is a standard set of low-level interfaces, providing full coverage of and access to the CUDA host APIs from Python. Check out the [Overview](https://nvidia.github.io/cuda-python/overview.html) for the workflow and performance results. + +## Installing + +CUDA Python can be installed from: + +* PyPI +* Conda (nvidia channel) +* Source builds + +There are differences among these options, described further in the [Installation](https://nvidia.github.io/cuda-python/install.html) documentation. Each package guarantees minor version compatibility. + +## Runtime Dependencies + +CUDA Python is supported on all platforms on which CUDA is supported. Specific dependencies are as follows: + +* Driver: Linux (450.80.02 or later), Windows (456.38 or later) +* CUDA Toolkit 12.0 to 12.6 + +Only the NVRTC redistributable component is required from the CUDA Toolkit. The [CUDA Toolkit Documentation](https://docs.nvidia.com/cuda/index.html) Installation Guides can be used for guidance. Note that the NVRTC component in the Toolkit can be obtained via PyPI, Conda, or the local installer. + +### Supported Python Versions + +CUDA Python follows [NEP 29](https://numpy.org/neps/nep-0029-deprecation_policy.html) for its supported Python version guarantee. + +Before dropping support, an issue will be raised to gather feedback.
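As context for the minor-version-compatibility guarantee above, here is a minimal sketch (an editor's illustration, not part of this patch) of querying the installed driver and runtime versions through the bindings. It assumes the post-patch module layout (`cuda.bindings.runtime`) and the error-code-first tuple returns used throughout these bindings:

```python
# Sketch: check that the installed driver can service the runtime version
# these bindings target. Version integers encode major*1000 + minor*10,
# e.g. 12060 -> CUDA 12.6.
from cuda.bindings import runtime

err, driver_ver = runtime.cudaDriverGetVersion()
assert err == runtime.cudaError_t.cudaSuccess
err, runtime_ver = runtime.cudaRuntimeGetVersion()
assert err == runtime.cudaError_t.cudaSuccess

# Minor version compatibility requires driver_ver >= runtime_ver
print(f"driver {driver_ver}, runtime {runtime_ver}")
```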
+ +Source builds work for multiple Python versions; however, pre-built PyPI and Conda packages are only provided for a subset: + +* Python 3.9 to 3.12 + +## Testing + +### Requirements + +Latest dependencies can be found in [requirements.txt](https://github.com/NVIDIA/cuda-python/blob/main/requirements.txt). + +### Unit-tests + +You can run the included tests with: + +``` +python -m pytest +``` + +### Benchmark + +You can run the benchmark-only tests with: + +``` +python -m pytest --benchmark-only +``` + +### Samples + +You can run the included samples with: + +``` +python -m pytest examples +``` + +## Examples + +CUDA Samples rewritten using CUDA Python are found in `examples`. + +Additional custom examples are included: + +- `examples/extra/jit_program_test.py`: Demonstrates the use of the API to compile and + launch a kernel on the device. Includes device memory allocation / + deallocation, transfers between host and device, creation and usage of + streams, and context management. +- `examples/extra/numba_emm_plugin.py`: Implements a Numba External Memory Management + plugin, showing that the CUDA Python Driver API can coexist with other + wrappers of the driver API. diff --git a/cuda/__init__.pxd b/cuda/cuda/__init__.pxd similarity index 100% rename from cuda/__init__.pxd rename to cuda/cuda/__init__.pxd diff --git a/cuda/cuda/__init__.py b/cuda/cuda/__init__.py new file mode 100644 index 00000000..8b302752 --- /dev/null +++ b/cuda/cuda/__init__.py @@ -0,0 +1,10 @@ +def __getattr__(name): + if name == "__version__": + import warnings + warnings.warn("accessing cuda.__version__ is deprecated, " + "please switch to use cuda.bindings.__version__ instead", + DeprecationWarning, stacklevel=2) + from . import bindings + return bindings.__version__ + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/cuda/_cuda/__init__.py b/cuda/cuda/bindings/__init__.pxd similarity index 100% rename from cuda/_cuda/__init__.py rename to cuda/cuda/bindings/__init__.pxd diff --git a/cuda/__init__.py b/cuda/cuda/bindings/__init__.py similarity index 100% rename from cuda/__init__.py rename to cuda/cuda/bindings/__init__.py diff --git a/cuda/_lib/__init__.py b/cuda/cuda/bindings/_bindings/__init__.py similarity index 100% rename from cuda/_lib/__init__.py rename to cuda/cuda/bindings/_bindings/__init__.py diff --git a/cuda/_cuda/ccuda.pxd.in b/cuda/cuda/bindings/_bindings/cydriver.pxd.in similarity index 99% rename from cuda/_cuda/ccuda.pxd.in rename to cuda/cuda/bindings/_bindings/cydriver.pxd.in index 82eba6e2..21cdb520 100644 --- a/cuda/_cuda/ccuda.pxd.in +++ b/cuda/cuda/bindings/_bindings/cydriver.pxd.in @@ -5,7 +5,7 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited.
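The `__getattr__` hook in the new `cuda/cuda/__init__.py` above keeps `cuda.__version__` working while steering users toward `cuda.bindings.__version__`. A hedged sketch of the transition from user code, assuming the compatibility modules created by this patch (`cuda/cuda/cuda.pyx`, `cuda/cuda/cudart.pyx`, `cuda/cuda/nvrtc.pyx`) re-export the old names:

```python
import warnings

# New layout introduced by this patch:
from cuda.bindings import driver, runtime, nvrtc  # noqa: F401

# Legacy import paths remain importable via the shim modules added here:
from cuda import cuda, cudart  # noqa: F401  (deprecated spelling)

# The __getattr__ hook shown above turns cuda.__version__ access into a
# DeprecationWarning while still returning cuda.bindings.__version__:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import cuda as pkg
    _ = pkg.__version__
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```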
-from cuda.ccuda cimport * +from cuda.bindings.cydriver cimport * {{if 'cuGetErrorString' in found_functions}} diff --git a/cuda/_cuda/ccuda.pyx.in b/cuda/cuda/bindings/_bindings/cydriver.pyx.in similarity index 99% rename from cuda/_cuda/ccuda.pyx.in rename to cuda/cuda/bindings/_bindings/cydriver.pyx.in index 64a296f1..a1c260e1 100644 --- a/cuda/_cuda/ccuda.pyx.in +++ b/cuda/cuda/bindings/_bindings/cydriver.pyx.in @@ -10,11 +10,11 @@ import win32api import struct from pywintypes import error {{else}} -cimport cuda._lib.dlfcn as dlfcn +cimport cuda.bindings._lib.dlfcn as dlfcn {{endif}} import os import sys -cimport cuda._cuda.loader as loader +cimport cuda.bindings._bindings.loader as loader cdef bint __cuPythonInit = False {{if 'cuGetErrorString' in found_functions}}cdef void *__cuGetErrorString = NULL{{endif}} {{if 'cuGetErrorName' in found_functions}}cdef void *__cuGetErrorName = NULL{{endif}} diff --git a/cuda/_cuda/cnvrtc.pxd.in b/cuda/cuda/bindings/_bindings/cynvrtc.pxd.in similarity index 99% rename from cuda/_cuda/cnvrtc.pxd.in rename to cuda/cuda/bindings/_bindings/cynvrtc.pxd.in index 4eccbee8..26d3a6ff 100644 --- a/cuda/_cuda/cnvrtc.pxd.in +++ b/cuda/cuda/bindings/_bindings/cynvrtc.pxd.in @@ -5,7 +5,7 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. -from cuda.cnvrtc cimport * +from cuda.bindings.cynvrtc cimport * {{if 'nvrtcGetErrorString' in found_functions}} diff --git a/cuda/_cuda/cnvrtc.pyx.in b/cuda/cuda/bindings/_bindings/cynvrtc.pyx.in similarity index 99% rename from cuda/_cuda/cnvrtc.pyx.in rename to cuda/cuda/bindings/_bindings/cynvrtc.pyx.in index c6da9417..bb4b4cdb 100644 --- a/cuda/_cuda/cnvrtc.pyx.in +++ b/cuda/cuda/bindings/_bindings/cynvrtc.pyx.in @@ -10,7 +10,7 @@ import win32api import struct from pywintypes import error {{else}} -cimport cuda._lib.dlfcn as dlfcn +cimport cuda.bindings._lib.dlfcn as dlfcn {{endif}} cdef bint __cuPythonInit = False diff --git a/cuda/_cuda/loader.cpp b/cuda/cuda/bindings/_bindings/loader.cpp similarity index 100% rename from cuda/_cuda/loader.cpp rename to cuda/cuda/bindings/_bindings/loader.cpp diff --git a/cuda/_cuda/loader.h b/cuda/cuda/bindings/_bindings/loader.h similarity index 100% rename from cuda/_cuda/loader.h rename to cuda/cuda/bindings/_bindings/loader.h diff --git a/cuda/_cuda/loader.pxd b/cuda/cuda/bindings/_bindings/loader.pxd similarity index 100% rename from cuda/_cuda/loader.pxd rename to cuda/cuda/bindings/_bindings/loader.pxd diff --git a/cuda/_lib/ccudart/__init__.py b/cuda/cuda/bindings/_lib/__init__.py similarity index 100% rename from cuda/_lib/ccudart/__init__.py rename to cuda/cuda/bindings/_lib/__init__.py diff --git a/cuda/benchmarks/__init__.py b/cuda/cuda/bindings/_lib/cyruntime/__init__.py similarity index 100% rename from cuda/benchmarks/__init__.py rename to cuda/cuda/bindings/_lib/cyruntime/__init__.py diff --git a/cuda/_lib/ccudart/ccudart.pxd.in b/cuda/cuda/bindings/_lib/cyruntime/cyruntime.pxd.in similarity index 99% rename from cuda/_lib/ccudart/ccudart.pxd.in rename to cuda/cuda/bindings/_lib/cyruntime/cyruntime.pxd.in index 072fa91a..bb42fc31 100644 --- a/cuda/_lib/ccudart/ccudart.pxd.in +++ b/cuda/cuda/bindings/_lib/cyruntime/cyruntime.pxd.in @@ -5,8 +5,8 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. 
-cimport cuda.ccuda as ccuda -from cuda.ccudart cimport * +cimport cuda.bindings.cydriver as cydriver +from cuda.bindings.cyruntime cimport * from libc.stdlib cimport malloc, free, calloc from libc.string cimport memset, memcpy, strncmp from libcpp cimport bool diff --git a/cuda/_lib/ccudart/ccudart.pyx.in b/cuda/cuda/bindings/_lib/cyruntime/cyruntime.pyx.in similarity index 79% rename from cuda/_lib/ccudart/ccudart.pyx.in rename to cuda/cuda/bindings/_lib/cyruntime/cyruntime.pyx.in index 25cb36f2..7fb09820 100644 --- a/cuda/_lib/ccudart/ccudart.pyx.in +++ b/cuda/cuda/bindings/_lib/cyruntime/cyruntime.pyx.in @@ -5,12 +5,12 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. -from cuda.ccudart cimport * -from cuda._lib.ccudart.utils cimport * +from cuda.bindings.cyruntime cimport * +from cuda.bindings._lib.cyruntime.utils cimport * from libc.stdlib cimport malloc, free, calloc from libc.string cimport memset, memcpy, strncmp from libcpp cimport bool -cimport cuda._cuda.ccuda as ccuda +cimport cuda.bindings._bindings.cydriver as cydriver cdef cudaPythonGlobal m_global = globalGetInstance() @@ -37,7 +37,7 @@ cdef cudaError_t _cudaStreamCreate(cudaStream_t* pStream) except ?cudaErrorCallR err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamCreate(pStream, 0) + err = cydriver._cuStreamCreate(pStream, 0) if err != cudaSuccess: _setLastError(err) return err @@ -52,7 +52,7 @@ cdef cudaError_t _cudaEventCreate(cudaEvent_t* event) except ?cudaErrorCallRequi err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEventCreate(event, ccuda.CUevent_flags_enum.CU_EVENT_DEFAULT) + err = cydriver._cuEventCreate(event, cydriver.CUevent_flags_enum.CU_EVENT_DEFAULT) if err != cudaSuccess: _setLastError(err) return err @@ -63,7 +63,7 @@ cdef cudaError_t _cudaEventCreate(cudaEvent_t* event) except ?cudaErrorCallRequi cdef cudaError_t _cudaEventQuery(cudaEvent_t event) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuEventQuery(event) + err = cydriver._cuEventQuery(event) if err != cudaSuccess: _setLastError(err) return err @@ -87,7 +87,7 @@ cdef cudaChannelFormatDesc _cudaCreateChannelDesc(int x, int y, int z, int w, cu cdef cudaError_t _cudaDriverGetVersion(int* driverVersion) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuDriverGetVersion(driverVersion) + err = cydriver._cuDriverGetVersion(driverVersion) if err != cudaSuccess: _setLastError(err) return err @@ -111,7 +111,7 @@ cdef cudaError_t _cudaDeviceGetTexture1DLinearMaxWidth(size_t* maxWidthInElement return cudaErrorInvalidValue cdef cudaError_t err = cudaSuccess - cdef ccuda.CUarray_format fmt + cdef cydriver.CUarray_format fmt cdef int numChannels = 0 err = m_global.lazyInitDriver() @@ -121,7 +121,7 @@ cdef cudaError_t _cudaDeviceGetTexture1DLinearMaxWidth(size_t* maxWidthInElement if err == cudaSuccess: _setLastError(err) return err - err = ccuda._cuDeviceGetTexture1DLinearMaxWidth(maxWidthInElements, fmt, numChannels, device) + err = cydriver._cuDeviceGetTexture1DLinearMaxWidth(maxWidthInElements, fmt, numChannels, device) if err != cudaSuccess: _setLastError(err) return err @@ -228,8 +228,8 @@ cdef cudaError_t _cudaMemcpyAsync(void* dst, const void* src, size_t count, cuda {{if 'cudaGraphAddMemcpyNode' in found_functions}} cdef cudaError_t 
_cudaGraphAddMemcpyNode(cudaGraphNode_t* pGraphNode, cudaGraph_t graph, const cudaGraphNode_t* pDependencies, size_t numDependencies, const cudaMemcpy3DParms* pCopyParams) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUcontext context - cdef ccuda.CUDA_MEMCPY3D_v2 driverNodeParams + cdef cydriver.CUcontext context + cdef cydriver.CUDA_MEMCPY3D_v2 driverNodeParams cdef cudaError_t err = cudaSuccess if pCopyParams == NULL: @@ -240,7 +240,7 @@ cdef cudaError_t _cudaGraphAddMemcpyNode(cudaGraphNode_t* pGraphNode, cudaGraph_ if err != cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -250,7 +250,7 @@ cdef cudaError_t _cudaGraphAddMemcpyNode(cudaGraphNode_t* pGraphNode, cudaGraph_ _setLastError(err) return err - err = ccuda._cuGraphAddMemcpyNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams, context) + err = cydriver._cuGraphAddMemcpyNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams, context) if err != cudaSuccess: _setLastError(err) return err @@ -260,8 +260,8 @@ cdef cudaError_t _cudaGraphAddMemcpyNode(cudaGraphNode_t* pGraphNode, cudaGraph_ {{if 'cudaGraphAddMemcpyNode1D' in found_functions}} cdef cudaError_t _cudaGraphAddMemcpyNode1D(cudaGraphNode_t* pGraphNode, cudaGraph_t graph, const cudaGraphNode_t* pDependencies, size_t numDependencies, void* dst, const void* src, size_t count, cudaMemcpyKind kind) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUcontext context - cdef ccuda.CUDA_MEMCPY3D_v2 driverNodeParams + cdef cydriver.CUcontext context + cdef cydriver.CUDA_MEMCPY3D_v2 driverNodeParams cdef cudaMemcpy3DParms copyParams cdef cudaError_t err = cudaSuccess @@ -269,7 +269,7 @@ cdef cudaError_t _cudaGraphAddMemcpyNode1D(cudaGraphNode_t* pGraphNode, cudaGrap if err != cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -281,7 +281,7 @@ cdef cudaError_t _cudaGraphAddMemcpyNode1D(cudaGraphNode_t* pGraphNode, cudaGrap _setLastError(err) return err - err = ccuda._cuGraphAddMemcpyNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams, context) + err = cydriver._cuGraphAddMemcpyNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams, context) if err != cudaSuccess: _setLastError(err) return err @@ -291,7 +291,7 @@ cdef cudaError_t _cudaGraphAddMemcpyNode1D(cudaGraphNode_t* pGraphNode, cudaGrap {{if 'cudaGraphMemcpyNodeSetParams1D' in found_functions}} cdef cudaError_t _cudaGraphMemcpyNodeSetParams1D(cudaGraphNode_t node, void* dst, const void* src, size_t count, cudaMemcpyKind kind) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUDA_MEMCPY3D_v2 driverNodeParams + cdef cydriver.CUDA_MEMCPY3D_v2 driverNodeParams cdef cudaMemcpy3DParms copyParams cdef cudaError_t err = cudaSuccess @@ -305,7 +305,7 @@ cdef cudaError_t _cudaGraphMemcpyNodeSetParams1D(cudaGraphNode_t node, void* dst _setLastError(err) return err - err = ccuda._cuGraphMemcpyNodeSetParams(node, &driverNodeParams) + err = cydriver._cuGraphMemcpyNodeSetParams(node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -315,8 +315,8 @@ cdef cudaError_t _cudaGraphMemcpyNodeSetParams1D(cudaGraphNode_t node, void* dst {{if 'cudaGraphExecMemcpyNodeSetParams' in found_functions}} cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams(cudaGraphExec_t hGraphExec, 
cudaGraphNode_t node, const cudaMemcpy3DParms* pNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUcontext context - cdef ccuda.CUDA_MEMCPY3D_v2 driverNodeParams + cdef cydriver.CUcontext context + cdef cydriver.CUDA_MEMCPY3D_v2 driverNodeParams cdef cudaError_t err = cudaSuccess if pNodeParams == NULL: @@ -327,7 +327,7 @@ cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams(cudaGraphExec_t hGraphExec, c if err != cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -337,7 +337,7 @@ cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams(cudaGraphExec_t hGraphExec, c _setLastError(err) return err - err = ccuda._cuGraphExecMemcpyNodeSetParams(hGraphExec, node, &driverNodeParams, context) + err = cydriver._cuGraphExecMemcpyNodeSetParams(hGraphExec, node, &driverNodeParams, context) if err != cudaSuccess: _setLastError(err) return err @@ -347,8 +347,8 @@ cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams(cudaGraphExec_t hGraphExec, c {{if 'cudaGraphExecMemcpyNodeSetParams1D' in found_functions}} cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams1D(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, void* dst, const void* src, size_t count, cudaMemcpyKind kind) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUcontext context - cdef ccuda.CUDA_MEMCPY3D_v2 driverNodeParams + cdef cydriver.CUcontext context + cdef cydriver.CUDA_MEMCPY3D_v2 driverNodeParams cdef cudaMemcpy3DParms copyParams cdef cudaError_t err = cudaSuccess @@ -356,7 +356,7 @@ cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams1D(cudaGraphExec_t hGraphExec, if err != cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -368,7 +368,7 @@ cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams1D(cudaGraphExec_t hGraphExec, _setLastError(err) return err - err = ccuda._cuGraphExecMemcpyNodeSetParams(hGraphExec, node, &driverNodeParams, context) + err = cydriver._cuGraphExecMemcpyNodeSetParams(hGraphExec, node, &driverNodeParams, context) if err != cudaSuccess: _setLastError(err) return err @@ -379,7 +379,7 @@ cdef cudaError_t _cudaGraphExecMemcpyNodeSetParams1D(cudaGraphExec_t hGraphExec, cdef cudaError_t _cudaGetDriverEntryPoint(const char* symbol, void** funcPtr, unsigned long long flags, cudaDriverEntryPointQueryResult* driverStatus) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuGetProcAddress_v2(symbol, funcPtr, m_global._CUDART_VERSION, flags, driverStatus) + err = cydriver._cuGetProcAddress_v2(symbol, funcPtr, m_global._CUDART_VERSION, flags, driverStatus) if err != cudaSuccess: _setLastError(err) return err @@ -389,8 +389,8 @@ cdef cudaError_t _cudaGetDriverEntryPoint(const char* symbol, void** funcPtr, un {{if 'cudaGraphAddMemsetNode' in found_functions}} cdef cudaError_t _cudaGraphAddMemsetNode(cudaGraphNode_t* pGraphNode, cudaGraph_t graph, const cudaGraphNode_t* pDependencies, size_t numDependencies, const cudaMemsetParams* pMemsetParams) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUcontext context - cdef ccuda.CUDA_MEMSET_NODE_PARAMS driverParams + cdef cydriver.CUcontext context + cdef cydriver.CUDA_MEMSET_NODE_PARAMS driverParams cdef cudaError_t err = cudaSuccess if pMemsetParams == NULL: @@ -401,14 +401,14 @@ cdef cudaError_t _cudaGraphAddMemsetNode(cudaGraphNode_t* pGraphNode, cudaGraph_ if err != 
cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err toDriverMemsetNodeParams(pMemsetParams, &driverParams) - err = ccuda._cuGraphAddMemsetNode(pGraphNode, graph, pDependencies, numDependencies, &driverParams, context) + err = cydriver._cuGraphAddMemsetNode(pGraphNode, graph, pDependencies, numDependencies, &driverParams, context) if err != cudaSuccess: _setLastError(err) return err @@ -418,8 +418,8 @@ cdef cudaError_t _cudaGraphAddMemsetNode(cudaGraphNode_t* pGraphNode, cudaGraph_ {{if 'cudaGraphExecMemsetNodeSetParams' in found_functions}} cdef cudaError_t _cudaGraphExecMemsetNodeSetParams(cudaGraphExec_t hGraphExec, cudaGraphNode_t node, const cudaMemsetParams* pNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUcontext context - cdef ccuda.CUDA_MEMSET_NODE_PARAMS driverParams + cdef cydriver.CUcontext context + cdef cydriver.CUDA_MEMSET_NODE_PARAMS driverParams cdef cudaError_t err = cudaSuccess if pNodeParams == NULL: @@ -430,14 +430,14 @@ cdef cudaError_t _cudaGraphExecMemsetNodeSetParams(cudaGraphExec_t hGraphExec, c if err != cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err toDriverMemsetNodeParams(pNodeParams, &driverParams) - err = ccuda._cuGraphExecMemsetNodeSetParams(hGraphExec, node, &driverParams, context) + err = cydriver._cuGraphExecMemsetNodeSetParams(hGraphExec, node, &driverParams, context) if err != cudaSuccess: _setLastError(err) return err @@ -447,7 +447,7 @@ cdef cudaError_t _cudaGraphExecMemsetNodeSetParams(cudaGraphExec_t hGraphExec, c {{if 'cudaGraphMemcpyNodeSetParams' in found_functions}} cdef cudaError_t _cudaGraphMemcpyNodeSetParams(cudaGraphNode_t node, const cudaMemcpy3DParms* pNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUDA_MEMCPY3D_v2 driverNodeParams + cdef cydriver.CUDA_MEMCPY3D_v2 driverNodeParams cdef cudaError_t err = cudaSuccess if pNodeParams == NULL: @@ -461,7 +461,7 @@ cdef cudaError_t _cudaGraphMemcpyNodeSetParams(cudaGraphNode_t node, const cudaM if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuGraphMemcpyNodeSetParams(node, &driverNodeParams) + err = cydriver._cuGraphMemcpyNodeSetParams(node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -471,7 +471,7 @@ cdef cudaError_t _cudaGraphMemcpyNodeSetParams(cudaGraphNode_t node, const cudaM {{if 'cudaGraphMemcpyNodeGetParams' in found_functions}} cdef cudaError_t _cudaGraphMemcpyNodeGetParams(cudaGraphNode_t node, cudaMemcpy3DParms* p) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUDA_MEMCPY3D_v2 driverNodeParams + cdef cydriver.CUDA_MEMCPY3D_v2 driverNodeParams if p == NULL: _setLastError(cudaErrorInvalidValue) @@ -481,7 +481,7 @@ cdef cudaError_t _cudaGraphMemcpyNodeGetParams(cudaGraphNode_t node, cudaMemcpy3 err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphMemcpyNodeGetParams(node, &driverNodeParams) + err = cydriver._cuGraphMemcpyNodeGetParams(node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -504,46 +504,46 @@ cdef cudaError_t _cudaFuncGetAttributes(cudaFuncAttributes* attr, const void* fu return cudaErrorInvalidValue cdef int bytes = 0 memset(attr, 0, sizeof(cudaFuncAttributes)) - err = ccuda._cuFuncGetAttribute(&attr[0].maxThreadsPerBlock, 
ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func) + err = cydriver._cuFuncGetAttribute(&attr[0].maxThreadsPerBlock, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, func) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuFuncGetAttribute(&attr[0].numRegs, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_NUM_REGS, func) + err = cydriver._cuFuncGetAttribute(&attr[0].numRegs, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_NUM_REGS, func) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuFuncGetAttribute(&attr[0].ptxVersion, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PTX_VERSION, func) + err = cydriver._cuFuncGetAttribute(&attr[0].ptxVersion, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PTX_VERSION, func) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuFuncGetAttribute(&attr[0].binaryVersion, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_BINARY_VERSION, func) + err = cydriver._cuFuncGetAttribute(&attr[0].binaryVersion, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_BINARY_VERSION, func) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuFuncGetAttribute(&bytes, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, func) + err = cydriver._cuFuncGetAttribute(&bytes, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, func) if err != cudaSuccess: _setLastError(err) return err attr[0].sharedSizeBytes = bytes - err = ccuda._cuFuncGetAttribute(&bytes, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES, func) + err = cydriver._cuFuncGetAttribute(&bytes, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES, func) if err != cudaSuccess: _setLastError(err) return err attr[0].constSizeBytes = bytes - err = ccuda._cuFuncGetAttribute(&bytes, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, func) + err = cydriver._cuFuncGetAttribute(&bytes, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, func) if err != cudaSuccess: _setLastError(err) return err attr[0].localSizeBytes = bytes - err = ccuda._cuFuncGetAttribute(&attr[0].cacheModeCA, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA, func) + err = cydriver._cuFuncGetAttribute(&attr[0].cacheModeCA, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA, func) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuFuncGetAttribute(&bytes, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, func) + err = cydriver._cuFuncGetAttribute(&bytes, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, func) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuFuncGetAttribute(&attr[0].preferredShmemCarveout, ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT, func) + err = cydriver._cuFuncGetAttribute(&attr[0].preferredShmemCarveout, cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT, func) if err != cudaSuccess: _setLastError(err) return err @@ -615,7 +615,7 @@ cdef const char* _cudaGetErrorString(cudaError_t error) except ?NULL nogil: cdef const char* pStr = NULL cdef cudaError_t err = cudaSuccess - err = ccuda._cuGetErrorString(error, &pStr) + err = cydriver._cuGetErrorString(error, &pStr) if err != cudaSuccess: _setLastError(err) if err == cudaErrorInvalidValue: @@ -658,7 +658,7 @@ cdef cudaError_t 
_cudaStreamGetCaptureInfo_v2(cudaStream_t stream, cudaStreamCap cdef cudaError_t _cudaImportExternalSemaphore(cudaExternalSemaphore_t* extSem_out, const cudaExternalSemaphoreHandleDesc* semHandleDesc) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC driverSemHandleDesc + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC driverSemHandleDesc if semHandleDesc == NULL: _setLastError(cudaErrorInvalidValue) @@ -667,40 +667,40 @@ cdef cudaError_t _cudaImportExternalSemaphore(cudaExternalSemaphore_t* extSem_ou memset(&driverSemHandleDesc, 0, sizeof(driverSemHandleDesc)) if semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD driverSemHandleDesc.handle.fd = semHandleDesc.handle.fd elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 driverSemHandleDesc.handle.win32.handle = semHandleDesc.handle.win32.handle driverSemHandleDesc.handle.win32.name = semHandleDesc.handle.win32.name elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT driverSemHandleDesc.handle.win32.handle = semHandleDesc.handle.win32.handle driverSemHandleDesc.handle.win32.name = semHandleDesc.handle.win32.name elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D12Fence: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE driverSemHandleDesc.handle.win32.handle = semHandleDesc.handle.win32.handle driverSemHandleDesc.handle.win32.name = semHandleDesc.handle.win32.name elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D11Fence: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE driverSemHandleDesc.handle.win32.handle = semHandleDesc.handle.win32.handle driverSemHandleDesc.handle.win32.name = semHandleDesc.handle.win32.name elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeNvSciSync: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC driverSemHandleDesc.handle.nvSciSyncObj = semHandleDesc.handle.nvSciSyncObj elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutex: - driverSemHandleDesc.type = 
ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX driverSemHandleDesc.handle.win32.handle = semHandleDesc.handle.win32.handle driverSemHandleDesc.handle.win32.name = semHandleDesc.handle.win32.name elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutexKmt: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT driverSemHandleDesc.handle.win32.handle = semHandleDesc.handle.win32.handle driverSemHandleDesc.handle.win32.name = semHandleDesc.handle.win32.name elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD driverSemHandleDesc.handle.fd = semHandleDesc.handle.fd elif semHandleDesc.type == cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32: - driverSemHandleDesc.type = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + driverSemHandleDesc.type = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 driverSemHandleDesc.handle.win32.handle = semHandleDesc.handle.win32.handle driverSemHandleDesc.handle.win32.name = semHandleDesc.handle.win32.name driverSemHandleDesc.flags = semHandleDesc.flags @@ -708,7 +708,7 @@ cdef cudaError_t _cudaImportExternalSemaphore(cudaExternalSemaphore_t* extSem_ou err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuImportExternalSemaphore(extSem_out, &driverSemHandleDesc) + err = cydriver._cuImportExternalSemaphore(extSem_out, &driverSemHandleDesc) if err != cudaSuccess: _setLastError(err) return err @@ -722,7 +722,7 @@ cdef cudaError_t _cudaSignalExternalSemaphoresAsync_v2(const cudaExternalSemapho err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuSignalExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) + err = cydriver._cuSignalExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) if err != cudaSuccess: _setLastError(err) return err @@ -737,7 +737,7 @@ cdef cudaError_t _cudaWaitExternalSemaphoresAsync_v2(const cudaExternalSemaphore err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuWaitExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) + err = cydriver._cuWaitExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) if err != cudaSuccess: _setLastError(err) return err @@ -749,7 +749,7 @@ cdef cudaError_t _cudaWaitExternalSemaphoresAsync_v2(const cudaExternalSemaphore cdef cudaError_t _cudaArrayGetInfo(cudaChannelFormatDesc* desc, cudaExtent* extent, unsigned int* flags, cudaArray_t array) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2 driverDesc + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2 driverDesc cdef size_t width = 0 cdef size_t height = 0 cdef size_t depth = 0 @@ 
-765,7 +765,7 @@ cdef cudaError_t _cudaArrayGetInfo(cudaChannelFormatDesc* desc, cudaExtent* exte if extent: memset(extent, 0, sizeof(extent[0])) - err = ccuda._cuArray3DGetDescriptor_v2(&driverDesc, array) + err = cydriver._cuArray3DGetDescriptor_v2(&driverDesc, array) if err != cudaSuccess: _setLastError(err) return err @@ -965,16 +965,16 @@ cdef cudaError_t _cudaMemcpyFromArrayAsync(void* dst, cudaArray_const_t src, siz cdef cudaError_t _cudaPointerGetAttributes(cudaPointerAttributes* attributes, const void* ptr) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess cdef cudaPointerAttributes attrib - cdef ccuda.CUcontext driverContext = NULL - cdef ccuda.CUmemorytype driverMemoryType + cdef cydriver.CUcontext driverContext = NULL + cdef cydriver.CUmemorytype driverMemoryType cdef int isManaged = 0 - cdef ccuda.CUpointer_attribute[6] query - query[0] = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT - query[1] = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE - query[2] = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER - query[3] = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER - query[4] = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED - query[5] = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL + cdef cydriver.CUpointer_attribute[6] query + query[0] = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT + query[1] = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE + query[2] = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER + query[3] = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER + query[4] = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED + query[5] = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL memset(&attrib, 0, sizeof(attrib)) memset(&driverMemoryType, 0, sizeof(driverMemoryType)) @@ -993,7 +993,7 @@ cdef cudaError_t _cudaPointerGetAttributes(cudaPointerAttributes* attributes, co return cudaErrorInvalidValue # Get all the attributes we need - err = ccuda._cuPointerGetAttributes((sizeof(query)/sizeof(query[0])), query, data, ptr) + err = cydriver._cuPointerGetAttributes((sizeof(query)/sizeof(query[0])), query, data, ptr) if err != cudaSuccess: if attributes != NULL: memset(attributes, 0, sizeof(attributes[0])) @@ -1001,12 +1001,12 @@ cdef cudaError_t _cudaPointerGetAttributes(cudaPointerAttributes* attributes, co _setLastError(err) return err - if driverMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST: + if driverMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST: if isManaged: attrib.type = cudaMemoryTypeManaged else: attrib.type = cudaMemoryTypeHost - elif driverMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE: + elif driverMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE: if isManaged: attrib.type = cudaMemoryTypeManaged else: @@ -1040,15 +1040,15 @@ cdef cudaError_t _cudaGetDeviceFlags(unsigned int* flags) except ?cudaErrorCallR _setLastError(cudaErrorInvalidValue) return cudaErrorInvalidValue - cdef ccuda.CUcontext driverContext - err = ccuda._cuCtxGetCurrent(&driverContext) + cdef cydriver.CUcontext driverContext + err = cydriver._cuCtxGetCurrent(&driverContext) if err != cudaSuccess: _setLastError(err) return err # Get the flags from the current context if driverContext != NULL: - err = ccuda._cuCtxGetFlags(flags) + err = cydriver._cuCtxGetFlags(flags) if err != cudaSuccess: _setLastError(err) 
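The `_cudaPointerGetAttributes` hunk above is a good illustration of what this shim layer actually does: it batches six driver-side queries (context, memory type, device pointer, host pointer, managed flag, device ordinal) into a single `cuPointerGetAttributes` call, then folds the managed flag into the reported memory type. From user code the same information comes back through the public runtime binding; a minimal sketch, assuming cuda-python 12.6.1+ with the new `cuda.bindings` layout this patch introduces (error handling abbreviated):

    from cuda.bindings import runtime

    # Allocate a 1 MiB device buffer and inspect it through the shim above.
    err, dptr = runtime.cudaMalloc(1 << 20)
    assert err == runtime.cudaError_t.cudaSuccess

    err, attrs = runtime.cudaPointerGetAttributes(dptr)
    assert err == runtime.cudaError_t.cudaSuccess

    # attrs.type is host/device/managed, exactly as the CU_MEMORYTYPE_* ->
    # cudaMemoryType* mapping in the hunk above decides it.
    print(attrs.type, attrs.device, attrs.devicePointer)

    err, = runtime.cudaFree(dptr)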
return err @@ -1058,7 +1058,7 @@ cdef cudaError_t _cudaGetDeviceFlags(unsigned int* flags) except ?cudaErrorCallR cdef unsigned int pcFlags cdef int pcActive device = m_global.getDevice(0) - err = ccuda._cuDevicePrimaryCtxGetState(device[0].driverDevice, &pcFlags, &pcActive) + err = cydriver._cuDevicePrimaryCtxGetState(device[0].driverDevice, &pcFlags, &pcActive) if err != cudaSuccess: _setLastError(err) return err @@ -1109,12 +1109,12 @@ cdef cudaError_t _cudaMemPoolSetAccess(cudaMemPool_t memPool, const cudaMemAcces if err != cudaSuccess: return err cdef size_t MAX_DEVICES = 32 - cdef ccuda.CUmemAccessDesc localList[32] - cdef ccuda.CUmemAccessDesc *cuDescList + cdef cydriver.CUmemAccessDesc localList[32] + cdef cydriver.CUmemAccessDesc *cuDescList cdef size_t i = 0 if (count > MAX_DEVICES): - cuDescList = calloc(sizeof(ccuda.CUmemAccessDesc), count) + cuDescList = calloc(sizeof(cydriver.CUmemAccessDesc), count) else: cuDescList = localList @@ -1123,12 +1123,12 @@ cdef cudaError_t _cudaMemPoolSetAccess(cudaMemPool_t memPool, const cudaMemAcces return cudaErrorMemoryAllocation while i < count: - cuDescList[i].location.type = descList[i].location.type + cuDescList[i].location.type = descList[i].location.type cuDescList[i].location.id = descList[i].location.id - cuDescList[i].flags = descList[i].flags + cuDescList[i].flags = descList[i].flags i += 1 - err = ccuda._cuMemPoolSetAccess(memPool, cuDescList, count) + err = cydriver._cuMemPoolSetAccess(memPool, cuDescList, count) if err != cudaSuccess: _setLastError(err) return err @@ -1148,8 +1148,8 @@ cdef cudaError_t _cudaDeviceReset() except ?cudaErrorCallRequiresNewerDriver nog if not m_global._lazyInitDriver: return cudaSuccess - cdef ccuda.CUcontext context - err = ccuda._cuCtxGetCurrent(&context) + cdef cydriver.CUcontext context + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -1184,14 +1184,14 @@ cdef cudaError_t _cudaPeekAtLastError() except ?cudaErrorCallRequiresNewerDriver cdef cudaError_t _cudaGetDevice(int* deviceOrdinal) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUdevice driverDevice = 0 + cdef cydriver.CUdevice driverDevice = 0 err = m_global.lazyInitDriver() if err != cudaSuccess: return err cdef cudaPythonDevice *cudaDevice - err = ccuda._cuCtxGetDevice(&driverDevice) + err = cydriver._cuCtxGetDevice(&driverDevice) if err == cudaSuccess: cudaDevice = m_global.getDeviceFromDriver(driverDevice) deviceOrdinal[0] = cudaDevice[0].deviceOrdinal @@ -1222,7 +1222,7 @@ cdef cudaError_t _cudaSetDevice(int deviceOrdinal) except ?cudaErrorCallRequires if device.primaryContext == NULL: initPrimaryContext(device) - err = ccuda._cuCtxSetCurrent(device.primaryContext) + err = cydriver._cuCtxSetCurrent(device.primaryContext) if err != cudaSuccess: _setLastError(err) return err @@ -1242,27 +1242,27 @@ cdef cudaError_t _cudaGetDeviceProperties_v2(cudaDeviceProp* prop, int deviceOrd _setLastError(err) return cudaErrorInvalidDevice - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.kernelExecTimeoutEnabled), ccuda.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, device[0].driverDevice) + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.kernelExecTimeoutEnabled), cydriver.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, device[0].driverDevice) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.computeMode), ccuda.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE, 
device[0].driverDevice) + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.computeMode), cydriver.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE, device[0].driverDevice) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.clockRate), ccuda.CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device[0].driverDevice) + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.clockRate), cydriver.CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device[0].driverDevice) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryClockRate), ccuda.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, device[0].driverDevice) + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryClockRate), cydriver.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, device[0].driverDevice) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.singleToDoublePrecisionPerfRatio), ccuda.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO, device[0].driverDevice) + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.singleToDoublePrecisionPerfRatio), cydriver.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO, device[0].driverDevice) if err != cudaSuccess: _setLastError(err) return err @@ -1357,9 +1357,9 @@ cdef cudaError_t _cudaCreateTextureObject(cudaTextureObject_t* pTexObject, const _setLastError(cudaErrorInvalidValue) return cudaErrorInvalidValue - cdef ccuda.CUDA_RESOURCE_DESC rd - cdef ccuda.CUDA_TEXTURE_DESC td - cdef ccuda.CUDA_RESOURCE_VIEW_DESC rvd + cdef cydriver.CUDA_RESOURCE_DESC rd + cdef cydriver.CUDA_TEXTURE_DESC td + cdef cydriver.CUDA_RESOURCE_VIEW_DESC rvd cdef cudaTextureDesc texDesc memcpy(&texDesc, pTexDesc, sizeof(cudaTextureDesc)) texDesc.seamlessCubemap = 0 @@ -1375,9 +1375,9 @@ cdef cudaError_t _cudaCreateTextureObject(cudaTextureObject_t* pTexObject, const _setLastError(err) return err if pResViewDesc: - err = ccuda._cuTexObjectCreate(pTexObject, &rd, &td, &rvd) + err = cydriver._cuTexObjectCreate(pTexObject, &rd, &td, &rvd) else: - err = ccuda._cuTexObjectCreate(pTexObject, &rd, &td, NULL) + err = cydriver._cuTexObjectCreate(pTexObject, &rd, &td, NULL) if err != cudaSuccess: _setLastError(err) return err @@ -1389,18 +1389,18 @@ cdef cudaError_t _cudaGetTextureObjectTextureDesc(cudaTextureDesc* pTexDesc, cud cdef cudaError_t err = cudaSuccess cdef cudaResourceDesc resDesc - cdef ccuda.CUDA_RESOURCE_DESC rd - cdef ccuda.CUDA_TEXTURE_DESC td + cdef cydriver.CUDA_RESOURCE_DESC rd + cdef cydriver.CUDA_TEXTURE_DESC td cdef cudaTextureDesc texDesc err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuTexObjectGetResourceDesc(&rd, texObject) + err = cydriver._cuTexObjectGetResourceDesc(&rd, texObject) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuTexObjectGetTextureDesc(&td, texObject) + err = cydriver._cuTexObjectGetTextureDesc(&td, texObject) if err != cudaSuccess: _setLastError(err) return err @@ -1418,17 +1418,17 @@ cdef cudaError_t _cudaGetTextureObjectTextureDesc(cudaTextureDesc* pTexDesc, cud cdef cudaError_t _cudaGetTextureObjectResourceViewDesc(cudaResourceViewDesc* pResViewDesc, cudaTextureObject_t texObject) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess cdef cudaResourceDesc resDesc - cdef ccuda.CUDA_RESOURCE_DESC rd - cdef ccuda.CUDA_RESOURCE_VIEW_DESC rvd + cdef cydriver.CUDA_RESOURCE_DESC rd + cdef 
cydriver.CUDA_RESOURCE_VIEW_DESC rvd err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda.cuTexObjectGetResourceDesc(&rd, texObject) + err = cydriver.cuTexObjectGetResourceDesc(&rd, texObject) if err != cudaSuccess: _setLastError(err) return err - err = ccuda.cuTexObjectGetResourceViewDesc(&rvd, texObject) + err = cydriver.cuTexObjectGetResourceViewDesc(&rvd, texObject) if err != cudaSuccess: _setLastError(err) return err @@ -1449,7 +1449,7 @@ cdef cudaError_t _cudaGetExportTable(const void** ppExportTable, const cudaUUID_ err = m_global.lazyInitDriver() if err != cudaSuccess: return err - err = ccuda._cuGetExportTable(ppExportTable, pExportTableId) + err = cydriver._cuGetExportTable(ppExportTable, pExportTableId) if err != cudaSuccess: _setLastError(err) return err @@ -1566,8 +1566,8 @@ cdef cudaError_t _cudaSetDeviceFlags(unsigned int flags) except ?cudaErrorCallRe if err != cudaSuccess: return err - cdef ccuda.CUcontext context - err = ccuda._cuCtxGetCurrent(&context) + cdef cydriver.CUcontext context + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -1579,7 +1579,7 @@ cdef cudaError_t _cudaSetDeviceFlags(unsigned int flags) except ?cudaErrorCallRe # cudaSetDevice may need to be called before retrying call return cudaErrorIncompatibleDriverContext - err = ccuda._cuDevicePrimaryCtxSetFlags_v2(device[0].driverDevice, flags) + err = cydriver._cuDevicePrimaryCtxSetFlags_v2(device[0].driverDevice, flags) if err != cudaSuccess: _setLastError(err) return err @@ -1597,7 +1597,7 @@ cdef cudaError_t _cudaGraphAddMemAllocNode(cudaGraphNode_t* pGraphNode, cudaGrap err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddMemAllocNode(pGraphNode, graph, pDependencies, numDependencies, nodeParams) + err = cydriver._cuGraphAddMemAllocNode(pGraphNode, graph, pDependencies, numDependencies, nodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -1615,7 +1615,7 @@ cdef cudaError_t _cudaGraphMemAllocNodeGetParams(cudaGraphNode_t node, cudaMemAl err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphMemAllocNodeGetParams(node, params_out) + err = cydriver._cuGraphMemAllocNodeGetParams(node, params_out) if err != cudaSuccess: _setLastError(err) return err @@ -1633,7 +1633,7 @@ cdef cudaError_t _cudaGraphMemFreeNodeGetParams(cudaGraphNode_t node, void* dptr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphMemFreeNodeGetParams(node, dptr_out) + err = cydriver._cuGraphMemFreeNodeGetParams(node, dptr_out) if err != cudaSuccess: _setLastError(err) return err @@ -1647,7 +1647,7 @@ cdef cudaError_t _cudaMemAdvise(const void* devPtr, size_t count, cudaMemoryAdvi err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemAdvise(devPtr, count, advice, device) + err = cydriver._cuMemAdvise(devPtr, count, advice, device) if err != cudaSuccess: _setLastError(err) return err @@ -1658,13 +1658,13 @@ cdef cudaError_t _cudaMemAdvise(const void* devPtr, size_t count, cudaMemoryAdvi cdef cudaError_t _cudaMemAdvise_v2(const void* devPtr, size_t count, cudaMemoryAdvise advice, cudaMemLocation location) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUmemLocation _driver_location - _driver_location.type = location.type + cdef cydriver.CUmemLocation _driver_location + _driver_location.type = location.type _driver_location.id = 
location.id err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemAdvise_v2(devPtr, count, advice, _driver_location) + err = cydriver._cuMemAdvise_v2(devPtr, count, advice, _driver_location) if err != cudaSuccess: _setLastError(err) return err @@ -1675,7 +1675,7 @@ cdef cudaError_t _cudaMemAdvise_v2(const void* devPtr, size_t count, cudaMemoryA cdef cudaError_t _cudaMemRangeGetAttribute(void* data, size_t dataSize, cudaMemRangeAttribute attribute, const void* devPtr, size_t count) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuMemRangeGetAttribute(data, dataSize, attribute, devPtr, count) + err = cydriver._cuMemRangeGetAttribute(data, dataSize, attribute, devPtr, count) if err != cudaSuccess: _setLastError(err) return err @@ -1686,7 +1686,7 @@ cdef cudaError_t _cudaMemRangeGetAttribute(void* data, size_t dataSize, cudaMemR cdef cudaError_t _cudaMemRangeGetAttributes(void** data, size_t* dataSizes, cudaMemRangeAttribute* attributes, size_t numAttributes, const void* devPtr, size_t count) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuMemRangeGetAttributes(data, dataSizes, attributes, numAttributes, devPtr, count) + err = cydriver._cuMemRangeGetAttributes(data, dataSizes, attributes, numAttributes, devPtr, count) if err != cudaSuccess: _setLastError(err) return err @@ -1709,7 +1709,7 @@ cdef cudaError_t _cudaGetDeviceCount(int* count) except ?cudaErrorCallRequiresNe cdef cudaError_t _cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuDeviceGetAttribute(value, attr, device) + err = cydriver._cuDeviceGetAttribute(value, attr, device) if err != cudaSuccess: _setLastError(err) return err @@ -1723,7 +1723,7 @@ cdef cudaError_t _cudaDeviceSetSharedMemConfig(cudaSharedMemConfig config) excep err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxSetSharedMemConfig(config) + err = cydriver._cuCtxSetSharedMemConfig(config) if err != cudaSuccess: _setLastError(err) return err @@ -1734,7 +1734,7 @@ cdef cudaError_t _cudaDeviceSetSharedMemConfig(cudaSharedMemConfig config) excep cdef cudaError_t _cudaDeviceGetByPCIBusId(int* device, const char* pciBusId) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuDeviceGetByPCIBusId(device, pciBusId) + err = cydriver._cuDeviceGetByPCIBusId(device, pciBusId) if err != cudaSuccess: _setLastError(err) return err @@ -1745,7 +1745,7 @@ cdef cudaError_t _cudaDeviceGetByPCIBusId(int* device, const char* pciBusId) exc cdef cudaError_t _cudaDeviceGetPCIBusId(char* pciBusId, int length, int device) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuDeviceGetPCIBusId(pciBusId, length, device) + err = cydriver._cuDeviceGetPCIBusId(pciBusId, length, device) if err != cudaSuccess: _setLastError(err) return err @@ -1756,7 +1756,7 @@ cdef cudaError_t _cudaDeviceGetPCIBusId(char* pciBusId, int length, int device) cdef cudaError_t _cudaDeviceGetP2PAttribute(int* value, cudaDeviceP2PAttr attr, int srcDevice, int dstDevice) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuDeviceGetP2PAttribute(value, attr, srcDevice, dstDevice) + err = cydriver._cuDeviceGetP2PAttribute(value, attr, srcDevice, dstDevice) if err != cudaSuccess: 
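The `_cudaMemAdvise_v2` hunk just above shows the other recurring translation in this file: runtime structs such as `cudaMemLocation` are copied field by field (`type`, `id`) into their driver twins (`CUmemLocation`) before the `cydriver` entry point is invoked. At the Python level that translation is invisible; a short sketch using the public integer-device overload (here `cudaMemAttachGlobal` is assumed to be exposed as a module constant, and read-mostly advice is only honored on devices with managed-memory support):

    from cuda.bindings import runtime

    # Managed allocation, then advice; the shim forwards this to cuMemAdvise.
    n = 1 << 20
    err, p = runtime.cudaMallocManaged(n, runtime.cudaMemAttachGlobal)
    assert err == runtime.cudaError_t.cudaSuccess

    # Support varies by platform; the returned status is the driver's verdict.
    err, = runtime.cudaMemAdvise(p, n, runtime.cudaMemoryAdvise.cudaMemAdviseSetReadMostly, 0)

    err, = runtime.cudaFree(p)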
_setLastError(err) return err @@ -1767,13 +1767,13 @@ cdef cudaError_t _cudaDeviceGetP2PAttribute(int* value, cudaDeviceP2PAttr attr, cdef cudaError_t _cudaArrayGetSparseProperties(cudaArraySparseProperties* sparseProperties, cudaArray_t array) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUDA_ARRAY_SPARSE_PROPERTIES _driver_sparseProperties + cdef cydriver.CUDA_ARRAY_SPARSE_PROPERTIES _driver_sparseProperties if not sparseProperties: _setLastError(cudaErrorInvalidValue) return cudaError.cudaErrorInvalidValue memset(sparseProperties, 0, sizeof(cudaArraySparseProperties)) - err = ccuda._cuArrayGetSparseProperties(&_driver_sparseProperties, array) + err = cydriver._cuArrayGetSparseProperties(&_driver_sparseProperties, array) if err == cudaSuccess: sparseProperties[0].miptailFirstLevel = _driver_sparseProperties.miptailFirstLevel sparseProperties[0].miptailSize = _driver_sparseProperties.miptailSize @@ -1792,13 +1792,13 @@ cdef cudaError_t _cudaArrayGetSparseProperties(cudaArraySparseProperties* sparse cdef cudaError_t _cudaMipmappedArrayGetSparseProperties(cudaArraySparseProperties* sparseProperties, cudaMipmappedArray_t mipmap) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUDA_ARRAY_SPARSE_PROPERTIES _driver_sparseProperties + cdef cydriver.CUDA_ARRAY_SPARSE_PROPERTIES _driver_sparseProperties if not sparseProperties: _setLastError(cudaErrorInvalidValue) return cudaError.cudaErrorInvalidValue memset(sparseProperties, 0, sizeof(cudaArraySparseProperties)) - err = ccuda._cuMipmappedArrayGetSparseProperties(&_driver_sparseProperties, mipmap) + err = cydriver._cuMipmappedArrayGetSparseProperties(&_driver_sparseProperties, mipmap) if err == cudaSuccess: sparseProperties[0].miptailFirstLevel = _driver_sparseProperties.miptailFirstLevel sparseProperties[0].miptailSize = _driver_sparseProperties.miptailSize @@ -1827,7 +1827,7 @@ cdef cudaError_t _cudaDeviceCanAccessPeer(int* canAccessPeer, int device, int pe if driverDevice == NULL or driverPeerDevice == NULL: return cudaErrorInvalidDevice - err = ccuda._cuDeviceCanAccessPeer(canAccessPeer, driverDevice.driverDevice, driverPeerDevice.driverDevice) + err = cydriver._cuDeviceCanAccessPeer(canAccessPeer, driverDevice.driverDevice, driverPeerDevice.driverDevice) if err != cudaSuccess: _setLastError(err) return err @@ -1860,7 +1860,7 @@ cdef cudaError_t _cudaMemcpyPeer(void* dst, int dstDevice, const void* src, int if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuMemcpyPeer(dst, peerDevice[0].primaryContext, src, device[0].primaryContext, count) + err = cydriver._cuMemcpyPeer(dst, peerDevice[0].primaryContext, src, device[0].primaryContext, count) if err != cudaSuccess: _setLastError(err) return err @@ -1891,7 +1891,7 @@ cdef cudaError_t _cudaMemcpyPeerAsync(void* dst, int dstDevice, const void* src, if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuMemcpyPeerAsync(dst, peerDevice[0].primaryContext, src, device[0].primaryContext, count, stream) + err = cydriver._cuMemcpyPeerAsync(dst, peerDevice[0].primaryContext, src, device[0].primaryContext, count, stream) if err != cudaSuccess: _setLastError(err) return err @@ -1903,13 +1903,13 @@ cdef cudaError_t _cudaMemcpyPeerAsync(void* dst, int dstDevice, const void* src, cdef cudaError_t _cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUcontext context + cdef 
cydriver.CUcontext context cdef cudaPythonDevice *dev err = m_global.lazyInitContextState() if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -1926,7 +1926,7 @@ cdef cudaError_t _cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuCtxEnablePeerAccess(dev.primaryContext, flags) + err = cydriver._cuCtxEnablePeerAccess(dev.primaryContext, flags) if err != cudaSuccess: _setLastError(err) return err @@ -1937,13 +1937,13 @@ cdef cudaError_t _cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags) cdef cudaError_t _cudaDeviceDisablePeerAccess(int peerDevice) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUcontext context + cdef cydriver.CUcontext context cdef cudaPythonDevice *dev err = m_global.lazyInitContextState() if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err @@ -1960,7 +1960,7 @@ cdef cudaError_t _cudaDeviceDisablePeerAccess(int peerDevice) except ?cudaErrorC if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuCtxDisablePeerAccess(dev.primaryContext) + err = cydriver._cuCtxDisablePeerAccess(dev.primaryContext) if err != cudaSuccess: _setLastError(err) return err @@ -1971,7 +1971,7 @@ cdef cudaError_t _cudaDeviceDisablePeerAccess(int peerDevice) except ?cudaErrorC cdef cudaError_t _cudaExternalMemoryGetMappedMipmappedArray(cudaMipmappedArray_t* mipmap, cudaExternalMemory_t extMem, const cudaExternalMemoryMipmappedArrayDesc* mipmapDesc) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess err = m_global.lazyInitContextState() - cdef ccuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC _driver_mipmapDesc + cdef cydriver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC _driver_mipmapDesc memset(&_driver_mipmapDesc, 0, sizeof(_driver_mipmapDesc)) _driver_mipmapDesc.offset = mipmapDesc[0].offset _driver_mipmapDesc.arrayDesc.Width = mipmapDesc[0].extent.width @@ -1986,7 +1986,7 @@ cdef cudaError_t _cudaExternalMemoryGetMappedMipmappedArray(cudaMipmappedArray_t if err != cudaSuccess: return err - err = ccuda._cuExternalMemoryGetMappedMipmappedArray(mipmap, extMem, &_driver_mipmapDesc) + err = cydriver._cuExternalMemoryGetMappedMipmappedArray(mipmap, extMem, &_driver_mipmapDesc) if err != cudaSuccess: _setLastError(err) return err @@ -1997,55 +1997,55 @@ cdef cudaError_t _cudaExternalMemoryGetMappedMipmappedArray(cudaMipmappedArray_t cdef cudaError_t _cudaGetSurfaceObjectResourceDesc(cudaResourceDesc* pResDesc, cudaSurfaceObject_t surfObject) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess err = m_global.lazyInitContextState() - cdef ccuda.CUDA_RESOURCE_DESC _driver_pResDesc + cdef cydriver.CUDA_RESOURCE_DESC _driver_pResDesc if err != cudaSuccess: return err - err = ccuda._cuSurfObjectGetResourceDesc(&_driver_pResDesc, surfObject) + err = cydriver._cuSurfObjectGetResourceDesc(&_driver_pResDesc, surfObject) memset(pResDesc, 0, sizeof(cudaResourceDesc)) - if _driver_pResDesc.resType == ccuda.CU_RESOURCE_TYPE_ARRAY: + if _driver_pResDesc.resType == cydriver.CU_RESOURCE_TYPE_ARRAY: pResDesc[0].resType = cudaResourceType.cudaResourceTypeArray pResDesc[0].res.array.array = _driver_pResDesc.res.array.hArray - elif 
_driver_pResDesc.resType == ccuda.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: + elif _driver_pResDesc.resType == cydriver.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: pResDesc[0].resType = cudaResourceType.cudaResourceTypeMipmappedArray pResDesc[0].res.mipmap.mipmap = _driver_pResDesc.res.mipmap.hMipmappedArray - elif _driver_pResDesc.resType == ccuda.CU_RESOURCE_TYPE_LINEAR: + elif _driver_pResDesc.resType == cydriver.CU_RESOURCE_TYPE_LINEAR: pResDesc[0].resType = cudaResourceType.cudaResourceTypeLinear pResDesc[0].res.linear.devPtr = _driver_pResDesc.res.linear.devPtr pResDesc[0].res.linear.sizeInBytes = _driver_pResDesc.res.linear.sizeInBytes - elif _driver_pResDesc.resType == ccuda.CU_RESOURCE_TYPE_PITCH2D: + elif _driver_pResDesc.resType == cydriver.CU_RESOURCE_TYPE_PITCH2D: pResDesc[0].resType = cudaResourceType.cudaResourceTypePitch2D pResDesc[0].res.pitch2D.devPtr = _driver_pResDesc.res.pitch2D.devPtr pResDesc[0].res.pitch2D.pitchInBytes = _driver_pResDesc.res.pitch2D.pitchInBytes pResDesc[0].res.pitch2D.width = _driver_pResDesc.res.pitch2D.width pResDesc[0].res.pitch2D.height = _driver_pResDesc.res.pitch2D.height - if _driver_pResDesc.resType == ccuda.CU_RESOURCE_TYPE_LINEAR or _driver_pResDesc.resType == ccuda.CU_RESOURCE_TYPE_PITCH2D: + if _driver_pResDesc.resType == cydriver.CU_RESOURCE_TYPE_LINEAR or _driver_pResDesc.resType == cydriver.CU_RESOURCE_TYPE_PITCH2D: channel_size = 0 - if _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_UNSIGNED_INT8: + if _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_UNSIGNED_INT8: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindUnsigned channel_size = 8 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_UNSIGNED_INT16: + elif _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_UNSIGNED_INT16: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindUnsigned channel_size = 16 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_UNSIGNED_INT32: + elif _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_UNSIGNED_INT32: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindUnsigned channel_size = 32 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_SIGNED_INT8: + elif _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_SIGNED_INT8: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindSigned channel_size = 8 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_SIGNED_INT16: + elif _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_SIGNED_INT16: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindSigned channel_size = 16 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_SIGNED_INT32: + elif _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_SIGNED_INT32: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindSigned channel_size = 32 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_HALF: + elif _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_HALF: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindFloat channel_size = 16 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_FLOAT: + elif _driver_pResDesc.res.linear.format == cydriver.CU_AD_FORMAT_FLOAT: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindFloat channel_size = 32 - elif _driver_pResDesc.res.linear.format == ccuda.CU_AD_FORMAT_NV12: + elif _driver_pResDesc.res.linear.format 
== cydriver.CU_AD_FORMAT_NV12: pResDesc[0].res.linear.desc.f = cudaChannelFormatKind.cudaChannelFormatKindNV12 channel_size = 8 else: @@ -2077,11 +2077,11 @@ cdef cudaError_t _cudaGetSurfaceObjectResourceDesc(cudaResourceDesc* pResDesc, c cdef cudaError_t _cudaGraphKernelNodeGetParams(cudaGraphNode_t node, cudaKernelNodeParams* pNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess err = m_global.lazyInitContextState() - cdef ccuda.CUDA_KERNEL_NODE_PARAMS driverNodeParams + cdef cydriver.CUDA_KERNEL_NODE_PARAMS driverNodeParams if err != cudaSuccess: return err - err = ccuda._cuGraphKernelNodeGetParams_v2(node, &driverNodeParams) + err = cydriver._cuGraphKernelNodeGetParams_v2(node, &driverNodeParams) pNodeParams[0].func = driverNodeParams.func pNodeParams[0].gridDim.x = driverNodeParams.gridDimX pNodeParams[0].gridDim.y = driverNodeParams.gridDimY @@ -2103,7 +2103,7 @@ cdef cudaError_t _cudaGraphKernelNodeGetParams(cudaGraphNode_t node, cudaKernelN cdef cudaError_t _cudaExternalMemoryGetMappedBuffer(void** devPtr, cudaExternalMemory_t extMem, const cudaExternalMemoryBufferDesc* bufferDesc) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess err = m_global.lazyInitContextState() - cdef ccuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC _driver_bufferDesc + cdef cydriver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC _driver_bufferDesc memset(&_driver_bufferDesc, 0, sizeof(_driver_bufferDesc)) _driver_bufferDesc.offset = bufferDesc[0].offset _driver_bufferDesc.size = bufferDesc[0].size @@ -2111,7 +2111,7 @@ cdef cudaError_t _cudaExternalMemoryGetMappedBuffer(void** devPtr, cudaExternalM if err != cudaSuccess: return err - err = ccuda._cuExternalMemoryGetMappedBuffer(devPtr, extMem, &_driver_bufferDesc) + err = cydriver._cuExternalMemoryGetMappedBuffer(devPtr, extMem, &_driver_bufferDesc) if err != cudaSuccess: _setLastError(err) return err @@ -2122,45 +2122,45 @@ cdef cudaError_t _cudaExternalMemoryGetMappedBuffer(void** devPtr, cudaExternalM cdef cudaError_t _cudaImportExternalMemory(cudaExternalMemory_t* extMem_out, const cudaExternalMemoryHandleDesc* memHandleDesc) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess err = m_global.lazyInitContextState() - cdef ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC _driver_memHandleDesc + cdef cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC _driver_memHandleDesc memset(&_driver_memHandleDesc, 0, sizeof(_driver_memHandleDesc)) if memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd: - _driver_memHandleDesc.type = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD _driver_memHandleDesc.handle.fd = memHandleDesc[0].handle.fd elif memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32: - _driver_memHandleDesc.type = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 _driver_memHandleDesc.handle.win32.handle = memHandleDesc[0].handle.win32.handle _driver_memHandleDesc.handle.win32.name = memHandleDesc[0].handle.win32.name elif memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32Kmt: - _driver_memHandleDesc.type = 
ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT _driver_memHandleDesc.handle.win32.handle = memHandleDesc[0].handle.win32.handle _driver_memHandleDesc.handle.win32.name = memHandleDesc[0].handle.win32.name elif memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Heap: - _driver_memHandleDesc.type = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP _driver_memHandleDesc.handle.win32.handle = memHandleDesc[0].handle.win32.handle _driver_memHandleDesc.handle.win32.name = memHandleDesc[0].handle.win32.name elif memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Resource: - _driver_memHandleDesc.type = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE _driver_memHandleDesc.handle.win32.handle = memHandleDesc[0].handle.win32.handle _driver_memHandleDesc.handle.win32.name = memHandleDesc[0].handle.win32.name elif memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11Resource: - _driver_memHandleDesc.type = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE _driver_memHandleDesc.handle.win32.handle = memHandleDesc[0].handle.win32.handle _driver_memHandleDesc.handle.win32.name = memHandleDesc[0].handle.win32.name elif memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11ResourceKmt: - _driver_memHandleDesc.type = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT _driver_memHandleDesc.handle.win32.handle = memHandleDesc[0].handle.win32.handle _driver_memHandleDesc.handle.win32.name = memHandleDesc[0].handle.win32.name elif memHandleDesc[0].type == cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeNvSciBuf: - _driver_memHandleDesc.type = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF + _driver_memHandleDesc.type = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF _driver_memHandleDesc.handle.nvSciBufObject = memHandleDesc[0].handle.nvSciBufObject _driver_memHandleDesc.size = memHandleDesc[0].size _driver_memHandleDesc.flags = memHandleDesc[0].flags if err != cudaSuccess: return err - err = ccuda._cuImportExternalMemory(extMem_out, &_driver_memHandleDesc) + err = cydriver._cuImportExternalMemory(extMem_out, &_driver_memHandleDesc) if err != cudaSuccess: _setLastError(err) return err @@ -2173,14 +2173,14 @@ cdef cudaError_t _cudaCreateSurfaceObject(cudaSurfaceObject_t* pSurfObject, cons err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_RESOURCE_DESC _driver_pResDesc + cdef cydriver.CUDA_RESOURCE_DESC _driver_pResDesc memset(&_driver_pResDesc, 0, sizeof(_driver_pResDesc)) err = toDriverCudaResourceDesc(&_driver_pResDesc, pResDesc) if err != cudaSuccess: _setLastError(err) return err - err = 
ccuda._cuSurfObjectCreate(pSurfObject, &_driver_pResDesc) + err = cydriver._cuSurfObjectCreate(pSurfObject, &_driver_pResDesc) if err != cudaSuccess: _setLastError(err) return err @@ -2193,14 +2193,14 @@ cdef cudaError_t _cudaGetTextureObjectResourceDesc(cudaResourceDesc* pResDesc, c err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_RESOURCE_DESC _driver_pResDesc + cdef cydriver.CUDA_RESOURCE_DESC _driver_pResDesc memset(&_driver_pResDesc, 0, sizeof(_driver_pResDesc)) err = toDriverCudaResourceDesc(&_driver_pResDesc, pResDesc) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuTexObjectGetResourceDesc(&_driver_pResDesc, texObject) + err = cydriver._cuTexObjectGetResourceDesc(&_driver_pResDesc, texObject) if err != cudaSuccess: _setLastError(err) return err @@ -2212,12 +2212,12 @@ cdef cudaError_t _cudaEGLStreamProducerPresentFrame(cudaEglStreamConnection* con err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUeglFrame cueglFrame + cdef cydriver.CUeglFrame cueglFrame err = getDriverEglFrame(&cueglFrame, eglframe) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuEGLStreamProducerPresentFrame(conn, cueglFrame, pStream) + err = cydriver._cuEGLStreamProducerPresentFrame(conn, cueglFrame, pStream) if err != cudaSuccess: _setLastError(err) return err @@ -2231,8 +2231,8 @@ cdef cudaError_t _cudaEGLStreamProducerReturnFrame(cudaEglStreamConnection* conn err = cudaErrorInvalidResourceHandle _setLastError(err) return err - cdef ccuda.CUeglFrame cueglFrame - err = ccuda._cuEGLStreamProducerReturnFrame(conn, &cueglFrame, pStream) + cdef cydriver.CUeglFrame cueglFrame + err = cydriver._cuEGLStreamProducerReturnFrame(conn, &cueglFrame, pStream) if err != cudaSuccess: _setLastError(err) return err @@ -2247,9 +2247,9 @@ cdef cudaError_t _cudaGraphicsResourceGetMappedEglFrame(cudaEglFrame* eglFrame, err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUeglFrame cueglFrame + cdef cydriver.CUeglFrame cueglFrame memset(&cueglFrame, 0, sizeof(cueglFrame)) - err = ccuda._cuGraphicsResourceGetMappedEglFrame(&cueglFrame, resource, index, mipLevel) + err = cydriver._cuGraphicsResourceGetMappedEglFrame(&cueglFrame, resource, index, mipLevel) if err != cudaSuccess: _setLastError(err) return err @@ -2266,13 +2266,13 @@ cdef cudaError_t _cudaVDPAUSetVDPAUDevice(int device, VdpDevice vdpDevice, VdpGe cdef cudaError_t _cudaArrayGetMemoryRequirements(cudaArrayMemoryRequirements* memoryRequirements, cudaArray_t array, int device) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUDA_ARRAY_MEMORY_REQUIREMENTS driverMemoryRequirements + cdef cydriver.CUDA_ARRAY_MEMORY_REQUIREMENTS driverMemoryRequirements if memoryRequirements == NULL: _setLastError(cudaErrorInvalidValue) return cudaErrorInvalidValue memset(memoryRequirements, 0, sizeof(memoryRequirements[0])) - err = ccuda._cuArrayGetMemoryRequirements(&driverMemoryRequirements, array, device) + err = cydriver._cuArrayGetMemoryRequirements(&driverMemoryRequirements, array, device) if err != cudaSuccess: _setLastError(err) return err @@ -2286,13 +2286,13 @@ cdef cudaError_t _cudaArrayGetMemoryRequirements(cudaArrayMemoryRequirements* me cdef cudaError_t _cudaMipmappedArrayGetMemoryRequirements(cudaArrayMemoryRequirements* memoryRequirements, cudaMipmappedArray_t mipmap, int device) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef 
ccuda.CUDA_ARRAY_MEMORY_REQUIREMENTS driverMemoryRequirements + cdef cydriver.CUDA_ARRAY_MEMORY_REQUIREMENTS driverMemoryRequirements if memoryRequirements == NULL: _setLastError(cudaErrorInvalidValue) return cudaErrorInvalidValue memset(memoryRequirements, 0, sizeof(memoryRequirements[0])) - err = ccuda._cuMipmappedArrayGetMemoryRequirements(&driverMemoryRequirements, mipmap, device) + err = cydriver._cuMipmappedArrayGetMemoryRequirements(&driverMemoryRequirements, mipmap, device) if err != cudaSuccess: _setLastError(err) return err @@ -2309,7 +2309,7 @@ cdef cudaError_t _cudaStreamGetAttribute(cudaStream_t hStream, cudaStreamAttrID err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamGetAttribute(hStream, attr, value_out) + err = cydriver._cuStreamGetAttribute(hStream, attr, value_out) if err != cudaSuccess: _setLastError(err) return err @@ -2322,7 +2322,7 @@ cdef cudaError_t _cudaStreamSetAttribute(cudaStream_t hStream, cudaStreamAttrID err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamSetAttribute(hStream, attr, value) + err = cydriver._cuStreamSetAttribute(hStream, attr, value) if err != cudaSuccess: _setLastError(err) return err @@ -2335,7 +2335,7 @@ cdef cudaError_t _cudaGraphKernelNodeGetAttribute(cudaGraphNode_t hNode, cudaKer err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphKernelNodeGetAttribute(hNode, attr, value_out) + err = cydriver._cuGraphKernelNodeGetAttribute(hNode, attr, value_out) if err != cudaSuccess: _setLastError(err) return err @@ -2348,7 +2348,7 @@ cdef cudaError_t _cudaGraphKernelNodeSetAttribute(cudaGraphNode_t hNode, cudaKer err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphKernelNodeSetAttribute(hNode, attr, value) + err = cydriver._cuGraphKernelNodeSetAttribute(hNode, attr, value) if err != cudaSuccess: _setLastError(err) return err @@ -2361,7 +2361,7 @@ cdef cudaError_t _cudaVDPAUGetDevice(int* device, VdpDevice vdpDevice, VdpGetPro err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuVDPAUGetDevice(device, vdpDevice, vdpGetProcAddress) + err = cydriver._cuVDPAUGetDevice(device, vdpDevice, vdpGetProcAddress) if err != cudaSuccess: _setLastError(err) return err @@ -2374,7 +2374,7 @@ cdef cudaError_t _cudaGraphicsVDPAURegisterVideoSurface(cudaGraphicsResource** r err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsVDPAURegisterVideoSurface(resource, vdpSurface, flags) + err = cydriver._cuGraphicsVDPAURegisterVideoSurface(resource, vdpSurface, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2387,7 +2387,7 @@ cdef cudaError_t _cudaGraphicsVDPAURegisterOutputSurface(cudaGraphicsResource** err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsVDPAURegisterOutputSurface(resource, vdpSurface, flags) + err = cydriver._cuGraphicsVDPAURegisterOutputSurface(resource, vdpSurface, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2400,7 +2400,7 @@ cdef cudaError_t _cudaGLGetDevices(unsigned int* pCudaDeviceCount, int* pCudaDev err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGLGetDevices_v2(pCudaDeviceCount, pCudaDevices, cudaDeviceCount, deviceList) + err = cydriver._cuGLGetDevices_v2(pCudaDeviceCount, pCudaDevices, cudaDeviceCount, deviceList) if err != cudaSuccess: _setLastError(err) return err @@ -2413,7 
+2413,7 @@ cdef cudaError_t _cudaGraphicsGLRegisterImage(cudaGraphicsResource** resource, G err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsGLRegisterImage(resource, image, target, flags) + err = cydriver._cuGraphicsGLRegisterImage(resource, image, target, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2426,7 +2426,7 @@ cdef cudaError_t _cudaGraphicsGLRegisterBuffer(cudaGraphicsResource** resource, err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsGLRegisterBuffer(resource, buffer, flags) + err = cydriver._cuGraphicsGLRegisterBuffer(resource, buffer, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2439,7 +2439,7 @@ cdef cudaError_t _cudaDeviceSynchronize() except ?cudaErrorCallRequiresNewerDriv err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxSynchronize() + err = cydriver._cuCtxSynchronize() if err != cudaSuccess: _setLastError(err) return err @@ -2452,7 +2452,7 @@ cdef cudaError_t _cudaDeviceSetLimit(cudaLimit limit, size_t value) except ?cuda err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxSetLimit(limit, value) + err = cydriver._cuCtxSetLimit(limit, value) if err != cudaSuccess: _setLastError(err) return err @@ -2465,7 +2465,7 @@ cdef cudaError_t _cudaDeviceGetLimit(size_t* pValue, cudaLimit limit) except ?cu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxGetLimit(pValue, limit) + err = cydriver._cuCtxGetLimit(pValue, limit) if err != cudaSuccess: _setLastError(err) return err @@ -2478,7 +2478,7 @@ cdef cudaError_t _cudaDeviceGetCacheConfig(cudaFuncCache* pCacheConfig) except ? err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxGetCacheConfig(pCacheConfig) + err = cydriver._cuCtxGetCacheConfig(pCacheConfig) if err != cudaSuccess: _setLastError(err) return err @@ -2491,7 +2491,7 @@ cdef cudaError_t _cudaDeviceGetStreamPriorityRange(int* leastPriority, int* grea err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxGetStreamPriorityRange(leastPriority, greatestPriority) + err = cydriver._cuCtxGetStreamPriorityRange(leastPriority, greatestPriority) if err != cudaSuccess: _setLastError(err) return err @@ -2504,7 +2504,7 @@ cdef cudaError_t _cudaDeviceSetCacheConfig(cudaFuncCache cacheConfig) except ?cu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxSetCacheConfig(cacheConfig) + err = cydriver._cuCtxSetCacheConfig(cacheConfig) if err != cudaSuccess: _setLastError(err) return err @@ -2517,7 +2517,7 @@ cdef cudaError_t _cudaDeviceGetSharedMemConfig(cudaSharedMemConfig* pConfig) exc err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxGetSharedMemConfig(pConfig) + err = cydriver._cuCtxGetSharedMemConfig(pConfig) if err != cudaSuccess: _setLastError(err) return err @@ -2530,7 +2530,7 @@ cdef cudaError_t _cudaIpcGetEventHandle(cudaIpcEventHandle_t* handle, cudaEvent_ err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuIpcGetEventHandle(handle, event) + err = cydriver._cuIpcGetEventHandle(handle, event) if err != cudaSuccess: _setLastError(err) return err @@ -2541,11 +2541,11 @@ cdef cudaError_t _cudaIpcGetEventHandle(cudaIpcEventHandle_t* handle, cudaEvent_ cdef cudaError_t _cudaIpcOpenEventHandle(cudaEvent_t* event, cudaIpcEventHandle_t 
handle) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess err = m_global.lazyInitContextState() - cdef ccuda.CUipcEventHandle _driver_handle + cdef cydriver.CUipcEventHandle _driver_handle memcpy(&_driver_handle, &handle, sizeof(_driver_handle)) if err != cudaSuccess: return err - err = ccuda._cuIpcOpenEventHandle(event, _driver_handle) + err = cydriver._cuIpcOpenEventHandle(event, _driver_handle) if err != cudaSuccess: _setLastError(err) return err @@ -2558,7 +2558,7 @@ cdef cudaError_t _cudaIpcGetMemHandle(cudaIpcMemHandle_t* handle, void* devPtr) err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuIpcGetMemHandle(handle, devPtr) + err = cydriver._cuIpcGetMemHandle(handle, devPtr) if err != cudaSuccess: _setLastError(err) return err @@ -2569,11 +2569,11 @@ cdef cudaError_t _cudaIpcGetMemHandle(cudaIpcMemHandle_t* handle, void* devPtr) cdef cudaError_t _cudaIpcOpenMemHandle(void** devPtr, cudaIpcMemHandle_t handle, unsigned int flags) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess err = m_global.lazyInitContextState() - cdef ccuda.CUipcMemHandle _driver_handle + cdef cydriver.CUipcMemHandle _driver_handle memcpy(&_driver_handle, &handle, sizeof(_driver_handle)) if err != cudaSuccess: return err - err = ccuda._cuIpcOpenMemHandle_v2(devPtr, _driver_handle, flags) + err = cydriver._cuIpcOpenMemHandle_v2(devPtr, _driver_handle, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2586,7 +2586,7 @@ cdef cudaError_t _cudaIpcCloseMemHandle(void* devPtr) except ?cudaErrorCallRequi err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuIpcCloseMemHandle(devPtr) + err = cydriver._cuIpcCloseMemHandle(devPtr) if err != cudaSuccess: _setLastError(err) return err @@ -2599,7 +2599,7 @@ cdef cudaError_t _cudaDeviceFlushGPUDirectRDMAWrites(cudaFlushGPUDirectRDMAWrite err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuFlushGPUDirectRDMAWrites(target, scope) + err = cydriver._cuFlushGPUDirectRDMAWrites(target, scope) if err != cudaSuccess: _setLastError(err) return err @@ -2612,7 +2612,7 @@ cdef cudaError_t _cudaDeviceGetDefaultMemPool(cudaMemPool_t* memPool, int device err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDeviceGetDefaultMemPool(memPool, device) + err = cydriver._cuDeviceGetDefaultMemPool(memPool, device) if err != cudaSuccess: _setLastError(err) return err @@ -2625,7 +2625,7 @@ cdef cudaError_t _cudaDeviceSetMemPool(int device, cudaMemPool_t memPool) except err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDeviceSetMemPool(device, memPool) + err = cydriver._cuDeviceSetMemPool(device, memPool) if err != cudaSuccess: _setLastError(err) return err @@ -2638,7 +2638,7 @@ cdef cudaError_t _cudaDeviceGetMemPool(cudaMemPool_t* memPool, int device) excep err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDeviceGetMemPool(memPool, device) + err = cydriver._cuDeviceGetMemPool(memPool, device) if err != cudaSuccess: _setLastError(err) return err @@ -2651,7 +2651,7 @@ cdef cudaError_t _cudaDeviceGetNvSciSyncAttributes(void* nvSciSyncAttrList, int err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, device, flags) + err = cydriver._cuDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, device, flags) if err != cudaSuccess: 
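Nearly every hunk in this stretch (IPC handles, GPUDirect RDMA writes, memory pools, NvSciSync attributes) is the same four-step wrapper: lazily initialize context state, forward to the `cydriver` entry point, record a failing status so `cudaGetLastError`/`cudaPeekAtLastError` can report it, and return the status unchanged. A hypothetical, self-contained Python distillation of that pattern; `lazy_init` and `set_last_error` are stand-ins for the module's internal `m_global.lazyInitContextState()` and `_setLastError()`, not real cuda-python symbols:

    CUDA_SUCCESS = 0
    _last_error = CUDA_SUCCESS

    def lazy_init():
        # Stand-in for m_global.lazyInitContextState().
        return CUDA_SUCCESS

    def set_last_error(err):
        # Stand-in for _setLastError(); feeds cudaGetLastError/cudaPeekAtLastError.
        global _last_error
        _last_error = err

    def runtime_shim(driver_call):
        # Wrap a driver entry point in the pattern the hunks above repeat.
        def wrapper(*args):
            err = lazy_init()
            if err != CUDA_SUCCESS:
                return err
            err = driver_call(*args)
            if err != CUDA_SUCCESS:
                set_last_error(err)
            return err
        return wrapper

    # Usage (hypothetical callable standing in for a cydriver entry point):
    ipc_get_mem_handle = runtime_shim(lambda *args: CUDA_SUCCESS)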
_setLastError(err) return err @@ -2664,7 +2664,7 @@ cdef cudaError_t _cudaStreamCreateWithFlags(cudaStream_t* pStream, unsigned int err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamCreate(pStream, flags) + err = cydriver._cuStreamCreate(pStream, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2677,7 +2677,7 @@ cdef cudaError_t _cudaStreamCreateWithPriority(cudaStream_t* pStream, unsigned i err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamCreateWithPriority(pStream, flags, priority) + err = cydriver._cuStreamCreateWithPriority(pStream, flags, priority) if err != cudaSuccess: _setLastError(err) return err @@ -2690,7 +2690,7 @@ cdef cudaError_t _cudaStreamGetPriority(cudaStream_t hStream, int* priority) exc err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamGetPriority(hStream, priority) + err = cydriver._cuStreamGetPriority(hStream, priority) if err != cudaSuccess: _setLastError(err) return err @@ -2703,7 +2703,7 @@ cdef cudaError_t _cudaStreamGetFlags(cudaStream_t hStream, unsigned int* flags) err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamGetFlags(hStream, flags) + err = cydriver._cuStreamGetFlags(hStream, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2716,7 +2716,7 @@ cdef cudaError_t _cudaCtxResetPersistingL2Cache() except ?cudaErrorCallRequiresN err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxResetPersistingL2Cache() + err = cydriver._cuCtxResetPersistingL2Cache() if err != cudaSuccess: _setLastError(err) return err @@ -2729,7 +2729,7 @@ cdef cudaError_t _cudaStreamCopyAttributes(cudaStream_t dst, cudaStream_t src) e err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamCopyAttributes(dst, src) + err = cydriver._cuStreamCopyAttributes(dst, src) if err != cudaSuccess: _setLastError(err) return err @@ -2742,7 +2742,7 @@ cdef cudaError_t _cudaStreamDestroy(cudaStream_t stream) except ?cudaErrorCallRe err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamDestroy_v2(stream) + err = cydriver._cuStreamDestroy_v2(stream) if err != cudaSuccess: _setLastError(err) return err @@ -2755,7 +2755,7 @@ cdef cudaError_t _cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, un err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamWaitEvent(stream, event, flags) + err = cydriver._cuStreamWaitEvent(stream, event, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2768,7 +2768,7 @@ cdef cudaError_t _cudaStreamSynchronize(cudaStream_t stream) except ?cudaErrorCa err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamSynchronize(stream) + err = cydriver._cuStreamSynchronize(stream) if err != cudaSuccess: _setLastError(err) return err @@ -2781,7 +2781,7 @@ cdef cudaError_t _cudaStreamQuery(cudaStream_t stream) except ?cudaErrorCallRequ err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamQuery(stream) + err = cydriver._cuStreamQuery(stream) if err != cudaSuccess: _setLastError(err) return err @@ -2794,7 +2794,7 @@ cdef cudaError_t _cudaStreamAttachMemAsync(cudaStream_t stream, void* devPtr, si err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamAttachMemAsync(stream, devPtr, length, flags) 
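The stream hunks above are strictly one-to-one: `cudaStreamCreateWithFlags` forwards to `cuStreamCreate`, `cudaStreamSynchronize` to `cuStreamSynchronize`, `cudaStreamDestroy` to `cuStreamDestroy_v2`, and so on, so a runtime stream is a plain driver stream underneath. A quick lifecycle sketch through the public binding (again assuming cuda-python 12.6.1+; `cudaStreamNonBlocking` is assumed to be exposed as a module constant):

    from cuda.bindings import runtime

    err, stream = runtime.cudaStreamCreateWithFlags(runtime.cudaStreamNonBlocking)
    assert err == runtime.cudaError_t.cudaSuccess

    err, priority = runtime.cudaStreamGetPriority(stream)  # via cuStreamGetPriority
    err, = runtime.cudaStreamSynchronize(stream)           # via cuStreamSynchronize
    err, = runtime.cudaStreamDestroy(stream)               # via cuStreamDestroy_v2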
+ err = cydriver._cuStreamAttachMemAsync(stream, devPtr, length, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2807,7 +2807,7 @@ cdef cudaError_t _cudaStreamBeginCapture(cudaStream_t stream, cudaStreamCaptureM err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamBeginCapture_v2(stream, mode) + err = cydriver._cuStreamBeginCapture_v2(stream, mode) if err != cudaSuccess: _setLastError(err) return err @@ -2820,7 +2820,7 @@ cdef cudaError_t _cudaStreamBeginCaptureToGraph(cudaStream_t stream, cudaGraph_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamBeginCaptureToGraph(stream, graph, dependencies, dependencyData, numDependencies, mode) + err = cydriver._cuStreamBeginCaptureToGraph(stream, graph, dependencies, dependencyData, numDependencies, mode) if err != cudaSuccess: _setLastError(err) return err @@ -2833,7 +2833,7 @@ cdef cudaError_t _cudaThreadExchangeStreamCaptureMode(cudaStreamCaptureMode* mod err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuThreadExchangeStreamCaptureMode(mode) + err = cydriver._cuThreadExchangeStreamCaptureMode(mode) if err != cudaSuccess: _setLastError(err) return err @@ -2846,7 +2846,7 @@ cdef cudaError_t _cudaStreamEndCapture(cudaStream_t stream, cudaGraph_t* pGraph) err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamEndCapture(stream, pGraph) + err = cydriver._cuStreamEndCapture(stream, pGraph) if err != cudaSuccess: _setLastError(err) return err @@ -2859,7 +2859,7 @@ cdef cudaError_t _cudaStreamIsCapturing(cudaStream_t stream, cudaStreamCaptureSt err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamIsCapturing(stream, pCaptureStatus) + err = cydriver._cuStreamIsCapturing(stream, pCaptureStatus) if err != cudaSuccess: _setLastError(err) return err @@ -2872,7 +2872,7 @@ cdef cudaError_t _cudaStreamUpdateCaptureDependencies(cudaStream_t stream, cudaG err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamUpdateCaptureDependencies(stream, dependencies, numDependencies, flags) + err = cydriver._cuStreamUpdateCaptureDependencies(stream, dependencies, numDependencies, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2885,7 +2885,7 @@ cdef cudaError_t _cudaEventCreateWithFlags(cudaEvent_t* event, unsigned int flag err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEventCreate(event, flags) + err = cydriver._cuEventCreate(event, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2898,7 +2898,7 @@ cdef cudaError_t _cudaEventRecord(cudaEvent_t event, cudaStream_t stream) except err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEventRecord(event, stream) + err = cydriver._cuEventRecord(event, stream) if err != cudaSuccess: _setLastError(err) return err @@ -2911,7 +2911,7 @@ cdef cudaError_t _cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t strea err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEventRecordWithFlags(event, stream, flags) + err = cydriver._cuEventRecordWithFlags(event, stream, flags) if err != cudaSuccess: _setLastError(err) return err @@ -2924,7 +2924,7 @@ cdef cudaError_t _cudaEventSynchronize(cudaEvent_t event) except ?cudaErrorCallR err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = 
ccuda._cuEventSynchronize(event) + err = cydriver._cuEventSynchronize(event) if err != cudaSuccess: _setLastError(err) return err @@ -2937,7 +2937,7 @@ cdef cudaError_t _cudaEventDestroy(cudaEvent_t event) except ?cudaErrorCallRequi err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEventDestroy_v2(event) + err = cydriver._cuEventDestroy_v2(event) if err != cudaSuccess: _setLastError(err) return err @@ -2950,7 +2950,7 @@ cdef cudaError_t _cudaEventElapsedTime(float* ms, cudaEvent_t start, cudaEvent_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEventElapsedTime(ms, start, end) + err = cydriver._cuEventElapsedTime(ms, start, end) if err != cudaSuccess: _setLastError(err) return err @@ -2963,7 +2963,7 @@ cdef cudaError_t _cudaDestroyExternalMemory(cudaExternalMemory_t extMem) except err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDestroyExternalMemory(extMem) + err = cydriver._cuDestroyExternalMemory(extMem) if err != cudaSuccess: _setLastError(err) return err @@ -2976,7 +2976,7 @@ cdef cudaError_t _cudaDestroyExternalSemaphore(cudaExternalSemaphore_t extSem) e err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDestroyExternalSemaphore(extSem) + err = cydriver._cuDestroyExternalSemaphore(extSem) if err != cudaSuccess: _setLastError(err) return err @@ -2989,7 +2989,7 @@ cdef cudaError_t _cudaFuncSetCacheConfig(const void* func, cudaFuncCache cacheCo err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuFuncSetCacheConfig(func, cacheConfig) + err = cydriver._cuFuncSetCacheConfig(func, cacheConfig) if err != cudaSuccess: _setLastError(err) return err @@ -3002,7 +3002,7 @@ cdef cudaError_t _cudaFuncSetSharedMemConfig(const void* func, cudaSharedMemConf err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuFuncSetSharedMemConfig(func, config) + err = cydriver._cuFuncSetSharedMemConfig(func, config) if err != cudaSuccess: _setLastError(err) return err @@ -3015,7 +3015,7 @@ cdef cudaError_t _cudaFuncSetAttribute(const void* func, cudaFuncAttribute attr, err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuFuncSetAttribute(func, attr, value) + err = cydriver._cuFuncSetAttribute(func, attr, value) if err != cudaSuccess: _setLastError(err) return err @@ -3041,7 +3041,7 @@ cdef cudaError_t _cudaOccupancyMaxActiveBlocksPerMultiprocessor(int* numBlocks, err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSMemSize) + err = cydriver._cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSMemSize) if err != cudaSuccess: _setLastError(err) return err @@ -3054,7 +3054,7 @@ cdef cudaError_t _cudaOccupancyAvailableDynamicSMemPerBlock(size_t* dynamicSmemS err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, func, numBlocks, blockSize) + err = cydriver._cuOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, func, numBlocks, blockSize) if err != cudaSuccess: _setLastError(err) return err @@ -3067,7 +3067,7 @@ cdef cudaError_t _cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int* nu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = 
ccuda._cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSMemSize, flags) + err = cydriver._cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSMemSize, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3080,7 +3080,7 @@ cdef cudaError_t _cudaMallocManaged(void** devPtr, size_t size, unsigned int fla err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemAllocManaged(devPtr, size, flags) + err = cydriver._cuMemAllocManaged(devPtr, size, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3093,7 +3093,7 @@ cdef cudaError_t _cudaMalloc(void** devPtr, size_t size) except ?cudaErrorCallRe err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemAlloc_v2(devPtr, size) + err = cydriver._cuMemAlloc_v2(devPtr, size) if err != cudaSuccess: _setLastError(err) return err @@ -3106,7 +3106,7 @@ cdef cudaError_t _cudaFree(void* devPtr) except ?cudaErrorCallRequiresNewerDrive err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemFree_v2(devPtr) + err = cydriver._cuMemFree_v2(devPtr) if err != cudaSuccess: _setLastError(err) return err @@ -3119,7 +3119,7 @@ cdef cudaError_t _cudaFreeHost(void* ptr) except ?cudaErrorCallRequiresNewerDriv err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemFreeHost(ptr) + err = cydriver._cuMemFreeHost(ptr) if err != cudaSuccess: _setLastError(err) return err @@ -3132,7 +3132,7 @@ cdef cudaError_t _cudaFreeArray(cudaArray_t array) except ?cudaErrorCallRequires err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuArrayDestroy(array) + err = cydriver._cuArrayDestroy(array) if err != cudaSuccess: _setLastError(err) return err @@ -3145,7 +3145,7 @@ cdef cudaError_t _cudaFreeMipmappedArray(cudaMipmappedArray_t mipmappedArray) ex err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMipmappedArrayDestroy(mipmappedArray) + err = cydriver._cuMipmappedArrayDestroy(mipmappedArray) if err != cudaSuccess: _setLastError(err) return err @@ -3158,7 +3158,7 @@ cdef cudaError_t _cudaHostAlloc(void** pHost, size_t size, unsigned int flags) e err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemHostAlloc(pHost, size, flags) + err = cydriver._cuMemHostAlloc(pHost, size, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3171,7 +3171,7 @@ cdef cudaError_t _cudaHostRegister(void* ptr, size_t size, unsigned int flags) e err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemHostRegister_v2(ptr, size, flags) + err = cydriver._cuMemHostRegister_v2(ptr, size, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3184,7 +3184,7 @@ cdef cudaError_t _cudaHostUnregister(void* ptr) except ?cudaErrorCallRequiresNew err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemHostUnregister(ptr) + err = cydriver._cuMemHostUnregister(ptr) if err != cudaSuccess: _setLastError(err) return err @@ -3197,7 +3197,7 @@ cdef cudaError_t _cudaHostGetDevicePointer(void** pDevice, void* pHost, unsigned err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemHostGetDevicePointer_v2(pDevice, pHost, flags) + err = cydriver._cuMemHostGetDevicePointer_v2(pDevice, pHost, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3210,7 
+3210,7 @@ cdef cudaError_t _cudaHostGetFlags(unsigned int* pFlags, void* pHost) except ?cu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemHostGetFlags(pFlags, pHost) + err = cydriver._cuMemHostGetFlags(pFlags, pHost) if err != cudaSuccess: _setLastError(err) return err @@ -3223,7 +3223,7 @@ cdef cudaError_t _cudaGetMipmappedArrayLevel(cudaArray_t* levelArray, cudaMipmap err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMipmappedArrayGetLevel(levelArray, mipmappedArray, level) + err = cydriver._cuMipmappedArrayGetLevel(levelArray, mipmappedArray, level) if err != cudaSuccess: _setLastError(err) return err @@ -3236,7 +3236,7 @@ cdef cudaError_t _cudaMemGetInfo(size_t* free, size_t* total) except ?cudaErrorC err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemGetInfo_v2(free, total) + err = cydriver._cuMemGetInfo_v2(free, total) if err != cudaSuccess: _setLastError(err) return err @@ -3249,7 +3249,7 @@ cdef cudaError_t _cudaArrayGetPlane(cudaArray_t* pPlaneArray, cudaArray_t hArray err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuArrayGetPlane(pPlaneArray, hArray, planeIdx) + err = cydriver._cuArrayGetPlane(pPlaneArray, hArray, planeIdx) if err != cudaSuccess: _setLastError(err) return err @@ -3262,7 +3262,7 @@ cdef cudaError_t _cudaMemset(void* devPtr, int value, size_t count) except ?cuda err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemsetD8_v2(devPtr, value, count) + err = cydriver._cuMemsetD8_v2(devPtr, value, count) if err != cudaSuccess: _setLastError(err) return err @@ -3275,7 +3275,7 @@ cdef cudaError_t _cudaMemset2D(void* devPtr, size_t pitch, int value, size_t wid err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemsetD2D8_v2(devPtr, pitch, value, width, height) + err = cydriver._cuMemsetD2D8_v2(devPtr, pitch, value, width, height) if err != cudaSuccess: _setLastError(err) return err @@ -3288,7 +3288,7 @@ cdef cudaError_t _cudaMemsetAsync(void* devPtr, int value, size_t count, cudaStr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemsetD8Async(devPtr, value, count, stream) + err = cydriver._cuMemsetD8Async(devPtr, value, count, stream) if err != cudaSuccess: _setLastError(err) return err @@ -3301,7 +3301,7 @@ cdef cudaError_t _cudaMemset2DAsync(void* devPtr, size_t pitch, int value, size_ err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemsetD2D8Async(devPtr, pitch, value, width, height, stream) + err = cydriver._cuMemsetD2D8Async(devPtr, pitch, value, width, height, stream) if err != cudaSuccess: _setLastError(err) return err @@ -3314,7 +3314,7 @@ cdef cudaError_t _cudaMemPrefetchAsync(const void* devPtr, size_t count, int dst err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPrefetchAsync(devPtr, count, dstDevice, stream) + err = cydriver._cuMemPrefetchAsync(devPtr, count, dstDevice, stream) if err != cudaSuccess: _setLastError(err) return err @@ -3324,13 +3324,13 @@ cdef cudaError_t _cudaMemPrefetchAsync(const void* devPtr, size_t count, int dst cdef cudaError_t _cudaMemPrefetchAsync_v2(const void* devPtr, size_t count, cudaMemLocation location, unsigned int flags, cudaStream_t stream) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUmemLocation _driver_location + 
cdef cydriver.CUmemLocation _driver_location err = m_global.lazyInitContextState() if err != cudaSuccess: return err - _driver_location.type = location.type + _driver_location.type = location.type _driver_location.id = location.id - err = ccuda._cuMemPrefetchAsync_v2(devPtr, count, _driver_location, flags, stream) + err = cydriver._cuMemPrefetchAsync_v2(devPtr, count, _driver_location, flags, stream) if err != cudaSuccess: _setLastError(err) return err @@ -3343,7 +3343,7 @@ cdef cudaError_t _cudaMallocAsync(void** devPtr, size_t size, cudaStream_t hStre err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemAllocAsync(devPtr, size, hStream) + err = cydriver._cuMemAllocAsync(devPtr, size, hStream) if err != cudaSuccess: _setLastError(err) return err @@ -3356,7 +3356,7 @@ cdef cudaError_t _cudaFreeAsync(void* devPtr, cudaStream_t hStream) except ?cuda err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemFreeAsync(devPtr, hStream) + err = cydriver._cuMemFreeAsync(devPtr, hStream) if err != cudaSuccess: _setLastError(err) return err @@ -3369,7 +3369,7 @@ cdef cudaError_t _cudaMemPoolTrimTo(cudaMemPool_t memPool, size_t minBytesToKeep err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolTrimTo(memPool, minBytesToKeep) + err = cydriver._cuMemPoolTrimTo(memPool, minBytesToKeep) if err != cudaSuccess: _setLastError(err) return err @@ -3382,7 +3382,7 @@ cdef cudaError_t _cudaMemPoolSetAttribute(cudaMemPool_t memPool, cudaMemPoolAttr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolSetAttribute(memPool, attr, value) + err = cydriver._cuMemPoolSetAttribute(memPool, attr, value) if err != cudaSuccess: _setLastError(err) return err @@ -3395,7 +3395,7 @@ cdef cudaError_t _cudaMemPoolGetAttribute(cudaMemPool_t memPool, cudaMemPoolAttr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolGetAttribute(memPool, attr, value) + err = cydriver._cuMemPoolGetAttribute(memPool, attr, value) if err != cudaSuccess: _setLastError(err) return err @@ -3408,7 +3408,7 @@ cdef cudaError_t _cudaMemPoolGetAccess(cudaMemAccessFlags* flags, cudaMemPool_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolGetAccess(flags, memPool, location) + err = cydriver._cuMemPoolGetAccess(flags, memPool, location) if err != cudaSuccess: _setLastError(err) return err @@ -3421,7 +3421,7 @@ cdef cudaError_t _cudaMemPoolCreate(cudaMemPool_t* memPool, const cudaMemPoolPro err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolCreate(memPool, poolProps) + err = cydriver._cuMemPoolCreate(memPool, poolProps) if err != cudaSuccess: _setLastError(err) return err @@ -3434,7 +3434,7 @@ cdef cudaError_t _cudaMemPoolDestroy(cudaMemPool_t memPool) except ?cudaErrorCal err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolDestroy(memPool) + err = cydriver._cuMemPoolDestroy(memPool) if err != cudaSuccess: _setLastError(err) return err @@ -3447,7 +3447,7 @@ cdef cudaError_t _cudaMallocFromPoolAsync(void** ptr, size_t size, cudaMemPool_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemAllocFromPoolAsync(ptr, size, memPool, stream) + err = cydriver._cuMemAllocFromPoolAsync(ptr, size, memPool, stream) if err != cudaSuccess: _setLastError(err) return err @@ -3460,7 +3460,7 @@ cdef 
cudaError_t _cudaMemPoolExportToShareableHandle(void* shareableHandle, cuda err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolExportToShareableHandle(shareableHandle, memPool, handleType, flags) + err = cydriver._cuMemPoolExportToShareableHandle(shareableHandle, memPool, handleType, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3473,7 +3473,7 @@ cdef cudaError_t _cudaMemPoolImportFromShareableHandle(cudaMemPool_t* memPool, v err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolImportFromShareableHandle(memPool, shareableHandle, handleType, flags) + err = cydriver._cuMemPoolImportFromShareableHandle(memPool, shareableHandle, handleType, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3486,7 +3486,7 @@ cdef cudaError_t _cudaMemPoolExportPointer(cudaMemPoolPtrExportData* exportData, err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolExportPointer(exportData, ptr) + err = cydriver._cuMemPoolExportPointer(exportData, ptr) if err != cudaSuccess: _setLastError(err) return err @@ -3499,7 +3499,7 @@ cdef cudaError_t _cudaMemPoolImportPointer(void** ptr, cudaMemPool_t memPool, cu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuMemPoolImportPointer(ptr, memPool, exportData) + err = cydriver._cuMemPoolImportPointer(ptr, memPool, exportData) if err != cudaSuccess: _setLastError(err) return err @@ -3512,7 +3512,7 @@ cdef cudaError_t _cudaGraphicsUnregisterResource(cudaGraphicsResource_t resource err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsUnregisterResource(resource) + err = cydriver._cuGraphicsUnregisterResource(resource) if err != cudaSuccess: _setLastError(err) return err @@ -3525,7 +3525,7 @@ cdef cudaError_t _cudaGraphicsResourceSetMapFlags(cudaGraphicsResource_t resourc err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsResourceSetMapFlags_v2(resource, flags) + err = cydriver._cuGraphicsResourceSetMapFlags_v2(resource, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3538,7 +3538,7 @@ cdef cudaError_t _cudaGraphicsMapResources(int count, cudaGraphicsResource_t* re err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsMapResources(count, resources, stream) + err = cydriver._cuGraphicsMapResources(count, resources, stream) if err != cudaSuccess: _setLastError(err) return err @@ -3551,7 +3551,7 @@ cdef cudaError_t _cudaGraphicsUnmapResources(int count, cudaGraphicsResource_t* err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsUnmapResources(count, resources, stream) + err = cydriver._cuGraphicsUnmapResources(count, resources, stream) if err != cudaSuccess: _setLastError(err) return err @@ -3564,7 +3564,7 @@ cdef cudaError_t _cudaGraphicsResourceGetMappedPointer(void** devPtr, size_t* si err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsResourceGetMappedPointer_v2(devPtr, size, resource) + err = cydriver._cuGraphicsResourceGetMappedPointer_v2(devPtr, size, resource) if err != cudaSuccess: _setLastError(err) return err @@ -3577,7 +3577,7 @@ cdef cudaError_t _cudaGraphicsSubResourceGetMappedArray(cudaArray_t* array, cuda err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsSubResourceGetMappedArray(array, 
resource, arrayIndex, mipLevel) + err = cydriver._cuGraphicsSubResourceGetMappedArray(array, resource, arrayIndex, mipLevel) if err != cudaSuccess: _setLastError(err) return err @@ -3590,7 +3590,7 @@ cdef cudaError_t _cudaGraphicsResourceGetMappedMipmappedArray(cudaMipmappedArray err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsResourceGetMappedMipmappedArray(mipmappedArray, resource) + err = cydriver._cuGraphicsResourceGetMappedMipmappedArray(mipmappedArray, resource) if err != cudaSuccess: _setLastError(err) return err @@ -3603,7 +3603,7 @@ cdef cudaError_t _cudaDestroyTextureObject(cudaTextureObject_t texObject) except err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuTexObjectDestroy(texObject) + err = cydriver._cuTexObjectDestroy(texObject) if err != cudaSuccess: _setLastError(err) return err @@ -3616,7 +3616,7 @@ cdef cudaError_t _cudaDestroySurfaceObject(cudaSurfaceObject_t surfObject) excep err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuSurfObjectDestroy(surfObject) + err = cydriver._cuSurfObjectDestroy(surfObject) if err != cudaSuccess: _setLastError(err) return err @@ -3629,7 +3629,7 @@ cdef cudaError_t _cudaGraphCreate(cudaGraph_t* pGraph, unsigned int flags) excep err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphCreate(pGraph, flags) + err = cydriver._cuGraphCreate(pGraph, flags) if err != cudaSuccess: _setLastError(err) return err @@ -3642,11 +3642,11 @@ cdef cudaError_t _cudaGraphAddKernelNode(cudaGraphNode_t* pGraphNode, cudaGraph_ err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_KERNEL_NODE_PARAMS driverNodeParams + cdef cydriver.CUDA_KERNEL_NODE_PARAMS driverNodeParams err = toDriverKernelNodeParams(pNodeParams, &driverNodeParams) if err != cudaSuccess: return err - err = ccuda._cuGraphAddKernelNode_v2(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams) + err = cydriver._cuGraphAddKernelNode_v2(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3659,11 +3659,11 @@ cdef cudaError_t _cudaGraphKernelNodeSetParams(cudaGraphNode_t node, const cudaK err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_KERNEL_NODE_PARAMS driverNodeParams + cdef cydriver.CUDA_KERNEL_NODE_PARAMS driverNodeParams err = toDriverKernelNodeParams(pNodeParams, &driverNodeParams) if err != cudaSuccess: return err - err = ccuda._cuGraphKernelNodeSetParams_v2(node, &driverNodeParams) + err = cydriver._cuGraphKernelNodeSetParams_v2(node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3676,7 +3676,7 @@ cdef cudaError_t _cudaGraphKernelNodeCopyAttributes(cudaGraphNode_t hSrc, cudaGr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphKernelNodeCopyAttributes(hSrc, hDst) + err = cydriver._cuGraphKernelNodeCopyAttributes(hSrc, hDst) if err != cudaSuccess: _setLastError(err) return err @@ -3689,7 +3689,7 @@ cdef cudaError_t _cudaGraphMemsetNodeGetParams(cudaGraphNode_t node, cudaMemsetP err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphMemsetNodeGetParams(node, pNodeParams) + err = cydriver._cuGraphMemsetNodeGetParams(node, pNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3702,7 +3702,7 @@ cdef cudaError_t 
_cudaGraphMemsetNodeSetParams(cudaGraphNode_t node, const cudaM err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphMemsetNodeSetParams(node, pNodeParams) + err = cydriver._cuGraphMemsetNodeSetParams(node, pNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3715,9 +3715,9 @@ cdef cudaError_t _cudaGraphAddHostNode(cudaGraphNode_t* pGraphNode, cudaGraph_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_HOST_NODE_PARAMS driverNodeParams + cdef cydriver.CUDA_HOST_NODE_PARAMS driverNodeParams toDriverHostNodeParams(pNodeParams, &driverNodeParams) - err = ccuda._cuGraphAddHostNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams) + err = cydriver._cuGraphAddHostNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3730,7 +3730,7 @@ cdef cudaError_t _cudaGraphHostNodeGetParams(cudaGraphNode_t node, cudaHostNodeP err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphHostNodeGetParams(node, pNodeParams) + err = cydriver._cuGraphHostNodeGetParams(node, pNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3743,9 +3743,9 @@ cdef cudaError_t _cudaGraphHostNodeSetParams(cudaGraphNode_t node, const cudaHos err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_HOST_NODE_PARAMS driverNodeParams + cdef cydriver.CUDA_HOST_NODE_PARAMS driverNodeParams toDriverHostNodeParams(pNodeParams, &driverNodeParams) - err = ccuda._cuGraphHostNodeSetParams(node, &driverNodeParams) + err = cydriver._cuGraphHostNodeSetParams(node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3758,7 +3758,7 @@ cdef cudaError_t _cudaGraphAddChildGraphNode(cudaGraphNode_t* pGraphNode, cudaGr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddChildGraphNode(pGraphNode, graph, pDependencies, numDependencies, childGraph) + err = cydriver._cuGraphAddChildGraphNode(pGraphNode, graph, pDependencies, numDependencies, childGraph) if err != cudaSuccess: _setLastError(err) return err @@ -3771,7 +3771,7 @@ cdef cudaError_t _cudaGraphChildGraphNodeGetGraph(cudaGraphNode_t node, cudaGrap err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphChildGraphNodeGetGraph(node, pGraph) + err = cydriver._cuGraphChildGraphNodeGetGraph(node, pGraph) if err != cudaSuccess: _setLastError(err) return err @@ -3784,7 +3784,7 @@ cdef cudaError_t _cudaGraphAddEmptyNode(cudaGraphNode_t* pGraphNode, cudaGraph_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddEmptyNode(pGraphNode, graph, pDependencies, numDependencies) + err = cydriver._cuGraphAddEmptyNode(pGraphNode, graph, pDependencies, numDependencies) if err != cudaSuccess: _setLastError(err) return err @@ -3797,7 +3797,7 @@ cdef cudaError_t _cudaGraphAddEventRecordNode(cudaGraphNode_t* pGraphNode, cudaG err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddEventRecordNode(pGraphNode, graph, pDependencies, numDependencies, event) + err = cydriver._cuGraphAddEventRecordNode(pGraphNode, graph, pDependencies, numDependencies, event) if err != cudaSuccess: _setLastError(err) return err @@ -3810,7 +3810,7 @@ cdef cudaError_t _cudaGraphEventRecordNodeGetEvent(cudaGraphNode_t node, cudaEve err = m_global.lazyInitContextState() if err != 
cudaSuccess: return err - err = ccuda._cuGraphEventRecordNodeGetEvent(node, event_out) + err = cydriver._cuGraphEventRecordNodeGetEvent(node, event_out) if err != cudaSuccess: _setLastError(err) return err @@ -3823,7 +3823,7 @@ cdef cudaError_t _cudaGraphEventRecordNodeSetEvent(cudaGraphNode_t node, cudaEve err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphEventRecordNodeSetEvent(node, event) + err = cydriver._cuGraphEventRecordNodeSetEvent(node, event) if err != cudaSuccess: _setLastError(err) return err @@ -3836,7 +3836,7 @@ cdef cudaError_t _cudaGraphAddEventWaitNode(cudaGraphNode_t* pGraphNode, cudaGra err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddEventWaitNode(pGraphNode, graph, pDependencies, numDependencies, event) + err = cydriver._cuGraphAddEventWaitNode(pGraphNode, graph, pDependencies, numDependencies, event) if err != cudaSuccess: _setLastError(err) return err @@ -3849,7 +3849,7 @@ cdef cudaError_t _cudaGraphEventWaitNodeGetEvent(cudaGraphNode_t node, cudaEvent err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphEventWaitNodeGetEvent(node, event_out) + err = cydriver._cuGraphEventWaitNodeGetEvent(node, event_out) if err != cudaSuccess: _setLastError(err) return err @@ -3862,7 +3862,7 @@ cdef cudaError_t _cudaGraphEventWaitNodeSetEvent(cudaGraphNode_t node, cudaEvent err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphEventWaitNodeSetEvent(node, event) + err = cydriver._cuGraphEventWaitNodeSetEvent(node, event) if err != cudaSuccess: _setLastError(err) return err @@ -3875,7 +3875,7 @@ cdef cudaError_t _cudaGraphAddExternalSemaphoresSignalNode(cudaGraphNode_t* pGra err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddExternalSemaphoresSignalNode(pGraphNode, graph, pDependencies, numDependencies, nodeParams) + err = cydriver._cuGraphAddExternalSemaphoresSignalNode(pGraphNode, graph, pDependencies, numDependencies, nodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3888,7 +3888,7 @@ cdef cudaError_t _cudaGraphExternalSemaphoresSignalNodeGetParams(cudaGraphNode_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExternalSemaphoresSignalNodeGetParams(hNode, params_out) + err = cydriver._cuGraphExternalSemaphoresSignalNodeGetParams(hNode, params_out) if err != cudaSuccess: _setLastError(err) return err @@ -3901,7 +3901,7 @@ cdef cudaError_t _cudaGraphExternalSemaphoresSignalNodeSetParams(cudaGraphNode_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExternalSemaphoresSignalNodeSetParams(hNode, nodeParams) + err = cydriver._cuGraphExternalSemaphoresSignalNodeSetParams(hNode, nodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3914,7 +3914,7 @@ cdef cudaError_t _cudaGraphAddExternalSemaphoresWaitNode(cudaGraphNode_t* pGraph err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddExternalSemaphoresWaitNode(pGraphNode, graph, pDependencies, numDependencies, nodeParams) + err = cydriver._cuGraphAddExternalSemaphoresWaitNode(pGraphNode, graph, pDependencies, numDependencies, nodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3927,7 +3927,7 @@ cdef cudaError_t _cudaGraphExternalSemaphoresWaitNodeGetParams(cudaGraphNode_t h err = m_global.lazyInitContextState() if err != cudaSuccess: return 
err - err = ccuda._cuGraphExternalSemaphoresWaitNodeGetParams(hNode, params_out) + err = cydriver._cuGraphExternalSemaphoresWaitNodeGetParams(hNode, params_out) if err != cudaSuccess: _setLastError(err) return err @@ -3940,7 +3940,7 @@ cdef cudaError_t _cudaGraphExternalSemaphoresWaitNodeSetParams(cudaGraphNode_t h err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExternalSemaphoresWaitNodeSetParams(hNode, nodeParams) + err = cydriver._cuGraphExternalSemaphoresWaitNodeSetParams(hNode, nodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -3953,7 +3953,7 @@ cdef cudaError_t _cudaGraphAddMemFreeNode(cudaGraphNode_t* pGraphNode, cudaGraph err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddMemFreeNode(pGraphNode, graph, pDependencies, numDependencies, dptr) + err = cydriver._cuGraphAddMemFreeNode(pGraphNode, graph, pDependencies, numDependencies, dptr) if err != cudaSuccess: _setLastError(err) return err @@ -3966,7 +3966,7 @@ cdef cudaError_t _cudaDeviceGraphMemTrim(int device) except ?cudaErrorCallRequir err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDeviceGraphMemTrim(device) + err = cydriver._cuDeviceGraphMemTrim(device) if err != cudaSuccess: _setLastError(err) return err @@ -3979,7 +3979,7 @@ cdef cudaError_t _cudaDeviceGetGraphMemAttribute(int device, cudaGraphMemAttribu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDeviceGetGraphMemAttribute(device, attr, value) + err = cydriver._cuDeviceGetGraphMemAttribute(device, attr, value) if err != cudaSuccess: _setLastError(err) return err @@ -3992,7 +3992,7 @@ cdef cudaError_t _cudaDeviceSetGraphMemAttribute(int device, cudaGraphMemAttribu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuDeviceSetGraphMemAttribute(device, attr, value) + err = cydriver._cuDeviceSetGraphMemAttribute(device, attr, value) if err != cudaSuccess: _setLastError(err) return err @@ -4005,7 +4005,7 @@ cdef cudaError_t _cudaGraphClone(cudaGraph_t* pGraphClone, cudaGraph_t originalG err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphClone(pGraphClone, originalGraph) + err = cydriver._cuGraphClone(pGraphClone, originalGraph) if err != cudaSuccess: _setLastError(err) return err @@ -4018,7 +4018,7 @@ cdef cudaError_t _cudaGraphNodeFindInClone(cudaGraphNode_t* pNode, cudaGraphNode err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeFindInClone(pNode, originalNode, clonedGraph) + err = cydriver._cuGraphNodeFindInClone(pNode, originalNode, clonedGraph) if err != cudaSuccess: _setLastError(err) return err @@ -4031,7 +4031,7 @@ cdef cudaError_t _cudaGraphNodeGetType(cudaGraphNode_t node, cudaGraphNodeType* err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeGetType(node, pType) + err = cydriver._cuGraphNodeGetType(node, pType) if err != cudaSuccess: _setLastError(err) return err @@ -4044,7 +4044,7 @@ cdef cudaError_t _cudaGraphGetNodes(cudaGraph_t graph, cudaGraphNode_t* nodes, s err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphGetNodes(graph, nodes, numNodes) + err = cydriver._cuGraphGetNodes(graph, nodes, numNodes) if err != cudaSuccess: _setLastError(err) return err @@ -4057,7 +4057,7 @@ cdef cudaError_t _cudaGraphGetRootNodes(cudaGraph_t graph, cudaGraphNode_t* pRoo err = 
m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphGetRootNodes(graph, pRootNodes, pNumRootNodes) + err = cydriver._cuGraphGetRootNodes(graph, pRootNodes, pNumRootNodes) if err != cudaSuccess: _setLastError(err) return err @@ -4070,7 +4070,7 @@ cdef cudaError_t _cudaGraphGetEdges(cudaGraph_t graph, cudaGraphNode_t* from_, c err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphGetEdges(graph, from_, to, numEdges) + err = cydriver._cuGraphGetEdges(graph, from_, to, numEdges) if err != cudaSuccess: _setLastError(err) return err @@ -4083,7 +4083,7 @@ cdef cudaError_t _cudaGraphNodeGetDependencies(cudaGraphNode_t node, cudaGraphNo err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeGetDependencies(node, pDependencies, pNumDependencies) + err = cydriver._cuGraphNodeGetDependencies(node, pDependencies, pNumDependencies) if err != cudaSuccess: _setLastError(err) return err @@ -4096,7 +4096,7 @@ cdef cudaError_t _cudaGraphNodeGetDependentNodes(cudaGraphNode_t node, cudaGraph err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeGetDependentNodes(node, pDependentNodes, pNumDependentNodes) + err = cydriver._cuGraphNodeGetDependentNodes(node, pDependentNodes, pNumDependentNodes) if err != cudaSuccess: _setLastError(err) return err @@ -4109,7 +4109,7 @@ cdef cudaError_t _cudaGraphAddDependencies(cudaGraph_t graph, const cudaGraphNod err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddDependencies(graph, from_, to, numDependencies) + err = cydriver._cuGraphAddDependencies(graph, from_, to, numDependencies) if err != cudaSuccess: _setLastError(err) return err @@ -4122,7 +4122,7 @@ cdef cudaError_t _cudaGraphRemoveDependencies(cudaGraph_t graph, const cudaGraph err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphRemoveDependencies(graph, from_, to, numDependencies) + err = cydriver._cuGraphRemoveDependencies(graph, from_, to, numDependencies) if err != cudaSuccess: _setLastError(err) return err @@ -4135,7 +4135,7 @@ cdef cudaError_t _cudaGraphDestroyNode(cudaGraphNode_t node) except ?cudaErrorCa err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphDestroyNode(node) + err = cydriver._cuGraphDestroyNode(node) if err != cudaSuccess: _setLastError(err) return err @@ -4154,7 +4154,7 @@ cdef cudaError_t _cudaGraphInstantiateWithFlags(cudaGraphExec_t* pGraphExec, cud err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphInstantiateWithFlags(pGraphExec, graph, flags) + err = cydriver._cuGraphInstantiateWithFlags(pGraphExec, graph, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4167,11 +4167,11 @@ cdef cudaError_t _cudaGraphExecKernelNodeSetParams(cudaGraphExec_t hGraphExec, c err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_KERNEL_NODE_PARAMS driverNodeParams + cdef cydriver.CUDA_KERNEL_NODE_PARAMS driverNodeParams err = toDriverKernelNodeParams(pNodeParams, &driverNodeParams) if err != cudaSuccess: return err - err = ccuda._cuGraphExecKernelNodeSetParams_v2(hGraphExec, node, &driverNodeParams) + err = cydriver._cuGraphExecKernelNodeSetParams_v2(hGraphExec, node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4184,9 +4184,9 @@ cdef cudaError_t _cudaGraphExecHostNodeSetParams(cudaGraphExec_t 
hGraphExec, cud err = m_global.lazyInitContextState() if err != cudaSuccess: return err - cdef ccuda.CUDA_HOST_NODE_PARAMS driverNodeParams + cdef cydriver.CUDA_HOST_NODE_PARAMS driverNodeParams toDriverHostNodeParams(pNodeParams, &driverNodeParams) - err = ccuda._cuGraphExecHostNodeSetParams(hGraphExec, node, &driverNodeParams) + err = cydriver._cuGraphExecHostNodeSetParams(hGraphExec, node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4199,7 +4199,7 @@ cdef cudaError_t _cudaGraphExecChildGraphNodeSetParams(cudaGraphExec_t hGraphExe err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecChildGraphNodeSetParams(hGraphExec, node, childGraph) + err = cydriver._cuGraphExecChildGraphNodeSetParams(hGraphExec, node, childGraph) if err != cudaSuccess: _setLastError(err) return err @@ -4212,7 +4212,7 @@ cdef cudaError_t _cudaGraphExecEventRecordNodeSetEvent(cudaGraphExec_t hGraphExe err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event) + err = cydriver._cuGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event) if err != cudaSuccess: _setLastError(err) return err @@ -4225,7 +4225,7 @@ cdef cudaError_t _cudaGraphExecEventWaitNodeSetEvent(cudaGraphExec_t hGraphExec, err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event) + err = cydriver._cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event) if err != cudaSuccess: _setLastError(err) return err @@ -4238,7 +4238,7 @@ cdef cudaError_t _cudaGraphExecExternalSemaphoresSignalNodeSetParams(cudaGraphEx err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, nodeParams) + err = cydriver._cuGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, nodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4251,7 +4251,7 @@ cdef cudaError_t _cudaGraphExecExternalSemaphoresWaitNodeSetParams(cudaGraphExec err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, nodeParams) + err = cydriver._cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, nodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4264,7 +4264,7 @@ cdef cudaError_t _cudaGraphNodeSetEnabled(cudaGraphExec_t hGraphExec, cudaGraphN err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeSetEnabled(hGraphExec, hNode, isEnabled) + err = cydriver._cuGraphNodeSetEnabled(hGraphExec, hNode, isEnabled) if err != cudaSuccess: _setLastError(err) return err @@ -4277,7 +4277,7 @@ cdef cudaError_t _cudaGraphNodeGetEnabled(cudaGraphExec_t hGraphExec, cudaGraphN err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeGetEnabled(hGraphExec, hNode, isEnabled) + err = cydriver._cuGraphNodeGetEnabled(hGraphExec, hNode, isEnabled) if err != cudaSuccess: _setLastError(err) return err @@ -4290,7 +4290,7 @@ cdef cudaError_t _cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hG err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecUpdate_v2(hGraphExec, hGraph, resultInfo) + err = cydriver._cuGraphExecUpdate_v2(hGraphExec, hGraph, resultInfo) if err != cudaSuccess: _setLastError(err) return 
err @@ -4303,7 +4303,7 @@ cdef cudaError_t _cudaGraphUpload(cudaGraphExec_t graphExec, cudaStream_t stream err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphUpload(graphExec, stream) + err = cydriver._cuGraphUpload(graphExec, stream) if err != cudaSuccess: _setLastError(err) return err @@ -4316,7 +4316,7 @@ cdef cudaError_t _cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphLaunch(graphExec, stream) + err = cydriver._cuGraphLaunch(graphExec, stream) if err != cudaSuccess: _setLastError(err) return err @@ -4329,7 +4329,7 @@ cdef cudaError_t _cudaGraphExecDestroy(cudaGraphExec_t graphExec) except ?cudaEr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecDestroy(graphExec) + err = cydriver._cuGraphExecDestroy(graphExec) if err != cudaSuccess: _setLastError(err) return err @@ -4342,7 +4342,7 @@ cdef cudaError_t _cudaGraphDestroy(cudaGraph_t graph) except ?cudaErrorCallRequi err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphDestroy(graph) + err = cydriver._cuGraphDestroy(graph) if err != cudaSuccess: _setLastError(err) return err @@ -4355,7 +4355,7 @@ cdef cudaError_t _cudaGraphDebugDotPrint(cudaGraph_t graph, const char* path, un err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphDebugDotPrint(graph, path, flags) + err = cydriver._cuGraphDebugDotPrint(graph, path, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4368,7 +4368,7 @@ cdef cudaError_t _cudaUserObjectCreate(cudaUserObject_t* object_out, void* ptr, err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuUserObjectCreate(object_out, ptr, destroy, initialRefcount, flags) + err = cydriver._cuUserObjectCreate(object_out, ptr, destroy, initialRefcount, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4381,7 +4381,7 @@ cdef cudaError_t _cudaUserObjectRetain(cudaUserObject_t object, unsigned int cou err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuUserObjectRetain(object, count) + err = cydriver._cuUserObjectRetain(object, count) if err != cudaSuccess: _setLastError(err) return err @@ -4394,7 +4394,7 @@ cdef cudaError_t _cudaUserObjectRelease(cudaUserObject_t object, unsigned int co err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuUserObjectRelease(object, count) + err = cydriver._cuUserObjectRelease(object, count) if err != cudaSuccess: _setLastError(err) return err @@ -4407,7 +4407,7 @@ cdef cudaError_t _cudaGraphRetainUserObject(cudaGraph_t graph, cudaUserObject_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphRetainUserObject(graph, object, count, flags) + err = cydriver._cuGraphRetainUserObject(graph, object, count, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4420,7 +4420,7 @@ cdef cudaError_t _cudaGraphReleaseUserObject(cudaGraph_t graph, cudaUserObject_t err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphReleaseUserObject(graph, object, count) + err = cydriver._cuGraphReleaseUserObject(graph, object, count) if err != cudaSuccess: _setLastError(err) return err @@ -4433,7 +4433,7 @@ cdef cudaError_t _cudaProfilerStart() except ?cudaErrorCallRequiresNewerDriver n err = m_global.lazyInitContextState() if err 
!= cudaSuccess: return err - err = ccuda._cuProfilerStart() + err = cydriver._cuProfilerStart() if err != cudaSuccess: _setLastError(err) return err @@ -4446,7 +4446,7 @@ cdef cudaError_t _cudaProfilerStop() except ?cudaErrorCallRequiresNewerDriver no err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuProfilerStop() + err = cydriver._cuProfilerStop() if err != cudaSuccess: _setLastError(err) return err @@ -4459,7 +4459,7 @@ cdef cudaError_t _cudaGraphicsEGLRegisterImage(cudaGraphicsResource_t* pCudaReso err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphicsEGLRegisterImage(pCudaResource, image, flags) + err = cydriver._cuGraphicsEGLRegisterImage(pCudaResource, image, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4472,7 +4472,7 @@ cdef cudaError_t _cudaEGLStreamConsumerConnect(cudaEglStreamConnection* conn, EG err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEGLStreamConsumerConnect(conn, eglStream) + err = cydriver._cuEGLStreamConsumerConnect(conn, eglStream) if err != cudaSuccess: _setLastError(err) return err @@ -4485,7 +4485,7 @@ cdef cudaError_t _cudaEGLStreamConsumerConnectWithFlags(cudaEglStreamConnection* err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEGLStreamConsumerConnectWithFlags(conn, eglStream, flags) + err = cydriver._cuEGLStreamConsumerConnectWithFlags(conn, eglStream, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4498,7 +4498,7 @@ cdef cudaError_t _cudaEGLStreamConsumerDisconnect(cudaEglStreamConnection* conn) err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEGLStreamConsumerDisconnect(conn) + err = cydriver._cuEGLStreamConsumerDisconnect(conn) if err != cudaSuccess: _setLastError(err) return err @@ -4511,7 +4511,7 @@ cdef cudaError_t _cudaEGLStreamConsumerAcquireFrame(cudaEglStreamConnection* con err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, timeout) + err = cydriver._cuEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, timeout) if err != cudaSuccess: _setLastError(err) return err @@ -4524,7 +4524,7 @@ cdef cudaError_t _cudaEGLStreamConsumerReleaseFrame(cudaEglStreamConnection* con err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream) + err = cydriver._cuEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream) if err != cudaSuccess: _setLastError(err) return err @@ -4537,7 +4537,7 @@ cdef cudaError_t _cudaEGLStreamProducerConnect(cudaEglStreamConnection* conn, EG err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEGLStreamProducerConnect(conn, eglStream, width, height) + err = cydriver._cuEGLStreamProducerConnect(conn, eglStream, width, height) if err != cudaSuccess: _setLastError(err) return err @@ -4550,7 +4550,7 @@ cdef cudaError_t _cudaEGLStreamProducerDisconnect(cudaEglStreamConnection* conn) err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuEGLStreamProducerDisconnect(conn) + err = cydriver._cuEGLStreamProducerDisconnect(conn) if err != cudaSuccess: _setLastError(err) return err @@ -4563,7 +4563,7 @@ cdef cudaError_t _cudaEventCreateFromEGLSync(cudaEvent_t* phEvent, EGLSyncKHR eg err = m_global.lazyInitContextState() if err != cudaSuccess: return err 
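# [Editorial sketch] Every hunk in this file is the same mechanical rename of the
# private driver-binding module (ccuda -> cydriver) applied to one recurring shim
# pattern: lazily initialize driver/context state, delegate to the matching driver
# entry point, latch any failure for cudaGetLastError, and return the status.
# Assembled from the _cudaProfilerStart hunk above, the post-patch shape of one
# wrapper is:
cdef cudaError_t _cudaProfilerStart() except ?cudaErrorCallRequiresNewerDriver nogil:
    cdef cudaError_t err = cudaSuccess
    err = m_global.lazyInitContextState()   # ensure the primary context is live
    if err != cudaSuccess:
        return err
    err = cydriver._cuProfilerStart()       # renamed binding; was ccuda._cuProfilerStart
    if err != cudaSuccess:
        _setLastError(err)                  # preserve cudaGetLastError semantics
    return err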
- err = ccuda._cuEventCreateFromEGLSync(phEvent, eglSync, flags) + err = cydriver._cuEventCreateFromEGLSync(phEvent, eglSync, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4599,7 +4599,7 @@ cdef cudaError_t _cudaInitDevice(int deviceOrdinal, unsigned int deviceFlags, un _setLastError(cudaErrorInvalidValue) return cudaErrorInvalidValue - err = ccuda._cuDevicePrimaryCtxSetFlags_v2(device[0].driverDevice, deviceFlags) + err = cydriver._cuDevicePrimaryCtxSetFlags_v2(device[0].driverDevice, deviceFlags) if err != cudaSuccess: _setLastError(err) return err @@ -4613,7 +4613,7 @@ cdef cudaError_t _cudaStreamGetId(cudaStream_t hStream, unsigned long long* stre err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamGetId(hStream, streamId) + err = cydriver._cuStreamGetId(hStream, streamId) if err != cudaSuccess: _setLastError(err) return err @@ -4626,7 +4626,7 @@ cdef cudaError_t _cudaGraphInstantiateWithParams(cudaGraphExec_t* pGraphExec, cu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphInstantiateWithParams(pGraphExec, graph, instantiateParams) + err = cydriver._cuGraphInstantiateWithParams(pGraphExec, graph, instantiateParams) if err != cudaSuccess: _setLastError(err) return err @@ -4639,7 +4639,7 @@ cdef cudaError_t _cudaGraphExecGetFlags(cudaGraphExec_t graphExec, unsigned long err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphExecGetFlags(graphExec, flags) + err = cydriver._cuGraphExecGetFlags(graphExec, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4670,12 +4670,12 @@ cdef cudaError_t _cudaGraphAddNode(cudaGraphNode_t* pGraphNode, cudaGraph_t grap if err != cudaSuccess: _setLastError(err) return err - cdef ccuda.CUgraphNodeParams driverNodeParams + cdef cydriver.CUgraphNodeParams driverNodeParams err = toDriverGraphNodeParams(nodeParams, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuGraphAddNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams) + err = cydriver._cuGraphAddNode(pGraphNode, graph, pDependencies, numDependencies, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4694,12 +4694,12 @@ cdef cudaError_t _cudaGraphNodeSetParams(cudaGraphNode_t node, cudaGraphNodePara if err != cudaSuccess: _setLastError(err) return err - cdef ccuda.CUgraphNodeParams driverNodeParams + cdef cydriver.CUgraphNodeParams driverNodeParams err = toDriverGraphNodeParams(nodeParams, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuGraphNodeSetParams(node, &driverNodeParams) + err = cydriver._cuGraphNodeSetParams(node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4718,12 +4718,12 @@ cdef cudaError_t _cudaGraphExecNodeSetParams(cudaGraphExec_t graphExec, cudaGrap if err != cudaSuccess: _setLastError(err) return err - cdef ccuda.CUgraphNodeParams driverNodeParams + cdef cydriver.CUgraphNodeParams driverNodeParams err = toDriverGraphNodeParams(nodeParams, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuGraphExecNodeSetParams(graphExec, node, &driverNodeParams) + err = cydriver._cuGraphExecNodeSetParams(graphExec, node, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4735,15 +4735,15 @@ cdef cudaError_t _cudaGraphExecNodeSetParams(cudaGraphExec_t graphExec, cudaGrap cdef cudaError_t 
_cudaGraphConditionalHandleCreate(cudaGraphConditionalHandle* pHandle_out, cudaGraph_t graph, unsigned int defaultLaunchValue, unsigned int flags) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUcontext context + cdef cydriver.CUcontext context err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&context) + err = cydriver._cuCtxGetCurrent(&context) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuGraphConditionalHandleCreate(pHandle_out, graph, context, defaultLaunchValue, flags) + err = cydriver._cuGraphConditionalHandleCreate(pHandle_out, graph, context, defaultLaunchValue, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4769,7 +4769,7 @@ cdef cudaError_t _cudaStreamUpdateCaptureDependencies_v2(cudaStream_t stream, cu err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuStreamUpdateCaptureDependencies_v2(stream, dependencies, dependencyData, numDependencies, flags) + err = cydriver._cuStreamUpdateCaptureDependencies_v2(stream, dependencies, dependencyData, numDependencies, flags) if err != cudaSuccess: _setLastError(err) return err @@ -4782,7 +4782,7 @@ cdef cudaError_t _cudaGraphGetEdges_v2(cudaGraph_t graph, cudaGraphNode_t* from_ err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphGetEdges_v2(graph, from_, to, edgeData, numEdges) + err = cydriver._cuGraphGetEdges_v2(graph, from_, to, edgeData, numEdges) if err != cudaSuccess: _setLastError(err) return err @@ -4795,7 +4795,7 @@ cdef cudaError_t _cudaGraphNodeGetDependencies_v2(cudaGraphNode_t node, cudaGrap err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeGetDependencies_v2(node, pDependencies, edgeData, pNumDependencies) + err = cydriver._cuGraphNodeGetDependencies_v2(node, pDependencies, edgeData, pNumDependencies) if err != cudaSuccess: _setLastError(err) return err @@ -4808,7 +4808,7 @@ cdef cudaError_t _cudaGraphNodeGetDependentNodes_v2(cudaGraphNode_t node, cudaGr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphNodeGetDependentNodes_v2(node, pDependentNodes, edgeData, pNumDependentNodes) + err = cydriver._cuGraphNodeGetDependentNodes_v2(node, pDependentNodes, edgeData, pNumDependentNodes) if err != cudaSuccess: _setLastError(err) return err @@ -4821,7 +4821,7 @@ cdef cudaError_t _cudaGraphAddDependencies_v2(cudaGraph_t graph, const cudaGraph err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphAddDependencies_v2(graph, from_, to, edgeData, numDependencies) + err = cydriver._cuGraphAddDependencies_v2(graph, from_, to, edgeData, numDependencies) if err != cudaSuccess: _setLastError(err) return err @@ -4834,7 +4834,7 @@ cdef cudaError_t _cudaGraphRemoveDependencies_v2(cudaGraph_t graph, const cudaGr err = m_global.lazyInitContextState() if err != cudaSuccess: return err - err = ccuda._cuGraphRemoveDependencies_v2(graph, from_, to, edgeData, numDependencies) + err = cydriver._cuGraphRemoveDependencies_v2(graph, from_, to, edgeData, numDependencies) if err != cudaSuccess: _setLastError(err) return err @@ -4851,12 +4851,12 @@ cdef cudaError_t _cudaGraphAddNode_v2(cudaGraphNode_t* pGraphNode, cudaGraph_t g if err != cudaSuccess: _setLastError(err) return err - cdef ccuda.CUgraphNodeParams driverNodeParams + cdef cydriver.CUgraphNodeParams driverNodeParams err = 
toDriverGraphNodeParams(nodeParams, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err - err = ccuda._cuGraphAddNode_v2(pGraphNode, graph, pDependencies, dependencyData, numDependencies, &driverNodeParams) + err = cydriver._cuGraphAddNode_v2(pGraphNode, graph, pDependencies, dependencyData, numDependencies, &driverNodeParams) if err != cudaSuccess: _setLastError(err) return err @@ -4867,7 +4867,7 @@ cdef cudaError_t _cudaGraphAddNode_v2(cudaGraphNode_t* pGraphNode, cudaGraph_t g {{if True}} {{if 'Windows' != platform.system()}} -cimport cuda._lib.dlfcn as dlfcn +cimport cuda.bindings._lib.dlfcn as dlfcn {{endif}} cdef cudaError_t _getLocalRuntimeVersion(int* runtimeVersion) except ?cudaErrorCallRequiresNewerDriver nogil: @@ -4922,7 +4922,7 @@ cdef cudaError_t _cudaDeviceUnregisterAsyncNotification(int device, cudaAsyncCal cdef cudaError_t _cudaGetDriverEntryPointByVersion(const char* symbol, void** funcPtr, unsigned int cudaVersion, unsigned long long flags, cudaDriverEntryPointQueryResult* driverStatus) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - err = ccuda._cuGetProcAddress_v2(symbol, funcPtr, cudaVersion, flags, driverStatus) + err = cydriver._cuGetProcAddress_v2(symbol, funcPtr, cudaVersion, flags, driverStatus) if err != cudaSuccess: _setLastError(err) return err diff --git a/cuda/_lib/ccudart/utils.pxd.in b/cuda/cuda/bindings/_lib/cyruntime/utils.pxd.in similarity index 70% rename from cuda/_lib/ccudart/utils.pxd.in rename to cuda/cuda/bindings/_lib/cyruntime/utils.pxd.in index 7bc552bb..7b219cbf 100644 --- a/cuda/_lib/ccudart/utils.pxd.in +++ b/cuda/cuda/bindings/_lib/cyruntime/utils.pxd.in @@ -5,12 +5,12 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. 
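# [Editorial note] The import hunks below are the new module layout in miniature:
# public cimports move under cuda.bindings, and the private driver bindings move
# under cuda.bindings._bindings, keeping the short cydriver alias so call sites
# change only their prefix. Post-patch, the header of this file reads:
from cuda.bindings.cyruntime cimport *                # was: from cuda.ccudart cimport *
cimport cuda.bindings._bindings.cydriver as cydriver  # was: cimport cuda._cuda.ccuda as ccuda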
-from cuda.ccudart cimport * +from cuda.bindings.cyruntime cimport * from libc.stdlib cimport malloc, free, calloc from libc.string cimport memset, memcpy, strncmp from libcpp cimport bool from libcpp.map cimport map -cimport cuda._cuda.ccuda as ccuda +cimport cuda.bindings._bindings.cydriver as cydriver ctypedef struct cudaAsyncCallbackData_st: cudaAsyncCallback callback @@ -19,8 +19,8 @@ ctypedef struct cudaAsyncCallbackData_st: ctypedef cudaAsyncCallbackData_st cudaAsyncCallbackData cdef struct cudaPythonDevice: - ccuda.CUdevice driverDevice - ccuda.CUcontext primaryContext + cydriver.CUdevice driverDevice + cydriver.CUcontext primaryContext bool primaryContextRetained int deviceOrdinal cudaDeviceProp deviceProperties @@ -36,48 +36,48 @@ cdef class cudaPythonGlobal: cdef cudaError_t lazyInitDriver(self) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t lazyInitContextState(self) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaPythonDevice* getDevice(self, int deviceOrdinal) noexcept nogil - cdef cudaPythonDevice* getDeviceFromDriver(self, ccuda.CUdevice driverDevice) noexcept nogil - cdef cudaPythonDevice* getDeviceFromPrimaryCtx(self, ccuda.CUcontext context) noexcept nogil + cdef cudaPythonDevice* getDeviceFromDriver(self, cydriver.CUdevice driverDevice) noexcept nogil + cdef cudaPythonDevice* getDeviceFromPrimaryCtx(self, cydriver.CUcontext context) noexcept nogil cdef cudaError_t initPrimaryContext(cudaPythonDevice *device) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t resetPrimaryContext(cudaPythonDevice* device) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaPythonGlobal globalGetInstance() cdef cudaError_t _setLastError(cudaError_t err) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t getDescInfo(const cudaChannelFormatDesc* d, int *numberOfChannels, ccuda.CUarray_format *format) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t getDescInfo(const cudaChannelFormatDesc* d, int *numberOfChannels, cydriver.CUarray_format *format) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t streamAddCallbackCommon(cudaStream_t stream, cudaStreamCallback_t callback, void *userData, unsigned int flags) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t streamAddHostCallbackCommon(cudaStream_t stream, cudaHostFn_t callback, void *userData) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t streamGetCaptureInfoCommon(cudaStream_t stream, cudaStreamCaptureStatus* captureStatus_out, unsigned long long *id_out, cudaGraph_t *graph_out, const cudaGraphNode_t **dependencies_out, size_t *numDependencies_out) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t streamGetCaptureInfoCommon_v3(cudaStream_t stream, cudaStreamCaptureStatus* captureStatus_out, unsigned long long *id_out, cudaGraph_t *graph_out, const cudaGraphNode_t **dependencies_out, const cudaGraphEdgeData** edgeData_out, size_t *numDependencies_out) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t getChannelFormatDescFromDriverDesc(cudaChannelFormatDesc* pRuntimeDesc, size_t* pDepth, size_t* pHeight, size_t* pWidth, const ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2* pDriverDesc) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t copyFromHost2D(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t spitch, size_t width, size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t 
copyFromDevice2D(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t srcOffset, - size_t spitch, size_t width, size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t getChannelFormatDescFromDriverDesc(cudaChannelFormatDesc* pRuntimeDesc, size_t* pDepth, size_t* pHeight, size_t* pWidth, const cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2* pDriverDesc) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t copyFromHost2D(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t spitch, size_t width, size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t copyFromDevice2D(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t srcOffset, + size_t spitch, size_t width, size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t copyToHost2D(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, char *dst, size_t dpitch, size_t width, - size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t copyToDevice2D(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *dst, size_t dstOffset, size_t dpitch, - size_t width, size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil + size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t copyToDevice2D(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *dst, size_t dstOffset, size_t dpitch, + size_t width, size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t copyToArray2D(cudaArray_const_t thisArray, size_t hOffsetSrc, size_t wOffsetSrc, cudaArray_t dst, size_t hOffsetDst, size_t wOffsetDst, size_t width, size_t height) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t getChannelDesc(cudaArray_const_t thisArray, cudaChannelFormatDesc *outDesc) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t getDriverResDescFromResDesc(ccuda.CUDA_RESOURCE_DESC *rdDst, const cudaResourceDesc *rdSrc, - ccuda.CUDA_TEXTURE_DESC *tdDst, const cudaTextureDesc *tdSrc, - ccuda.CUDA_RESOURCE_VIEW_DESC *rvdDst, const cudaResourceViewDesc *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t getResDescFromDriverResDesc(cudaResourceDesc *rdDst, const ccuda.CUDA_RESOURCE_DESC *rdSrc, - cudaTextureDesc *tdDst, const ccuda.CUDA_TEXTURE_DESC *tdSrc, - cudaResourceViewDesc *rvdDst, const ccuda.CUDA_RESOURCE_VIEW_DESC *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t getDriverResDescFromResDesc(cydriver.CUDA_RESOURCE_DESC *rdDst, const cudaResourceDesc *rdSrc, + cydriver.CUDA_TEXTURE_DESC *tdDst, const cudaTextureDesc *tdSrc, + cydriver.CUDA_RESOURCE_VIEW_DESC *rvdDst, const cudaResourceViewDesc *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t getResDescFromDriverResDesc(cudaResourceDesc *rdDst, const cydriver.CUDA_RESOURCE_DESC *rdSrc, + cudaTextureDesc *tdDst, const cydriver.CUDA_TEXTURE_DESC *tdSrc, + cudaResourceViewDesc *rvdDst, const cydriver.CUDA_RESOURCE_VIEW_DESC *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t memsetPtr(char 
*mem, int c, size_t count, cudaStream_t sid, bool async) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t memset2DPtr(char *mem, size_t pitch, int c, size_t width, size_t height, cudaStream_t sid, bool async) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t copyFromHost(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t count, - ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t copyFromDevice(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, - const char *src, size_t srcOffset, size_t count, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t copyToHost(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, char *dst, size_t count, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t copyToDevice(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, - const char *dst, size_t dstOffset, size_t count, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil + cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t copyFromDevice(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, + const char *src, size_t srcOffset, size_t count, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t copyToHost(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, char *dst, size_t count, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t copyToDevice(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, + const char *dst, size_t dstOffset, size_t count, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t copy1DConvertTo3DParams(void* dst, const void* src, size_t count, cudaMemcpyKind kind, cudaMemcpy3DParms *p) except ?cudaErrorCallRequiresNewerDriver nogil -cdef void toDriverMemsetNodeParams(const cudaMemsetParams *pRuntimeParams, ccuda.CUDA_MEMSET_NODE_PARAMS *pDriverParams) noexcept nogil -cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, ccuda.CUDA_MEMCPY3D *cd) except ?cudaErrorCallRequiresNewerDriver nogil +cdef void toDriverMemsetNodeParams(const cudaMemsetParams *pRuntimeParams, cydriver.CUDA_MEMSET_NODE_PARAMS *pDriverParams) noexcept nogil +cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, cydriver.CUDA_MEMCPY3D *cd) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t mallocArray(cudaArray_t *arrayPtr, const cudaChannelFormatDesc *desc, size_t depth, size_t height, size_t width, int corr2D, unsigned int flags) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t memcpy2DToArray(cudaArray_t dst, size_t hOffset, size_t wOffset, const char *src, @@ -93,7 +93,7 @@ cdef cudaError_t memcpy2DPtr(char *dst, size_t dpitch, const char *src, size_t s cudaStream_t sid, bool async) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t memcpy3D(const cudaMemcpy3DParms *p, bool peer, int srcDevice, int dstDevice, cudaStream_t sid, bool async) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t memcpyAsyncDispatch(void *dst, const void *src, size_t size, cudaMemcpyKind kind, cudaStream_t stream) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t 
toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaMemcpy3DParms *p) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t toCudartMemCopy3DParams(const cydriver.CUDA_MEMCPY3D_v2 *cd, cudaMemcpy3DParms *p) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t memcpy2DFromArray(char *dst, size_t dpitch, cudaArray_const_t src, size_t hOffset, size_t wOffset, size_t width, size_t height, cudaMemcpyKind kind, cudaStream_t sid, bool async) except ?cudaErrorCallRequiresNewerDriver nogil @@ -110,12 +110,12 @@ cdef cudaError_t memcpyFromArray(char *dst, cudaArray_const_t src, size_t hOffse cdef cudaError_t memcpyArrayToArray(cudaArray_t dst, size_t hOffsetDst, size_t wOffsetDst, cudaArray_const_t src, size_t hOffsetSrc, size_t wOffsetSrc, size_t count, cudaMemcpyKind kind) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t toDriverCudaResourceDesc(ccuda.CUDA_RESOURCE_DESC *_driver_pResDesc, const cudaResourceDesc *pResDesc) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t getDriverEglFrame(ccuda.CUeglFrame *cuEglFrame, cudaEglFrame eglFrame) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t getRuntimeEglFrame(cudaEglFrame *eglFrame, ccuda.CUeglFrame cueglFrame) except ?cudaErrorCallRequiresNewerDriver nogil -cdef cudaError_t toDriverGraphNodeParams(const cudaGraphNodeParams *rtParams, ccuda.CUgraphNodeParams *driverParams) except ?cudaErrorCallRequiresNewerDriver nogil -cdef void toCudartGraphNodeOutParams(const ccuda.CUgraphNodeParams *driverParams, cudaGraphNodeParams *rtParams) noexcept nogil -cdef cudaError_t toDriverKernelNodeParams(const cudaKernelNodeParams *nodeParams, ccuda.CUDA_KERNEL_NODE_PARAMS *driverNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil -cdef void toDriverHostNodeParams(const cudaHostNodeParams *pRuntimeNodeParams, ccuda.CUDA_HOST_NODE_PARAMS *pDriverNodeParams) noexcept nogil +cdef cudaError_t toDriverCudaResourceDesc(cydriver.CUDA_RESOURCE_DESC *_driver_pResDesc, const cudaResourceDesc *pResDesc) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t getDriverEglFrame(cydriver.CUeglFrame *cuEglFrame, cudaEglFrame eglFrame) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t getRuntimeEglFrame(cudaEglFrame *eglFrame, cydriver.CUeglFrame cueglFrame) except ?cudaErrorCallRequiresNewerDriver nogil +cdef cudaError_t toDriverGraphNodeParams(const cudaGraphNodeParams *rtParams, cydriver.CUgraphNodeParams *driverParams) except ?cudaErrorCallRequiresNewerDriver nogil +cdef void toCudartGraphNodeOutParams(const cydriver.CUgraphNodeParams *driverParams, cudaGraphNodeParams *rtParams) noexcept nogil +cdef cudaError_t toDriverKernelNodeParams(const cudaKernelNodeParams *nodeParams, cydriver.CUDA_KERNEL_NODE_PARAMS *driverNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil +cdef void toDriverHostNodeParams(const cudaHostNodeParams *pRuntimeNodeParams, cydriver.CUDA_HOST_NODE_PARAMS *pDriverNodeParams) noexcept nogil cdef cudaError_t DeviceRegisterAsyncNotificationCommon(int device, cudaAsyncCallback callbackFunc, void* userData, cudaAsyncCallbackHandle_t* callback) except ?cudaErrorCallRequiresNewerDriver nogil cdef cudaError_t DeviceUnregisterAsyncNotificationCommon(int device, cudaAsyncCallbackHandle_t callback) except ?cudaErrorCallRequiresNewerDriver nogil diff --git a/cuda/_lib/ccudart/utils.pyx.in b/cuda/cuda/bindings/_lib/cyruntime/utils.pyx.in similarity index 52% rename from cuda/_lib/ccudart/utils.pyx.in rename to cuda/cuda/bindings/_lib/cyruntime/utils.pyx.in 
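Every signature in the .pxd hunks above keeps one calling convention: a cudaError_t return, the "except ?cudaErrorCallRequiresNewerDriver nogil" clause, and driver-side types now spelled through the cydriver cimport instead of the old ccuda one. A minimal sketch of that convention, assuming the same cimports as the file itself (the helper name countDevices is hypothetical, not part of this patch):

    from cuda.bindings.cyruntime cimport *
    cimport cuda.bindings._bindings.cydriver as cydriver

    # "except ?<sentinel>" marks the sentinel as a value that *may* signal a
    # pending Python exception, so Cython checks for one only when that value
    # is returned; "nogil" keeps the wrapper callable without holding the GIL.
    cdef cudaError_t countDevices(int *count) except ?cudaErrorCallRequiresNewerDriver nogil:
        cdef cudaError_t err = cydriver._cuDeviceGetCount(count)
        if err != cudaSuccess:
            return err    # plain driver error: no Python exception is raised
        return cudaSuccess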
index c34c96f1..ea47081d 100644 --- a/cuda/_lib/ccudart/utils.pyx.in +++ b/cuda/cuda/bindings/_lib/cyruntime/utils.pyx.in @@ -6,14 +6,14 @@ # this software and related documentation outside the terms of the EULA # is strictly prohibited. import cython -from cuda.ccudart cimport * +from cuda.bindings.cyruntime cimport * from libc.stdlib cimport malloc, free, calloc from libc.string cimport memset, memcpy, strncmp, memcmp from libcpp cimport bool -cimport cuda._cuda.ccuda as ccuda +cimport cuda.bindings._bindings.cydriver as cydriver cdef struct cudaArrayLocalState: - ccuda.CUarray array + cydriver.CUarray array cudaChannelFormatDesc desc size_t depth size_t height @@ -52,10 +52,10 @@ cdef class cudaPythonGlobal: return cudaSuccess cdef cudaError_t err = cudaSuccess - err = ccuda._cuInit(0) + err = cydriver._cuInit(0) if err != cudaSuccess: return err - err = ccuda._cuDeviceGetCount(&self._numDevices) + err = cydriver._cuDeviceGetCount(&self._numDevices) if err != cudaSuccess: return err @@ -73,14 +73,14 @@ cdef class cudaPythonGlobal: cdef cudaError_t lazyInitContextState(self) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess - cdef ccuda.CUcontext driverContext + cdef cydriver.CUcontext driverContext cdef cudaPythonDevice *device err = self.lazyInitDriver() if err != cudaSuccess: return err - err = ccuda._cuCtxGetCurrent(&driverContext) + err = cydriver._cuCtxGetCurrent(&driverContext) if err != cudaSuccess: return err device = self.getDeviceFromPrimaryCtx(driverContext) @@ -96,7 +96,7 @@ cdef class cudaPythonGlobal: if driverContext != NULL: # If the context exists, but is non-primary, make sure it can be used with the CUDA 3.2 API, # then return immediately - err = ccuda._cuCtxGetApiVersion(driverContext, &version) + err = cydriver._cuCtxGetApiVersion(driverContext, &version) if err == cudaErrorContextIsDestroyed: return cudaErrorIncompatibleDriverContext elif err != cudaSuccess: @@ -114,7 +114,7 @@ cdef class cudaPythonGlobal: err = initPrimaryContext(device) if err != cudaSuccess: return err - err = ccuda._cuCtxSetCurrent(device.primaryContext) + err = cydriver._cuCtxSetCurrent(device.primaryContext) return err cdef cudaPythonDevice* getDevice(self, int deviceOrdinal) noexcept nogil: @@ -122,13 +122,13 @@ cdef class cudaPythonGlobal: return NULL return &self._deviceList[deviceOrdinal] - cdef cudaPythonDevice* getDeviceFromDriver(self, ccuda.CUdevice driverDevice) noexcept nogil: + cdef cudaPythonDevice* getDeviceFromDriver(self, cydriver.CUdevice driverDevice) noexcept nogil: for i in range(self._numDevices): if self._deviceList[i].driverDevice == driverDevice: return &self._deviceList[i] return NULL - cdef cudaPythonDevice* getDeviceFromPrimaryCtx(self, ccuda.CUcontext context) noexcept nogil: + cdef cudaPythonDevice* getDeviceFromPrimaryCtx(self, cydriver.CUcontext context) noexcept nogil: if context == NULL: return NULL for i in range(self._numDevices): @@ -140,469 +140,469 @@ cdef cudaPythonGlobal m_global = cudaPythonGlobal() cdef cudaError_t initDevice(cudaPythonDevice *device, int deviceOrdinal) except ?cudaErrorCallRequiresNewerDriver nogil: - # ccuda.CUcontext primaryContext + # cydriver.CUcontext primaryContext device[0].primaryContext = NULL # bool primaryContextRetained device[0].primaryContextRetained = False # int deviceOrdinal device[0].deviceOrdinal = deviceOrdinal - # ccuda.CUdevice driverDevice - err = ccuda._cuDeviceGet(&device[0].driverDevice, deviceOrdinal) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + # cydriver.CUdevice 
driverDevice + err = cydriver._cuDeviceGet(&device[0].driverDevice, deviceOrdinal) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError # cudaDeviceProp deviceProperties - err = ccuda._cuDeviceGetName(device[0].deviceProperties.name, sizeof(device[0].deviceProperties.name), deviceOrdinal) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetName(device[0].deviceProperties.name, sizeof(device[0].deviceProperties.name), deviceOrdinal) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceTotalMem_v2(&(device[0].deviceProperties.totalGlobalMem), deviceOrdinal) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceTotalMem_v2(&(device[0].deviceProperties.totalGlobalMem), deviceOrdinal) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.major), ccuda.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.major), cydriver.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.minor), ccuda.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.minor), cydriver.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.deviceOverlap), ccuda.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.deviceOverlap), cydriver.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.asyncEngineCount), ccuda.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.asyncEngineCount), cydriver.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.multiProcessorCount), ccuda.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.multiProcessorCount), cydriver.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.kernelExecTimeoutEnabled), ccuda.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, (deviceOrdinal)) - if err != 
ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.kernelExecTimeoutEnabled), cydriver.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.integrated), ccuda.CU_DEVICE_ATTRIBUTE_INTEGRATED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.integrated), cydriver.CU_DEVICE_ATTRIBUTE_INTEGRATED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.canMapHostMemory), ccuda.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.canMapHostMemory), cydriver.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1D), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1D), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DMipmap), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DMipmap), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DLinear), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DLinear), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2D[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2D[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2D[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2D[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DMipmap[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH, (deviceOrdinal)) - if err != 
ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DMipmap[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DMipmap[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DMipmap[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLinear[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLinear[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLinear[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLinear[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLinear[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLinear[2]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DGather[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DGather[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DGather[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DGather[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3D[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3D[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err 
= ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3D[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3D[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3D[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3D[2]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3DAlt[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3DAlt[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3DAlt[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3DAlt[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3DAlt[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture3DAlt[2]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTextureCubemap), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTextureCubemap), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DLayered[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DLayered[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DLayered[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture1DLayered[1]), 
cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLayered[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLayered[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLayered[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLayered[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLayered[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTexture2DLayered[2]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTextureCubemapLayered[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTextureCubemapLayered[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTextureCubemapLayered[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxTextureCubemapLayered[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface1D), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface1D), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2D[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2D[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2D[1]), 
ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2D[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface3D[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface3D[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface3D[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface3D[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface3D[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface3D[2]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface1DLayered[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface1DLayered[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface1DLayered[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface1DLayered[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2DLayered[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2DLayered[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2DLayered[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2DLayered[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return 
cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2DLayered[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurface2DLayered[2]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurfaceCubemap), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurfaceCubemap), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurfaceCubemapLayered[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurfaceCubemapLayered[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurfaceCubemapLayered[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxSurfaceCubemapLayered[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.concurrentKernels), ccuda.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.concurrentKernels), cydriver.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.ECCEnabled), ccuda.CU_DEVICE_ATTRIBUTE_ECC_ENABLED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.ECCEnabled), cydriver.CU_DEVICE_ATTRIBUTE_ECC_ENABLED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.pciBusID), ccuda.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.pciBusID), cydriver.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.pciDeviceID), ccuda.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.pciDeviceID), cydriver.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, (deviceOrdinal)) + if err != 
cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.pciDomainID), ccuda.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.pciDomainID), cydriver.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.tccDriver), ccuda.CU_DEVICE_ATTRIBUTE_TCC_DRIVER, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.tccDriver), cydriver.CU_DEVICE_ATTRIBUTE_TCC_DRIVER, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.unifiedAddressing), ccuda.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.unifiedAddressing), cydriver.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryClockRate), ccuda.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryClockRate), cydriver.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryBusWidth), ccuda.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryBusWidth), cydriver.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.l2CacheSize), ccuda.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.l2CacheSize), cydriver.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.persistingL2CacheMaxSize), ccuda.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.persistingL2CacheMaxSize), cydriver.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsPerMultiProcessor), ccuda.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsPerMultiProcessor), cydriver.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError 
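# The dozens of deviceProperties fills above (and below) repeat one
# query-check-store shape; a hypothetical condensation of that pattern,
# for illustration only (fillAttr is not a helper this patch defines):
#
#   cdef cudaError_t fillAttr(int *slot, cydriver.CUdevice_attribute attr,
#                             int ordinal) except ?cudaErrorCallRequiresNewerDriver nogil:
#       cdef cudaError_t err = cydriver._cuDeviceGetAttribute(slot, attr, ordinal)
#       if err != cydriver.cudaError_enum.CUDA_SUCCESS:
#           return cudaErrorInitializationError
#       return cudaSuccess
#
#   err = fillAttr(&(device[0].deviceProperties.warpSize),
#                  cydriver.CU_DEVICE_ATTRIBUTE_WARP_SIZE, deviceOrdinal)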
cdef int surfaceAlignment - err = ccuda._cuDeviceGetAttribute(&(surfaceAlignment), ccuda.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(surfaceAlignment), cydriver.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.surfaceAlignment = surfaceAlignment cdef int texturePitchAlignment - err = ccuda._cuDeviceGetAttribute(&texturePitchAlignment, ccuda.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&texturePitchAlignment, cydriver.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.texturePitchAlignment = texturePitchAlignment cdef int sharedMemPerBlock - err = ccuda._cuDeviceGetAttribute(&sharedMemPerBlock, ccuda.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&sharedMemPerBlock, cydriver.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.sharedMemPerBlock = sharedMemPerBlock cdef int sharedMemPerBlockOptin - err = ccuda._cuDeviceGetAttribute(&sharedMemPerBlockOptin, ccuda.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&sharedMemPerBlockOptin, cydriver.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.sharedMemPerBlockOptin = sharedMemPerBlockOptin cdef int sharedMemPerMultiprocessor - err = ccuda._cuDeviceGetAttribute(&sharedMemPerMultiprocessor, ccuda.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&sharedMemPerMultiprocessor, cydriver.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.sharedMemPerMultiprocessor = sharedMemPerMultiprocessor - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.regsPerBlock), ccuda.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.regsPerBlock), cydriver.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.regsPerMultiprocessor), ccuda.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.regsPerMultiprocessor), cydriver.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.warpSize), ccuda.CU_DEVICE_ATTRIBUTE_WARP_SIZE, 
(deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.warpSize), cydriver.CU_DEVICE_ATTRIBUTE_WARP_SIZE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError cdef int memPitch - err = ccuda._cuDeviceGetAttribute(&memPitch, ccuda.CU_DEVICE_ATTRIBUTE_MAX_PITCH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&memPitch, cydriver.CU_DEVICE_ATTRIBUTE_MAX_PITCH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.memPitch = memPitch - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsPerBlock), ccuda.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsPerBlock), cydriver.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsDim[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsDim[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsDim[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsDim[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsDim[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxThreadsDim[2]), cydriver.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxGridSize[0]), ccuda.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxGridSize[0]), cydriver.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxGridSize[1]), ccuda.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxGridSize[1]), cydriver.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxGridSize[2]), ccuda.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxGridSize[2]), 
cydriver.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError cdef int totalConstMem - err = ccuda._cuDeviceGetAttribute(&totalConstMem, ccuda.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&totalConstMem, cydriver.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.totalConstMem = totalConstMem - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.clockRate), ccuda.CU_DEVICE_ATTRIBUTE_CLOCK_RATE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.clockRate), cydriver.CU_DEVICE_ATTRIBUTE_CLOCK_RATE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError cdef int textureAlignment - err = ccuda._cuDeviceGetAttribute(&textureAlignment, ccuda.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&textureAlignment, cydriver.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.textureAlignment = textureAlignment - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.streamPrioritiesSupported), ccuda.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.streamPrioritiesSupported), cydriver.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.globalL1CacheSupported), ccuda.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.globalL1CacheSupported), cydriver.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.localL1CacheSupported), ccuda.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.localL1CacheSupported), cydriver.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.managedMemory), ccuda.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.managedMemory), cydriver.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.isMultiGpuBoard), ccuda.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.isMultiGpuBoard), 
cydriver.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.multiGpuBoardGroupID), ccuda.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.multiGpuBoardGroupID), cydriver.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.hostNativeAtomicSupported), ccuda.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.hostNativeAtomicSupported), cydriver.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.singleToDoublePrecisionPerfRatio), ccuda.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.singleToDoublePrecisionPerfRatio), cydriver.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.pageableMemoryAccess), ccuda.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.pageableMemoryAccess), cydriver.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.concurrentManagedAccess), ccuda.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.concurrentManagedAccess), cydriver.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.computePreemptionSupported), ccuda.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.computePreemptionSupported), cydriver.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.canUseHostPointerForRegisteredMem), ccuda.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.canUseHostPointerForRegisteredMem), cydriver.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.cooperativeLaunch), 
ccuda.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.cooperativeLaunch), cydriver.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.cooperativeMultiDeviceLaunch), ccuda.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.cooperativeMultiDeviceLaunch), cydriver.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.pageableMemoryAccessUsesHostPageTables), ccuda.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.pageableMemoryAccessUsesHostPageTables), cydriver.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.directManagedMemAccessFromHost), ccuda.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.directManagedMemAccessFromHost), cydriver.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetUuid((&(device[0].deviceProperties.uuid)), deviceOrdinal) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetUuid((&(device[0].deviceProperties.uuid)), deviceOrdinal) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.maxBlocksPerMultiProcessor), ccuda.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.maxBlocksPerMultiProcessor), cydriver.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.accessPolicyMaxWindowSize), ccuda.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.accessPolicyMaxWindowSize), cydriver.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.hostRegisterSupported), ccuda.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.hostRegisterSupported), cydriver.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return 
cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.sparseCudaArraySupported), ccuda.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.sparseCudaArraySupported), cydriver.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.hostRegisterReadOnlySupported), ccuda.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.hostRegisterReadOnlySupported), cydriver.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.timelineSemaphoreInteropSupported), ccuda.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.timelineSemaphoreInteropSupported), cydriver.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryPoolsSupported), ccuda.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.memoryPoolsSupported), cydriver.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.gpuDirectRDMASupported), ccuda.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.gpuDirectRDMASupported), cydriver.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError cdef int gpuDirectRDMAFlushWritesOptions - err = ccuda._cuDeviceGetAttribute(&gpuDirectRDMAFlushWritesOptions, ccuda.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&gpuDirectRDMAFlushWritesOptions, cydriver.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.gpuDirectRDMAFlushWritesOptions = gpuDirectRDMAFlushWritesOptions - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.gpuDirectRDMAWritesOrdering), ccuda.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.gpuDirectRDMAWritesOrdering), cydriver.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError cdef int memoryPoolSupportedHandleTypes - err = 
ccuda._cuDeviceGetAttribute(&memoryPoolSupportedHandleTypes, ccuda.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&memoryPoolSupportedHandleTypes, cydriver.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.memoryPoolSupportedHandleTypes = memoryPoolSupportedHandleTypes; - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.deferredMappingCudaArraySupported), ccuda.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.deferredMappingCudaArraySupported), cydriver.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.ipcEventSupported), ccuda.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.ipcEventSupported), cydriver.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError - err = ccuda._cuDeviceGetAttribute(&(device[0].deviceProperties.clusterLaunch), ccuda.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&(device[0].deviceProperties.clusterLaunch), cydriver.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError cdef int reservedSharedMemPerBlock - err = ccuda._cuDeviceGetAttribute(&reservedSharedMemPerBlock, ccuda.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK, (deviceOrdinal)) - if err != ccuda.cudaError_enum.CUDA_SUCCESS: + err = cydriver._cuDeviceGetAttribute(&reservedSharedMemPerBlock, cydriver.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK, (deviceOrdinal)) + if err != cydriver.cudaError_enum.CUDA_SUCCESS: return cudaErrorInitializationError device[0].deviceProperties.reservedSharedMemPerBlock = reservedSharedMemPerBlock @@ -615,9 +615,9 @@ cdef cudaError_t initPrimaryContext(cudaPythonDevice *device) except ?cudaErrorC # If we have context retained we need to check if it is not reset cdef unsigned int version if device[0].primaryContextRetained: - err = ccuda._cuCtxGetApiVersion(device[0].primaryContext, &version) + err = cydriver._cuCtxGetApiVersion(device[0].primaryContext, &version) if err == cudaErrorDeviceUninitialized: - err = ccuda.cuDevicePrimaryCtxRelease(device[0].driverDevice) + err = cydriver.cuDevicePrimaryCtxRelease(device[0].driverDevice) if err != cudaSuccess: return err device[0].primaryContextRetained = False @@ -626,7 +626,7 @@ cdef cudaError_t initPrimaryContext(cudaPythonDevice *device) except ?cudaErrorC # If we don't or it is invalid we need to recreate it if not device[0].primaryContextRetained: - err = ccuda._cuDevicePrimaryCtxRetain(&device[0].primaryContext, device[0].driverDevice) + err = cydriver._cuDevicePrimaryCtxRetain(&device[0].primaryContext, device[0].driverDevice) if err != cudaSuccess: return err device[0].primaryContextRetained = True @@ -636,14 +636,14 @@ cdef cudaError_t 
resetPrimaryContext(cudaPythonDevice* device) except ?cudaError cdef cudaError_t err = cudaSuccess cdef unsigned int version - err = ccuda._cuCtxGetApiVersion(device[0].primaryContext, &version) + err = cydriver._cuCtxGetApiVersion(device[0].primaryContext, &version) if err == cudaSuccess: if not device[0].primaryContextRetained: - err = ccuda._cuDevicePrimaryCtxRetain(&device[0].primaryContext, device[0].driverDevice) + err = cydriver._cuDevicePrimaryCtxRetain(&device[0].primaryContext, device[0].driverDevice) if err != cudaSuccess: return err device[0].primaryContextRetained = True - ccuda._cuDevicePrimaryCtxReset_v2(device[0].driverDevice) + cydriver._cuDevicePrimaryCtxReset_v2(device[0].driverDevice) return cudaSuccess elif err == cudaErrorDeviceUninitialized: return cudaSuccess @@ -664,7 +664,7 @@ cdef int case_desc(const cudaChannelFormatDesc* d, int x, int y, int z, int w, i return d[0].x == x and d[0].y == y and d[0].z == z and d[0].w == w and d[0].f == f -cdef cudaError_t getDescInfo(const cudaChannelFormatDesc* d, int *numberOfChannels, ccuda.CUarray_format *format) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t getDescInfo(const cudaChannelFormatDesc* d, int *numberOfChannels, cydriver.CUarray_format *format) except ?cudaErrorCallRequiresNewerDriver nogil: # Check validity if d[0].f in (cudaChannelFormatKind.cudaChannelFormatKindSigned, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): @@ -722,145 +722,145 @@ cdef cudaError_t getDescInfo(const cudaChannelFormatDesc* d, int *numberOfChanne return cudaErrorInvalidChannelDescriptor if case_desc(d, 8, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 elif case_desc(d, 8, 8, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 elif case_desc(d, 8, 8, 8, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8 elif case_desc(d, 8, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 elif case_desc(d, 8, 8, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 elif case_desc(d, 8, 8, 8, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8 elif case_desc(d, 16, 0, 0, 0, 
cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 elif case_desc(d, 16, 16, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 elif case_desc(d, 16, 16, 16, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 elif case_desc(d, 16, 16, 16, 16, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16 elif case_desc(d, 16, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 elif case_desc(d, 16, 16, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 elif case_desc(d, 16, 16, 16, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 elif case_desc(d, 16, 16, 16, 16, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16 elif case_desc(d, 32, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 elif case_desc(d, 32, 32, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 elif case_desc(d, 32, 32, 32, 0, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 elif case_desc(d, 32, 32, 32, 32, cudaChannelFormatKind.cudaChannelFormatKindSigned): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32 elif case_desc(d, 32, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 elif case_desc(d, 32, 32, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 elif case_desc(d, 32, 32, 32, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 3 - format[0] = 
ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 elif case_desc(d, 32, 32, 32, 32, cudaChannelFormatKind.cudaChannelFormatKindUnsigned): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32 elif case_desc(d, 16, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_HALF + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_HALF elif case_desc(d, 16, 16, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_HALF + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_HALF elif case_desc(d, 16, 16, 16, 0, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_HALF + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_HALF elif case_desc(d, 16, 16, 16, 16, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_HALF + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_HALF elif case_desc(d, 32, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_FLOAT + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_FLOAT elif case_desc(d, 32, 32, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_FLOAT + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_FLOAT elif case_desc(d, 32, 32, 32, 0, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_FLOAT + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_FLOAT elif case_desc(d, 32, 32, 32, 32, cudaChannelFormatKind.cudaChannelFormatKindFloat): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_FLOAT + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_FLOAT elif case_desc(d, 8, 8, 8, 0, cudaChannelFormatKind.cudaChannelFormatKindNV12): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_NV12 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_NV12 elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM_SRGB + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM_SRGB elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM_SRGB + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM_SRGB elif case_desc(d, 8, 8, 8, 8, 
cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM_SRGB + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM_SRGB elif case_desc(d, 8, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC4_UNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC4_UNORM elif case_desc(d, 8, 0, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4): numberOfChannels[0] = 1 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC4_SNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC4_SNORM elif case_desc(d, 8, 8, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC5_UNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC5_UNORM elif case_desc(d, 8, 8, 0, 0, cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5): numberOfChannels[0] = 2 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC5_SNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC5_SNORM elif case_desc(d, 16, 16, 16, 0, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC6H_UF16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC6H_UF16 elif case_desc(d, 16, 16, 16, 0, cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H): numberOfChannels[0] = 3 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC6H_SF16 + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC6H_SF16 elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM elif case_desc(d, 8, 8, 8, 8, cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB): numberOfChannels[0] = 4 - format[0] = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM_SRGB + format[0] = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM_SRGB else: return cudaErrorInvalidChannelDescriptor @@ -876,7 +876,7 @@ cdef cudaError_t getDescInfo(const cudaChannelFormatDesc* d, int *numberOfChanne @cython.show_performance_hints(False) -cdef void cudaStreamRtCallbackWrapper(ccuda.CUstream stream, ccuda.CUresult status, void *data) nogil: +cdef void cudaStreamRtCallbackWrapper(cydriver.CUstream stream, cydriver.CUresult status, void *data) nogil: cdef cudaStreamCallbackData *cbData = data cdef cudaError_t err = status with gil: @@ -902,7 +902,7 @@ cdef cudaError_t streamAddCallbackCommon( cbData.callback = callback cbData.userData = userData - err = ccuda._cuStreamAddCallback(stream, cudaStreamRtCallbackWrapper, cbData, flags) + err = cydriver._cuStreamAddCallback(stream, cudaStreamRtCallbackWrapper, cbData, flags) if err != cudaSuccess: free(cbData) return err @@ -933,18 +933,18 @@ cdef cudaError_t streamAddHostCallbackCommon( cbData.callback = callback cbData.userData = userData - err = ccuda._cuLaunchHostFunc(stream, cudaStreamRtHostCallbackWrapper, 
cbData) + err = cydriver._cuLaunchHostFunc(stream, cudaStreamRtHostCallbackWrapper, cbData) if err != cudaSuccess: free(cbData) return err -cdef cudaError_t toRuntimeStreamCaptureStatus(ccuda.CUstreamCaptureStatus driverCaptureStatus, cudaStreamCaptureStatus *runtimeStatus) except ?cudaErrorCallRequiresNewerDriver nogil: - if driverCaptureStatus == ccuda.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_NONE: +cdef cudaError_t toRuntimeStreamCaptureStatus(cydriver.CUstreamCaptureStatus driverCaptureStatus, cudaStreamCaptureStatus *runtimeStatus) except ?cudaErrorCallRequiresNewerDriver nogil: + if driverCaptureStatus == cydriver.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_NONE: runtimeStatus[0] = cudaStreamCaptureStatus.cudaStreamCaptureStatusNone - elif driverCaptureStatus == ccuda.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_ACTIVE: + elif driverCaptureStatus == cydriver.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_ACTIVE: runtimeStatus[0] = cudaStreamCaptureStatus.cudaStreamCaptureStatusActive - elif driverCaptureStatus == ccuda.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_INVALIDATED: + elif driverCaptureStatus == cydriver.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_INVALIDATED: runtimeStatus[0] = cudaStreamCaptureStatus.cudaStreamCaptureStatusInvalidated else: return cudaErrorUnknown @@ -963,9 +963,9 @@ cdef cudaError_t streamGetCaptureInfoCommon( if captureStatus_out == NULL: return cudaErrorInvalidValue - cdef ccuda.CUstreamCaptureStatus driverCaptureStatus + cdef cydriver.CUstreamCaptureStatus driverCaptureStatus - err = ccuda._cuStreamGetCaptureInfo_v2(stream, &driverCaptureStatus, id_out, + err = cydriver._cuStreamGetCaptureInfo_v2(stream, &driverCaptureStatus, id_out, graph_out, dependencies_out, numDependencies_out) if err != cudaSuccess: return err @@ -986,18 +986,18 @@ cdef cudaError_t streamGetCaptureInfoCommon_v3( if captureStatus_out == NULL: return cudaErrorInvalidValue - cdef ccuda.CUstreamCaptureStatus driverCaptureStatus + cdef cydriver.CUstreamCaptureStatus driverCaptureStatus - err = ccuda._cuStreamGetCaptureInfo_v3(stream, &driverCaptureStatus, id_out, - graph_out, dependencies_out, edgeData_out, numDependencies_out) + err = cydriver._cuStreamGetCaptureInfo_v3(stream, &driverCaptureStatus, id_out, + graph_out, dependencies_out, edgeData_out, numDependencies_out) if err != cudaSuccess: return err return toRuntimeStreamCaptureStatus(driverCaptureStatus, captureStatus_out) -cdef ccuda.CUDA_MEMCPY3D_v2 memCopy3DInit(ccuda.CUmemorytype_enum dstType, ccuda.CUmemorytype_enum srcType) noexcept nogil: - cdef ccuda.CUDA_MEMCPY3D_v2 cp +cdef cydriver.CUDA_MEMCPY3D_v2 memCopy3DInit(cydriver.CUmemorytype_enum dstType, cydriver.CUmemorytype_enum srcType) noexcept nogil: + cdef cydriver.CUDA_MEMCPY3D_v2 cp memset(&cp, 0, sizeof(cp)) cp.dstMemoryType = dstType cp.srcMemoryType = srcType @@ -1007,8 +1007,8 @@ cdef ccuda.CUDA_MEMCPY3D_v2 memCopy3DInit(ccuda.CUmemorytype_enum dstType, ccuda return cp -cdef ccuda.CUDA_MEMCPY2D_v2 memCopy2DInit(ccuda.CUmemorytype_enum dstType, ccuda.CUmemorytype_enum srcType) noexcept nogil: - cdef ccuda.CUDA_MEMCPY2D_v2 cp +cdef cydriver.CUDA_MEMCPY2D_v2 memCopy2DInit(cydriver.CUmemorytype_enum dstType, cydriver.CUmemorytype_enum srcType) noexcept nogil: + cdef cydriver.CUDA_MEMCPY2D_v2 cp memset(&cp, 0, sizeof(cp)) cp.dstMemoryType = dstType cp.srcMemoryType = srcType @@ -1017,52 +1017,52 @@ cdef ccuda.CUDA_MEMCPY2D_v2 memCopy2DInit(ccuda.CUmemorytype_enum dstType, ccuda return cp -cdef cudaError_t 
bytesPerElement(size_t *bytes, int numberOfChannels, ccuda.CUarray_format format) except ?cudaErrorCallRequiresNewerDriver nogil: - if format in (ccuda.CU_AD_FORMAT_FLOAT, - ccuda.CU_AD_FORMAT_UNSIGNED_INT32, - ccuda.CU_AD_FORMAT_SIGNED_INT32): +cdef cudaError_t bytesPerElement(size_t *bytes, int numberOfChannels, cydriver.CUarray_format format) except ?cudaErrorCallRequiresNewerDriver nogil: + if format in (cydriver.CU_AD_FORMAT_FLOAT, + cydriver.CU_AD_FORMAT_UNSIGNED_INT32, + cydriver.CU_AD_FORMAT_SIGNED_INT32): bytes[0] = numberOfChannels * 4 return cudaSuccess - elif format in (ccuda.CU_AD_FORMAT_HALF, - ccuda.CU_AD_FORMAT_SIGNED_INT16, - ccuda.CU_AD_FORMAT_UNSIGNED_INT16): + elif format in (cydriver.CU_AD_FORMAT_HALF, + cydriver.CU_AD_FORMAT_SIGNED_INT16, + cydriver.CU_AD_FORMAT_UNSIGNED_INT16): bytes[0] = numberOfChannels * 2 return cudaSuccess - elif format in (ccuda.CU_AD_FORMAT_SIGNED_INT8, - ccuda.CU_AD_FORMAT_UNSIGNED_INT8, - ccuda.CU_AD_FORMAT_NV12): + elif format in (cydriver.CU_AD_FORMAT_SIGNED_INT8, + cydriver.CU_AD_FORMAT_UNSIGNED_INT8, + cydriver.CU_AD_FORMAT_NV12): bytes[0] = numberOfChannels return cudaSuccess - elif format in (ccuda.CU_AD_FORMAT_SNORM_INT8X1, - ccuda.CU_AD_FORMAT_UNORM_INT8X1): + elif format in (cydriver.CU_AD_FORMAT_SNORM_INT8X1, + cydriver.CU_AD_FORMAT_UNORM_INT8X1): bytes[0] = 1 return cudaSuccess - elif format in (ccuda.CU_AD_FORMAT_SNORM_INT8X2, - ccuda.CU_AD_FORMAT_UNORM_INT8X2, - ccuda.CU_AD_FORMAT_SNORM_INT16X1, - ccuda.CU_AD_FORMAT_UNORM_INT16X1): + elif format in (cydriver.CU_AD_FORMAT_SNORM_INT8X2, + cydriver.CU_AD_FORMAT_UNORM_INT8X2, + cydriver.CU_AD_FORMAT_SNORM_INT16X1, + cydriver.CU_AD_FORMAT_UNORM_INT16X1): bytes[0] = 2 return cudaSuccess - elif format in (ccuda.CU_AD_FORMAT_SNORM_INT8X4, - ccuda.CU_AD_FORMAT_UNORM_INT8X4, - ccuda.CU_AD_FORMAT_SNORM_INT16X2, - ccuda.CU_AD_FORMAT_UNORM_INT16X2): + elif format in (cydriver.CU_AD_FORMAT_SNORM_INT8X4, + cydriver.CU_AD_FORMAT_UNORM_INT8X4, + cydriver.CU_AD_FORMAT_SNORM_INT16X2, + cydriver.CU_AD_FORMAT_UNORM_INT16X2): bytes[0] = 4 return cudaSuccess - elif format in (ccuda.CU_AD_FORMAT_SNORM_INT16X4, - ccuda.CU_AD_FORMAT_UNORM_INT16X4): + elif format in (cydriver.CU_AD_FORMAT_SNORM_INT16X4, + cydriver.CU_AD_FORMAT_UNORM_INT16X4): bytes[0] = 8 return cudaSuccess - elif format in (ccuda.CU_AD_FORMAT_BC2_UNORM, - ccuda.CU_AD_FORMAT_BC2_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC3_UNORM, - ccuda.CU_AD_FORMAT_BC3_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC5_UNORM, - ccuda.CU_AD_FORMAT_BC5_SNORM, - ccuda.CU_AD_FORMAT_BC6H_UF16, - ccuda.CU_AD_FORMAT_BC6H_SF16, - ccuda.CU_AD_FORMAT_BC7_UNORM, - ccuda.CU_AD_FORMAT_BC7_UNORM_SRGB): + elif format in (cydriver.CU_AD_FORMAT_BC2_UNORM, + cydriver.CU_AD_FORMAT_BC2_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC3_UNORM, + cydriver.CU_AD_FORMAT_BC3_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC5_UNORM, + cydriver.CU_AD_FORMAT_BC5_SNORM, + cydriver.CU_AD_FORMAT_BC6H_UF16, + cydriver.CU_AD_FORMAT_BC6H_SF16, + cydriver.CU_AD_FORMAT_BC7_UNORM, + cydriver.CU_AD_FORMAT_BC7_UNORM_SRGB): bytes[0] = 16 return cudaSuccess return cudaErrorInvalidChannelDescriptor @@ -1070,112 +1070,112 @@ cdef cudaError_t bytesPerElement(size_t *bytes, int numberOfChannels, ccuda.CUar cdef cudaError_t getChannelFormatDescFromDriverDesc( cudaChannelFormatDesc* pRuntimeDesc, size_t* pDepth, size_t* pHeight, size_t* pWidth, - const ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2* pDriverDesc) except ?cudaErrorCallRequiresNewerDriver nogil: + const cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2* pDriverDesc) except ?cudaErrorCallRequiresNewerDriver 
nogil: cdef int channel_size = 0 - if pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNSIGNED_INT8: + if pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNSIGNED_INT8: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsigned channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNSIGNED_INT16: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNSIGNED_INT16: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsigned channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNSIGNED_INT32: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNSIGNED_INT32: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsigned channel_size = 32 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SIGNED_INT8: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SIGNED_INT8: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSigned channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SIGNED_INT16: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SIGNED_INT16: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSigned channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SIGNED_INT32: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SIGNED_INT32: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSigned channel_size = 32 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_HALF: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_HALF: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindFloat channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_FLOAT: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_FLOAT: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindFloat channel_size = 32 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_NV12: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_NV12: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindNV12 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNORM_INT8X1: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNORM_INT8X1: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X1 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNORM_INT8X2: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNORM_INT8X2: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X2 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNORM_INT8X4: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNORM_INT8X4: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X4 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SNORM_INT8X1: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SNORM_INT8X1: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X1 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SNORM_INT8X2: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SNORM_INT8X2: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X2 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SNORM_INT8X4: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SNORM_INT8X4: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X4 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNORM_INT16X1: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNORM_INT16X1: pRuntimeDesc[0].f = 
cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X1 channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNORM_INT16X2: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNORM_INT16X2: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X2 channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_UNORM_INT16X4: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_UNORM_INT16X4: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X4 channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SNORM_INT16X1: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SNORM_INT16X1: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X1 channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SNORM_INT16X2: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SNORM_INT16X2: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X2 channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_SNORM_INT16X4: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_SNORM_INT16X4: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X4 channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC1_UNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC1_UNORM: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC1_UNORM_SRGB: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC1_UNORM_SRGB: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC2_UNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC2_UNORM: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC2_UNORM_SRGB: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC2_UNORM_SRGB: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC3_UNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC3_UNORM: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC3_UNORM_SRGB: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC3_UNORM_SRGB: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC4_UNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC4_UNORM: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC4_SNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC4_SNORM: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC5_UNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC5_UNORM: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC5_SNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC5_SNORM: pRuntimeDesc[0].f = 
cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC6H_UF16: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC6H_UF16: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC6H_SF16: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC6H_SF16: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H channel_size = 16 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC7_UNORM: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC7_UNORM: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7 channel_size = 8 - elif pDriverDesc[0].Format == ccuda.CU_AD_FORMAT_BC7_UNORM_SRGB: + elif pDriverDesc[0].Format == cydriver.CU_AD_FORMAT_BC7_UNORM_SRGB: pRuntimeDesc[0].f = cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB channel_size = 8 else: @@ -1209,45 +1209,45 @@ cdef cudaError_t getChannelFormatDescFromDriverDesc( return cudaSuccess -cdef cudaError_t getArrayBlockExtent(cudaExtent *blockExtent, ccuda.CUarray_format format) except ?cudaErrorCallRequiresNewerDriver nogil: - if format in (ccuda.CU_AD_FORMAT_FLOAT, - ccuda.CU_AD_FORMAT_UNSIGNED_INT32, - ccuda.CU_AD_FORMAT_SIGNED_INT32, - ccuda.CU_AD_FORMAT_HALF, - ccuda.CU_AD_FORMAT_SIGNED_INT16, - ccuda.CU_AD_FORMAT_UNSIGNED_INT16, - ccuda.CU_AD_FORMAT_SIGNED_INT8, - ccuda.CU_AD_FORMAT_UNSIGNED_INT8, - ccuda.CU_AD_FORMAT_NV12, - ccuda.CU_AD_FORMAT_SNORM_INT8X1, - ccuda.CU_AD_FORMAT_UNORM_INT8X1, - ccuda.CU_AD_FORMAT_SNORM_INT8X2, - ccuda.CU_AD_FORMAT_UNORM_INT8X2, - ccuda.CU_AD_FORMAT_SNORM_INT16X1, - ccuda.CU_AD_FORMAT_UNORM_INT16X1, - ccuda.CU_AD_FORMAT_SNORM_INT8X4, - ccuda.CU_AD_FORMAT_UNORM_INT8X4, - ccuda.CU_AD_FORMAT_SNORM_INT16X2, - ccuda.CU_AD_FORMAT_UNORM_INT16X2, - ccuda.CU_AD_FORMAT_SNORM_INT16X4, - ccuda.CU_AD_FORMAT_UNORM_INT16X4): +cdef cudaError_t getArrayBlockExtent(cudaExtent *blockExtent, cydriver.CUarray_format format) except ?cudaErrorCallRequiresNewerDriver nogil: + if format in (cydriver.CU_AD_FORMAT_FLOAT, + cydriver.CU_AD_FORMAT_UNSIGNED_INT32, + cydriver.CU_AD_FORMAT_SIGNED_INT32, + cydriver.CU_AD_FORMAT_HALF, + cydriver.CU_AD_FORMAT_SIGNED_INT16, + cydriver.CU_AD_FORMAT_UNSIGNED_INT16, + cydriver.CU_AD_FORMAT_SIGNED_INT8, + cydriver.CU_AD_FORMAT_UNSIGNED_INT8, + cydriver.CU_AD_FORMAT_NV12, + cydriver.CU_AD_FORMAT_SNORM_INT8X1, + cydriver.CU_AD_FORMAT_UNORM_INT8X1, + cydriver.CU_AD_FORMAT_SNORM_INT8X2, + cydriver.CU_AD_FORMAT_UNORM_INT8X2, + cydriver.CU_AD_FORMAT_SNORM_INT16X1, + cydriver.CU_AD_FORMAT_UNORM_INT16X1, + cydriver.CU_AD_FORMAT_SNORM_INT8X4, + cydriver.CU_AD_FORMAT_UNORM_INT8X4, + cydriver.CU_AD_FORMAT_SNORM_INT16X2, + cydriver.CU_AD_FORMAT_UNORM_INT16X2, + cydriver.CU_AD_FORMAT_SNORM_INT16X4, + cydriver.CU_AD_FORMAT_UNORM_INT16X4): blockExtent[0].width = 1 blockExtent[0].height = 1 blockExtent[0].depth = 1 - elif format in (ccuda.CU_AD_FORMAT_BC1_UNORM, - ccuda.CU_AD_FORMAT_BC1_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC4_UNORM, - ccuda.CU_AD_FORMAT_BC4_SNORM, - ccuda.CU_AD_FORMAT_BC2_UNORM, - ccuda.CU_AD_FORMAT_BC2_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC3_UNORM, - ccuda.CU_AD_FORMAT_BC3_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC5_UNORM, - ccuda.CU_AD_FORMAT_BC5_SNORM, - ccuda.CU_AD_FORMAT_BC6H_UF16, - ccuda.CU_AD_FORMAT_BC6H_SF16, - ccuda.CU_AD_FORMAT_BC7_UNORM, - ccuda.CU_AD_FORMAT_BC7_UNORM_SRGB): + elif format in 
(cydriver.CU_AD_FORMAT_BC1_UNORM, + cydriver.CU_AD_FORMAT_BC1_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC4_UNORM, + cydriver.CU_AD_FORMAT_BC4_SNORM, + cydriver.CU_AD_FORMAT_BC2_UNORM, + cydriver.CU_AD_FORMAT_BC2_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC3_UNORM, + cydriver.CU_AD_FORMAT_BC3_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC5_UNORM, + cydriver.CU_AD_FORMAT_BC5_SNORM, + cydriver.CU_AD_FORMAT_BC6H_UF16, + cydriver.CU_AD_FORMAT_BC6H_SF16, + cydriver.CU_AD_FORMAT_BC7_UNORM, + cydriver.CU_AD_FORMAT_BC7_UNORM_SRGB): blockExtent[0].width = 4 blockExtent[0].height = 4 blockExtent[0].depth = 1 @@ -1259,15 +1259,15 @@ cdef cudaError_t getArrayBlockExtent(cudaExtent *blockExtent, ccuda.CUarray_form cdef cudaError_t getLocalState(cudaArrayLocalState *state, cudaArray_const_t thisArray) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState memset(&arrayState, 0, sizeof(arrayState)) - arrayState.array = thisArray + arrayState.array = thisArray cdef cudaExtent compBlockExtent compBlockExtent.width = 1 compBlockExtent.height = 1 compBlockExtent.depth = 1 - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2 driverDesc + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2 driverDesc memset(&driverDesc, 0, sizeof(driverDesc)) - err = ccuda._cuArray3DGetDescriptor_v2(&driverDesc, arrayState.array) + err = cydriver._cuArray3DGetDescriptor_v2(&driverDesc, arrayState.array) if err != cudaSuccess: return err err = getChannelFormatDescFromDriverDesc(&arrayState.desc, &arrayState.depth, &arrayState.height, &arrayState.width, &driverDesc) @@ -1285,14 +1285,14 @@ cdef cudaError_t getLocalState(cudaArrayLocalState *state, cudaArray_const_t thi return cudaSuccess -cdef cudaError_t copyFromHost2D(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t spitch, size_t width, size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t copyFromHost2D(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t spitch, size_t width, size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess cdef cudaArrayLocalState arrayState memset(&arrayState, 0, sizeof(arrayState)) err = getLocalState(&arrayState, thisArray) if err != cudaSuccess: return err - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST) cp.dstArray = arrayState.array cp.dstXInBytes = wOffset @@ -1310,21 +1310,21 @@ cdef cudaError_t copyFromHost2D(cudaArray_const_t thisArray, size_t hOffset, siz return err -cdef cudaError_t copyFromDevice2D(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t srcOffset, - size_t spitch, size_t width, size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t copyFromDevice2D(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t srcOffset, + size_t spitch, size_t width, size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess cdef cudaArrayLocalState arrayState memset(&arrayState, 0, sizeof(arrayState)) err = getLocalState(&arrayState, thisArray) if err != cudaSuccess: return 
err - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, type) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, type) cp.dstArray = arrayState.array cp.dstXInBytes = wOffset cp.dstY = hOffset - cp.srcDevice = src + cp.srcDevice = src cp.srcPitch = spitch cp.srcXInBytes = srcOffset % spitch cp.srcY = (srcOffset / spitch) @@ -1340,14 +1340,14 @@ cdef cudaError_t copyFromDevice2D(ccuda.CUmemorytype type, cudaArray_const_t thi cdef cudaError_t copyToHost2D(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, char *dst, size_t dpitch, size_t width, - size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: + size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState cdef cudaError_t err = cudaSuccess memset(&arrayState, 0, sizeof(arrayState)) err = getLocalState(&arrayState, thisArray) if err != cudaSuccess: return err - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) cp.dstHost = dst cp.dstPitch = dpitch @@ -1368,8 +1368,8 @@ cdef cudaError_t copyToHost2D(cudaArray_const_t thisArray, size_t hOffset, size_ return cudaSuccess -cdef cudaError_t copyToDevice2D(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *dst, size_t dstOffset, size_t dpitch, - size_t width, size_t height, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t copyToDevice2D(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *dst, size_t dstOffset, size_t dpitch, + size_t width, size_t height, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState cdef cudaError_t err = cudaSuccess @@ -1377,9 +1377,9 @@ cdef cudaError_t copyToDevice2D(ccuda.CUmemorytype type, cudaArray_const_t thisA err = getLocalState(&arrayState, thisArray) if err != cudaSuccess: return err - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(type, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(type, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) - cp.dstDevice = dst + cp.dstDevice = dst cp.dstPitch = dpitch cp.dstXInBytes = dstOffset % dpitch cp.dstY = (dstOffset / dpitch) @@ -1406,9 +1406,9 @@ cdef cudaError_t copyToArray2D(cudaArray_const_t thisArray, size_t hOffsetSrc, s err = getLocalState(&arrayState, thisArray) if err != cudaSuccess: return err - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) - cp.dstArray = dst + cp.dstArray = dst cp.dstXInBytes = wOffsetDst cp.dstY = hOffsetDst @@ -1467,59 +1467,59 @@ cdef cudaError_t getChannelDesc(cudaArray_const_t thisArray, cudaChannelFormatDe return cudaSuccess -cdef cudaError_t getFormat(cudaArray_const_t thisArray, int &numberOfChannels, ccuda.CUarray_format *format) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t getFormat(cudaArray_const_t thisArray, int 
&numberOfChannels, cydriver.CUarray_format *format) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState cdef cudaError_t err = cudaSuccess memset(&arrayState, 0, sizeof(arrayState)) err = getLocalState(&arrayState, thisArray) if err != cudaSuccess: return err - return getDescInfo(&arrayState.desc, &numberOfChannels, format) + return getDescInfo(&arrayState.desc, &numberOfChannels, format) -cdef cudaError_t getDriverResDescFromResDesc(ccuda.CUDA_RESOURCE_DESC *rdDst, const cudaResourceDesc *rdSrc, - ccuda.CUDA_TEXTURE_DESC *tdDst, const cudaTextureDesc *tdSrc, - ccuda.CUDA_RESOURCE_VIEW_DESC *rvdDst, const cudaResourceViewDesc *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t getDriverResDescFromResDesc(cydriver.CUDA_RESOURCE_DESC *rdDst, const cudaResourceDesc *rdSrc, + cydriver.CUDA_TEXTURE_DESC *tdDst, const cudaTextureDesc *tdSrc, + cydriver.CUDA_RESOURCE_VIEW_DESC *rvdDst, const cudaResourceViewDesc *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil: cdef int i = 0 cdef int numChannels = 0 - cdef ccuda.CUarray_format format - cdef ccuda.CUarray hArray = NULL + cdef cydriver.CUarray_format format + cdef cydriver.CUarray hArray = NULL cdef cudaError_t err = cudaSuccess i = 0 memset(rdDst, 0, sizeof(rdDst[0])) if rdSrc[0].resType == cudaResourceType.cudaResourceTypeArray: - rdDst[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY - rdDst[0].res.array.hArray = rdSrc[0].res.array.array + rdDst[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY + rdDst[0].res.array.hArray = rdSrc[0].res.array.array err = getFormat(rdSrc[0].res.array.array, numChannels, &format) if err != cudaSuccess: return err elif rdSrc[0].resType == cudaResourceType.cudaResourceTypeMipmappedArray: - rdDst[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY - rdDst[0].res.mipmap.hMipmappedArray = rdSrc[0].res.mipmap.mipmap - err = ccuda._cuMipmappedArrayGetLevel(&hArray, rdDst[0].res.mipmap.hMipmappedArray, 0) + rdDst[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY + rdDst[0].res.mipmap.hMipmappedArray = rdSrc[0].res.mipmap.mipmap + err = cydriver._cuMipmappedArrayGetLevel(&hArray, rdDst[0].res.mipmap.hMipmappedArray, 0) if err != cudaSuccess: return err err = getFormat(hArray, numChannels, &format) if err != cudaSuccess: return err elif rdSrc[0].resType == cudaResourceType.cudaResourceTypeLinear: - rdDst[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR - rdDst[0].res.linear.devPtr = rdSrc[0].res.linear.devPtr + rdDst[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR + rdDst[0].res.linear.devPtr = rdSrc[0].res.linear.devPtr rdDst[0].res.linear.sizeInBytes = rdSrc[0].res.linear.sizeInBytes - err = getDescInfo(&rdSrc[0].res.linear.desc, &numChannels, &format) + err = getDescInfo(&rdSrc[0].res.linear.desc, &numChannels, &format) if err != cudaSuccess: return err rdDst[0].res.linear.format = format rdDst[0].res.linear.numChannels = numChannels elif rdSrc[0].resType == cudaResourceType.cudaResourceTypePitch2D: - rdDst[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D - rdDst[0].res.pitch2D.devPtr = rdSrc[0].res.pitch2D.devPtr + rdDst[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D + rdDst[0].res.pitch2D.devPtr = rdSrc[0].res.pitch2D.devPtr rdDst[0].res.pitch2D.pitchInBytes = rdSrc[0].res.pitch2D.pitchInBytes rdDst[0].res.pitch2D.width = rdSrc[0].res.pitch2D.width rdDst[0].res.pitch2D.height = rdSrc[0].res.pitch2D.height - err = 
getDescInfo(&rdSrc[0].res.linear.desc, &numChannels, &format) + err = getDescInfo(&rdSrc[0].res.linear.desc, &numChannels, &format) if err != cudaSuccess: return err rdDst[0].res.pitch2D.format = format @@ -1534,11 +1534,11 @@ cdef cudaError_t getDriverResDescFromResDesc(ccuda.CUDA_RESOURCE_DESC *rdDst, co memset(tdDst, 0, sizeof(tdDst[0])) while (i < 3): - tdDst[0].addressMode[i] = tdSrc[0].addressMode[i] + tdDst[0].addressMode[i] = tdSrc[0].addressMode[i] i += 1 - tdDst[0].filterMode = tdSrc[0].filterMode - tdDst[0].mipmapFilterMode = tdSrc[0].mipmapFilterMode + tdDst[0].filterMode = tdSrc[0].filterMode + tdDst[0].mipmapFilterMode = tdSrc[0].mipmapFilterMode tdDst[0].mipmapLevelBias = tdSrc[0].mipmapLevelBias tdDst[0].minMipmapLevelClamp = tdSrc[0].minMipmapLevelClamp tdDst[0].maxMipmapLevelClamp = tdSrc[0].maxMipmapLevelClamp @@ -1549,62 +1549,62 @@ cdef cudaError_t getDriverResDescFromResDesc(ccuda.CUDA_RESOURCE_DESC *rdDst, co i += 1 if tdSrc[0].sRGB: - tdDst[0].flags |= ccuda.CU_TRSF_SRGB + tdDst[0].flags |= cydriver.CU_TRSF_SRGB else: tdDst[0].flags |= 0 if tdSrc[0].normalizedCoords: - tdDst[0].flags |= ccuda.CU_TRSF_NORMALIZED_COORDINATES + tdDst[0].flags |= cydriver.CU_TRSF_NORMALIZED_COORDINATES else: tdDst[0].flags |= 0 if tdSrc[0].disableTrilinearOptimization: - tdDst[0].flags |= ccuda.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION + tdDst[0].flags |= cydriver.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION else: tdDst[0].flags |= 0 if tdSrc[0].seamlessCubemap: - tdDst[0].flags |= ccuda.CU_TRSF_SEAMLESS_CUBEMAP + tdDst[0].flags |= cydriver.CU_TRSF_SEAMLESS_CUBEMAP else: tdDst[0].flags |= 0 - if format in (ccuda.CU_AD_FORMAT_SNORM_INT8X1, - ccuda.CU_AD_FORMAT_SNORM_INT8X2, - ccuda.CU_AD_FORMAT_SNORM_INT8X4, - ccuda.CU_AD_FORMAT_UNORM_INT8X1, - ccuda.CU_AD_FORMAT_UNORM_INT8X2, - ccuda.CU_AD_FORMAT_UNORM_INT8X4, - ccuda.CU_AD_FORMAT_SNORM_INT16X1, - ccuda.CU_AD_FORMAT_SNORM_INT16X2, - ccuda.CU_AD_FORMAT_SNORM_INT16X4, - ccuda.CU_AD_FORMAT_UNORM_INT16X1, - ccuda.CU_AD_FORMAT_UNORM_INT16X2, - ccuda.CU_AD_FORMAT_UNORM_INT16X4, - ccuda.CU_AD_FORMAT_BC1_UNORM, - ccuda.CU_AD_FORMAT_BC1_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC2_UNORM, - ccuda.CU_AD_FORMAT_BC2_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC3_UNORM, - ccuda.CU_AD_FORMAT_BC3_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC4_UNORM, - ccuda.CU_AD_FORMAT_BC4_SNORM, - ccuda.CU_AD_FORMAT_BC5_UNORM, - ccuda.CU_AD_FORMAT_BC5_SNORM, - ccuda.CU_AD_FORMAT_BC7_UNORM, - ccuda.CU_AD_FORMAT_BC7_UNORM_SRGB): + if format in (cydriver.CU_AD_FORMAT_SNORM_INT8X1, + cydriver.CU_AD_FORMAT_SNORM_INT8X2, + cydriver.CU_AD_FORMAT_SNORM_INT8X4, + cydriver.CU_AD_FORMAT_UNORM_INT8X1, + cydriver.CU_AD_FORMAT_UNORM_INT8X2, + cydriver.CU_AD_FORMAT_UNORM_INT8X4, + cydriver.CU_AD_FORMAT_SNORM_INT16X1, + cydriver.CU_AD_FORMAT_SNORM_INT16X2, + cydriver.CU_AD_FORMAT_SNORM_INT16X4, + cydriver.CU_AD_FORMAT_UNORM_INT16X1, + cydriver.CU_AD_FORMAT_UNORM_INT16X2, + cydriver.CU_AD_FORMAT_UNORM_INT16X4, + cydriver.CU_AD_FORMAT_BC1_UNORM, + cydriver.CU_AD_FORMAT_BC1_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC2_UNORM, + cydriver.CU_AD_FORMAT_BC2_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC3_UNORM, + cydriver.CU_AD_FORMAT_BC3_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC4_UNORM, + cydriver.CU_AD_FORMAT_BC4_SNORM, + cydriver.CU_AD_FORMAT_BC5_UNORM, + cydriver.CU_AD_FORMAT_BC5_SNORM, + cydriver.CU_AD_FORMAT_BC7_UNORM, + cydriver.CU_AD_FORMAT_BC7_UNORM_SRGB): if tdSrc[0].readMode != cudaTextureReadMode.cudaReadModeNormalizedFloat: return cudaErrorInvalidNormSetting - elif format in (ccuda.CU_AD_FORMAT_SIGNED_INT8, - 
ccuda.CU_AD_FORMAT_SIGNED_INT16, - ccuda.CU_AD_FORMAT_UNSIGNED_INT8, - ccuda.CU_AD_FORMAT_UNSIGNED_INT16): + elif format in (cydriver.CU_AD_FORMAT_SIGNED_INT8, + cydriver.CU_AD_FORMAT_SIGNED_INT16, + cydriver.CU_AD_FORMAT_UNSIGNED_INT8, + cydriver.CU_AD_FORMAT_UNSIGNED_INT16): if tdSrc[0].readMode == cudaReadModeElementType: if tdSrc[0].filterMode == cudaTextureFilterMode.cudaFilterModeLinear: return cudaErrorInvalidFilterSetting - tdDst[0].flags |= ccuda.CU_TRSF_READ_AS_INTEGER - elif format == ccuda.CU_AD_FORMAT_NV12: + tdDst[0].flags |= cydriver.CU_TRSF_READ_AS_INTEGER + elif format == cydriver.CU_AD_FORMAT_NV12: return cudaErrorInvalidValue - elif format == ccuda.CU_AD_FORMAT_SIGNED_INT32 or format == ccuda.CU_AD_FORMAT_UNSIGNED_INT32: + elif format == cydriver.CU_AD_FORMAT_SIGNED_INT32 or format == cydriver.CU_AD_FORMAT_UNSIGNED_INT32: if tdSrc[0].filterMode == cudaTextureFilterMode.cudaFilterModeLinear: return cudaErrorInvalidFilterSetting if tdSrc[0].readMode == cudaTextureReadMode.cudaReadModeNormalizedFloat: @@ -1616,7 +1616,7 @@ cdef cudaError_t getDriverResDescFromResDesc(ccuda.CUDA_RESOURCE_DESC *rdDst, co if rvdDst and rvdSrc: memset(rvdDst, 0, sizeof(rvdDst[0])) - rvdDst[0].format = rvdSrc[0].format + rvdDst[0].format = rvdSrc[0].format rvdDst[0].width = rvdSrc[0].width rvdDst[0].height = rvdSrc[0].height rvdDst[0].depth = rvdSrc[0].depth @@ -1628,34 +1628,34 @@ cdef cudaError_t getDriverResDescFromResDesc(ccuda.CUDA_RESOURCE_DESC *rdDst, co return cudaSuccess -cdef cudaError_t getResDescFromDriverResDesc(cudaResourceDesc *rdDst, const ccuda.CUDA_RESOURCE_DESC *rdSrc, - cudaTextureDesc *tdDst, const ccuda.CUDA_TEXTURE_DESC *tdSrc, - cudaResourceViewDesc *rvdDst, const ccuda.CUDA_RESOURCE_VIEW_DESC *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t getResDescFromDriverResDesc(cudaResourceDesc *rdDst, const cydriver.CUDA_RESOURCE_DESC *rdSrc, + cudaTextureDesc *tdDst, const cydriver.CUDA_TEXTURE_DESC *tdSrc, + cudaResourceViewDesc *rvdDst, const cydriver.CUDA_RESOURCE_VIEW_DESC *rvdSrc) except ?cudaErrorCallRequiresNewerDriver nogil: cdef int i = 0 cdef int numChannels = 0 - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2 ad - cdef ccuda.CUarray hArray + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2 ad + cdef cydriver.CUarray hArray memset(rdDst, 0, sizeof(rdDst[0])) memset(&ad, 0, sizeof(ad)) memset(&hArray, 0, sizeof(hArray)) - if rdSrc[0].resType == ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY: + if rdSrc[0].resType == cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY: rdDst[0].resType = cudaResourceType.cudaResourceTypeArray rdDst[0].res.array.array = rdSrc[0].res.array.hArray err = getFormat(rdDst[0].res.array.array, numChannels, &ad.Format) if err != cudaSuccess: return err - elif rdSrc[0].resType == ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: + elif rdSrc[0].resType == cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: rdDst[0].resType = cudaResourceType.cudaResourceTypeMipmappedArray rdDst[0].res.mipmap.mipmap = rdSrc[0].res.mipmap.hMipmappedArray - err = ccuda._cuMipmappedArrayGetLevel(&hArray, rdSrc[0].res.mipmap.hMipmappedArray, 0) + err = cydriver._cuMipmappedArrayGetLevel(&hArray, rdSrc[0].res.mipmap.hMipmappedArray, 0) if err != cudaSuccess: return err err = getFormat(hArray, numChannels, &ad.Format) if err != cudaSuccess: return err - elif rdSrc[0].resType == ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR: + elif rdSrc[0].resType == cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR: rdDst[0].resType = 
cudaResourceType.cudaResourceTypeLinear rdDst[0].res.linear.devPtr = rdSrc[0].res.linear.devPtr rdDst[0].res.linear.sizeInBytes = rdSrc[0].res.linear.sizeInBytes @@ -1666,7 +1666,7 @@ cdef cudaError_t getResDescFromDriverResDesc(cudaResourceDesc *rdDst, const ccud &ad) if err != cudaSuccess: return err - elif rdSrc[0].resType == ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D: + elif rdSrc[0].resType == cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D: rdDst[0].resType = cudaResourceType.cudaResourceTypePitch2D rdDst[0].res.pitch2D.devPtr = rdSrc[0].res.pitch2D.devPtr rdDst[0].res.pitch2D.pitchInBytes = rdSrc[0].res.pitch2D.pitchInBytes @@ -1700,57 +1700,57 @@ cdef cudaError_t getResDescFromDriverResDesc(cudaResourceDesc *rdDst, const ccud tdDst[0].borderColor[i] = tdSrc[0].borderColor[i] i += 1 - if tdSrc[0].flags & ccuda.CU_TRSF_SRGB: + if tdSrc[0].flags & cydriver.CU_TRSF_SRGB: tdDst[0].sRGB = 1 else: tdDst[0].sRGB = 0 - if tdSrc[0].flags & ccuda.CU_TRSF_NORMALIZED_COORDINATES: + if tdSrc[0].flags & cydriver.CU_TRSF_NORMALIZED_COORDINATES: tdDst[0].normalizedCoords = 1 else: tdDst[0].normalizedCoords = 0 - if tdSrc[0].flags & ccuda.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION: + if tdSrc[0].flags & cydriver.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION: tdDst[0].disableTrilinearOptimization = 1 else: tdDst[0].disableTrilinearOptimization = 0 - if tdSrc[0].flags & ccuda.CU_TRSF_SEAMLESS_CUBEMAP: + if tdSrc[0].flags & cydriver.CU_TRSF_SEAMLESS_CUBEMAP: tdDst[0].seamlessCubemap |= 1 else: tdDst[0].seamlessCubemap |= 0 - if ad.Format in (ccuda.CU_AD_FORMAT_SNORM_INT8X1, - ccuda.CU_AD_FORMAT_SNORM_INT8X2, - ccuda.CU_AD_FORMAT_SNORM_INT8X4, - ccuda.CU_AD_FORMAT_UNORM_INT8X1, - ccuda.CU_AD_FORMAT_UNORM_INT8X2, - ccuda.CU_AD_FORMAT_UNORM_INT8X4, - ccuda.CU_AD_FORMAT_SNORM_INT16X1, - ccuda.CU_AD_FORMAT_SNORM_INT16X2, - ccuda.CU_AD_FORMAT_SNORM_INT16X4, - ccuda.CU_AD_FORMAT_UNORM_INT16X1, - ccuda.CU_AD_FORMAT_UNORM_INT16X2, - ccuda.CU_AD_FORMAT_UNORM_INT16X4, - ccuda.CU_AD_FORMAT_BC1_UNORM, - ccuda.CU_AD_FORMAT_BC1_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC2_UNORM, - ccuda.CU_AD_FORMAT_BC2_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC3_UNORM, - ccuda.CU_AD_FORMAT_BC3_UNORM_SRGB, - ccuda.CU_AD_FORMAT_BC4_UNORM, - ccuda.CU_AD_FORMAT_BC4_SNORM, - ccuda.CU_AD_FORMAT_BC5_UNORM, - ccuda.CU_AD_FORMAT_BC5_SNORM, - ccuda.CU_AD_FORMAT_BC7_UNORM, - ccuda.CU_AD_FORMAT_BC7_UNORM_SRGB): + if ad.Format in (cydriver.CU_AD_FORMAT_SNORM_INT8X1, + cydriver.CU_AD_FORMAT_SNORM_INT8X2, + cydriver.CU_AD_FORMAT_SNORM_INT8X4, + cydriver.CU_AD_FORMAT_UNORM_INT8X1, + cydriver.CU_AD_FORMAT_UNORM_INT8X2, + cydriver.CU_AD_FORMAT_UNORM_INT8X4, + cydriver.CU_AD_FORMAT_SNORM_INT16X1, + cydriver.CU_AD_FORMAT_SNORM_INT16X2, + cydriver.CU_AD_FORMAT_SNORM_INT16X4, + cydriver.CU_AD_FORMAT_UNORM_INT16X1, + cydriver.CU_AD_FORMAT_UNORM_INT16X2, + cydriver.CU_AD_FORMAT_UNORM_INT16X4, + cydriver.CU_AD_FORMAT_BC1_UNORM, + cydriver.CU_AD_FORMAT_BC1_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC2_UNORM, + cydriver.CU_AD_FORMAT_BC2_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC3_UNORM, + cydriver.CU_AD_FORMAT_BC3_UNORM_SRGB, + cydriver.CU_AD_FORMAT_BC4_UNORM, + cydriver.CU_AD_FORMAT_BC4_SNORM, + cydriver.CU_AD_FORMAT_BC5_UNORM, + cydriver.CU_AD_FORMAT_BC5_SNORM, + cydriver.CU_AD_FORMAT_BC7_UNORM, + cydriver.CU_AD_FORMAT_BC7_UNORM_SRGB): tdDst[0].readMode = cudaTextureReadMode.cudaReadModeNormalizedFloat - elif ad.Format in (ccuda.CU_AD_FORMAT_SIGNED_INT8, - ccuda.CU_AD_FORMAT_SIGNED_INT16, - ccuda.CU_AD_FORMAT_UNSIGNED_INT8, - ccuda.CU_AD_FORMAT_UNSIGNED_INT16): + elif ad.Format 
in (cydriver.CU_AD_FORMAT_SIGNED_INT8, + cydriver.CU_AD_FORMAT_SIGNED_INT16, + cydriver.CU_AD_FORMAT_UNSIGNED_INT8, + cydriver.CU_AD_FORMAT_UNSIGNED_INT16): with gil: - if (tdSrc[0].flags & ccuda.CU_TRSF_READ_AS_INTEGER): + if (tdSrc[0].flags & cydriver.CU_TRSF_READ_AS_INTEGER): tdDst[0].readMode = cudaTextureReadMode.cudaReadModeElementType else: tdDst[0].readMode = cudaTextureReadMode.cudaReadModeNormalizedFloat @@ -1777,9 +1777,9 @@ cdef cudaError_t memsetPtr(char *mem, int c, size_t count, cudaStream_t sid, boo return cudaSuccess if not async: - return ccuda._cuMemsetD8_v2(mem, c, count) + return cydriver._cuMemsetD8_v2(mem, c, count) else: - return ccuda._cuMemsetD8Async(mem, c, count, sid) + return cydriver._cuMemsetD8Async(mem, c, count, sid) cdef cudaError_t memset2DPtr(char *mem, size_t pitch, int c, size_t width, size_t height, cudaStream_t sid, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: @@ -1787,12 +1787,12 @@ cdef cudaError_t memset2DPtr(char *mem, size_t pitch, int c, size_t width, size_ return cudaSuccess if not async: - return ccuda._cuMemsetD2D8_v2(mem, pitch, c, width, height) + return cydriver._cuMemsetD2D8_v2(mem, pitch, c, width, height) else: - return ccuda._cuMemsetD2D8Async(mem, pitch, c, width, height, sid) + return cydriver._cuMemsetD2D8Async(mem, pitch, c, width, height, sid) -cdef cudaError_t copyFromHost(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t count, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t copyFromHost(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t count, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState cdef cudaError_t err = cudaSuccess memset(&arrayState, 0, sizeof(arrayState)) @@ -1800,7 +1800,7 @@ cdef cudaError_t copyFromHost(cudaArray_const_t thisArray, size_t hOffset, size_ if err != cudaSuccess: return err cdef size_t copied = 0 - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST) if (wOffset > 0) and (count >= arrayState.widthInBytes - wOffset): cp.dstArray = arrayState.array @@ -1864,7 +1864,7 @@ cdef cudaError_t copyFromHost(cudaArray_const_t thisArray, size_t hOffset, size_ return cudaSuccess -cdef cudaError_t copyFromDevice(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t srcOffset, size_t count, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t copyFromDevice(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *src, size_t srcOffset, size_t count, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState cdef cudaError_t err = cudaSuccess memset(&arrayState, 0, sizeof(arrayState)) @@ -1872,14 +1872,14 @@ cdef cudaError_t copyFromDevice(ccuda.CUmemorytype type, cudaArray_const_t thisA if err != cudaSuccess: return err cdef size_t copied = 0 - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, type) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY, type) if (wOffset > 0) and (count >= 
arrayState.widthInBytes - wOffset): cp.dstArray = arrayState.array cp.dstXInBytes = wOffset cp.dstY = hOffset - cp.srcDevice = <ccuda.CUdeviceptr>src + cp.srcDevice = <cydriver.CUdeviceptr>src cp.srcPitch = arrayState.widthInBytes cp.srcXInBytes = srcOffset cp.srcY = 0 @@ -1900,7 +1900,7 @@ cdef cudaError_t copyFromDevice(ccuda.CUmemorytype type, cudaArray_const_t thisA cp.dstXInBytes = wOffset cp.dstY = hOffset - cp.srcDevice = <ccuda.CUdeviceptr>(src + copied) + cp.srcDevice = <cydriver.CUdeviceptr>(src + copied) cp.srcPitch = arrayState.widthInBytes cp.srcXInBytes = srcOffset cp.srcY = 0 @@ -1922,7 +1922,7 @@ cdef cudaError_t copyFromDevice(ccuda.CUmemorytype type, cudaArray_const_t thisA cp.dstXInBytes = wOffset cp.dstY = hOffset - cp.srcDevice = <ccuda.CUdeviceptr>(src + copied) + cp.srcDevice = <cydriver.CUdeviceptr>(src + copied) cp.srcPitch = arrayState.widthInBytes cp.srcXInBytes = srcOffset cp.srcY = 0 @@ -1937,7 +1937,7 @@ cdef cudaError_t copyFromDevice(ccuda.CUmemorytype type, cudaArray_const_t thisA return cudaSuccess -cdef cudaError_t copyToHost(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, char *dst, size_t count, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t copyToHost(cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, char *dst, size_t count, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState cdef cudaError_t err = cudaSuccess memset(&arrayState, 0, sizeof(arrayState)) @@ -1945,7 +1945,7 @@ cdef cudaError_t copyToHost(cudaArray_const_t thisArray, size_t hOffset, size_t if err != cudaSuccess: return err cdef size_t copied = 0 - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) if (wOffset > 0) and (count >= arrayState.widthInBytes - wOffset): cp.dstHost = dst @@ -2009,23 +2009,23 @@ cdef cudaError_t copyToHost(cudaArray_const_t thisArray, size_t hOffset, size_t return cudaSuccess -cdef cudaError_t driverMemcpy3DPeer(ccuda.CUDA_MEMCPY3D_PEER *cp, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t driverMemcpy3DPeer(cydriver.CUDA_MEMCPY3D_PEER *cp, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: if async: - return ccuda._cuMemcpy3DPeerAsync(cp, stream) + return cydriver._cuMemcpy3DPeerAsync(cp, stream) else: - return ccuda._cuMemcpy3DPeer(cp) + return cydriver._cuMemcpy3DPeer(cp) -cdef cudaError_t driverMemcpy3D(ccuda.CUDA_MEMCPY3D_v2 *cp, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t driverMemcpy3D(cydriver.CUDA_MEMCPY3D_v2 *cp, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: if async: - return ccuda._cuMemcpy3DAsync_v2(cp, stream) + return cydriver._cuMemcpy3DAsync_v2(cp, stream) else: - return ccuda._cuMemcpy3D_v2(cp) + return cydriver._cuMemcpy3D_v2(cp) cdef cudaError_t memcpy3D(const cudaMemcpy3DParms *p, bool peer, int srcDevice, int dstDevice, cudaStream_t sid, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUDA_MEMCPY3D_v2 cd - cdef ccuda.CUDA_MEMCPY3D_PEER cdPeer + cdef cydriver.CUDA_MEMCPY3D_v2 cd + cdef cydriver.CUDA_MEMCPY3D_PEER cdPeer cdef cudaPythonDevice *srcDev cdef cudaPythonDevice *dstDev @@ -2072,7 +2072,7 @@ cdef cudaError_t memcpy3D(const cudaMemcpy3DParms *p, bool peer, int srcDevice, return 
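copyFromHost, copyFromDevice and copyToHost all decompose a copy of count linear bytes through a pitched array into at most three driver transfers: the tail of a partially filled first row (when wOffset > 0), one transfer covering every whole row, and a trailing partial row. A pure-Python model of just that split arithmetic; the function name is hypothetical:

    def split_array_copy(count: int, width_in_bytes: int, w_offset: int):
        """Return (start, nbytes) segments covering count linear bytes."""
        segments = []
        copied = 0
        if w_offset > 0 and count >= width_in_bytes - w_offset:
            head = width_in_bytes - w_offset       # finish the first, partial row
            segments.append((copied, head))
            copied += head
        if count - copied >= width_in_bytes:
            whole = ((count - copied) // width_in_bytes) * width_in_bytes
            segments.append((copied, whole))       # all full rows in one 3D copy
            copied += whole
        if count - copied > 0:
            segments.append((copied, count - copied))  # trailing partial row
        return segments

    assert split_array_copy(100, 32, 8) == [(0, 24), (24, 64), (88, 12)]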
err -cdef cudaError_t copyToDevice(ccuda.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *dst, size_t dstOffset, size_t count, ccuda.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t copyToDevice(cydriver.CUmemorytype type, cudaArray_const_t thisArray, size_t hOffset, size_t wOffset, const char *dst, size_t dstOffset, size_t count, cydriver.CUstream stream, bool async) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaArrayLocalState arrayState cdef cudaError_t err = cudaSuccess memset(&arrayState, 0, sizeof(arrayState)) @@ -2080,10 +2080,10 @@ cdef cudaError_t copyToDevice(ccuda.CUmemorytype type, cudaArray_const_t thisArr if err != cudaSuccess: return err cdef size_t copied = 0 - cdef ccuda.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(type, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) + cdef cydriver.CUDA_MEMCPY3D_v2 cp = memCopy3DInit(type, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) if (wOffset > 0) and (count >= arrayState.widthInBytes - wOffset): - cp.dstDevice = dst + cp.dstDevice = dst cp.dstPitch = arrayState.widthInBytes cp.dstXInBytes = dstOffset cp.dstY = 0 @@ -2104,7 +2104,7 @@ cdef cudaError_t copyToDevice(ccuda.CUmemorytype type, cudaArray_const_t thisArr return err if (count - copied >= arrayState.widthInBytes): - cp.dstDevice = (dst + copied) + cp.dstDevice = (dst + copied) cp.dstPitch = arrayState.widthInBytes cp.dstXInBytes = dstOffset cp.dstY = 0 @@ -2125,7 +2125,7 @@ cdef cudaError_t copyToDevice(ccuda.CUmemorytype type, cudaArray_const_t thisArr return err if (count - copied > 0): - cp.dstDevice = (dst + copied) + cp.dstDevice = (dst + copied) cp.dstPitch = arrayState.widthInBytes cp.dstXInBytes = dstOffset cp.dstY = 0 @@ -2154,8 +2154,8 @@ cdef cudaError_t copy1DConvertTo3DParams(void* dst, const void* src, size_t coun p[0].kind = kind -cdef void toDriverMemsetNodeParams(const cudaMemsetParams *pRuntimeParams, ccuda.CUDA_MEMSET_NODE_PARAMS *pDriverParams) noexcept nogil: - pDriverParams[0].dst = pRuntimeParams[0].dst +cdef void toDriverMemsetNodeParams(const cudaMemsetParams *pRuntimeParams, cydriver.CUDA_MEMSET_NODE_PARAMS *pDriverParams) noexcept nogil: + pDriverParams[0].dst = pRuntimeParams[0].dst pDriverParams[0].pitch = pRuntimeParams[0].pitch pDriverParams[0].value = pRuntimeParams[0].value pDriverParams[0].elementSize = pRuntimeParams[0].elementSize @@ -2164,33 +2164,33 @@ cdef void toDriverMemsetNodeParams(const cudaMemsetParams *pRuntimeParams, ccuda cdef cudaError_t getElementSize(size_t *elementSize, cudaArray_t array) except ?cudaErrorCallRequiresNewerDriver nogil: - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR driverDesc + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR driverDesc cdef cudaError_t err = cudaSuccess - err = ccuda._cuArray3DGetDescriptor_v2(&driverDesc, array) + err = cydriver._cuArray3DGetDescriptor_v2(&driverDesc, array) if err != cudaSuccess: return err - if (driverDesc.Format == ccuda.CU_AD_FORMAT_FLOAT or - driverDesc.Format == ccuda.CU_AD_FORMAT_UNSIGNED_INT32 or - driverDesc.Format == ccuda.CU_AD_FORMAT_SIGNED_INT32): + if (driverDesc.Format == cydriver.CU_AD_FORMAT_FLOAT or + driverDesc.Format == cydriver.CU_AD_FORMAT_UNSIGNED_INT32 or + driverDesc.Format == cydriver.CU_AD_FORMAT_SIGNED_INT32): elementSize[0] = driverDesc.NumChannels * 4 return cudaSuccess - elif (driverDesc.Format == ccuda.CU_AD_FORMAT_HALF or - driverDesc.Format == ccuda.CU_AD_FORMAT_SIGNED_INT16 or - driverDesc.Format == ccuda.CU_AD_FORMAT_UNSIGNED_INT16): + elif (driverDesc.Format == 
cydriver.CU_AD_FORMAT_HALF or + driverDesc.Format == cydriver.CU_AD_FORMAT_SIGNED_INT16 or + driverDesc.Format == cydriver.CU_AD_FORMAT_UNSIGNED_INT16): elementSize[0] = driverDesc.NumChannels * 2 return cudaSuccess - elif (driverDesc.Format == ccuda.CU_AD_FORMAT_SIGNED_INT8 or - driverDesc.Format == ccuda.CU_AD_FORMAT_UNSIGNED_INT8 or - driverDesc.Format == ccuda.CU_AD_FORMAT_NV12): + elif (driverDesc.Format == cydriver.CU_AD_FORMAT_SIGNED_INT8 or + driverDesc.Format == cydriver.CU_AD_FORMAT_UNSIGNED_INT8 or + driverDesc.Format == cydriver.CU_AD_FORMAT_NV12): elementSize[0] = driverDesc.NumChannels return cudaSuccess return cudaErrorInvalidChannelDescriptor -cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, ccuda.CUDA_MEMCPY3D *cd) except ?cudaErrorCallRequiresNewerDriver nogil: - memset(cd, 0, sizeof(ccuda.CUDA_MEMCPY3D)) - cd[0].dstMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE - cd[0].srcMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE +cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, cydriver.CUDA_MEMCPY3D *cd) except ?cudaErrorCallRequiresNewerDriver nogil: + memset(cd, 0, sizeof(cydriver.CUDA_MEMCPY3D)) + cd[0].dstMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE + cd[0].srcMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE cd[0].WidthInBytes = 0 cd[0].Height = 1 cd[0].Depth = 1 @@ -2201,8 +2201,8 @@ cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, ccuda.CUDA_ cdef cudaExtent srcBlockExtent cdef cudaExtent dstBlockExtent cdef cudaExtent copyBlockExtent - cdef ccuda.CUarray_format srcFmt - cdef ccuda.CUarray_format dstFmt + cdef cydriver.CUarray_format srcFmt + cdef cydriver.CUarray_format dstFmt cdef int numChannels = 0 srcBlockExtent.width = srcBlockExtent.height = srcBlockExtent.depth = 1 dstBlockExtent.width = dstBlockExtent.height = dstBlockExtent.depth = 1 @@ -2212,20 +2212,20 @@ cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, ccuda.CUDA_ return cudaSuccess if p[0].kind == cudaMemcpyHostToHost: - cd[0].srcMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST - cd[0].dstMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST + cd[0].srcMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST + cd[0].dstMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST elif p[0].kind == cudaMemcpyHostToDevice: - cd[0].srcMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST - cd[0].dstMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE + cd[0].srcMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST + cd[0].dstMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE elif p[0].kind == cudaMemcpyDeviceToHost: - cd[0].srcMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE - cd[0].dstMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST + cd[0].srcMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE + cd[0].dstMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST elif p[0].kind == cudaMemcpyDeviceToDevice: - cd[0].srcMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE - cd[0].dstMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE + cd[0].srcMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE + cd[0].dstMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE elif p[0].kind == cudaMemcpyDefault: - cd[0].srcMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED - cd[0].dstMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED + cd[0].srcMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED + 
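getElementSize above boils down to a bytes-per-channel table multiplied by NumChannels, with cudaErrorInvalidChannelDescriptor for anything else. The same logic as a lookup, using strings as stand-ins for the cydriver format enums:

    _BYTES_PER_CHANNEL = {
        "FLOAT": 4, "UNSIGNED_INT32": 4, "SIGNED_INT32": 4,
        "HALF": 2, "SIGNED_INT16": 2, "UNSIGNED_INT16": 2,
        "SIGNED_INT8": 1, "UNSIGNED_INT8": 1, "NV12": 1,
    }

    def element_size(fmt: str, num_channels: int) -> int:
        if fmt not in _BYTES_PER_CHANNEL:
            raise ValueError("cudaErrorInvalidChannelDescriptor")
        return _BYTES_PER_CHANNEL[fmt] * num_channels

    assert element_size("HALF", 4) == 8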
cd[0].dstMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED else: return cudaErrorInvalidMemcpyDirection @@ -2248,10 +2248,10 @@ cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, ccuda.CUDA_ copyBlockExtent = dstBlockExtent if p[0].srcArray: - if NULL != p[0].srcPtr.ptr or ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST == cd[0].srcMemoryType: + if NULL != p[0].srcPtr.ptr or cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST == cd[0].srcMemoryType: return cudaErrorInvalidValue - cd[0].srcMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY - cd[0].srcArray = p[0].srcArray + cd[0].srcMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY + cd[0].srcArray = p[0].srcArray err = getElementSize(&srcElementSize, p[0].srcArray) if err != cudaSuccess: return err @@ -2265,18 +2265,18 @@ cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, ccuda.CUDA_ if p[0].extent.height > adjustedSrcHeight: return cudaErrorInvalidPitchValue - if ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST == cd[0].srcMemoryType: + if cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST == cd[0].srcMemoryType: cd[0].srcHost = p[0].srcPtr.ptr else: - cd[0].srcDevice = (p[0].srcPtr.ptr) + cd[0].srcDevice = (p[0].srcPtr.ptr) cd[0].srcPitch = p[0].srcPtr.pitch cd[0].srcHeight = p[0].srcPtr.ysize if p[0].dstArray: if NULL != p[0].dstPtr.ptr: return cudaErrorInvalidValue - cd[0].dstMemoryType = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY - cd[0].dstArray = p[0].dstArray + cd[0].dstMemoryType = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY + cd[0].dstArray = p[0].dstArray err = getElementSize(&dstElementSize, p[0].dstArray) if err != cudaSuccess: return err @@ -2290,10 +2290,10 @@ cdef cudaError_t toDriverMemCopy3DParams(const cudaMemcpy3DParms *p, ccuda.CUDA_ if p[0].extent.height > adjustedDstHeight: return cudaErrorInvalidPitchValue - if ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST == cd[0].dstMemoryType: + if cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST == cd[0].dstMemoryType: cd[0].dstHost = p[0].dstPtr.ptr else: - cd[0].dstDevice = (p[0].dstPtr.ptr) + cd[0].dstDevice = (p[0].dstPtr.ptr) cd[0].dstPitch = p[0].dstPtr.pitch cd[0].dstHeight = p[0].dstPtr.ysize @@ -2331,8 +2331,8 @@ cdef cudaError_t mallocArray(cudaArray_t *arrayPtr, const cudaChannelFormatDesc if arrayPtr == NULL: return cudaErrorInvalidValue - cdef ccuda.CUarray array = NULL - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2 ad + cdef cydriver.CUarray array = NULL + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2 ad cdef cudaError_t err = cudaSuccess arrayPtr[0] = NULL if (((width == 0)) or @@ -2343,14 +2343,14 @@ cdef cudaError_t mallocArray(cudaArray_t *arrayPtr, const cudaChannelFormatDesc return cudaErrorInvalidValue else: memset(&ad, 0, sizeof(ad)) - err = getDescInfo(desc, &ad.NumChannels, &ad.Format) + err = getDescInfo(desc, &ad.NumChannels, &ad.Format) if err != cudaSuccess: return err ad.Height = height ad.Width = width ad.Depth = (depth - corr2D) ad.Flags = flags - err = ccuda._cuArray3DCreate_v2(&array, &ad) + err = cydriver._cuArray3DCreate_v2(&array, &ad) if err != cudaSuccess: return err @@ -2370,9 +2370,9 @@ cdef cudaError_t memcpy2DToArray(cudaArray_t dst, size_t hOffset, size_t wOffset if kind == cudaMemcpyKind.cudaMemcpyHostToDevice: err = copyFromHost2D(dst, hOffset, wOffset, src, spitch, width, height, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToDevice: - err = copyFromDevice2D(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, dst, hOffset, wOffset, src, 0, spitch, width, height, sid, async) + err = 
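The cudaMemcpyKind branches in toDriverMemCopy3DParams reduce to a lookup from the runtime kind to a (srcMemoryType, dstMemoryType) pair, with cudaMemcpyDefault selecting UNIFIED on both sides and anything else rejected as cudaErrorInvalidMemcpyDirection. A table-driven Python equivalent; mem_types_for and the strings are stand-ins, not real API names:

    _KIND_TO_MEMTYPES = {
        "HostToHost":     ("HOST", "HOST"),
        "HostToDevice":   ("HOST", "DEVICE"),
        "DeviceToHost":   ("DEVICE", "HOST"),
        "DeviceToDevice": ("DEVICE", "DEVICE"),
        "Default":        ("UNIFIED", "UNIFIED"),
    }

    def mem_types_for(kind: str):
        pair = _KIND_TO_MEMTYPES.get(kind)
        if pair is None:
            raise ValueError("cudaErrorInvalidMemcpyDirection")
        return pair  # (srcMemoryType, dstMemoryType)

    assert mem_types_for("HostToDevice") == ("HOST", "DEVICE")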
copyFromDevice2D(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, dst, hOffset, wOffset, src, 0, spitch, width, height, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDefault: - err = copyFromDevice2D(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, dst, hOffset, wOffset, src, 0, spitch, width, height, sid, async) + err = copyFromDevice2D(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, dst, hOffset, wOffset, src, 0, spitch, width, height, sid, async) else: return cudaErrorInvalidMemcpyDirection return err @@ -2389,29 +2389,29 @@ cdef cudaError_t memcpy2DPtr(char *dst, size_t dpitch, const char *src, size_t s return cudaErrorInvalidPitchValue cdef cudaError_t err = cudaSuccess - cdef ccuda.CUDA_MEMCPY2D_v2 cp + cdef cydriver.CUDA_MEMCPY2D_v2 cp memset(&cp, 0, sizeof(cp)) if kind == cudaMemcpyKind.cudaMemcpyHostToHost: - cp = memCopy2DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST) + cp = memCopy2DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST) cp.dstHost = dst cp.srcHost = src elif kind == cudaMemcpyKind.cudaMemcpyDeviceToHost: - cp = memCopy2DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE) + cp = memCopy2DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE) cp.dstHost = dst - cp.srcDevice = src + cp.srcDevice = src elif kind == cudaMemcpyKind.cudaMemcpyHostToDevice: - cp = memCopy2DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST) - cp.dstDevice = dst + cp = memCopy2DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST) + cp.dstDevice = dst cp.srcHost = src elif kind == cudaMemcpyKind.cudaMemcpyDeviceToDevice: - cp = memCopy2DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE) - cp.dstDevice = dst - cp.srcDevice = src + cp = memCopy2DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE) + cp.dstDevice = dst + cp.srcDevice = src elif kind == cudaMemcpyKind.cudaMemcpyDefault: - cp = memCopy2DInit(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED) - cp.dstDevice = dst - cp.srcDevice = src + cp = memCopy2DInit(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED) + cp.dstDevice = dst + cp.srcDevice = src else: err = cudaErrorInvalidMemcpyDirection @@ -2424,9 +2424,9 @@ cdef cudaError_t memcpy2DPtr(char *dst, size_t dpitch, const char *src, size_t s cp.Height = height if async: - err = ccuda._cuMemcpy2DAsync_v2(&cp, sid) + err = cydriver._cuMemcpy2DAsync_v2(&cp, sid) else: - err = ccuda._cuMemcpy2DUnaligned_v2(&cp) + err = cydriver._cuMemcpy2DUnaligned_v2(&cp) return err @@ -2438,13 +2438,13 @@ cdef cudaError_t memcpyDispatch(void *dst, const void *src, size_t size, cudaMem if kind == cudaMemcpyKind.cudaMemcpyHostToHost: return memcpy2DPtr(dst, size, src, size, size, 1, kind, NULL, 0) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToHost: - err = ccuda._cuMemcpyDtoH_v2(dst, src, size) + err = cydriver._cuMemcpyDtoH_v2(dst, src, size) elif kind == cudaMemcpyKind.cudaMemcpyHostToDevice: - err = ccuda._cuMemcpyHtoD_v2(dst, src, size) + err = cydriver._cuMemcpyHtoD_v2(dst, src, size) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToDevice: - err = ccuda._cuMemcpyDtoD_v2(dst, src, size) + err = cydriver._cuMemcpyDtoD_v2(dst, src, size) elif kind 
== cudaMemcpyKind.cudaMemcpyDefault: - err = ccuda._cuMemcpy(dst, src, size) + err = cydriver._cuMemcpy(dst, src, size) else: return cudaErrorInvalidMemcpyDirection @@ -2456,7 +2456,7 @@ cdef cudaError_t mallocHost(size_t size, void **mem, unsigned int flags) except mem[0] = NULL return cudaSuccess else: - return ccuda._cuMemHostAlloc(mem, size, flags) + return cydriver._cuMemHostAlloc(mem, size, flags) cdef cudaError_t mallocPitch(size_t width, size_t height, size_t depth, void **mem, size_t *pitch) except ?cudaErrorCallRequiresNewerDriver nogil: @@ -2468,7 +2468,7 @@ cdef cudaError_t mallocPitch(size_t width, size_t height, size_t depth, void **m mem[0] = NULL pitch[0] = 0 else: - return ccuda._cuMemAllocPitch_v2(mem, pitch, width, height, 4) + return cydriver._cuMemAllocPitch_v2(mem, pitch, width, height, 4) return cudaSuccess @@ -2477,8 +2477,8 @@ cdef cudaError_t mallocMipmappedArray(cudaMipmappedArray_t *mipmappedArray, cons if mipmappedArray == NULL: return cudaErrorInvalidValue - cdef ccuda.CUmipmappedArray mipmap = NULL - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2 ad + cdef cydriver.CUmipmappedArray mipmap = NULL + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2 ad memset(&ad, 0, sizeof(ad)) mipmappedArray[0] = NULL @@ -2496,7 +2496,7 @@ cdef cudaError_t mallocMipmappedArray(cudaMipmappedArray_t *mipmappedArray, cons ad.Width = width ad.Depth = depth ad.Flags = flags - err = ccuda._cuMipmappedArrayCreate(&mipmap, &ad, numLevels) + err = cydriver._cuMipmappedArrayCreate(&mipmap, &ad, numLevels) if err != cudaSuccess: return err mipmappedArray[0] = mipmap @@ -2509,22 +2509,22 @@ cdef cudaError_t memcpyAsyncDispatch(void *dst, const void *src, size_t size, cu elif kind == cudaMemcpyKind.cudaMemcpyHostToHost: return memcpy2DPtr(dst, size, src, size, size, 1, kind, stream, True) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToHost: - return ccuda._cuMemcpyDtoHAsync_v2(dst, src, size, stream) + return cydriver._cuMemcpyDtoHAsync_v2(dst, src, size, stream) elif kind == cudaMemcpyKind.cudaMemcpyHostToDevice: - return ccuda._cuMemcpyHtoDAsync_v2(dst, src, size, stream) + return cydriver._cuMemcpyHtoDAsync_v2(dst, src, size, stream) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToDevice: - return ccuda._cuMemcpyDtoDAsync_v2(dst, src, size, stream) + return cydriver._cuMemcpyDtoDAsync_v2(dst, src, size, stream) elif kind == cudaMemcpyKind.cudaMemcpyDefault: - return ccuda._cuMemcpyAsync(dst, src, size, stream) + return cydriver._cuMemcpyAsync(dst, src, size, stream) return cudaErrorInvalidMemcpyDirection -cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaMemcpy3DParms *p) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t toCudartMemCopy3DParams(const cydriver.CUDA_MEMCPY3D_v2 *cd, cudaMemcpy3DParms *p) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaExtent srcBlockExtent cdef cudaExtent dstBlockExtent cdef cudaExtent copyBlockExtent - cdef ccuda.CUarray_format srcFmt - cdef ccuda.CUarray_format dstFmt + cdef cydriver.CUarray_format srcFmt + cdef cydriver.CUarray_format dstFmt cdef int numChannels = 0 srcBlockExtent.width = srcBlockExtent.height = srcBlockExtent.depth = 1 dstBlockExtent.width = dstBlockExtent.height = dstBlockExtent.depth = 1 @@ -2534,7 +2534,7 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM p[0].srcPtr.xsize = 0 p[0].dstPtr.xsize = 0 - if (cd[0].srcMemoryType == 
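mallocPitch above delegates row alignment to cuMemAllocPitch_v2. As a rough model of what a pitched allocation provides, each row is rounded up to an alignment boundary so every row starts aligned; the 128-byte alignment here is purely illustrative, not a value the driver guarantees:

    def pitched_size(width: int, height: int, alignment: int = 128):
        # Illustrative only: the real pitch is chosen by the driver.
        pitch = -(-width // alignment) * alignment  # ceil(width / alignment) * alignment
        return pitch, pitch * height

    assert pitched_size(100, 4) == (128, 512)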
cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST and cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST): p[0].kind = cudaMemcpyHostToHost p[0].srcPtr.ptr = cd[0].srcHost @@ -2544,26 +2544,26 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM p[0].dstPtr.ptr = cd[0].dstHost p[0].dstPtr.pitch = cd[0].dstPitch p[0].dstPtr.ysize = cd[0].dstHeight - elif (cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST - and (cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE - or cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY)): + elif (cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST + and (cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE + or cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY)): p[0].kind = cudaMemcpyHostToDevice p[0].srcPtr.ptr = cd[0].srcHost p[0].srcPtr.pitch = cd[0].srcPitch p[0].srcPtr.ysize = cd[0].srcHeight - if (cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): + if (cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): p[0].dstArray = cd[0].dstArray else: p[0].dstPtr.ptr = cd[0].dstDevice p[0].dstPtr.pitch = cd[0].dstPitch p[0].dstPtr.ysize = cd[0].dstHeight - elif ((cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE or cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) - and cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST): + elif ((cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE or cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) + and cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST): p[0].kind = cudaMemcpyDeviceToHost - if (cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): + if (cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): p[0].srcArray = cd[0].srcArray else: p[0].srcPtr.ptr = cd[0].srcDevice @@ -2573,24 +2573,24 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM p[0].dstPtr.ptr = cd[0].dstHost p[0].dstPtr.pitch = cd[0].dstPitch p[0].dstPtr.ysize = cd[0].dstHeight - elif ((cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE or cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) - and (cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE or cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY)): + elif ((cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE or cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY) + and (cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE or cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY)): p[0].kind = cudaMemcpyDeviceToDevice - if (cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): + if (cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): p[0].srcArray = cd[0].srcArray else: p[0].srcPtr.ptr = cd[0].srcDevice p[0].srcPtr.pitch = cd[0].srcPitch p[0].srcPtr.ysize = cd[0].srcHeight - if (cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): + if (cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): p[0].dstArray = cd[0].dstArray else: p[0].dstPtr.ptr = cd[0].dstDevice p[0].dstPtr.pitch = cd[0].dstPitch p[0].dstPtr.ysize = cd[0].dstHeight - elif (cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED and cd[0].dstMemoryType == 
ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED): + elif (cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED and cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED): p[0].kind = cudaMemcpyDefault p[0].srcPtr.ptr = cd[0].srcDevice @@ -2600,7 +2600,7 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM p[0].dstPtr.ptr = cd[0].dstDevice p[0].dstPtr.pitch = cd[0].dstPitch p[0].dstPtr.ysize = cd[0].dstHeight - elif (cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED and cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): + elif (cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED and cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): p[0].kind = cudaMemcpyDefault p[0].srcPtr.ptr = cd[0].srcDevice @@ -2608,7 +2608,7 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM p[0].srcPtr.ysize = cd[0].srcHeight p[0].dstArray = cd[0].dstArray - elif (cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY and cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED): + elif (cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY and cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED): p[0].kind = cudaMemcpyDefault p[0].srcArray = cd[0].srcArray @@ -2623,7 +2623,7 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM cdef size_t dstElementSize = 0 cdef cudaError_t err = cudaSuccess - if (cd[0].srcMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): + if (cd[0].srcMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): err = getFormat(cd[0].srcArray, numChannels, &srcFmt) if err != cudaSuccess: return err @@ -2635,7 +2635,7 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM return err copyBlockExtent = srcBlockExtent - if (cd[0].dstMemoryType == ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): + if (cd[0].dstMemoryType == cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY): err = getFormat(cd[0].dstArray, numChannels, &dstFmt) if err != cudaSuccess: return err @@ -2645,7 +2645,7 @@ cdef cudaError_t toCudartMemCopy3DParams(const ccuda.CUDA_MEMCPY3D_v2 *cd, cudaM err = getElementSize(&dstElementSize, cd[0].dstArray) if err != cudaSuccess: return err - if cd[0].srcMemoryType != ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY: + if cd[0].srcMemoryType != cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY: copyBlockExtent = dstBlockExtent if (srcElementSize and dstElementSize and srcElementSize != dstElementSize): @@ -2685,9 +2685,9 @@ cdef cudaError_t memcpy2DFromArray(char *dst, size_t dpitch, cudaArray_const_t s if kind == cudaMemcpyKind.cudaMemcpyDeviceToHost: err = copyToHost2D(src, hOffset, wOffset, dst, dpitch, width, height, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToDevice: - err = copyToDevice2D(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, src, hOffset, wOffset, dst, 0, dpitch, width, height, sid, async) + err = copyToDevice2D(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, src, hOffset, wOffset, dst, 0, dpitch, width, height, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDefault: - err = copyToDevice2D(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, src, hOffset, wOffset, dst, 0, dpitch, width, height, sid, async) + err = copyToDevice2D(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, src, hOffset, wOffset, dst, 0, dpitch, width, height, sid, async) else: return 
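toCudartMemCopy3DParams runs the translation in reverse: a (src, dst) pair of driver memory types collapses back to a single runtime cudaMemcpyKind, with ARRAY treated like DEVICE on either side and any UNIFIED endpoint mapping to cudaMemcpyDefault. A compact sketch with string stand-ins; kind_from_mem_types is a hypothetical name:

    def kind_from_mem_types(src_t: str, dst_t: str) -> str:
        def norm(t: str) -> str:
            # ARRAY counts as DEVICE when deciding the copy direction.
            return "DEVICE" if t == "ARRAY" else t
        src_t, dst_t = norm(src_t), norm(dst_t)
        if "UNIFIED" in (src_t, dst_t):
            return "Default"
        word = {"HOST": "Host", "DEVICE": "Device"}
        return word[src_t] + "To" + word[dst_t]

    assert kind_from_mem_types("ARRAY", "HOST") == "DeviceToHost"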
cudaErrorInvalidMemcpyDirection return err @@ -2741,9 +2741,9 @@ cdef cudaError_t memcpyToArray(cudaArray_t dst, size_t hOffset, size_t wOffset, if kind == cudaMemcpyKind.cudaMemcpyHostToDevice: return copyFromHost(dst, hOffset, wOffset, src, count, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToDevice: - return copyFromDevice(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, dst, hOffset, wOffset, src, 0, count, sid, async) + return copyFromDevice(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, dst, hOffset, wOffset, src, 0, count, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDefault: - return copyFromDevice(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, dst, hOffset, wOffset, src, 0, count, sid, async) + return copyFromDevice(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, dst, hOffset, wOffset, src, 0, count, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyHostToHost or kind == cudaMemcpyKind.cudaMemcpyDeviceToHost: return cudaErrorInvalidMemcpyDirection return cudaSuccess @@ -2758,28 +2758,28 @@ cdef cudaError_t memcpyFromArray(char *dst, cudaArray_const_t src, size_t hOffse if kind == cudaMemcpyKind.cudaMemcpyDeviceToHost: return copyToHost(src, hOffset, wOffset, dst, count, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDeviceToDevice: - return copyToDevice(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, src, hOffset, wOffset, dst, 0, count, sid, async) + return copyToDevice(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE, src, hOffset, wOffset, dst, 0, count, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyDefault: - return copyToDevice(ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, src, hOffset, wOffset, dst, 0, count, sid, async) + return copyToDevice(cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED, src, hOffset, wOffset, dst, 0, count, sid, async) elif kind == cudaMemcpyKind.cudaMemcpyHostToDevice or kind == cudaMemcpyKind.cudaMemcpyHostToHost: return cudaErrorInvalidMemcpyDirection return cudaSuccess -cdef cudaError_t toDriverCudaResourceDesc(ccuda.CUDA_RESOURCE_DESC *_driver_pResDesc, const cudaResourceDesc *pResDesc) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t toDriverCudaResourceDesc(cydriver.CUDA_RESOURCE_DESC *_driver_pResDesc, const cudaResourceDesc *pResDesc) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess cdef int numChannels - cdef ccuda.CUarray_format format + cdef cydriver.CUarray_format format if pResDesc[0].resType == cudaResourceType.cudaResourceTypeArray: - _driver_pResDesc[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY - _driver_pResDesc[0].res.array.hArray = pResDesc[0].res.array.array + _driver_pResDesc[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY + _driver_pResDesc[0].res.array.hArray = pResDesc[0].res.array.array elif pResDesc[0].resType == cudaResourceType.cudaResourceTypeMipmappedArray: - _driver_pResDesc[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY - _driver_pResDesc[0].res.mipmap.hMipmappedArray = pResDesc[0].res.mipmap.mipmap + _driver_pResDesc[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY + _driver_pResDesc[0].res.mipmap.hMipmappedArray = pResDesc[0].res.mipmap.mipmap elif pResDesc[0].resType == cudaResourceType.cudaResourceTypeLinear: - _driver_pResDesc[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR - _driver_pResDesc[0].res.linear.devPtr = pResDesc[0].res.linear.devPtr + _driver_pResDesc[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR + 
_driver_pResDesc[0].res.linear.devPtr = pResDesc[0].res.linear.devPtr _driver_pResDesc[0].res.linear.sizeInBytes = pResDesc[0].res.linear.sizeInBytes err = getDescInfo(&pResDesc[0].res.linear.desc, &numChannels, &format) if err != cudaSuccess: @@ -2788,8 +2788,8 @@ cdef cudaError_t toDriverCudaResourceDesc(ccuda.CUDA_RESOURCE_DESC *_driver_pRes _driver_pResDesc[0].res.linear.format = format _driver_pResDesc[0].res.linear.numChannels = numChannels elif pResDesc[0].resType == cudaResourceType.cudaResourceTypePitch2D: - _driver_pResDesc[0].resType = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D - _driver_pResDesc[0].res.pitch2D.devPtr = pResDesc[0].res.pitch2D.devPtr + _driver_pResDesc[0].resType = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D + _driver_pResDesc[0].res.pitch2D.devPtr = pResDesc[0].res.pitch2D.devPtr _driver_pResDesc[0].res.pitch2D.pitchInBytes = pResDesc[0].res.pitch2D.pitchInBytes _driver_pResDesc[0].res.pitch2D.width = pResDesc[0].res.pitch2D.width _driver_pResDesc[0].res.pitch2D.height = pResDesc[0].res.pitch2D.height @@ -2807,7 +2807,7 @@ cdef cudaError_t toDriverCudaResourceDesc(ccuda.CUDA_RESOURCE_DESC *_driver_pRes return err -cdef cudaError_t getDriverEglFrame(ccuda.CUeglFrame *cuEglFrame, cudaEglFrame eglFrame) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t getDriverEglFrame(cydriver.CUeglFrame *cuEglFrame, cudaEglFrame eglFrame) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess cdef unsigned int i = 0 @@ -2816,7 +2816,7 @@ cdef cudaError_t getDriverEglFrame(ccuda.CUeglFrame *cuEglFrame, cudaEglFrame eg return err for i in range(eglFrame.planeCount): if eglFrame.frameType == cudaEglFrameTypeArray: - cuEglFrame[0].frame.pArray[i] = eglFrame.frame.pArray[i] + cuEglFrame[0].frame.pArray[i] = eglFrame.frame.pArray[i] else: cuEglFrame[0].frame.pPitch[i] = eglFrame.frame.pPitch[i].ptr cuEglFrame[0].width = eglFrame.planeDesc[0].width @@ -2825,240 +2825,240 @@ cdef cudaError_t getDriverEglFrame(ccuda.CUeglFrame *cuEglFrame, cudaEglFrame eg cuEglFrame[0].pitch = eglFrame.planeDesc[0].pitch cuEglFrame[0].planeCount = eglFrame.planeCount if eglFrame.eglColorFormat == cudaEglColorFormatYUV420Planar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYUV420SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYUV422Planar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYUV422SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYUV444Planar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYUV444SemiPlanar: - 
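toDriverCudaResourceDesc above switches on resType and copies the matching union member, renaming runtime fields to their driver counterparts (array becomes hArray, mipmap becomes hMipmappedArray); the linear and pitch2D branches additionally derive format and numChannels via getDescInfo, omitted here. A dict-based sketch of just the union-field selection, with field paths flattened to strings and hypothetical names:

    _RES_FIELD = {
        "Array":          ("res.array.array",    "res.array.hArray"),
        "MipmappedArray": ("res.mipmap.mipmap",  "res.mipmap.hMipmappedArray"),
        "Linear":         ("res.linear.devPtr",  "res.linear.devPtr"),
        "Pitch2D":        ("res.pitch2D.devPtr", "res.pitch2D.devPtr"),
    }

    def to_driver_res_desc(res_type: str, runtime_desc: dict) -> dict:
        if res_type not in _RES_FIELD:
            raise ValueError("cudaErrorInvalidValue")
        runtime_field, driver_field = _RES_FIELD[res_type]
        return {"resType": res_type, driver_field: runtime_desc[runtime_field]}

    assert to_driver_res_desc("Array", {"res.array.array": 42}) == \
        {"resType": "Array", "res.array.hArray": 42}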
cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYUYV422: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_422 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_422 elif eglFrame.eglColorFormat == cudaEglColorFormatUYVY422: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_422 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_422 elif eglFrame.eglColorFormat == cudaEglColorFormatARGB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ARGB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ARGB elif eglFrame.eglColorFormat == cudaEglColorFormatRGBA: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGBA + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGBA elif eglFrame.eglColorFormat == cudaEglColorFormatABGR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ABGR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ABGR elif eglFrame.eglColorFormat == cudaEglColorFormatBGRA: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGRA + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGRA elif eglFrame.eglColorFormat == cudaEglColorFormatL: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_L + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_L elif eglFrame.eglColorFormat == cudaEglColorFormatR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_R + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_R elif eglFrame.eglColorFormat == cudaEglColorFormatA: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_A + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_A elif eglFrame.eglColorFormat == cudaEglColorFormatRG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RG elif eglFrame.eglColorFormat == cudaEglColorFormatAYUV: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV elif eglFrame.eglColorFormat == cudaEglColorFormatYVU444SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYVU422SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR elif 
eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_444SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_420SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatY12V12U12_444SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatY12V12U12_420SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatVYUY_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY_ER elif eglFrame.eglColorFormat == cudaEglColorFormatUYVY_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUYV_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYVYU_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUVA_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA_ER elif eglFrame.eglColorFormat == cudaEglColorFormatAYUV_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUV444Planar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUV422Planar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUV420Planar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUV444SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = 
ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUV422SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUV420SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYVU444Planar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYVU422Planar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420Planar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYVU444SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYVU422SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatBayerRGGB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RGGB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RGGB elif eglFrame.eglColorFormat == cudaEglColorFormatBayerBGGR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BGGR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BGGR elif eglFrame.eglColorFormat == cudaEglColorFormatBayerGRBG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GRBG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GRBG elif eglFrame.eglColorFormat == cudaEglColorFormatBayerGBRG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GBRG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GBRG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer10RGGB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_RGGB + cuEglFrame[0].eglColorFormat = 
cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_RGGB elif eglFrame.eglColorFormat == cudaEglColorFormatBayer10BGGR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_BGGR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_BGGR elif eglFrame.eglColorFormat == cudaEglColorFormatBayer10GRBG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GRBG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GRBG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer10GBRG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GBRG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GBRG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12RGGB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RGGB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RGGB elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12BGGR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BGGR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BGGR elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12GRBG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GRBG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GRBG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12GBRG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GBRG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GBRG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer14RGGB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_RGGB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_RGGB elif eglFrame.eglColorFormat == cudaEglColorFormatBayer14BGGR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_BGGR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_BGGR elif eglFrame.eglColorFormat == cudaEglColorFormatBayer14GRBG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GRBG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GRBG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer14GBRG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GBRG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GBRG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer20RGGB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_RGGB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_RGGB elif eglFrame.eglColorFormat == cudaEglColorFormatBayer20BGGR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_BGGR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_BGGR elif eglFrame.eglColorFormat == cudaEglColorFormatBayer20GRBG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GRBG + cuEglFrame[0].eglColorFormat 
= cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GRBG elif eglFrame.eglColorFormat == cudaEglColorFormatBayer20GBRG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GBRG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GBRG elif eglFrame.eglColorFormat == cudaEglColorFormatBayerIspRGGB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB elif eglFrame.eglColorFormat == cudaEglColorFormatBayerIspBGGR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR elif eglFrame.eglColorFormat == cudaEglColorFormatBayerIspGRBG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG elif eglFrame.eglColorFormat == cudaEglColorFormatBayerIspGBRG: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG elif eglFrame.eglColorFormat == cudaEglColorFormatYVU444Planar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYVU422Planar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420Planar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatBayerBCCR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BCCR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BCCR elif eglFrame.eglColorFormat == cudaEglColorFormatBayerRCCB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RCCB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RCCB elif eglFrame.eglColorFormat == cudaEglColorFormatBayerCRBC: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CRBC + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CRBC elif eglFrame.eglColorFormat == cudaEglColorFormatBayerCBRC: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CBRC + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CBRC elif eglFrame.eglColorFormat == cudaEglColorFormatBayer10CCCC: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_CCCC + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_CCCC elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12BCCR: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BCCR + 
cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BCCR elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12RCCB: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RCCB + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RCCB elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12CRBC: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CRBC + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CRBC elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12CBRC: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CBRC + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CBRC elif eglFrame.eglColorFormat == cudaEglColorFormatBayer12CCCC: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CCCC + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CCCC elif eglFrame.eglColorFormat == cudaEglColorFormatY: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y elif eglFrame.eglColorFormat == cudaEglColorFormatYUV420SemiPlanar_2020: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420SemiPlanar_2020: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 elif eglFrame.eglColorFormat == cudaEglColorFormatYUV420Planar_2020: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420Planar_2020: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 elif eglFrame.eglColorFormat == cudaEglColorFormatYUV420SemiPlanar_709: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420SemiPlanar_709: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 elif eglFrame.eglColorFormat == cudaEglColorFormatYUV420Planar_709: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 elif eglFrame.eglColorFormat == cudaEglColorFormatYVU420Planar_709: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 elif 
eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_420SemiPlanar_709: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_420SemiPlanar_2020: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_422SemiPlanar_2020: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_422SemiPlanar: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_422SemiPlanar_709: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 elif eglFrame.eglColorFormat == cudaEglColorFormatY_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY_709_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_709_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_709_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY10_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY10_709_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_709_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_709_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY12_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY12_709_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_709_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_709_ER elif eglFrame.eglColorFormat == cudaEglColorFormatYUVA: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA elif eglFrame.eglColorFormat == cudaEglColorFormatYVYU: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU elif eglFrame.eglColorFormat == cudaEglColorFormatVYUY: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY + 
cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_420SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_444SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY12V12U12_420SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY12V12U12_444SemiPlanar_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER elif eglFrame.eglColorFormat == cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER: - cuEglFrame[0].eglColorFormat = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER + cuEglFrame[0].eglColorFormat = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER else: return cudaErrorInvalidValue if eglFrame.frameType == cudaEglFrameTypeArray: - cuEglFrame[0].frameType = ccuda.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY + cuEglFrame[0].frameType = cydriver.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY elif eglFrame.frameType == cudaEglFrameTypePitch: - cuEglFrame[0].frameType = ccuda.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_PITCH + cuEglFrame[0].frameType = cydriver.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_PITCH else: return cudaErrorInvalidValue @cython.show_performance_hints(False) -cdef cudaError_t getRuntimeEglFrame(cudaEglFrame *eglFrame, ccuda.CUeglFrame cueglFrame) except ?cudaErrorCallRequiresNewerDriver nogil: +cdef cudaError_t getRuntimeEglFrame(cudaEglFrame *eglFrame, cydriver.CUeglFrame cueglFrame) except ?cudaErrorCallRequiresNewerDriver nogil: cdef cudaError_t err = cudaSuccess cdef unsigned int i - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2 ad + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2 ad cdef cudaPitchedPtr pPtr memset(eglFrame, 0, sizeof(eglFrame[0])) memset(&ad, 0, 
sizeof(ad)) @@ -3080,97 +3080,97 @@ cdef cudaError_t getRuntimeEglFrame(cudaEglFrame *eglFrame, ccuda.CUeglFrame cue eglFrame[0].planeDesc[i].width = cueglFrame.width eglFrame[0].planeDesc[i].height = cueglFrame.height eglFrame[0].planeDesc[i].pitch = cueglFrame.pitch - elif (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709): + elif (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709): eglFrame[0].planeDesc[i].width = (cueglFrame.width / 2) eglFrame[0].planeDesc[i].height = (cueglFrame.height / 2) eglFrame[0].planeDesc[i].pitch = (cueglFrame.pitch / 2) - elif (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER or - 
cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER): + elif (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER): eglFrame[0].planeDesc[i].width = (cueglFrame.width / 2) eglFrame[0].planeDesc[i].height = (cueglFrame.height / 2) eglFrame[0].planeDesc[i].pitch = (cueglFrame.pitch / 2) eglFrame[0].planeDesc[1].channelDesc.y = 8 - if (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER): + if (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 or + 
cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER): eglFrame[0].planeDesc[1].channelDesc.y = 16 - elif (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER): + elif (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER): eglFrame[0].planeDesc[i].height = cueglFrame.height eglFrame[0].planeDesc[i].width = (cueglFrame.width / 2) eglFrame[0].planeDesc[i].pitch = (cueglFrame.pitch / 2) - elif (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709): + elif (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709): eglFrame[0].planeDesc[i].width = (cueglFrame.width / 2) eglFrame[0].planeDesc[i].height = cueglFrame.height eglFrame[0].planeDesc[i].pitch = (cueglFrame.pitch / 2) eglFrame[0].planeDesc[1].channelDesc.y = 8 - if (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709): + if (cueglFrame.eglColorFormat == 
cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709): eglFrame[0].planeDesc[1].channelDesc.y = 16 - elif (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER): + elif (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER): eglFrame[0].planeDesc[i].height = cueglFrame.height eglFrame[0].planeDesc[i].width = cueglFrame.width eglFrame[0].planeDesc[i].pitch = cueglFrame.pitch - elif (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER): + elif (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER): eglFrame[0].planeDesc[i].height = cueglFrame.height 
eglFrame[0].planeDesc[i].width = cueglFrame.width eglFrame[0].planeDesc[i].pitch = cueglFrame.pitch eglFrame[0].planeDesc[1].channelDesc.y = 8 - if (cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER or - cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER): + if (cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER or + cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER): eglFrame[0].planeDesc[1].channelDesc.y = 16 - if cueglFrame.frameType == ccuda.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY: + if cueglFrame.frameType == cydriver.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY: eglFrame[0].frame.pArray[i] = cueglFrame.frame.pArray[i] else: pPtr = make_cudaPitchedPtr(cueglFrame.frame.pPitch[i], eglFrame[0].planeDesc[i].pitch, @@ -3178,249 +3178,249 @@ cdef cudaError_t getRuntimeEglFrame(cudaEglFrame *eglFrame, ccuda.CUeglFrame cue eglFrame[0].frame.pPitch[i] = pPtr eglFrame[0].planeCount = cueglFrame.planeCount - if cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR: + if cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420Planar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV422Planar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV422SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV444Planar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR: + elif cueglFrame.eglColorFormat == 
cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV444SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_422: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_422: eglFrame[0].eglColorFormat = cudaEglColorFormatYUYV422 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_422: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_422: eglFrame[0].eglColorFormat = cudaEglColorFormatUYVY422 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ARGB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ARGB: eglFrame[0].eglColorFormat = cudaEglColorFormatARGB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGBA: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGBA: eglFrame[0].eglColorFormat = cudaEglColorFormatRGBA - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ABGR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ABGR: eglFrame[0].eglColorFormat = cudaEglColorFormatABGR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGRA: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGRA: eglFrame[0].eglColorFormat = cudaEglColorFormatBGRA - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_L: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_L: eglFrame[0].eglColorFormat = cudaEglColorFormatL - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_R: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_R: eglFrame[0].eglColorFormat = cudaEglColorFormatR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_A: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_A: eglFrame[0].eglColorFormat = cudaEglColorFormatA - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RG: eglFrame[0].eglColorFormat = cudaEglColorFormatRG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV: eglFrame[0].eglColorFormat = cudaEglColorFormatAYUV - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU444SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU422SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420SemiPlanar - elif 
cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_444SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_420SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatY12V12U12_444SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatY12V12U12_420SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatVYUY_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatUYVY_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUYV_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYVYU_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUVA_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatAYUV_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV444Planar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV422Planar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420Planar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER: + 
elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV444SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV422SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU444Planar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU422Planar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420Planar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU444SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU422SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RGGB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RGGB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerRGGB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BGGR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BGGR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerBGGR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GRBG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GRBG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerGRBG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GBRG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GBRG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerGBRG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_RGGB: + elif cueglFrame.eglColorFormat == 
cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_RGGB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer10RGGB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_BGGR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_BGGR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer10BGGR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GRBG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GRBG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer10GRBG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GBRG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GBRG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer10GBRG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RGGB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RGGB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12RGGB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BGGR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BGGR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12BGGR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GRBG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GRBG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12GRBG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GBRG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GBRG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12GBRG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_RGGB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_RGGB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer14RGGB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_BGGR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_BGGR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer14BGGR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GRBG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GRBG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer14GRBG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GBRG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GBRG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer14GBRG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_RGGB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_RGGB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer20RGGB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_BGGR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_BGGR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer20BGGR - elif cueglFrame.eglColorFormat == 
ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GRBG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GRBG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer20GRBG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GBRG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GBRG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer20GBRG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerIspRGGB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerIspBGGR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerIspGRBG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerIspGBRG - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU444Planar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU422Planar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420Planar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BCCR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BCCR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerBCCR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RCCB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RCCB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerRCCB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CRBC: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CRBC: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerCRBC - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CBRC: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CBRC: eglFrame[0].eglColorFormat = cudaEglColorFormatBayerCBRC - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_CCCC: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_CCCC: eglFrame[0].eglColorFormat = 
cudaEglColorFormatBayer10CCCC - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BCCR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BCCR: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12BCCR - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RCCB: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RCCB: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12RCCB - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CRBC: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CRBC: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12CRBC - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CBRC: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CBRC: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12CBRC - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CCCC: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CCCC: eglFrame[0].eglColorFormat = cudaEglColorFormatBayer12CCCC - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y: eglFrame[0].eglColorFormat = cudaEglColorFormatY - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420SemiPlanar_2020 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420SemiPlanar_2020 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420Planar_2020 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420Planar_2020 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420SemiPlanar_709 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420SemiPlanar_709 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709: eglFrame[0].eglColorFormat = cudaEglColorFormatYUV420Planar_709 - elif cueglFrame.eglColorFormat == 
ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709: eglFrame[0].eglColorFormat = cudaEglColorFormatYVU420Planar_709 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_420SemiPlanar_709 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_420SemiPlanar_2020 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_422SemiPlanar_2020 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_422SemiPlanar - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_422SemiPlanar_709 - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_709_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_709_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY_709_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY10_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_709_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_709_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY10_709_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY12_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_709_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_709_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY12_709_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA: eglFrame[0].eglColorFormat = cudaEglColorFormatYUVA - elif cueglFrame.eglColorFormat == 
ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU: eglFrame[0].eglColorFormat = cudaEglColorFormatYVYU - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY: eglFrame[0].eglColorFormat = cudaEglColorFormatVYUY - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_420SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_444SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY12V12U12_420SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY12V12U12_444SemiPlanar_ER - elif cueglFrame.eglColorFormat == ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER: + elif cueglFrame.eglColorFormat == cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER: eglFrame[0].eglColorFormat = cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER else: return cudaErrorInvalidValue - if cueglFrame.frameType == ccuda.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY: + if cueglFrame.frameType == cydriver.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY: eglFrame[0].frameType = cudaEglFrameTypeArray - elif cueglFrame.frameType == ccuda.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_PITCH: + elif cueglFrame.frameType == cydriver.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_PITCH: eglFrame[0].frameType = cudaEglFrameTypePitch else: return cudaErrorInvalidValue -cdef cudaError_t toDriverGraphNodeParams(const cudaGraphNodeParams *rtParams, ccuda.CUgraphNodeParams *driverParams) except 
?cudaErrorCallRequiresNewerDriver nogil:
+cdef cudaError_t toDriverGraphNodeParams(const cudaGraphNodeParams *rtParams, cydriver.CUgraphNodeParams *driverParams) except ?cudaErrorCallRequiresNewerDriver nogil:
     cdef cudaError_t err
-    cdef ccuda.CUcontext context
+    cdef cydriver.CUcontext context
     memset(driverParams, 0, sizeof(driverParams[0]))
     if rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeKernel:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_KERNEL
-        err = toDriverKernelNodeParams(&rtParams[0].kernel, &driverParams[0].kernel)
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_KERNEL
+        err = toDriverKernelNodeParams(&rtParams[0].kernel, &driverParams[0].kernel)
         if err != cudaSuccess:
             return err
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeMemcpy:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMCPY
-        err = ccuda._cuCtxGetCurrent(&context)
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMCPY
+        err = cydriver._cuCtxGetCurrent(&context)
         if err != cudaSuccess:
             _setLastError(err)
             return err
@@ -3429,44 +3429,44 @@ cdef cudaError_t toDriverGraphNodeParams(const cudaGraphNodeParams *rtParams, cc
             return err
         driverParams[0].memcpy.copyCtx = context
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeMemset:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMSET
-        err = ccuda._cuCtxGetCurrent(&context)
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMSET
+        err = cydriver._cuCtxGetCurrent(&context)
         if err != cudaSuccess:
             _setLastError(err)
             return err
-        toDriverMemsetNodeParams(&rtParams[0].memset, &driverParams[0].memset)
+        toDriverMemsetNodeParams(&rtParams[0].memset, &driverParams[0].memset)
         driverParams[0].memset.ctx = context
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeHost:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_HOST
-        toDriverHostNodeParams(&rtParams[0].host, &driverParams[0].host)
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_HOST
+        toDriverHostNodeParams(&rtParams[0].host, &driverParams[0].host)
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeGraph:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_GRAPH
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_GRAPH
         driverParams[0].graph.graph = rtParams[0].graph.graph
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeEmpty:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EMPTY
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EMPTY
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeWaitEvent:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_WAIT_EVENT
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_WAIT_EVENT
         driverParams[0].eventWait.event = rtParams[0].eventWait.event
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeEventRecord:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EVENT_RECORD
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EVENT_RECORD
         driverParams[0].eventRecord.event = rtParams[0].eventRecord.event
     elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreSignal:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL
-        driverParams[0].extSemSignal = ((&rtParams[0].extSemSignal))[0]
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL
+        driverParams[0].extSemSignal = ((&rtParams[0].extSemSignal))[0]
    elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreWait:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT
-        driverParams[0].extSemWait = (&rtParams[0].extSemWait)[0]
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT
+        driverParams[0].extSemWait = (&rtParams[0].extSemWait)[0]
    elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeMemAlloc:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_ALLOC
-        driverParams[0].alloc = (&rtParams[0].alloc)[0]
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_ALLOC
+        driverParams[0].alloc = (&rtParams[0].alloc)[0]
    elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeMemFree:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_FREE
-        driverParams[0].free.dptr = rtParams[0].free.dptr
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_FREE
+        driverParams[0].free.dptr = rtParams[0].free.dptr
    elif rtParams[0].type == cudaGraphNodeType.cudaGraphNodeTypeConditional:
-        driverParams[0].type = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_CONDITIONAL
+        driverParams[0].type = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_CONDITIONAL
         # RT params mirror the driver params except the RT struct lacks the ctx at the end.
         memcpy(&driverParams[0].conditional, &rtParams[0].conditional, sizeof(rtParams[0].conditional))
-        err = ccuda._cuCtxGetCurrent(&context)
+        err = cydriver._cuCtxGetCurrent(&context)
         if err != cudaSuccess:
             _setLastError(err)
             return err
@@ -3476,21 +3476,21 @@ cdef cudaError_t toDriverGraphNodeParams(const cudaGraphNodeParams *rtParams, cc
     return cudaSuccess

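The hunk above is a pure rename; the logic it touches is a one-to-one translation table from runtime graph-node types to driver graph-node types. For orientation, the same table in plain Python under the new module layout could look as follows -- a sketch only, assuming the Python-level enums in cuda.bindings.driver and cuda.bindings.runtime mirror the cydriver spellings visible in the diff:

# Sketch (not part of the patch): the elif chain in toDriverGraphNodeParams
# rendered as a lookup table. Enum spellings are assumptions based on the
# CU_GRAPH_NODE_TYPE_* / cudaGraphNodeType* names shown above.
from cuda.bindings import driver, runtime

GRAPH_NODE_TYPE_MAP = {
    runtime.cudaGraphNodeType.cudaGraphNodeTypeKernel: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_KERNEL,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeMemcpy: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMCPY,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeMemset: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMSET,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeHost: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_HOST,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeGraph: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_GRAPH,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeEmpty: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EMPTY,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeWaitEvent: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_WAIT_EVENT,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeEventRecord: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeMemAlloc: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_ALLOC,
    runtime.cudaGraphNodeType.cudaGraphNodeTypeMemFree: driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_FREE,
}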
-cdef void toCudartGraphNodeOutParams(const ccuda.CUgraphNodeParams *driverParams, cudaGraphNodeParams *rtParams) noexcept nogil:
-    if driverParams[0].type == ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_ALLOC:
+cdef void toCudartGraphNodeOutParams(const cydriver.CUgraphNodeParams *driverParams, cudaGraphNodeParams *rtParams) noexcept nogil:
+    if driverParams[0].type == cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_ALLOC:
         rtParams[0].alloc.dptr = driverParams[0].alloc.dptr
-    elif driverParams[0].type == ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_CONDITIONAL:
+    elif driverParams[0].type == cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_CONDITIONAL:
         rtParams[0].conditional.phGraph_out = driverParams[0].conditional.phGraph_out

-cdef cudaError_t toDriverKernelNodeParams(const cudaKernelNodeParams nodeParams[0], ccuda.CUDA_KERNEL_NODE_PARAMS *driverNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil:
+cdef cudaError_t toDriverKernelNodeParams(const cudaKernelNodeParams nodeParams[0], cydriver.CUDA_KERNEL_NODE_PARAMS *driverNodeParams) except ?cudaErrorCallRequiresNewerDriver nogil:
     cdef cudaError_t err = cudaSuccess
-    cdef ccuda.CUcontext context
-    err = ccuda._cuCtxGetCurrent(&context)
+    cdef cydriver.CUcontext context
+    err = cydriver._cuCtxGetCurrent(&context)
     if err != cudaSuccess:
         _setLastError(err)
         return err
-    driverNodeParams[0].func = nodeParams[0].func
+    driverNodeParams[0].func = nodeParams[0].func
     driverNodeParams[0].kern = NULL
     driverNodeParams[0].ctx = context
     driverNodeParams[0].gridDimX = nodeParams[0].gridDim.x
@@ -3505,7 +3505,7 @@ cdef cudaError_t toDriverKernelNodeParams(const cudaKernelNodeParams nodeParams[
         return err

-cdef void toDriverHostNodeParams(const cudaHostNodeParams *pRuntimeNodeParams, ccuda.CUDA_HOST_NODE_PARAMS *pDriverNodeParams) noexcept nogil:
+cdef void toDriverHostNodeParams(const cudaHostNodeParams *pRuntimeNodeParams, cydriver.CUDA_HOST_NODE_PARAMS *pDriverNodeParams) noexcept nogil:
     pDriverNodeParams[0].fn = pRuntimeNodeParams[0].fn
     pDriverNodeParams[0].userData = pRuntimeNodeParams[0].userData

@@ -3527,7 +3527,7 @@ cdef cudaError_t DeviceRegisterAsyncNotificationCommon(int device, cudaAsyncCall
     cbData.callback = callbackFunc
     cbData.userData = userData

-    err = ccuda._cuDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback)
+    err = cydriver._cuDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback)

     if err != cudaSuccess:
         free(cbData)
@@ -3537,7 +3537,7 @@ cdef cudaError_t DeviceUnregisterAsyncNotificationCommon(int device, cudaAsyncCall
     cdef cudaError_t err = cudaSuccess
-    err = ccuda._cuDeviceUnregisterAsyncNotification(device, callback)
+    err = cydriver._cuDeviceUnregisterAsyncNotification(device, callback)
     if err != cudaSuccess:
         _setLastError(err)
         return err
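DeviceRegisterAsyncNotificationCommon above also shows the trampoline pattern the shim relies on: the driver is always handed the single cudaAsyncNotificationCallbackWrapper plus a heap-allocated record carrying the user's callback and userData, and that record is freed again if registration fails. A minimal Python rendering of the pattern, with illustrative names only (not the bindings' API):

# Sketch of the callback-trampoline pattern used above; names are illustrative.
class _AsyncCallbackData:
    """Carries the user's callback and opaque userData across the C boundary."""
    def __init__(self, callback, user_data):
        self.callback = callback
        self.user_data = user_data

def _notification_trampoline(info, cb_data, handle):
    # The driver only ever invokes this one wrapper; it forwards the
    # notification to whatever callable the user registered.
    cb_data.callback(info, cb_data.user_data, handle)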
diff --git a/cuda/_lib/dlfcn.pxd b/cuda/cuda/bindings/_lib/dlfcn.pxd
similarity index 100%
rename from cuda/_lib/dlfcn.pxd
rename to cuda/cuda/bindings/_lib/dlfcn.pxd
diff --git a/cuda/_lib/param_packer.cpp b/cuda/cuda/bindings/_lib/param_packer.cpp
similarity index 100%
rename from cuda/_lib/param_packer.cpp
rename to cuda/cuda/bindings/_lib/param_packer.cpp
diff --git a/cuda/_lib/param_packer.h b/cuda/cuda/bindings/_lib/param_packer.h
similarity index 100%
rename from cuda/_lib/param_packer.h
rename to cuda/cuda/bindings/_lib/param_packer.h
diff --git a/cuda/_lib/param_packer.pxd b/cuda/cuda/bindings/_lib/param_packer.pxd
similarity index 100%
rename from cuda/_lib/param_packer.pxd
rename to cuda/cuda/bindings/_lib/param_packer.pxd
diff --git a/cuda/_lib/utils.pxd.in b/cuda/cuda/bindings/_lib/utils.pxd.in
similarity index 74%
rename from cuda/_lib/utils.pxd.in
rename to cuda/cuda/bindings/_lib/utils.pxd.in
index 229f6be9..e2022a36 100644
--- a/cuda/_lib/utils.pxd.in
+++ b/cuda/cuda/bindings/_lib/utils.pxd.in
@@ -5,8 +5,8 @@
 # this software. Any use, reproduction, disclosure, or distribution of
 # this software and related documentation outside the terms of the EULA
 # is strictly prohibited.
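The utils.pxd.in hunk below is the template for the rest of the rename: cimport cuda.ccuda as ccuda becomes cimport cuda.bindings.cydriver as cydriver, and cimport cuda.cuda as cuda becomes cimport cuda.bindings.driver as driver. At the Python level the same move is just an import-path change, sketched here with driver.CUdeviceptr (which appears in the hunks below) as the example object:

# Before this patch:
#   import cuda.cuda as cuda
#   devptr = cuda.CUdeviceptr()
# After this patch:
import cuda.bindings.driver as driver

devptr = driver.CUdeviceptr()  # same class, new module path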
-cimport cuda.cuda as cuda -cimport cuda.ccuda as ccuda +cimport cuda.bindings.driver as driver +cimport cuda.bindings.cydriver as cydriver from libcpp.vector cimport vector cdef class HelperKernelParams: @@ -25,18 +25,18 @@ cdef class HelperInputVoidPtr: cdef class HelperCUmemPool_attribute: cdef void* _cptr - cdef ccuda.CUmemPool_attribute_enum _attr + cdef cydriver.CUmemPool_attribute_enum _attr cdef bint _is_getter # Return values cdef int _int_val - cdef cuda.cuuint64_t _cuuint64_t_val + cdef driver.cuuint64_t _cuuint64_t_val {{endif}} {{if 'CUmem_range_attribute_enum' in found_types}} cdef class HelperCUmem_range_attribute: cdef void* _cptr - cdef ccuda.CUmem_range_attribute_enum _attr + cdef cydriver.CUmem_range_attribute_enum _attr cdef size_t _data_size # Return values @@ -47,44 +47,44 @@ cdef class HelperCUmem_range_attribute: cdef class HelperCUpointer_attribute: cdef void* _cptr - cdef ccuda.CUpointer_attribute_enum _attr + cdef cydriver.CUpointer_attribute_enum _attr cdef bint _is_getter # Return values - cdef cuda.CUcontext _ctx + cdef driver.CUcontext _ctx cdef unsigned int _uint - cdef cuda.CUdeviceptr _devptr + cdef driver.CUdeviceptr _devptr cdef void** _void - cdef cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS _token + cdef driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS _token cdef bint _bool cdef unsigned long long _ull cdef size_t _size - cdef cuda.CUmemoryPool _mempool + cdef driver.CUmemoryPool _mempool {{endif}} {{if 'CUgraphMem_attribute_enum' in found_types}} cdef class HelperCUgraphMem_attribute: cdef void* _cptr - cdef ccuda.CUgraphMem_attribute_enum _attr + cdef cydriver.CUgraphMem_attribute_enum _attr cdef bint _is_getter # Return values - cdef cuda.cuuint64_t _cuuint64_t_val + cdef driver.cuuint64_t _cuuint64_t_val {{endif}} {{if 'CUjit_option_enum' in found_types}} cdef class HelperCUjit_option: cdef void* _cptr - cdef ccuda.CUjit_option_enum _attr + cdef cydriver.CUjit_option_enum _attr # Return values cdef unsigned int _uint cdef float _float cdef char* _charstar - cdef ccuda.CUjit_target_enum _target - cdef ccuda.CUjit_fallback_enum _fallback + cdef cydriver.CUjit_target_enum _target + cdef cydriver.CUjit_fallback_enum _fallback cdef int _int - cdef ccuda.CUjit_cacheMode_enum _cacheMode + cdef cydriver.CUjit_cacheMode_enum _cacheMode cdef vector[char*] _charstarstar # list of names cdef InputVoidPtrPtrHelper _voidstarstar # list of addresses {{endif}} @@ -92,7 +92,7 @@ cdef class HelperCUjit_option: cdef class HelperCUlibraryOption: cdef void* _cptr - cdef ccuda.CUlibraryOption_enum _attr + cdef cydriver.CUlibraryOption_enum _attr # Return values cdef unsigned int _uint @@ -101,13 +101,13 @@ cdef class HelperCUlibraryOption: cdef class HelperCUmemAllocationHandleType: cdef void* _cptr - cdef ccuda.CUmemAllocationHandleType_enum _type + cdef cydriver.CUmemAllocationHandleType_enum _type # Return values cdef int _int cdef void* _handle cdef unsigned int _d3dkmt_handle - cdef cuda.CUmemFabricHandle _mem_fabric_handle + cdef driver.CUmemFabricHandle _mem_fabric_handle {{endif}} cdef class InputVoidPtrPtrHelper: @@ -117,7 +117,7 @@ cdef class InputVoidPtrPtrHelper: cdef class HelperCUcoredumpSettings: cdef void* _cptr - cdef ccuda.CUcoredumpSettings_enum _attrib + cdef cydriver.CUcoredumpSettings_enum _attrib cdef bint _is_getter cdef size_t _size diff --git a/cuda/_lib/utils.pyx.in b/cuda/cuda/bindings/_lib/utils.pyx.in similarity index 69% rename from cuda/_lib/utils.pyx.in rename to cuda/cuda/bindings/_lib/utils.pyx.in index 6f61f90b..15bafa99 100644 --- 
a/cuda/_lib/utils.pyx.in +++ b/cuda/cuda/bindings/_lib/utils.pyx.in @@ -13,9 +13,9 @@ from libc.string cimport memcpy from enum import Enum from typing import List, Tuple import ctypes -cimport cuda.ccuda as ccuda -import cuda.cuda as cuda -cimport cuda._lib.param_packer as param_packer +cimport cuda.bindings.cydriver as cydriver +import cuda.bindings.driver as driver +cimport cuda.bindings._lib.param_packer as param_packer ctypedef unsigned long long void_ptr @@ -142,7 +142,7 @@ cdef class HelperInputVoidPtr: elif isinstance(ptr, (int)): # Easy run, user gave us an already configured void** address self._cptr = ptr - elif isinstance(ptr, (cuda.CUdeviceptr)): + elif isinstance(ptr, (driver.CUdeviceptr)): self._cptr = int(ptr) elif PyObject_CheckBuffer(ptr): # Easy run, get address from Python Buffer Protocol @@ -168,18 +168,18 @@ cdef class HelperCUmemPool_attribute: def __cinit__(self, attr, init_value, is_getter=False): self._is_getter = is_getter self._attr = attr.value - if self._attr in ({{if 'CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES,{{endif}} - {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC,{{endif}} - {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES,{{endif}}): + if self._attr in ({{if 'CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES,{{endif}} + {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC,{{endif}} + {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES,{{endif}}): self._int_val = init_value self._cptr = &self._int_val - elif self._attr in ({{if 'CU_MEMPOOL_ATTR_RELEASE_THRESHOLD'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,{{endif}} - {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT,{{endif}} - {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH,{{endif}} - {{if 'CU_MEMPOOL_ATTR_USED_MEM_CURRENT'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_CURRENT,{{endif}} - {{if 'CU_MEMPOOL_ATTR_USED_MEM_HIGH'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_HIGH,{{endif}}): + elif self._attr in ({{if 'CU_MEMPOOL_ATTR_RELEASE_THRESHOLD'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,{{endif}} + {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT,{{endif}} + {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH,{{endif}} + {{if 'CU_MEMPOOL_ATTR_USED_MEM_CURRENT'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_CURRENT,{{endif}} + {{if 'CU_MEMPOOL_ATTR_USED_MEM_HIGH'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_HIGH,{{endif}}): if self._is_getter: - self._cuuint64_t_val = cuda.cuuint64_t() + self._cuuint64_t_val = driver.cuuint64_t() self._cptr = self._cuuint64_t_val.getPtr() else: self._cptr = init_value.getPtr() @@ -195,15 +195,15 @@ cdef class HelperCUmemPool_attribute: def pyObj(self): assert(self._is_getter == True) - if self._attr in ({{if 
'CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES,{{endif}} - {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC,{{endif}} - {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES,{{endif}}): + if self._attr in ({{if 'CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES,{{endif}} + {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC,{{endif}} + {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES,{{endif}}): return self._int_val - elif self._attr in ({{if 'CU_MEMPOOL_ATTR_RELEASE_THRESHOLD'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,{{endif}} - {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT,{{endif}} - {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH,{{endif}} - {{if 'CU_MEMPOOL_ATTR_USED_MEM_CURRENT'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_CURRENT,{{endif}} - {{if 'CU_MEMPOOL_ATTR_USED_MEM_HIGH'}}ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_HIGH,{{endif}}): + elif self._attr in ({{if 'CU_MEMPOOL_ATTR_RELEASE_THRESHOLD'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,{{endif}} + {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT,{{endif}} + {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH,{{endif}} + {{if 'CU_MEMPOOL_ATTR_USED_MEM_CURRENT'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_CURRENT,{{endif}} + {{if 'CU_MEMPOOL_ATTR_USED_MEM_HIGH'}}cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_HIGH,{{endif}}): return self._cuuint64_t_val else: raise TypeError('Unsupported attribute value: {}'.format(self._attr)) @@ -214,18 +214,18 @@ cdef class HelperCUmem_range_attribute: def __cinit__(self, attr, data_size): self._data_size = data_size self._attr = attr.value - if self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY,{{endif}} - {{if 'CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION,{{endif}} - {{if 'CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION,{{endif}}): + if self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY,{{endif}} + {{if 'CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION,{{endif}} + {{if 'CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION,{{endif}}): self._cptr = &self._int_val - elif self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY,{{endif}}): + elif self._attr in ({{if 
'CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY,{{endif}}): self._cptr = callocWrapper(1, self._data_size) self._int_val_list = self._cptr else: raise TypeError('Unsupported attribute: {}'.format(attr.name)) def __dealloc__(self): - if self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY,{{endif}}): + if self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY,{{endif}}): free(self._cptr) @property @@ -233,11 +233,11 @@ cdef class HelperCUmem_range_attribute: return self._cptr def pyObj(self): - if self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY,{{endif}} - {{if 'CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION,{{endif}} - {{if 'CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION,{{endif}}): + if self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY,{{endif}} + {{if 'CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION,{{endif}} + {{if 'CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION,{{endif}}): return self._int_val - elif self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY'}}ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY,{{endif}}): + elif self._attr in ({{if 'CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY'}}cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY,{{endif}}): return [self._int_val_list[idx] for idx in range(int(self._data_size/4))] else: raise TypeError('Unsupported attribute value: {}'.format(self._attr)) @@ -248,50 +248,50 @@ cdef class HelperCUpointer_attribute: def __cinit__(self, attr, init_value, is_getter=False): self._is_getter = is_getter self._attr = attr.value - if self._attr in ({{if 'CU_POINTER_ATTRIBUTE_CONTEXT'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT,{{endif}}): + if self._attr in ({{if 'CU_POINTER_ATTRIBUTE_CONTEXT'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT,{{endif}}): if self._is_getter: - self._ctx = cuda.CUcontext() + self._ctx = driver.CUcontext() self._cptr = self._ctx.getPtr() else: self._cptr = init_value.getPtr() - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMORY_TYPE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_ACCESS_FLAGS'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMORY_TYPE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL,{{endif}} + {{if 
'CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_ACCESS_FLAGS'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS,{{endif}}): self._uint = init_value self._cptr = &self._uint - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_DEVICE_POINTER'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_RANGE_START_ADDR'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_DEVICE_POINTER'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_RANGE_START_ADDR'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR,{{endif}}): if self._is_getter: - self._devptr = cuda.CUdeviceptr() + self._devptr = driver.CUdeviceptr() self._cptr = self._devptr.getPtr() else: self._cptr = init_value.getPtr() - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_HOST_POINTER'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_HOST_POINTER'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER,{{endif}}): self._void = init_value self._cptr = &self._void - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_P2P_TOKENS'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_P2P_TOKENS,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_P2P_TOKENS'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_P2P_TOKENS,{{endif}}): if self._is_getter: - self._token = cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS() + self._token = driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS() self._cptr = self._token.getPtr() else: self._cptr = init_value.getPtr() - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_SYNC_MEMOPS'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_IS_MANAGED'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_MAPPED'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPED,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_SYNC_MEMOPS'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_IS_MANAGED'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_MAPPED'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPED,{{endif}}): self._bool = init_value self._cptr = &self._bool - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_BUFFER_ID'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_BUFFER_ID,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_BUFFER_ID'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_BUFFER_ID,{{endif}}): self._ull = init_value self._cptr = &self._ull - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_RANGE_SIZE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_SIZE,{{endif}}): + elif self._attr in ({{if 
'CU_POINTER_ATTRIBUTE_RANGE_SIZE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_SIZE,{{endif}}): self._size = init_value self._cptr = &self._size - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE,{{endif}}): if self._is_getter: - self._mempool = cuda.CUmemoryPool() + self._mempool = driver.CUmemoryPool() self._cptr = self._mempool.getPtr() else: self._cptr = init_value.getPtr() @@ -307,31 +307,31 @@ cdef class HelperCUpointer_attribute: def pyObj(self): assert(self._is_getter == True) - if self._attr in ({{if 'CU_POINTER_ATTRIBUTE_CONTEXT'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT,{{endif}}): + if self._attr in ({{if 'CU_POINTER_ATTRIBUTE_CONTEXT'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT,{{endif}}): return self._ctx - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMORY_TYPE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_ACCESS_FLAGS'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMORY_TYPE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_ACCESS_FLAGS'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS,{{endif}}): return self._uint - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_DEVICE_POINTER'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_RANGE_START_ADDR'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_DEVICE_POINTER'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_RANGE_START_ADDR'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR,{{endif}}): return self._devptr - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_HOST_POINTER'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_HOST_POINTER'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER,{{endif}}): return self._void - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_P2P_TOKENS'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_P2P_TOKENS,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_P2P_TOKENS'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_P2P_TOKENS,{{endif}}): return self._token - elif 
self._attr in ({{if 'CU_POINTER_ATTRIBUTE_SYNC_MEMOPS'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_IS_MANAGED'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE,{{endif}} - {{if 'CU_POINTER_ATTRIBUTE_MAPPED'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPED,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_SYNC_MEMOPS'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_IS_MANAGED'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE,{{endif}} + {{if 'CU_POINTER_ATTRIBUTE_MAPPED'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPED,{{endif}}): return self._bool - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_BUFFER_ID'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_BUFFER_ID,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_BUFFER_ID'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_BUFFER_ID,{{endif}}): return self._ull - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_RANGE_SIZE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_SIZE,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_RANGE_SIZE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_SIZE,{{endif}}): return self._size - elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE'}}ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE,{{endif}}): + elif self._attr in ({{if 'CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE'}}cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE,{{endif}}): return self._mempool else: raise TypeError('Unsupported attribute value: {}'.format(self._attr)) @@ -342,12 +342,12 @@ cdef class HelperCUgraphMem_attribute: def __cinit__(self, attr, init_value, is_getter=False): self._is_getter = is_getter self._attr = attr.value - if self._attr in ({{if 'CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT,{{endif}} - {{if 'CU_GRAPH_MEM_ATTR_USED_MEM_HIGH' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH,{{endif}} - {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT,{{endif}} - {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH,{{endif}}): + if self._attr in ({{if 'CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT,{{endif}} + {{if 'CU_GRAPH_MEM_ATTR_USED_MEM_HIGH' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH,{{endif}} + {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT,{{endif}} + {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH,{{endif}}): if self._is_getter: - self._cuuint64_t_val = cuda.cuuint64_t() + self._cuuint64_t_val = driver.cuuint64_t() self._cptr = self._cuuint64_t_val.getPtr() else: self._cptr = init_value.getPtr() @@ -363,10 
+363,10 @@ cdef class HelperCUgraphMem_attribute: def pyObj(self): assert(self._is_getter == True) - if self._attr in ({{if 'CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT,{{endif}} - {{if 'CU_GRAPH_MEM_ATTR_USED_MEM_HIGH' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH,{{endif}} - {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT,{{endif}} - {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH' in found_values}}ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH,{{endif}}): + if self._attr in ({{if 'CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT,{{endif}} + {{if 'CU_GRAPH_MEM_ATTR_USED_MEM_HIGH' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH,{{endif}} + {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT,{{endif}} + {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH' in found_values}}cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH,{{endif}}): return self._cuuint64_t_val else: raise TypeError('Unsupported attribute value: {}'.format(self._attr)) @@ -376,51 +376,51 @@ cdef class HelperCUgraphMem_attribute: cdef class HelperCUjit_option: def __cinit__(self, attr, init_value): self._attr = attr.value - if self._attr in ({{if 'CU_JIT_MAX_REGISTERS' in found_values}}ccuda.CUjit_option_enum.CU_JIT_MAX_REGISTERS,{{endif}} - {{if 'CU_JIT_THREADS_PER_BLOCK' in found_values}}ccuda.CUjit_option_enum.CU_JIT_THREADS_PER_BLOCK,{{endif}} - {{if 'CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES' in found_values}}ccuda.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES,{{endif}} - {{if 'CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES' in found_values}}ccuda.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,{{endif}} - {{if 'CU_JIT_OPTIMIZATION_LEVEL' in found_values}}ccuda.CUjit_option_enum.CU_JIT_OPTIMIZATION_LEVEL,{{endif}} - {{if 'CU_JIT_GLOBAL_SYMBOL_COUNT' in found_values}}ccuda.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_COUNT,{{endif}} - {{if 'CU_JIT_TARGET_FROM_CUCONTEXT' in found_values}}ccuda.CUjit_option_enum.CU_JIT_TARGET_FROM_CUCONTEXT,{{endif}} - {{if 'CU_JIT_REFERENCED_KERNEL_COUNT' in found_values}}ccuda.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_COUNT,{{endif}} - {{if 'CU_JIT_REFERENCED_VARIABLE_COUNT' in found_values}}ccuda.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_COUNT,{{endif}} - {{if 'CU_JIT_MIN_CTA_PER_SM' in found_values}}ccuda.CUjit_option_enum.CU_JIT_MIN_CTA_PER_SM,{{endif}}): + if self._attr in ({{if 'CU_JIT_MAX_REGISTERS' in found_values}}cydriver.CUjit_option_enum.CU_JIT_MAX_REGISTERS,{{endif}} + {{if 'CU_JIT_THREADS_PER_BLOCK' in found_values}}cydriver.CUjit_option_enum.CU_JIT_THREADS_PER_BLOCK,{{endif}} + {{if 'CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES' in found_values}}cydriver.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES,{{endif}} + {{if 'CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES' in found_values}}cydriver.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,{{endif}} + {{if 'CU_JIT_OPTIMIZATION_LEVEL' in found_values}}cydriver.CUjit_option_enum.CU_JIT_OPTIMIZATION_LEVEL,{{endif}} + {{if 'CU_JIT_GLOBAL_SYMBOL_COUNT' in found_values}}cydriver.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_COUNT,{{endif}} + {{if 'CU_JIT_TARGET_FROM_CUCONTEXT' in 
found_values}}cydriver.CUjit_option_enum.CU_JIT_TARGET_FROM_CUCONTEXT,{{endif}} + {{if 'CU_JIT_REFERENCED_KERNEL_COUNT' in found_values}}cydriver.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_COUNT,{{endif}} + {{if 'CU_JIT_REFERENCED_VARIABLE_COUNT' in found_values}}cydriver.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_COUNT,{{endif}} + {{if 'CU_JIT_MIN_CTA_PER_SM' in found_values}}cydriver.CUjit_option_enum.CU_JIT_MIN_CTA_PER_SM,{{endif}}): self._uint = init_value self._cptr = self._uint - elif self._attr in ({{if 'CU_JIT_WALL_TIME' in found_values}}ccuda.CUjit_option_enum.CU_JIT_WALL_TIME,{{endif}}): + elif self._attr in ({{if 'CU_JIT_WALL_TIME' in found_values}}cydriver.CUjit_option_enum.CU_JIT_WALL_TIME,{{endif}}): self._float = init_value self._cptr = self._float - elif self._attr in ({{if 'CU_JIT_INFO_LOG_BUFFER' in found_values}}ccuda.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER,{{endif}} - {{if 'CU_JIT_ERROR_LOG_BUFFER' in found_values}}ccuda.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER{{endif}}): + elif self._attr in ({{if 'CU_JIT_INFO_LOG_BUFFER' in found_values}}cydriver.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER,{{endif}} + {{if 'CU_JIT_ERROR_LOG_BUFFER' in found_values}}cydriver.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER{{endif}}): self._charstar = init_value self._cptr = self._charstar - elif self._attr in ({{if 'CU_JIT_TARGET' in found_values}}ccuda.CUjit_option_enum.CU_JIT_TARGET,{{endif}}): + elif self._attr in ({{if 'CU_JIT_TARGET' in found_values}}cydriver.CUjit_option_enum.CU_JIT_TARGET,{{endif}}): self._target = init_value.value self._cptr = self._target - elif self._attr in ({{if 'CU_JIT_FALLBACK_STRATEGY' in found_values}}ccuda.CUjit_option_enum.CU_JIT_FALLBACK_STRATEGY,{{endif}}): + elif self._attr in ({{if 'CU_JIT_FALLBACK_STRATEGY' in found_values}}cydriver.CUjit_option_enum.CU_JIT_FALLBACK_STRATEGY,{{endif}}): self._fallback = init_value.value self._cptr = self._fallback - elif self._attr in ({{if 'CU_JIT_GENERATE_DEBUG_INFO' in found_values}}ccuda.CUjit_option_enum.CU_JIT_GENERATE_DEBUG_INFO,{{endif}} - {{if 'CU_JIT_LOG_VERBOSE' in found_values}}ccuda.CUjit_option_enum.CU_JIT_LOG_VERBOSE,{{endif}} - {{if 'CU_JIT_GENERATE_LINE_INFO' in found_values}}ccuda.CUjit_option_enum.CU_JIT_GENERATE_LINE_INFO,{{endif}} - {{if 'CU_JIT_LTO' in found_values}}ccuda.CUjit_option_enum.CU_JIT_LTO,{{endif}} - {{if 'CU_JIT_FTZ' in found_values}}ccuda.CUjit_option_enum.CU_JIT_FTZ,{{endif}} - {{if 'CU_JIT_PREC_DIV' in found_values}}ccuda.CUjit_option_enum.CU_JIT_PREC_DIV,{{endif}} - {{if 'CU_JIT_PREC_SQRT' in found_values}}ccuda.CUjit_option_enum.CU_JIT_PREC_SQRT,{{endif}} - {{if 'CU_JIT_FMA' in found_values}}ccuda.CUjit_option_enum.CU_JIT_FMA,{{endif}} - {{if 'CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES' in found_values}}ccuda.CUjit_option_enum.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES,{{endif}}): + elif self._attr in ({{if 'CU_JIT_GENERATE_DEBUG_INFO' in found_values}}cydriver.CUjit_option_enum.CU_JIT_GENERATE_DEBUG_INFO,{{endif}} + {{if 'CU_JIT_LOG_VERBOSE' in found_values}}cydriver.CUjit_option_enum.CU_JIT_LOG_VERBOSE,{{endif}} + {{if 'CU_JIT_GENERATE_LINE_INFO' in found_values}}cydriver.CUjit_option_enum.CU_JIT_GENERATE_LINE_INFO,{{endif}} + {{if 'CU_JIT_LTO' in found_values}}cydriver.CUjit_option_enum.CU_JIT_LTO,{{endif}} + {{if 'CU_JIT_FTZ' in found_values}}cydriver.CUjit_option_enum.CU_JIT_FTZ,{{endif}} + {{if 'CU_JIT_PREC_DIV' in found_values}}cydriver.CUjit_option_enum.CU_JIT_PREC_DIV,{{endif}} + {{if 'CU_JIT_PREC_SQRT' in found_values}}cydriver.CUjit_option_enum.CU_JIT_PREC_SQRT,{{endif}} + 
{{if 'CU_JIT_FMA' in found_values}}cydriver.CUjit_option_enum.CU_JIT_FMA,{{endif}} + {{if 'CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES' in found_values}}cydriver.CUjit_option_enum.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES,{{endif}}): self._int = init_value self._cptr = self._int - elif self._attr in ({{if 'CU_JIT_CACHE_MODE' in found_values}}ccuda.CUjit_option_enum.CU_JIT_CACHE_MODE,{{endif}}): + elif self._attr in ({{if 'CU_JIT_CACHE_MODE' in found_values}}cydriver.CUjit_option_enum.CU_JIT_CACHE_MODE,{{endif}}): self._cacheMode = init_value.value self._cptr = self._cacheMode - elif self._attr in ({{if 'CU_JIT_GLOBAL_SYMBOL_NAMES' in found_values}}ccuda.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_NAMES,{{endif}} - {{if 'CU_JIT_REFERENCED_KERNEL_NAMES' in found_values}}ccuda.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_NAMES,{{endif}} - {{if 'CU_JIT_REFERENCED_VARIABLE_NAMES' in found_values}}ccuda.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_NAMES,{{endif}}): + elif self._attr in ({{if 'CU_JIT_GLOBAL_SYMBOL_NAMES' in found_values}}cydriver.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_NAMES,{{endif}} + {{if 'CU_JIT_REFERENCED_KERNEL_NAMES' in found_values}}cydriver.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_NAMES,{{endif}} + {{if 'CU_JIT_REFERENCED_VARIABLE_NAMES' in found_values}}cydriver.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_NAMES,{{endif}}): self._charstarstar = init_value self._cptr = &self._charstarstar[0] - elif self._attr in ({{if 'CU_JIT_GLOBAL_SYMBOL_ADDRESSES' in found_values}}ccuda.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_ADDRESSES,{{endif}}): + elif self._attr in ({{if 'CU_JIT_GLOBAL_SYMBOL_ADDRESSES' in found_values}}cydriver.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_ADDRESSES,{{endif}}): pylist = [HelperInputVoidPtr(val) for val in init_value] self._voidstarstar = InputVoidPtrPtrHelper(pylist) self._cptr = self._voidstarstar.cptr @@ -443,11 +443,11 @@ cdef class HelperCUlibraryOption: if False: pass {{if 'CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE' in found_values}} - elif self._attr in (ccuda.CUlibraryOption_enum.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE,): + elif self._attr in (cydriver.CUlibraryOption_enum.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE,): self._cptr = init_value.getPtr() {{endif}} {{if 'CU_LIBRARY_BINARY_IS_PRESERVED' in found_values}} - elif self._attr in (ccuda.CUlibraryOption_enum.CU_LIBRARY_BINARY_IS_PRESERVED,): + elif self._attr in (cydriver.CUlibraryOption_enum.CU_LIBRARY_BINARY_IS_PRESERVED,): self._uint = init_value self._cptr = self._uint {{endif}} @@ -470,24 +470,24 @@ cdef class HelperCUmemAllocationHandleType: if False: pass {{if 'CU_MEM_HANDLE_TYPE_NONE' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_NONE,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_NONE,): self._cptr = &self._int {{endif}} {{if 'CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR,): self._cptr = &self._int {{endif}} {{if 'CU_MEM_HANDLE_TYPE_WIN32' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32,): self._cptr = &self._handle {{endif}} {{if 'CU_MEM_HANDLE_TYPE_WIN32_KMT' in found_values}} - elif self._type in 
(ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32_KMT,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32_KMT,): self._cptr = &self._d3dkmt_handle {{endif}} {{if 'CU_MEM_HANDLE_TYPE_FABRIC' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_FABRIC,): - self._mem_fabric_handle = cuda.CUmemFabricHandle() + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_FABRIC,): + self._mem_fabric_handle = driver.CUmemFabricHandle() self._cptr = self._mem_fabric_handle.getPtr() {{endif}} else: @@ -504,23 +504,23 @@ cdef class HelperCUmemAllocationHandleType: if False: pass {{if 'CU_MEM_HANDLE_TYPE_NONE' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_NONE,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_NONE,): return self._int {{endif}} {{if 'CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR,): return self._int {{endif}} {{if 'CU_MEM_HANDLE_TYPE_WIN32' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32,): return self._handle {{endif}} {{if 'CU_MEM_HANDLE_TYPE_WIN32_KMT' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32_KMT,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32_KMT,): return self._d3dkmt_handle {{endif}} {{if 'CU_MEM_HANDLE_TYPE_FABRIC' in found_values}} - elif self._type in (ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_FABRIC,): + elif self._type in (cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_FABRIC,): return self._mem_fabric_handle {{endif}} else: @@ -546,8 +546,8 @@ cdef class HelperCUcoredumpSettings: def __cinit__(self, attr, init_value, is_getter=False): self._is_getter = is_getter self._attrib = attr.value - if self._attrib in ({{if 'CU_COREDUMP_FILE' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_FILE,{{endif}} - {{if 'CU_COREDUMP_PIPE' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_PIPE,{{endif}}): + if self._attrib in ({{if 'CU_COREDUMP_FILE' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_FILE,{{endif}} + {{if 'CU_COREDUMP_PIPE' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_PIPE,{{endif}}): if self._is_getter: self._charstar = callocWrapper(1024, 1) self._cptr = self._charstar @@ -556,10 +556,10 @@ cdef class HelperCUcoredumpSettings: self._charstar = init_value self._cptr = self._charstar self._size = len(init_value) - elif self._attrib in ({{if 'CU_COREDUMP_ENABLE_ON_EXCEPTION' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_ON_EXCEPTION,{{endif}} - {{if 'CU_COREDUMP_TRIGGER_HOST' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_TRIGGER_HOST,{{endif}} - {{if 'CU_COREDUMP_LIGHTWEIGHT' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_LIGHTWEIGHT,{{endif}} - {{if 'CU_COREDUMP_ENABLE_USER_TRIGGER' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_USER_TRIGGER,{{endif}}): + elif self._attrib in ({{if 'CU_COREDUMP_ENABLE_ON_EXCEPTION' in 
found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_ON_EXCEPTION,{{endif}} + {{if 'CU_COREDUMP_TRIGGER_HOST' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_TRIGGER_HOST,{{endif}} + {{if 'CU_COREDUMP_LIGHTWEIGHT' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_LIGHTWEIGHT,{{endif}} + {{if 'CU_COREDUMP_ENABLE_USER_TRIGGER' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_USER_TRIGGER,{{endif}}): if self._is_getter == False: self._bool = init_value @@ -580,13 +580,13 @@ cdef class HelperCUcoredumpSettings: def pyObj(self): assert(self._is_getter == True) - if self._attrib in ({{if 'CU_COREDUMP_FILE' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_FILE,{{endif}} - {{if 'CU_COREDUMP_PIPE' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_PIPE,{{endif}}): + if self._attrib in ({{if 'CU_COREDUMP_FILE' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_FILE,{{endif}} + {{if 'CU_COREDUMP_PIPE' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_PIPE,{{endif}}): return self._charstar - elif self._attrib in ({{if 'CU_COREDUMP_ENABLE_ON_EXCEPTION' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_ON_EXCEPTION,{{endif}} - {{if 'CU_COREDUMP_TRIGGER_HOST' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_TRIGGER_HOST,{{endif}} - {{if 'CU_COREDUMP_LIGHTWEIGHT' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_LIGHTWEIGHT,{{endif}} - {{if 'CU_COREDUMP_ENABLE_USER_TRIGGER' in found_values}}ccuda.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_USER_TRIGGER,{{endif}}): + elif self._attrib in ({{if 'CU_COREDUMP_ENABLE_ON_EXCEPTION' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_ON_EXCEPTION,{{endif}} + {{if 'CU_COREDUMP_TRIGGER_HOST' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_TRIGGER_HOST,{{endif}} + {{if 'CU_COREDUMP_LIGHTWEIGHT' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_LIGHTWEIGHT,{{endif}} + {{if 'CU_COREDUMP_ENABLE_USER_TRIGGER' in found_values}}cydriver.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_USER_TRIGGER,{{endif}}): return self._bool else: raise TypeError('Unsupported attribute value: {}'.format(self._attrib)) diff --git a/cuda/_version.py b/cuda/cuda/bindings/_version.py similarity index 100% rename from cuda/_version.py rename to cuda/cuda/bindings/_version.py diff --git a/cuda/tests/__init__.py b/cuda/cuda/bindings/benchmarks/__init__.py similarity index 100% rename from cuda/tests/__init__.py rename to cuda/cuda/bindings/benchmarks/__init__.py diff --git a/cuda/benchmarks/kernels.py b/cuda/cuda/bindings/benchmarks/kernels.py similarity index 100% rename from cuda/benchmarks/kernels.py rename to cuda/cuda/bindings/benchmarks/kernels.py diff --git a/cuda/benchmarks/perf_test_utils.py b/cuda/cuda/bindings/benchmarks/perf_test_utils.py similarity index 100% rename from cuda/benchmarks/perf_test_utils.py rename to cuda/cuda/bindings/benchmarks/perf_test_utils.py diff --git a/cuda/benchmarks/test_cupy.py b/cuda/cuda/bindings/benchmarks/test_cupy.py similarity index 100% rename from cuda/benchmarks/test_cupy.py rename to cuda/cuda/bindings/benchmarks/test_cupy.py diff --git a/cuda/benchmarks/test_launch_latency.py b/cuda/cuda/bindings/benchmarks/test_launch_latency.py similarity index 100% rename from cuda/benchmarks/test_launch_latency.py rename to cuda/cuda/bindings/benchmarks/test_launch_latency.py diff --git a/cuda/benchmarks/test_numba.py b/cuda/cuda/bindings/benchmarks/test_numba.py similarity index 100% 
rename from cuda/benchmarks/test_numba.py rename to cuda/cuda/bindings/benchmarks/test_numba.py diff --git a/cuda/benchmarks/test_pointer_attributes.py b/cuda/cuda/bindings/benchmarks/test_pointer_attributes.py similarity index 100% rename from cuda/benchmarks/test_pointer_attributes.py rename to cuda/cuda/bindings/benchmarks/test_pointer_attributes.py diff --git a/cuda/ccuda.pxd.in b/cuda/cuda/bindings/cydriver.pxd.in similarity index 100% rename from cuda/ccuda.pxd.in rename to cuda/cuda/bindings/cydriver.pxd.in diff --git a/cuda/ccuda.pyx.in b/cuda/cuda/bindings/cydriver.pyx.in similarity index 73% rename from cuda/ccuda.pyx.in rename to cuda/cuda/bindings/cydriver.pyx.in index 48c06b5b..3809e358 100644 --- a/cuda/ccuda.pyx.in +++ b/cuda/cuda/bindings/cydriver.pyx.in @@ -5,2740 +5,2740 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. -cimport cuda._cuda.ccuda as ccuda +cimport cuda.bindings._bindings.cydriver as cydriver {{if 'cuGetErrorString' in found_functions}} cdef CUresult cuGetErrorString(CUresult error, const char** pStr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGetErrorString(error, pStr) + return cydriver._cuGetErrorString(error, pStr) {{endif}} {{if 'cuGetErrorName' in found_functions}} cdef CUresult cuGetErrorName(CUresult error, const char** pStr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGetErrorName(error, pStr) + return cydriver._cuGetErrorName(error, pStr) {{endif}} {{if 'cuInit' in found_functions}} cdef CUresult cuInit(unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuInit(Flags) + return cydriver._cuInit(Flags) {{endif}} {{if 'cuDriverGetVersion' in found_functions}} cdef CUresult cuDriverGetVersion(int* driverVersion) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDriverGetVersion(driverVersion) + return cydriver._cuDriverGetVersion(driverVersion) {{endif}} {{if 'cuDeviceGet' in found_functions}} cdef CUresult cuDeviceGet(CUdevice* device, int ordinal) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGet(device, ordinal) + return cydriver._cuDeviceGet(device, ordinal) {{endif}} {{if 'cuDeviceGetCount' in found_functions}} cdef CUresult cuDeviceGetCount(int* count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetCount(count) + return cydriver._cuDeviceGetCount(count) {{endif}} {{if 'cuDeviceGetName' in found_functions}} cdef CUresult cuDeviceGetName(char* name, int length, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetName(name, length, dev) + return cydriver._cuDeviceGetName(name, length, dev) {{endif}} {{if 'cuDeviceGetUuid' in found_functions}} cdef CUresult cuDeviceGetUuid(CUuuid* uuid, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetUuid(uuid, dev) + return cydriver._cuDeviceGetUuid(uuid, dev) {{endif}} {{if 'cuDeviceGetUuid_v2' in found_functions}} cdef CUresult cuDeviceGetUuid_v2(CUuuid* uuid, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetUuid_v2(uuid, dev) + return cydriver._cuDeviceGetUuid_v2(uuid, dev) {{endif}} {{if 'cuDeviceGetLuid' in found_functions}} cdef CUresult cuDeviceGetLuid(char* luid, unsigned int* deviceNodeMask, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetLuid(luid, deviceNodeMask, dev) + return cydriver._cuDeviceGetLuid(luid, deviceNodeMask, dev) {{endif}} {{if 'cuDeviceTotalMem_v2' in found_functions}} cdef CUresult 
cuDeviceTotalMem(size_t* numbytes, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceTotalMem_v2(numbytes, dev) + return cydriver._cuDeviceTotalMem_v2(numbytes, dev) {{endif}} {{if 'cuDeviceGetTexture1DLinearMaxWidth' in found_functions}} cdef CUresult cuDeviceGetTexture1DLinearMaxWidth(size_t* maxWidthInElements, CUarray_format pformat, unsigned numChannels, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetTexture1DLinearMaxWidth(maxWidthInElements, pformat, numChannels, dev) + return cydriver._cuDeviceGetTexture1DLinearMaxWidth(maxWidthInElements, pformat, numChannels, dev) {{endif}} {{if 'cuDeviceGetAttribute' in found_functions}} cdef CUresult cuDeviceGetAttribute(int* pi, CUdevice_attribute attrib, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetAttribute(pi, attrib, dev) + return cydriver._cuDeviceGetAttribute(pi, attrib, dev) {{endif}} {{if 'cuDeviceGetNvSciSyncAttributes' in found_functions}} cdef CUresult cuDeviceGetNvSciSyncAttributes(void* nvSciSyncAttrList, CUdevice dev, int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, dev, flags) + return cydriver._cuDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, dev, flags) {{endif}} {{if 'cuDeviceSetMemPool' in found_functions}} cdef CUresult cuDeviceSetMemPool(CUdevice dev, CUmemoryPool pool) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceSetMemPool(dev, pool) + return cydriver._cuDeviceSetMemPool(dev, pool) {{endif}} {{if 'cuDeviceGetMemPool' in found_functions}} cdef CUresult cuDeviceGetMemPool(CUmemoryPool* pool, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetMemPool(pool, dev) + return cydriver._cuDeviceGetMemPool(pool, dev) {{endif}} {{if 'cuDeviceGetDefaultMemPool' in found_functions}} cdef CUresult cuDeviceGetDefaultMemPool(CUmemoryPool* pool_out, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetDefaultMemPool(pool_out, dev) + return cydriver._cuDeviceGetDefaultMemPool(pool_out, dev) {{endif}} {{if 'cuDeviceGetExecAffinitySupport' in found_functions}} cdef CUresult cuDeviceGetExecAffinitySupport(int* pi, CUexecAffinityType typename, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetExecAffinitySupport(pi, typename, dev) + return cydriver._cuDeviceGetExecAffinitySupport(pi, typename, dev) {{endif}} {{if 'cuFlushGPUDirectRDMAWrites' in found_functions}} cdef CUresult cuFlushGPUDirectRDMAWrites(CUflushGPUDirectRDMAWritesTarget target, CUflushGPUDirectRDMAWritesScope scope) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFlushGPUDirectRDMAWrites(target, scope) + return cydriver._cuFlushGPUDirectRDMAWrites(target, scope) {{endif}} {{if 'cuDeviceGetProperties' in found_functions}} cdef CUresult cuDeviceGetProperties(CUdevprop* prop, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetProperties(prop, dev) + return cydriver._cuDeviceGetProperties(prop, dev) {{endif}} {{if 'cuDeviceComputeCapability' in found_functions}} cdef CUresult cuDeviceComputeCapability(int* major, int* minor, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceComputeCapability(major, minor, dev) + return cydriver._cuDeviceComputeCapability(major, minor, dev) {{endif}} {{if 'cuDevicePrimaryCtxRetain' in found_functions}} cdef CUresult cuDevicePrimaryCtxRetain(CUcontext* pctx, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDevicePrimaryCtxRetain(pctx, dev) 
+ return cydriver._cuDevicePrimaryCtxRetain(pctx, dev) {{endif}} {{if 'cuDevicePrimaryCtxRelease_v2' in found_functions}} cdef CUresult cuDevicePrimaryCtxRelease(CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDevicePrimaryCtxRelease_v2(dev) + return cydriver._cuDevicePrimaryCtxRelease_v2(dev) {{endif}} {{if 'cuDevicePrimaryCtxSetFlags_v2' in found_functions}} cdef CUresult cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDevicePrimaryCtxSetFlags_v2(dev, flags) + return cydriver._cuDevicePrimaryCtxSetFlags_v2(dev, flags) {{endif}} {{if 'cuDevicePrimaryCtxGetState' in found_functions}} cdef CUresult cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int* flags, int* active) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDevicePrimaryCtxGetState(dev, flags, active) + return cydriver._cuDevicePrimaryCtxGetState(dev, flags, active) {{endif}} {{if 'cuDevicePrimaryCtxReset_v2' in found_functions}} cdef CUresult cuDevicePrimaryCtxReset(CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDevicePrimaryCtxReset_v2(dev) + return cydriver._cuDevicePrimaryCtxReset_v2(dev) {{endif}} {{if 'cuCtxCreate_v2' in found_functions}} cdef CUresult cuCtxCreate(CUcontext* pctx, unsigned int flags, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxCreate_v2(pctx, flags, dev) + return cydriver._cuCtxCreate_v2(pctx, flags, dev) {{endif}} {{if 'cuCtxCreate_v3' in found_functions}} cdef CUresult cuCtxCreate_v3(CUcontext* pctx, CUexecAffinityParam* paramsArray, int numParams, unsigned int flags, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxCreate_v3(pctx, paramsArray, numParams, flags, dev) + return cydriver._cuCtxCreate_v3(pctx, paramsArray, numParams, flags, dev) {{endif}} {{if 'cuCtxCreate_v4' in found_functions}} cdef CUresult cuCtxCreate_v4(CUcontext* pctx, CUctxCreateParams* ctxCreateParams, unsigned int flags, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxCreate_v4(pctx, ctxCreateParams, flags, dev) + return cydriver._cuCtxCreate_v4(pctx, ctxCreateParams, flags, dev) {{endif}} {{if 'cuCtxDestroy_v2' in found_functions}} cdef CUresult cuCtxDestroy(CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxDestroy_v2(ctx) + return cydriver._cuCtxDestroy_v2(ctx) {{endif}} {{if 'cuCtxPushCurrent_v2' in found_functions}} cdef CUresult cuCtxPushCurrent(CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxPushCurrent_v2(ctx) + return cydriver._cuCtxPushCurrent_v2(ctx) {{endif}} {{if 'cuCtxPopCurrent_v2' in found_functions}} cdef CUresult cuCtxPopCurrent(CUcontext* pctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxPopCurrent_v2(pctx) + return cydriver._cuCtxPopCurrent_v2(pctx) {{endif}} {{if 'cuCtxSetCurrent' in found_functions}} cdef CUresult cuCtxSetCurrent(CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxSetCurrent(ctx) + return cydriver._cuCtxSetCurrent(ctx) {{endif}} {{if 'cuCtxGetCurrent' in found_functions}} cdef CUresult cuCtxGetCurrent(CUcontext* pctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetCurrent(pctx) + return cydriver._cuCtxGetCurrent(pctx) {{endif}} {{if 'cuCtxGetDevice' in found_functions}} cdef CUresult cuCtxGetDevice(CUdevice* device) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetDevice(device) + return cydriver._cuCtxGetDevice(device) {{endif}} {{if 'cuCtxGetFlags' in found_functions}} cdef CUresult cuCtxGetFlags(unsigned 
int* flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetFlags(flags) + return cydriver._cuCtxGetFlags(flags) {{endif}} {{if 'cuCtxSetFlags' in found_functions}} cdef CUresult cuCtxSetFlags(unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxSetFlags(flags) + return cydriver._cuCtxSetFlags(flags) {{endif}} {{if 'cuCtxGetId' in found_functions}} cdef CUresult cuCtxGetId(CUcontext ctx, unsigned long long* ctxId) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetId(ctx, ctxId) + return cydriver._cuCtxGetId(ctx, ctxId) {{endif}} {{if 'cuCtxSynchronize' in found_functions}} cdef CUresult cuCtxSynchronize() except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxSynchronize() + return cydriver._cuCtxSynchronize() {{endif}} {{if 'cuCtxSetLimit' in found_functions}} cdef CUresult cuCtxSetLimit(CUlimit limit, size_t value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxSetLimit(limit, value) + return cydriver._cuCtxSetLimit(limit, value) {{endif}} {{if 'cuCtxGetLimit' in found_functions}} cdef CUresult cuCtxGetLimit(size_t* pvalue, CUlimit limit) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetLimit(pvalue, limit) + return cydriver._cuCtxGetLimit(pvalue, limit) {{endif}} {{if 'cuCtxGetCacheConfig' in found_functions}} cdef CUresult cuCtxGetCacheConfig(CUfunc_cache* pconfig) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetCacheConfig(pconfig) + return cydriver._cuCtxGetCacheConfig(pconfig) {{endif}} {{if 'cuCtxSetCacheConfig' in found_functions}} cdef CUresult cuCtxSetCacheConfig(CUfunc_cache config) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxSetCacheConfig(config) + return cydriver._cuCtxSetCacheConfig(config) {{endif}} {{if 'cuCtxGetApiVersion' in found_functions}} cdef CUresult cuCtxGetApiVersion(CUcontext ctx, unsigned int* version) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetApiVersion(ctx, version) + return cydriver._cuCtxGetApiVersion(ctx, version) {{endif}} {{if 'cuCtxGetStreamPriorityRange' in found_functions}} cdef CUresult cuCtxGetStreamPriorityRange(int* leastPriority, int* greatestPriority) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetStreamPriorityRange(leastPriority, greatestPriority) + return cydriver._cuCtxGetStreamPriorityRange(leastPriority, greatestPriority) {{endif}} {{if 'cuCtxResetPersistingL2Cache' in found_functions}} cdef CUresult cuCtxResetPersistingL2Cache() except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxResetPersistingL2Cache() + return cydriver._cuCtxResetPersistingL2Cache() {{endif}} {{if 'cuCtxGetExecAffinity' in found_functions}} cdef CUresult cuCtxGetExecAffinity(CUexecAffinityParam* pExecAffinity, CUexecAffinityType typename) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetExecAffinity(pExecAffinity, typename) + return cydriver._cuCtxGetExecAffinity(pExecAffinity, typename) {{endif}} {{if 'cuCtxRecordEvent' in found_functions}} cdef CUresult cuCtxRecordEvent(CUcontext hCtx, CUevent hEvent) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxRecordEvent(hCtx, hEvent) + return cydriver._cuCtxRecordEvent(hCtx, hEvent) {{endif}} {{if 'cuCtxWaitEvent' in found_functions}} cdef CUresult cuCtxWaitEvent(CUcontext hCtx, CUevent hEvent) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxWaitEvent(hCtx, hEvent) + return cydriver._cuCtxWaitEvent(hCtx, hEvent) {{endif}} {{if 'cuCtxAttach' in found_functions}} cdef CUresult cuCtxAttach(CUcontext* pctx, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return 
ccuda._cuCtxAttach(pctx, flags) + return cydriver._cuCtxAttach(pctx, flags) {{endif}} {{if 'cuCtxDetach' in found_functions}} cdef CUresult cuCtxDetach(CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxDetach(ctx) + return cydriver._cuCtxDetach(ctx) {{endif}} {{if 'cuCtxGetSharedMemConfig' in found_functions}} cdef CUresult cuCtxGetSharedMemConfig(CUsharedconfig* pConfig) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxGetSharedMemConfig(pConfig) + return cydriver._cuCtxGetSharedMemConfig(pConfig) {{endif}} {{if 'cuCtxSetSharedMemConfig' in found_functions}} cdef CUresult cuCtxSetSharedMemConfig(CUsharedconfig config) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuCtxSetSharedMemConfig(config) + return cydriver._cuCtxSetSharedMemConfig(config) {{endif}} {{if 'cuModuleLoad' in found_functions}} cdef CUresult cuModuleLoad(CUmodule* module, const char* fname) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleLoad(module, fname) + return cydriver._cuModuleLoad(module, fname) {{endif}} {{if 'cuModuleLoadData' in found_functions}} cdef CUresult cuModuleLoadData(CUmodule* module, const void* image) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleLoadData(module, image) + return cydriver._cuModuleLoadData(module, image) {{endif}} {{if 'cuModuleLoadDataEx' in found_functions}} cdef CUresult cuModuleLoadDataEx(CUmodule* module, const void* image, unsigned int numOptions, CUjit_option* options, void** optionValues) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleLoadDataEx(module, image, numOptions, options, optionValues) + return cydriver._cuModuleLoadDataEx(module, image, numOptions, options, optionValues) {{endif}} {{if 'cuModuleLoadFatBinary' in found_functions}} cdef CUresult cuModuleLoadFatBinary(CUmodule* module, const void* fatCubin) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleLoadFatBinary(module, fatCubin) + return cydriver._cuModuleLoadFatBinary(module, fatCubin) {{endif}} {{if 'cuModuleUnload' in found_functions}} cdef CUresult cuModuleUnload(CUmodule hmod) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleUnload(hmod) + return cydriver._cuModuleUnload(hmod) {{endif}} {{if 'cuModuleGetLoadingMode' in found_functions}} cdef CUresult cuModuleGetLoadingMode(CUmoduleLoadingMode* mode) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleGetLoadingMode(mode) + return cydriver._cuModuleGetLoadingMode(mode) {{endif}} {{if 'cuModuleGetFunction' in found_functions}} cdef CUresult cuModuleGetFunction(CUfunction* hfunc, CUmodule hmod, const char* name) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleGetFunction(hfunc, hmod, name) + return cydriver._cuModuleGetFunction(hfunc, hmod, name) {{endif}} {{if 'cuModuleGetFunctionCount' in found_functions}} cdef CUresult cuModuleGetFunctionCount(unsigned int* count, CUmodule mod) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleGetFunctionCount(count, mod) + return cydriver._cuModuleGetFunctionCount(count, mod) {{endif}} {{if 'cuModuleEnumerateFunctions' in found_functions}} cdef CUresult cuModuleEnumerateFunctions(CUfunction* functions, unsigned int numFunctions, CUmodule mod) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleEnumerateFunctions(functions, numFunctions, mod) + return cydriver._cuModuleEnumerateFunctions(functions, numFunctions, mod) {{endif}} {{if 'cuModuleGetGlobal_v2' in found_functions}} cdef CUresult cuModuleGetGlobal(CUdeviceptr* dptr, size_t* numbytes, CUmodule hmod, const char* name) except 
?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleGetGlobal_v2(dptr, numbytes, hmod, name) + return cydriver._cuModuleGetGlobal_v2(dptr, numbytes, hmod, name) {{endif}} {{if 'cuLinkCreate_v2' in found_functions}} cdef CUresult cuLinkCreate(unsigned int numOptions, CUjit_option* options, void** optionValues, CUlinkState* stateOut) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLinkCreate_v2(numOptions, options, optionValues, stateOut) + return cydriver._cuLinkCreate_v2(numOptions, options, optionValues, stateOut) {{endif}} {{if 'cuLinkAddData_v2' in found_functions}} cdef CUresult cuLinkAddData(CUlinkState state, CUjitInputType typename, void* data, size_t size, const char* name, unsigned int numOptions, CUjit_option* options, void** optionValues) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLinkAddData_v2(state, typename, data, size, name, numOptions, options, optionValues) + return cydriver._cuLinkAddData_v2(state, typename, data, size, name, numOptions, options, optionValues) {{endif}} {{if 'cuLinkAddFile_v2' in found_functions}} cdef CUresult cuLinkAddFile(CUlinkState state, CUjitInputType typename, const char* path, unsigned int numOptions, CUjit_option* options, void** optionValues) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLinkAddFile_v2(state, typename, path, numOptions, options, optionValues) + return cydriver._cuLinkAddFile_v2(state, typename, path, numOptions, options, optionValues) {{endif}} {{if 'cuLinkComplete' in found_functions}} cdef CUresult cuLinkComplete(CUlinkState state, void** cubinOut, size_t* sizeOut) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLinkComplete(state, cubinOut, sizeOut) + return cydriver._cuLinkComplete(state, cubinOut, sizeOut) {{endif}} {{if 'cuLinkDestroy' in found_functions}} cdef CUresult cuLinkDestroy(CUlinkState state) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLinkDestroy(state) + return cydriver._cuLinkDestroy(state) {{endif}} {{if 'cuModuleGetTexRef' in found_functions}} cdef CUresult cuModuleGetTexRef(CUtexref* pTexRef, CUmodule hmod, const char* name) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleGetTexRef(pTexRef, hmod, name) + return cydriver._cuModuleGetTexRef(pTexRef, hmod, name) {{endif}} {{if 'cuModuleGetSurfRef' in found_functions}} cdef CUresult cuModuleGetSurfRef(CUsurfref* pSurfRef, CUmodule hmod, const char* name) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuModuleGetSurfRef(pSurfRef, hmod, name) + return cydriver._cuModuleGetSurfRef(pSurfRef, hmod, name) {{endif}} {{if 'cuLibraryLoadData' in found_functions}} cdef CUresult cuLibraryLoadData(CUlibrary* library, const void* code, CUjit_option* jitOptions, void** jitOptionsValues, unsigned int numJitOptions, CUlibraryOption* libraryOptions, void** libraryOptionValues, unsigned int numLibraryOptions) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryLoadData(library, code, jitOptions, jitOptionsValues, numJitOptions, libraryOptions, libraryOptionValues, numLibraryOptions) + return cydriver._cuLibraryLoadData(library, code, jitOptions, jitOptionsValues, numJitOptions, libraryOptions, libraryOptionValues, numLibraryOptions) {{endif}} {{if 'cuLibraryLoadFromFile' in found_functions}} cdef CUresult cuLibraryLoadFromFile(CUlibrary* library, const char* fileName, CUjit_option* jitOptions, void** jitOptionsValues, unsigned int numJitOptions, CUlibraryOption* libraryOptions, void** libraryOptionValues, unsigned int numLibraryOptions) except ?CUDA_ERROR_NOT_FOUND nogil: - return 
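The module and linker wrappers (cuModuleLoad*, cuLink*) follow the same mechanical rename. As a sketch, loading a module from a PTX image and looking up a kernel might look like the following; ptx (a bytes object holding NUL-terminated PTX, e.g. produced by nvrtc) and the kernel name "saxpy" are placeholders, not part of this patch:

    from cuda.bindings import driver

    err, module = driver.cuModuleLoadData(ptx)
    assert err == driver.CUresult.CUDA_SUCCESS
    err, kernel = driver.cuModuleGetFunction(module, b"saxpy")
    # ... launch `kernel`, then release the module ...
    err, = driver.cuModuleUnload(module)
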
ccuda._cuLibraryLoadFromFile(library, fileName, jitOptions, jitOptionsValues, numJitOptions, libraryOptions, libraryOptionValues, numLibraryOptions) + return cydriver._cuLibraryLoadFromFile(library, fileName, jitOptions, jitOptionsValues, numJitOptions, libraryOptions, libraryOptionValues, numLibraryOptions) {{endif}} {{if 'cuLibraryUnload' in found_functions}} cdef CUresult cuLibraryUnload(CUlibrary library) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryUnload(library) + return cydriver._cuLibraryUnload(library) {{endif}} {{if 'cuLibraryGetKernel' in found_functions}} cdef CUresult cuLibraryGetKernel(CUkernel* pKernel, CUlibrary library, const char* name) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryGetKernel(pKernel, library, name) + return cydriver._cuLibraryGetKernel(pKernel, library, name) {{endif}} {{if 'cuLibraryGetKernelCount' in found_functions}} cdef CUresult cuLibraryGetKernelCount(unsigned int* count, CUlibrary lib) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryGetKernelCount(count, lib) + return cydriver._cuLibraryGetKernelCount(count, lib) {{endif}} {{if 'cuLibraryEnumerateKernels' in found_functions}} cdef CUresult cuLibraryEnumerateKernels(CUkernel* kernels, unsigned int numKernels, CUlibrary lib) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryEnumerateKernels(kernels, numKernels, lib) + return cydriver._cuLibraryEnumerateKernels(kernels, numKernels, lib) {{endif}} {{if 'cuLibraryGetModule' in found_functions}} cdef CUresult cuLibraryGetModule(CUmodule* pMod, CUlibrary library) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryGetModule(pMod, library) + return cydriver._cuLibraryGetModule(pMod, library) {{endif}} {{if 'cuKernelGetFunction' in found_functions}} cdef CUresult cuKernelGetFunction(CUfunction* pFunc, CUkernel kernel) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuKernelGetFunction(pFunc, kernel) + return cydriver._cuKernelGetFunction(pFunc, kernel) {{endif}} {{if 'cuKernelGetLibrary' in found_functions}} cdef CUresult cuKernelGetLibrary(CUlibrary* pLib, CUkernel kernel) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuKernelGetLibrary(pLib, kernel) + return cydriver._cuKernelGetLibrary(pLib, kernel) {{endif}} {{if 'cuLibraryGetGlobal' in found_functions}} cdef CUresult cuLibraryGetGlobal(CUdeviceptr* dptr, size_t* numbytes, CUlibrary library, const char* name) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryGetGlobal(dptr, numbytes, library, name) + return cydriver._cuLibraryGetGlobal(dptr, numbytes, library, name) {{endif}} {{if 'cuLibraryGetManaged' in found_functions}} cdef CUresult cuLibraryGetManaged(CUdeviceptr* dptr, size_t* numbytes, CUlibrary library, const char* name) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryGetManaged(dptr, numbytes, library, name) + return cydriver._cuLibraryGetManaged(dptr, numbytes, library, name) {{endif}} {{if 'cuLibraryGetUnifiedFunction' in found_functions}} cdef CUresult cuLibraryGetUnifiedFunction(void** fptr, CUlibrary library, const char* symbol) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLibraryGetUnifiedFunction(fptr, library, symbol) + return cydriver._cuLibraryGetUnifiedFunction(fptr, library, symbol) {{endif}} {{if 'cuKernelGetAttribute' in found_functions}} cdef CUresult cuKernelGetAttribute(int* pi, CUfunction_attribute attrib, CUkernel kernel, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuKernelGetAttribute(pi, attrib, kernel, dev) + return 
cydriver._cuKernelGetAttribute(pi, attrib, kernel, dev) {{endif}} {{if 'cuKernelSetAttribute' in found_functions}} cdef CUresult cuKernelSetAttribute(CUfunction_attribute attrib, int val, CUkernel kernel, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuKernelSetAttribute(attrib, val, kernel, dev) + return cydriver._cuKernelSetAttribute(attrib, val, kernel, dev) {{endif}} {{if 'cuKernelSetCacheConfig' in found_functions}} cdef CUresult cuKernelSetCacheConfig(CUkernel kernel, CUfunc_cache config, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuKernelSetCacheConfig(kernel, config, dev) + return cydriver._cuKernelSetCacheConfig(kernel, config, dev) {{endif}} {{if 'cuKernelGetName' in found_functions}} cdef CUresult cuKernelGetName(const char** name, CUkernel hfunc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuKernelGetName(name, hfunc) + return cydriver._cuKernelGetName(name, hfunc) {{endif}} {{if 'cuKernelGetParamInfo' in found_functions}} cdef CUresult cuKernelGetParamInfo(CUkernel kernel, size_t paramIndex, size_t* paramOffset, size_t* paramSize) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuKernelGetParamInfo(kernel, paramIndex, paramOffset, paramSize) + return cydriver._cuKernelGetParamInfo(kernel, paramIndex, paramOffset, paramSize) {{endif}} {{if 'cuMemGetInfo_v2' in found_functions}} cdef CUresult cuMemGetInfo(size_t* free, size_t* total) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemGetInfo_v2(free, total) + return cydriver._cuMemGetInfo_v2(free, total) {{endif}} {{if 'cuMemAlloc_v2' in found_functions}} cdef CUresult cuMemAlloc(CUdeviceptr* dptr, size_t bytesize) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAlloc_v2(dptr, bytesize) + return cydriver._cuMemAlloc_v2(dptr, bytesize) {{endif}} {{if 'cuMemAllocPitch_v2' in found_functions}} cdef CUresult cuMemAllocPitch(CUdeviceptr* dptr, size_t* pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAllocPitch_v2(dptr, pPitch, WidthInBytes, Height, ElementSizeBytes) + return cydriver._cuMemAllocPitch_v2(dptr, pPitch, WidthInBytes, Height, ElementSizeBytes) {{endif}} {{if 'cuMemFree_v2' in found_functions}} cdef CUresult cuMemFree(CUdeviceptr dptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemFree_v2(dptr) + return cydriver._cuMemFree_v2(dptr) {{endif}} {{if 'cuMemGetAddressRange_v2' in found_functions}} cdef CUresult cuMemGetAddressRange(CUdeviceptr* pbase, size_t* psize, CUdeviceptr dptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemGetAddressRange_v2(pbase, psize, dptr) + return cydriver._cuMemGetAddressRange_v2(pbase, psize, dptr) {{endif}} {{if 'cuMemAllocHost_v2' in found_functions}} cdef CUresult cuMemAllocHost(void** pp, size_t bytesize) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAllocHost_v2(pp, bytesize) + return cydriver._cuMemAllocHost_v2(pp, bytesize) {{endif}} {{if 'cuMemFreeHost' in found_functions}} cdef CUresult cuMemFreeHost(void* p) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemFreeHost(p) + return cydriver._cuMemFreeHost(p) {{endif}} {{if 'cuMemHostAlloc' in found_functions}} cdef CUresult cuMemHostAlloc(void** pp, size_t bytesize, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemHostAlloc(pp, bytesize, Flags) + return cydriver._cuMemHostAlloc(pp, bytesize, Flags) {{endif}} {{if 'cuMemHostGetDevicePointer_v2' in found_functions}} cdef CUresult cuMemHostGetDevicePointer(CUdeviceptr* pdptr, 
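The basic memory-management wrappers are renamed the same way. A short end-to-end sketch, with no assumptions beyond an active context:

    from cuda.bindings import driver

    # How much device memory is available right now?
    err, free_b, total_b = driver.cuMemGetInfo()

    # Allocate and release 1 MiB; note the Python name drops the _v2
    # suffix that the underlying cydriver symbol carries.
    err, dptr = driver.cuMemAlloc(1 << 20)
    err, = driver.cuMemFree(dptr)
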
void* p, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemHostGetDevicePointer_v2(pdptr, p, Flags) + return cydriver._cuMemHostGetDevicePointer_v2(pdptr, p, Flags) {{endif}} {{if 'cuMemHostGetFlags' in found_functions}} cdef CUresult cuMemHostGetFlags(unsigned int* pFlags, void* p) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemHostGetFlags(pFlags, p) + return cydriver._cuMemHostGetFlags(pFlags, p) {{endif}} {{if 'cuMemAllocManaged' in found_functions}} cdef CUresult cuMemAllocManaged(CUdeviceptr* dptr, size_t bytesize, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAllocManaged(dptr, bytesize, flags) + return cydriver._cuMemAllocManaged(dptr, bytesize, flags) {{endif}} {{if 'cuDeviceRegisterAsyncNotification' in found_functions}} cdef CUresult cuDeviceRegisterAsyncNotification(CUdevice device, CUasyncCallback callbackFunc, void* userData, CUasyncCallbackHandle* callback) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceRegisterAsyncNotification(device, callbackFunc, userData, callback) + return cydriver._cuDeviceRegisterAsyncNotification(device, callbackFunc, userData, callback) {{endif}} {{if 'cuDeviceUnregisterAsyncNotification' in found_functions}} cdef CUresult cuDeviceUnregisterAsyncNotification(CUdevice device, CUasyncCallbackHandle callback) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceUnregisterAsyncNotification(device, callback) + return cydriver._cuDeviceUnregisterAsyncNotification(device, callback) {{endif}} {{if 'cuDeviceGetByPCIBusId' in found_functions}} cdef CUresult cuDeviceGetByPCIBusId(CUdevice* dev, const char* pciBusId) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetByPCIBusId(dev, pciBusId) + return cydriver._cuDeviceGetByPCIBusId(dev, pciBusId) {{endif}} {{if 'cuDeviceGetPCIBusId' in found_functions}} cdef CUresult cuDeviceGetPCIBusId(char* pciBusId, int length, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetPCIBusId(pciBusId, length, dev) + return cydriver._cuDeviceGetPCIBusId(pciBusId, length, dev) {{endif}} {{if 'cuIpcGetEventHandle' in found_functions}} cdef CUresult cuIpcGetEventHandle(CUipcEventHandle* pHandle, CUevent event) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuIpcGetEventHandle(pHandle, event) + return cydriver._cuIpcGetEventHandle(pHandle, event) {{endif}} {{if 'cuIpcOpenEventHandle' in found_functions}} cdef CUresult cuIpcOpenEventHandle(CUevent* phEvent, CUipcEventHandle handle) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuIpcOpenEventHandle(phEvent, handle) + return cydriver._cuIpcOpenEventHandle(phEvent, handle) {{endif}} {{if 'cuIpcGetMemHandle' in found_functions}} cdef CUresult cuIpcGetMemHandle(CUipcMemHandle* pHandle, CUdeviceptr dptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuIpcGetMemHandle(pHandle, dptr) + return cydriver._cuIpcGetMemHandle(pHandle, dptr) {{endif}} {{if 'cuIpcOpenMemHandle_v2' in found_functions}} cdef CUresult cuIpcOpenMemHandle(CUdeviceptr* pdptr, CUipcMemHandle handle, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuIpcOpenMemHandle_v2(pdptr, handle, Flags) + return cydriver._cuIpcOpenMemHandle_v2(pdptr, handle, Flags) {{endif}} {{if 'cuIpcCloseMemHandle' in found_functions}} cdef CUresult cuIpcCloseMemHandle(CUdeviceptr dptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuIpcCloseMemHandle(dptr) + return cydriver._cuIpcCloseMemHandle(dptr) {{endif}} {{if 'cuMemHostRegister_v2' in found_functions}} cdef CUresult 
cuMemHostRegister(void* p, size_t bytesize, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemHostRegister_v2(p, bytesize, Flags) + return cydriver._cuMemHostRegister_v2(p, bytesize, Flags) {{endif}} {{if 'cuMemHostUnregister' in found_functions}} cdef CUresult cuMemHostUnregister(void* p) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemHostUnregister(p) + return cydriver._cuMemHostUnregister(p) {{endif}} {{if 'cuMemcpy' in found_functions}} cdef CUresult cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy(dst, src, ByteCount) + return cydriver._cuMemcpy(dst, src, ByteCount) {{endif}} {{if 'cuMemcpyPeer' in found_functions}} cdef CUresult cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyPeer(dstDevice, dstContext, srcDevice, srcContext, ByteCount) + return cydriver._cuMemcpyPeer(dstDevice, dstContext, srcDevice, srcContext, ByteCount) {{endif}} {{if 'cuMemcpyHtoD_v2' in found_functions}} cdef CUresult cuMemcpyHtoD(CUdeviceptr dstDevice, const void* srcHost, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyHtoD_v2(dstDevice, srcHost, ByteCount) + return cydriver._cuMemcpyHtoD_v2(dstDevice, srcHost, ByteCount) {{endif}} {{if 'cuMemcpyDtoH_v2' in found_functions}} cdef CUresult cuMemcpyDtoH(void* dstHost, CUdeviceptr srcDevice, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyDtoH_v2(dstHost, srcDevice, ByteCount) + return cydriver._cuMemcpyDtoH_v2(dstHost, srcDevice, ByteCount) {{endif}} {{if 'cuMemcpyDtoD_v2' in found_functions}} cdef CUresult cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyDtoD_v2(dstDevice, srcDevice, ByteCount) + return cydriver._cuMemcpyDtoD_v2(dstDevice, srcDevice, ByteCount) {{endif}} {{if 'cuMemcpyDtoA_v2' in found_functions}} cdef CUresult cuMemcpyDtoA(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyDtoA_v2(dstArray, dstOffset, srcDevice, ByteCount) + return cydriver._cuMemcpyDtoA_v2(dstArray, dstOffset, srcDevice, ByteCount) {{endif}} {{if 'cuMemcpyAtoD_v2' in found_functions}} cdef CUresult cuMemcpyAtoD(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyAtoD_v2(dstDevice, srcArray, srcOffset, ByteCount) + return cydriver._cuMemcpyAtoD_v2(dstDevice, srcArray, srcOffset, ByteCount) {{endif}} {{if 'cuMemcpyHtoA_v2' in found_functions}} cdef CUresult cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void* srcHost, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyHtoA_v2(dstArray, dstOffset, srcHost, ByteCount) + return cydriver._cuMemcpyHtoA_v2(dstArray, dstOffset, srcHost, ByteCount) {{endif}} {{if 'cuMemcpyAtoH_v2' in found_functions}} cdef CUresult cuMemcpyAtoH(void* dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyAtoH_v2(dstHost, srcArray, srcOffset, ByteCount) + return cydriver._cuMemcpyAtoH_v2(dstHost, srcArray, srcOffset, ByteCount) {{endif}} {{if 'cuMemcpyAtoA_v2' in found_functions}} cdef CUresult cuMemcpyAtoA(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount) 
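The cuMemcpy family keeps its calling convention: host pointers are passed as integers, which makes round-tripping a NumPy buffer straightforward. A sketch assuming an active context:

    import numpy as np
    from cuda.bindings import driver

    host = np.arange(256, dtype=np.float32)
    err, dptr = driver.cuMemAlloc(host.nbytes)
    err, = driver.cuMemcpyHtoD(dptr, host.ctypes.data, host.nbytes)

    out = np.empty_like(host)
    err, = driver.cuMemcpyDtoH(out.ctypes.data, dptr, host.nbytes)
    assert (out == host).all()
    err, = driver.cuMemFree(dptr)
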
except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyAtoA_v2(dstArray, dstOffset, srcArray, srcOffset, ByteCount) + return cydriver._cuMemcpyAtoA_v2(dstArray, dstOffset, srcArray, srcOffset, ByteCount) {{endif}} {{if 'cuMemcpy2D_v2' in found_functions}} cdef CUresult cuMemcpy2D(const CUDA_MEMCPY2D* pCopy) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy2D_v2(pCopy) + return cydriver._cuMemcpy2D_v2(pCopy) {{endif}} {{if 'cuMemcpy2DUnaligned_v2' in found_functions}} cdef CUresult cuMemcpy2DUnaligned(const CUDA_MEMCPY2D* pCopy) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy2DUnaligned_v2(pCopy) + return cydriver._cuMemcpy2DUnaligned_v2(pCopy) {{endif}} {{if 'cuMemcpy3D_v2' in found_functions}} cdef CUresult cuMemcpy3D(const CUDA_MEMCPY3D* pCopy) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy3D_v2(pCopy) + return cydriver._cuMemcpy3D_v2(pCopy) {{endif}} {{if 'cuMemcpy3DPeer' in found_functions}} cdef CUresult cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER* pCopy) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy3DPeer(pCopy) + return cydriver._cuMemcpy3DPeer(pCopy) {{endif}} {{if 'cuMemcpyAsync' in found_functions}} cdef CUresult cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyAsync(dst, src, ByteCount, hStream) + return cydriver._cuMemcpyAsync(dst, src, ByteCount, hStream) {{endif}} {{if 'cuMemcpyPeerAsync' in found_functions}} cdef CUresult cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyPeerAsync(dstDevice, dstContext, srcDevice, srcContext, ByteCount, hStream) + return cydriver._cuMemcpyPeerAsync(dstDevice, dstContext, srcDevice, srcContext, ByteCount, hStream) {{endif}} {{if 'cuMemcpyHtoDAsync_v2' in found_functions}} cdef CUresult cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void* srcHost, size_t ByteCount, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyHtoDAsync_v2(dstDevice, srcHost, ByteCount, hStream) + return cydriver._cuMemcpyHtoDAsync_v2(dstDevice, srcHost, ByteCount, hStream) {{endif}} {{if 'cuMemcpyDtoHAsync_v2' in found_functions}} cdef CUresult cuMemcpyDtoHAsync(void* dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyDtoHAsync_v2(dstHost, srcDevice, ByteCount, hStream) + return cydriver._cuMemcpyDtoHAsync_v2(dstHost, srcDevice, ByteCount, hStream) {{endif}} {{if 'cuMemcpyDtoDAsync_v2' in found_functions}} cdef CUresult cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyDtoDAsync_v2(dstDevice, srcDevice, ByteCount, hStream) + return cydriver._cuMemcpyDtoDAsync_v2(dstDevice, srcDevice, ByteCount, hStream) {{endif}} {{if 'cuMemcpyHtoAAsync_v2' in found_functions}} cdef CUresult cuMemcpyHtoAAsync(CUarray dstArray, size_t dstOffset, const void* srcHost, size_t ByteCount, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyHtoAAsync_v2(dstArray, dstOffset, srcHost, ByteCount, hStream) + return cydriver._cuMemcpyHtoAAsync_v2(dstArray, dstOffset, srcHost, ByteCount, hStream) {{endif}} {{if 'cuMemcpyAtoHAsync_v2' in found_functions}} cdef CUresult cuMemcpyAtoHAsync(void* dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream 
hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpyAtoHAsync_v2(dstHost, srcArray, srcOffset, ByteCount, hStream) + return cydriver._cuMemcpyAtoHAsync_v2(dstHost, srcArray, srcOffset, ByteCount, hStream) {{endif}} {{if 'cuMemcpy2DAsync_v2' in found_functions}} cdef CUresult cuMemcpy2DAsync(const CUDA_MEMCPY2D* pCopy, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy2DAsync_v2(pCopy, hStream) + return cydriver._cuMemcpy2DAsync_v2(pCopy, hStream) {{endif}} {{if 'cuMemcpy3DAsync_v2' in found_functions}} cdef CUresult cuMemcpy3DAsync(const CUDA_MEMCPY3D* pCopy, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy3DAsync_v2(pCopy, hStream) + return cydriver._cuMemcpy3DAsync_v2(pCopy, hStream) {{endif}} {{if 'cuMemcpy3DPeerAsync' in found_functions}} cdef CUresult cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER* pCopy, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemcpy3DPeerAsync(pCopy, hStream) + return cydriver._cuMemcpy3DPeerAsync(pCopy, hStream) {{endif}} {{if 'cuMemsetD8_v2' in found_functions}} cdef CUresult cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD8_v2(dstDevice, uc, N) + return cydriver._cuMemsetD8_v2(dstDevice, uc, N) {{endif}} {{if 'cuMemsetD16_v2' in found_functions}} cdef CUresult cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD16_v2(dstDevice, us, N) + return cydriver._cuMemsetD16_v2(dstDevice, us, N) {{endif}} {{if 'cuMemsetD32_v2' in found_functions}} cdef CUresult cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD32_v2(dstDevice, ui, N) + return cydriver._cuMemsetD32_v2(dstDevice, ui, N) {{endif}} {{if 'cuMemsetD2D8_v2' in found_functions}} cdef CUresult cuMemsetD2D8(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD2D8_v2(dstDevice, dstPitch, uc, Width, Height) + return cydriver._cuMemsetD2D8_v2(dstDevice, dstPitch, uc, Width, Height) {{endif}} {{if 'cuMemsetD2D16_v2' in found_functions}} cdef CUresult cuMemsetD2D16(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD2D16_v2(dstDevice, dstPitch, us, Width, Height) + return cydriver._cuMemsetD2D16_v2(dstDevice, dstPitch, us, Width, Height) {{endif}} {{if 'cuMemsetD2D32_v2' in found_functions}} cdef CUresult cuMemsetD2D32(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD2D32_v2(dstDevice, dstPitch, ui, Width, Height) + return cydriver._cuMemsetD2D32_v2(dstDevice, dstPitch, ui, Width, Height) {{endif}} {{if 'cuMemsetD8Async' in found_functions}} cdef CUresult cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD8Async(dstDevice, uc, N, hStream) + return cydriver._cuMemsetD8Async(dstDevice, uc, N, hStream) {{endif}} {{if 'cuMemsetD16Async' in found_functions}} cdef CUresult cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD16Async(dstDevice, us, N, hStream) + return cydriver._cuMemsetD16Async(dstDevice, us, N, hStream) {{endif}} {{if 
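The cuMemset wrappers pair a synchronous and a stream-ordered form. A sketch reusing a device buffer dptr of nbytes bytes and a stream created elsewhere (both assumed):

    from cuda.bindings import driver

    # Blocking zero-fill of the whole buffer, one byte at a time.
    err, = driver.cuMemsetD8(dptr, 0, nbytes)

    # Stream-ordered fill of 32-bit words; completes in stream order.
    err, = driver.cuMemsetD32Async(dptr, 0xDEADBEEF, nbytes // 4, stream)
    err, = driver.cuStreamSynchronize(stream)
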
'cuMemsetD32Async' in found_functions}} cdef CUresult cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD32Async(dstDevice, ui, N, hStream) + return cydriver._cuMemsetD32Async(dstDevice, ui, N, hStream) {{endif}} {{if 'cuMemsetD2D8Async' in found_functions}} cdef CUresult cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD2D8Async(dstDevice, dstPitch, uc, Width, Height, hStream) + return cydriver._cuMemsetD2D8Async(dstDevice, dstPitch, uc, Width, Height, hStream) {{endif}} {{if 'cuMemsetD2D16Async' in found_functions}} cdef CUresult cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD2D16Async(dstDevice, dstPitch, us, Width, Height, hStream) + return cydriver._cuMemsetD2D16Async(dstDevice, dstPitch, us, Width, Height, hStream) {{endif}} {{if 'cuMemsetD2D32Async' in found_functions}} cdef CUresult cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemsetD2D32Async(dstDevice, dstPitch, ui, Width, Height, hStream) + return cydriver._cuMemsetD2D32Async(dstDevice, dstPitch, ui, Width, Height, hStream) {{endif}} {{if 'cuArrayCreate_v2' in found_functions}} cdef CUresult cuArrayCreate(CUarray* pHandle, const CUDA_ARRAY_DESCRIPTOR* pAllocateArray) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArrayCreate_v2(pHandle, pAllocateArray) + return cydriver._cuArrayCreate_v2(pHandle, pAllocateArray) {{endif}} {{if 'cuArrayGetDescriptor_v2' in found_functions}} cdef CUresult cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR* pArrayDescriptor, CUarray hArray) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArrayGetDescriptor_v2(pArrayDescriptor, hArray) + return cydriver._cuArrayGetDescriptor_v2(pArrayDescriptor, hArray) {{endif}} {{if 'cuArrayGetSparseProperties' in found_functions}} cdef CUresult cuArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES* sparseProperties, CUarray array) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArrayGetSparseProperties(sparseProperties, array) + return cydriver._cuArrayGetSparseProperties(sparseProperties, array) {{endif}} {{if 'cuMipmappedArrayGetSparseProperties' in found_functions}} cdef CUresult cuMipmappedArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES* sparseProperties, CUmipmappedArray mipmap) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMipmappedArrayGetSparseProperties(sparseProperties, mipmap) + return cydriver._cuMipmappedArrayGetSparseProperties(sparseProperties, mipmap) {{endif}} {{if 'cuArrayGetMemoryRequirements' in found_functions}} cdef CUresult cuArrayGetMemoryRequirements(CUDA_ARRAY_MEMORY_REQUIREMENTS* memoryRequirements, CUarray array, CUdevice device) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArrayGetMemoryRequirements(memoryRequirements, array, device) + return cydriver._cuArrayGetMemoryRequirements(memoryRequirements, array, device) {{endif}} {{if 'cuMipmappedArrayGetMemoryRequirements' in found_functions}} cdef CUresult cuMipmappedArrayGetMemoryRequirements(CUDA_ARRAY_MEMORY_REQUIREMENTS* memoryRequirements, CUmipmappedArray mipmap, CUdevice device) except ?CUDA_ERROR_NOT_FOUND nogil: - return 
ccuda._cuMipmappedArrayGetMemoryRequirements(memoryRequirements, mipmap, device) + return cydriver._cuMipmappedArrayGetMemoryRequirements(memoryRequirements, mipmap, device) {{endif}} {{if 'cuArrayGetPlane' in found_functions}} cdef CUresult cuArrayGetPlane(CUarray* pPlaneArray, CUarray hArray, unsigned int planeIdx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArrayGetPlane(pPlaneArray, hArray, planeIdx) + return cydriver._cuArrayGetPlane(pPlaneArray, hArray, planeIdx) {{endif}} {{if 'cuArrayDestroy' in found_functions}} cdef CUresult cuArrayDestroy(CUarray hArray) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArrayDestroy(hArray) + return cydriver._cuArrayDestroy(hArray) {{endif}} {{if 'cuArray3DCreate_v2' in found_functions}} cdef CUresult cuArray3DCreate(CUarray* pHandle, const CUDA_ARRAY3D_DESCRIPTOR* pAllocateArray) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArray3DCreate_v2(pHandle, pAllocateArray) + return cydriver._cuArray3DCreate_v2(pHandle, pAllocateArray) {{endif}} {{if 'cuArray3DGetDescriptor_v2' in found_functions}} cdef CUresult cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR* pArrayDescriptor, CUarray hArray) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuArray3DGetDescriptor_v2(pArrayDescriptor, hArray) + return cydriver._cuArray3DGetDescriptor_v2(pArrayDescriptor, hArray) {{endif}} {{if 'cuMipmappedArrayCreate' in found_functions}} cdef CUresult cuMipmappedArrayCreate(CUmipmappedArray* pHandle, const CUDA_ARRAY3D_DESCRIPTOR* pMipmappedArrayDesc, unsigned int numMipmapLevels) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMipmappedArrayCreate(pHandle, pMipmappedArrayDesc, numMipmapLevels) + return cydriver._cuMipmappedArrayCreate(pHandle, pMipmappedArrayDesc, numMipmapLevels) {{endif}} {{if 'cuMipmappedArrayGetLevel' in found_functions}} cdef CUresult cuMipmappedArrayGetLevel(CUarray* pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMipmappedArrayGetLevel(pLevelArray, hMipmappedArray, level) + return cydriver._cuMipmappedArrayGetLevel(pLevelArray, hMipmappedArray, level) {{endif}} {{if 'cuMipmappedArrayDestroy' in found_functions}} cdef CUresult cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMipmappedArrayDestroy(hMipmappedArray) + return cydriver._cuMipmappedArrayDestroy(hMipmappedArray) {{endif}} {{if 'cuMemGetHandleForAddressRange' in found_functions}} cdef CUresult cuMemGetHandleForAddressRange(void* handle, CUdeviceptr dptr, size_t size, CUmemRangeHandleType handleType, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemGetHandleForAddressRange(handle, dptr, size, handleType, flags) + return cydriver._cuMemGetHandleForAddressRange(handle, dptr, size, handleType, flags) {{endif}} {{if 'cuMemAddressReserve' in found_functions}} cdef CUresult cuMemAddressReserve(CUdeviceptr* ptr, size_t size, size_t alignment, CUdeviceptr addr, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAddressReserve(ptr, size, alignment, addr, flags) + return cydriver._cuMemAddressReserve(ptr, size, alignment, addr, flags) {{endif}} {{if 'cuMemAddressFree' in found_functions}} cdef CUresult cuMemAddressFree(CUdeviceptr ptr, size_t size) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAddressFree(ptr, size) + return cydriver._cuMemAddressFree(ptr, size) {{endif}} {{if 'cuMemCreate' in found_functions}} cdef CUresult 
cuMemCreate(CUmemGenericAllocationHandle* handle, size_t size, const CUmemAllocationProp* prop, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemCreate(handle, size, prop, flags) + return cydriver._cuMemCreate(handle, size, prop, flags) {{endif}} {{if 'cuMemRelease' in found_functions}} cdef CUresult cuMemRelease(CUmemGenericAllocationHandle handle) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemRelease(handle) + return cydriver._cuMemRelease(handle) {{endif}} {{if 'cuMemMap' in found_functions}} cdef CUresult cuMemMap(CUdeviceptr ptr, size_t size, size_t offset, CUmemGenericAllocationHandle handle, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemMap(ptr, size, offset, handle, flags) + return cydriver._cuMemMap(ptr, size, offset, handle, flags) {{endif}} {{if 'cuMemMapArrayAsync' in found_functions}} cdef CUresult cuMemMapArrayAsync(CUarrayMapInfo* mapInfoList, unsigned int count, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemMapArrayAsync(mapInfoList, count, hStream) + return cydriver._cuMemMapArrayAsync(mapInfoList, count, hStream) {{endif}} {{if 'cuMemUnmap' in found_functions}} cdef CUresult cuMemUnmap(CUdeviceptr ptr, size_t size) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemUnmap(ptr, size) + return cydriver._cuMemUnmap(ptr, size) {{endif}} {{if 'cuMemSetAccess' in found_functions}} cdef CUresult cuMemSetAccess(CUdeviceptr ptr, size_t size, const CUmemAccessDesc* desc, size_t count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemSetAccess(ptr, size, desc, count) + return cydriver._cuMemSetAccess(ptr, size, desc, count) {{endif}} {{if 'cuMemGetAccess' in found_functions}} cdef CUresult cuMemGetAccess(unsigned long long* flags, const CUmemLocation* location, CUdeviceptr ptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemGetAccess(flags, location, ptr) + return cydriver._cuMemGetAccess(flags, location, ptr) {{endif}} {{if 'cuMemExportToShareableHandle' in found_functions}} cdef CUresult cuMemExportToShareableHandle(void* shareableHandle, CUmemGenericAllocationHandle handle, CUmemAllocationHandleType handleType, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemExportToShareableHandle(shareableHandle, handle, handleType, flags) + return cydriver._cuMemExportToShareableHandle(shareableHandle, handle, handleType, flags) {{endif}} {{if 'cuMemImportFromShareableHandle' in found_functions}} cdef CUresult cuMemImportFromShareableHandle(CUmemGenericAllocationHandle* handle, void* osHandle, CUmemAllocationHandleType shHandleType) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemImportFromShareableHandle(handle, osHandle, shHandleType) + return cydriver._cuMemImportFromShareableHandle(handle, osHandle, shHandleType) {{endif}} {{if 'cuMemGetAllocationGranularity' in found_functions}} cdef CUresult cuMemGetAllocationGranularity(size_t* granularity, const CUmemAllocationProp* prop, CUmemAllocationGranularity_flags option) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemGetAllocationGranularity(granularity, prop, option) + return cydriver._cuMemGetAllocationGranularity(granularity, prop, option) {{endif}} {{if 'cuMemGetAllocationPropertiesFromHandle' in found_functions}} cdef CUresult cuMemGetAllocationPropertiesFromHandle(CUmemAllocationProp* prop, CUmemGenericAllocationHandle handle) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemGetAllocationPropertiesFromHandle(prop, handle) + return 
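The virtual-memory-management wrappers (cuMemAddressReserve, cuMemCreate, cuMemMap, cuMemSetAccess, ...) are renamed as a group. A compressed sketch of the usual reserve/create/map/grant sequence; the struct-filling details follow the CUDA VMM documentation and are an illustration, not part of this patch:

    from cuda.bindings import driver

    prop = driver.CUmemAllocationProp()
    prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0  # device ordinal

    err, gran = driver.cuMemGetAllocationGranularity(
        prop, driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)

    err, handle = driver.cuMemCreate(gran, prop, 0)       # physical backing
    err, ptr = driver.cuMemAddressReserve(gran, 0, 0, 0)  # virtual range
    err, = driver.cuMemMap(ptr, gran, 0, handle, 0)

    desc = driver.CUmemAccessDesc()
    desc.location = prop.location
    desc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
    err, = driver.cuMemSetAccess(ptr, gran, [desc], 1)

    # ... use ptr ..., then tear down in reverse order:
    err, = driver.cuMemUnmap(ptr, gran)
    err, = driver.cuMemAddressFree(ptr, gran)
    err, = driver.cuMemRelease(handle)
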
cydriver._cuMemGetAllocationPropertiesFromHandle(prop, handle) {{endif}} {{if 'cuMemRetainAllocationHandle' in found_functions}} cdef CUresult cuMemRetainAllocationHandle(CUmemGenericAllocationHandle* handle, void* addr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemRetainAllocationHandle(handle, addr) + return cydriver._cuMemRetainAllocationHandle(handle, addr) {{endif}} {{if 'cuMemFreeAsync' in found_functions}} cdef CUresult cuMemFreeAsync(CUdeviceptr dptr, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemFreeAsync(dptr, hStream) + return cydriver._cuMemFreeAsync(dptr, hStream) {{endif}} {{if 'cuMemAllocAsync' in found_functions}} cdef CUresult cuMemAllocAsync(CUdeviceptr* dptr, size_t bytesize, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAllocAsync(dptr, bytesize, hStream) + return cydriver._cuMemAllocAsync(dptr, bytesize, hStream) {{endif}} {{if 'cuMemPoolTrimTo' in found_functions}} cdef CUresult cuMemPoolTrimTo(CUmemoryPool pool, size_t minBytesToKeep) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolTrimTo(pool, minBytesToKeep) + return cydriver._cuMemPoolTrimTo(pool, minBytesToKeep) {{endif}} {{if 'cuMemPoolSetAttribute' in found_functions}} cdef CUresult cuMemPoolSetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void* value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolSetAttribute(pool, attr, value) + return cydriver._cuMemPoolSetAttribute(pool, attr, value) {{endif}} {{if 'cuMemPoolGetAttribute' in found_functions}} cdef CUresult cuMemPoolGetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void* value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolGetAttribute(pool, attr, value) + return cydriver._cuMemPoolGetAttribute(pool, attr, value) {{endif}} {{if 'cuMemPoolSetAccess' in found_functions}} cdef CUresult cuMemPoolSetAccess(CUmemoryPool pool, const CUmemAccessDesc* map, size_t count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolSetAccess(pool, map, count) + return cydriver._cuMemPoolSetAccess(pool, map, count) {{endif}} {{if 'cuMemPoolGetAccess' in found_functions}} cdef CUresult cuMemPoolGetAccess(CUmemAccess_flags* flags, CUmemoryPool memPool, CUmemLocation* location) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolGetAccess(flags, memPool, location) + return cydriver._cuMemPoolGetAccess(flags, memPool, location) {{endif}} {{if 'cuMemPoolCreate' in found_functions}} cdef CUresult cuMemPoolCreate(CUmemoryPool* pool, const CUmemPoolProps* poolProps) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolCreate(pool, poolProps) + return cydriver._cuMemPoolCreate(pool, poolProps) {{endif}} {{if 'cuMemPoolDestroy' in found_functions}} cdef CUresult cuMemPoolDestroy(CUmemoryPool pool) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolDestroy(pool) + return cydriver._cuMemPoolDestroy(pool) {{endif}} {{if 'cuMemAllocFromPoolAsync' in found_functions}} cdef CUresult cuMemAllocFromPoolAsync(CUdeviceptr* dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAllocFromPoolAsync(dptr, bytesize, pool, hStream) + return cydriver._cuMemAllocFromPoolAsync(dptr, bytesize, pool, hStream) {{endif}} {{if 'cuMemPoolExportToShareableHandle' in found_functions}} cdef CUresult cuMemPoolExportToShareableHandle(void* handle_out, CUmemoryPool pool, CUmemAllocationHandleType handleType, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return 
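cuMemAllocAsync/cuMemFreeAsync and the cuMemPool* wrappers expose the stream-ordered allocator. A sketch, with stream assumed:

    from cuda.bindings import driver

    err, dptr = driver.cuMemAllocAsync(1 << 20, stream)
    # ... enqueue work that uses dptr on the same stream ...
    err, = driver.cuMemFreeAsync(dptr, stream)
    err, = driver.cuStreamSynchronize(stream)
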
ccuda._cuMemPoolExportToShareableHandle(handle_out, pool, handleType, flags) + return cydriver._cuMemPoolExportToShareableHandle(handle_out, pool, handleType, flags) {{endif}} {{if 'cuMemPoolImportFromShareableHandle' in found_functions}} cdef CUresult cuMemPoolImportFromShareableHandle(CUmemoryPool* pool_out, void* handle, CUmemAllocationHandleType handleType, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolImportFromShareableHandle(pool_out, handle, handleType, flags) + return cydriver._cuMemPoolImportFromShareableHandle(pool_out, handle, handleType, flags) {{endif}} {{if 'cuMemPoolExportPointer' in found_functions}} cdef CUresult cuMemPoolExportPointer(CUmemPoolPtrExportData* shareData_out, CUdeviceptr ptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolExportPointer(shareData_out, ptr) + return cydriver._cuMemPoolExportPointer(shareData_out, ptr) {{endif}} {{if 'cuMemPoolImportPointer' in found_functions}} cdef CUresult cuMemPoolImportPointer(CUdeviceptr* ptr_out, CUmemoryPool pool, CUmemPoolPtrExportData* shareData) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPoolImportPointer(ptr_out, pool, shareData) + return cydriver._cuMemPoolImportPointer(ptr_out, pool, shareData) {{endif}} {{if 'cuMulticastCreate' in found_functions}} cdef CUresult cuMulticastCreate(CUmemGenericAllocationHandle* mcHandle, const CUmulticastObjectProp* prop) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMulticastCreate(mcHandle, prop) + return cydriver._cuMulticastCreate(mcHandle, prop) {{endif}} {{if 'cuMulticastAddDevice' in found_functions}} cdef CUresult cuMulticastAddDevice(CUmemGenericAllocationHandle mcHandle, CUdevice dev) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMulticastAddDevice(mcHandle, dev) + return cydriver._cuMulticastAddDevice(mcHandle, dev) {{endif}} {{if 'cuMulticastBindMem' in found_functions}} cdef CUresult cuMulticastBindMem(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUmemGenericAllocationHandle memHandle, size_t memOffset, size_t size, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMulticastBindMem(mcHandle, mcOffset, memHandle, memOffset, size, flags) + return cydriver._cuMulticastBindMem(mcHandle, mcOffset, memHandle, memOffset, size, flags) {{endif}} {{if 'cuMulticastBindAddr' in found_functions}} cdef CUresult cuMulticastBindAddr(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUdeviceptr memptr, size_t size, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMulticastBindAddr(mcHandle, mcOffset, memptr, size, flags) + return cydriver._cuMulticastBindAddr(mcHandle, mcOffset, memptr, size, flags) {{endif}} {{if 'cuMulticastUnbind' in found_functions}} cdef CUresult cuMulticastUnbind(CUmemGenericAllocationHandle mcHandle, CUdevice dev, size_t mcOffset, size_t size) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMulticastUnbind(mcHandle, dev, mcOffset, size) + return cydriver._cuMulticastUnbind(mcHandle, dev, mcOffset, size) {{endif}} {{if 'cuMulticastGetGranularity' in found_functions}} cdef CUresult cuMulticastGetGranularity(size_t* granularity, const CUmulticastObjectProp* prop, CUmulticastGranularity_flags option) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMulticastGetGranularity(granularity, prop, option) + return cydriver._cuMulticastGetGranularity(granularity, prop, option) {{endif}} {{if 'cuPointerGetAttribute' in found_functions}} cdef CUresult cuPointerGetAttribute(void* data, CUpointer_attribute 
attribute, CUdeviceptr ptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuPointerGetAttribute(data, attribute, ptr) + return cydriver._cuPointerGetAttribute(data, attribute, ptr) {{endif}} {{if 'cuMemPrefetchAsync' in found_functions}} cdef CUresult cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPrefetchAsync(devPtr, count, dstDevice, hStream) + return cydriver._cuMemPrefetchAsync(devPtr, count, dstDevice, hStream) {{endif}} {{if 'cuMemPrefetchAsync_v2' in found_functions}} cdef CUresult cuMemPrefetchAsync_v2(CUdeviceptr devPtr, size_t count, CUmemLocation location, unsigned int flags, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemPrefetchAsync_v2(devPtr, count, location, flags, hStream) + return cydriver._cuMemPrefetchAsync_v2(devPtr, count, location, flags, hStream) {{endif}} {{if 'cuMemAdvise' in found_functions}} cdef CUresult cuMemAdvise(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUdevice device) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAdvise(devPtr, count, advice, device) + return cydriver._cuMemAdvise(devPtr, count, advice, device) {{endif}} {{if 'cuMemAdvise_v2' in found_functions}} cdef CUresult cuMemAdvise_v2(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUmemLocation location) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemAdvise_v2(devPtr, count, advice, location) + return cydriver._cuMemAdvise_v2(devPtr, count, advice, location) {{endif}} {{if 'cuMemRangeGetAttribute' in found_functions}} cdef CUresult cuMemRangeGetAttribute(void* data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr devPtr, size_t count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemRangeGetAttribute(data, dataSize, attribute, devPtr, count) + return cydriver._cuMemRangeGetAttribute(data, dataSize, attribute, devPtr, count) {{endif}} {{if 'cuMemRangeGetAttributes' in found_functions}} cdef CUresult cuMemRangeGetAttributes(void** data, size_t* dataSizes, CUmem_range_attribute* attributes, size_t numAttributes, CUdeviceptr devPtr, size_t count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuMemRangeGetAttributes(data, dataSizes, attributes, numAttributes, devPtr, count) + return cydriver._cuMemRangeGetAttributes(data, dataSizes, attributes, numAttributes, devPtr, count) {{endif}} {{if 'cuPointerSetAttribute' in found_functions}} cdef CUresult cuPointerSetAttribute(const void* value, CUpointer_attribute attribute, CUdeviceptr ptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuPointerSetAttribute(value, attribute, ptr) + return cydriver._cuPointerSetAttribute(value, attribute, ptr) {{endif}} {{if 'cuPointerGetAttributes' in found_functions}} cdef CUresult cuPointerGetAttributes(unsigned int numAttributes, CUpointer_attribute* attributes, void** data, CUdeviceptr ptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuPointerGetAttributes(numAttributes, attributes, data, ptr) + return cydriver._cuPointerGetAttributes(numAttributes, attributes, data, ptr) {{endif}} {{if 'cuStreamCreate' in found_functions}} cdef CUresult cuStreamCreate(CUstream* phStream, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamCreate(phStream, Flags) + return cydriver._cuStreamCreate(phStream, Flags) {{endif}} {{if 'cuStreamCreateWithPriority' in found_functions}} cdef CUresult cuStreamCreateWithPriority(CUstream* phStream, unsigned int flags, int priority) except 
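The unified-memory hint wrappers (cuMemAdvise, cuMemPrefetchAsync) sit next to the pointer-attribute queries. A sketch combining them with cuMemAllocManaged (renamed above); device (from cuDeviceGet) and stream are assumed:

    from cuda.bindings import driver

    nbytes = 1 << 20
    err, mptr = driver.cuMemAllocManaged(
        nbytes, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL)
    err, = driver.cuMemAdvise(
        mptr, nbytes, driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, device)
    err, = driver.cuMemPrefetchAsync(mptr, nbytes, device, stream)
    err, = driver.cuMemFree(mptr)
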
?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamCreateWithPriority(phStream, flags, priority) + return cydriver._cuStreamCreateWithPriority(phStream, flags, priority) {{endif}} {{if 'cuStreamGetPriority' in found_functions}} cdef CUresult cuStreamGetPriority(CUstream hStream, int* priority) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetPriority(hStream, priority) + return cydriver._cuStreamGetPriority(hStream, priority) {{endif}} {{if 'cuStreamGetFlags' in found_functions}} cdef CUresult cuStreamGetFlags(CUstream hStream, unsigned int* flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetFlags(hStream, flags) + return cydriver._cuStreamGetFlags(hStream, flags) {{endif}} {{if 'cuStreamGetId' in found_functions}} cdef CUresult cuStreamGetId(CUstream hStream, unsigned long long* streamId) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetId(hStream, streamId) + return cydriver._cuStreamGetId(hStream, streamId) {{endif}} {{if 'cuStreamGetCtx' in found_functions}} cdef CUresult cuStreamGetCtx(CUstream hStream, CUcontext* pctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetCtx(hStream, pctx) + return cydriver._cuStreamGetCtx(hStream, pctx) {{endif}} {{if 'cuStreamGetCtx_v2' in found_functions}} cdef CUresult cuStreamGetCtx_v2(CUstream hStream, CUcontext* pCtx, CUgreenCtx* pGreenCtx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetCtx_v2(hStream, pCtx, pGreenCtx) + return cydriver._cuStreamGetCtx_v2(hStream, pCtx, pGreenCtx) {{endif}} {{if 'cuStreamWaitEvent' in found_functions}} cdef CUresult cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamWaitEvent(hStream, hEvent, Flags) + return cydriver._cuStreamWaitEvent(hStream, hEvent, Flags) {{endif}} {{if 'cuStreamAddCallback' in found_functions}} cdef CUresult cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void* userData, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamAddCallback(hStream, callback, userData, flags) + return cydriver._cuStreamAddCallback(hStream, callback, userData, flags) {{endif}} {{if 'cuStreamBeginCapture_v2' in found_functions}} cdef CUresult cuStreamBeginCapture(CUstream hStream, CUstreamCaptureMode mode) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamBeginCapture_v2(hStream, mode) + return cydriver._cuStreamBeginCapture_v2(hStream, mode) {{endif}} {{if 'cuStreamBeginCaptureToGraph' in found_functions}} cdef CUresult cuStreamBeginCaptureToGraph(CUstream hStream, CUgraph hGraph, const CUgraphNode* dependencies, const CUgraphEdgeData* dependencyData, size_t numDependencies, CUstreamCaptureMode mode) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamBeginCaptureToGraph(hStream, hGraph, dependencies, dependencyData, numDependencies, mode) + return cydriver._cuStreamBeginCaptureToGraph(hStream, hGraph, dependencies, dependencyData, numDependencies, mode) {{endif}} {{if 'cuThreadExchangeStreamCaptureMode' in found_functions}} cdef CUresult cuThreadExchangeStreamCaptureMode(CUstreamCaptureMode* mode) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuThreadExchangeStreamCaptureMode(mode) + return cydriver._cuThreadExchangeStreamCaptureMode(mode) {{endif}} {{if 'cuStreamEndCapture' in found_functions}} cdef CUresult cuStreamEndCapture(CUstream hStream, CUgraph* phGraph) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamEndCapture(hStream, phGraph) + return 
cydriver._cuStreamEndCapture(hStream, phGraph) {{endif}} {{if 'cuStreamIsCapturing' in found_functions}} cdef CUresult cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus* captureStatus) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamIsCapturing(hStream, captureStatus) + return cydriver._cuStreamIsCapturing(hStream, captureStatus) {{endif}} {{if 'cuStreamGetCaptureInfo_v2' in found_functions}} cdef CUresult cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus* captureStatus_out, cuuint64_t* id_out, CUgraph* graph_out, const CUgraphNode** dependencies_out, size_t* numDependencies_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetCaptureInfo_v2(hStream, captureStatus_out, id_out, graph_out, dependencies_out, numDependencies_out) + return cydriver._cuStreamGetCaptureInfo_v2(hStream, captureStatus_out, id_out, graph_out, dependencies_out, numDependencies_out) {{endif}} {{if 'cuStreamGetCaptureInfo_v3' in found_functions}} cdef CUresult cuStreamGetCaptureInfo_v3(CUstream hStream, CUstreamCaptureStatus* captureStatus_out, cuuint64_t* id_out, CUgraph* graph_out, const CUgraphNode** dependencies_out, const CUgraphEdgeData** edgeData_out, size_t* numDependencies_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetCaptureInfo_v3(hStream, captureStatus_out, id_out, graph_out, dependencies_out, edgeData_out, numDependencies_out) + return cydriver._cuStreamGetCaptureInfo_v3(hStream, captureStatus_out, id_out, graph_out, dependencies_out, edgeData_out, numDependencies_out) {{endif}} {{if 'cuStreamUpdateCaptureDependencies' in found_functions}} cdef CUresult cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode* dependencies, size_t numDependencies, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamUpdateCaptureDependencies(hStream, dependencies, numDependencies, flags) + return cydriver._cuStreamUpdateCaptureDependencies(hStream, dependencies, numDependencies, flags) {{endif}} {{if 'cuStreamUpdateCaptureDependencies_v2' in found_functions}} cdef CUresult cuStreamUpdateCaptureDependencies_v2(CUstream hStream, CUgraphNode* dependencies, const CUgraphEdgeData* dependencyData, size_t numDependencies, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamUpdateCaptureDependencies_v2(hStream, dependencies, dependencyData, numDependencies, flags) + return cydriver._cuStreamUpdateCaptureDependencies_v2(hStream, dependencies, dependencyData, numDependencies, flags) {{endif}} {{if 'cuStreamAttachMemAsync' in found_functions}} cdef CUresult cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamAttachMemAsync(hStream, dptr, length, flags) + return cydriver._cuStreamAttachMemAsync(hStream, dptr, length, flags) {{endif}} {{if 'cuStreamQuery' in found_functions}} cdef CUresult cuStreamQuery(CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamQuery(hStream) + return cydriver._cuStreamQuery(hStream) {{endif}} {{if 'cuStreamSynchronize' in found_functions}} cdef CUresult cuStreamSynchronize(CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamSynchronize(hStream) + return cydriver._cuStreamSynchronize(hStream) {{endif}} {{if 'cuStreamDestroy_v2' in found_functions}} cdef CUresult cuStreamDestroy(CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamDestroy_v2(hStream) + return cydriver._cuStreamDestroy_v2(hStream) 
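The stream-capture wrappers close out the stream section. A sketch of capturing enqueued work into a graph; the cuGraphInstantiate/cuGraphLaunch calls appear later in this file and are shown here only to complete the picture, with stream assumed:

    from cuda.bindings import driver

    err, = driver.cuStreamBeginCapture(
        stream, driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL)
    # ... enqueue async work on `stream`; nothing executes during capture ...
    err, graph = driver.cuStreamEndCapture(stream)

    err, graph_exec = driver.cuGraphInstantiate(graph, 0)
    err, = driver.cuGraphLaunch(graph_exec, stream)
    err, = driver.cuStreamSynchronize(stream)
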
{{endif}} {{if 'cuStreamCopyAttributes' in found_functions}} cdef CUresult cuStreamCopyAttributes(CUstream dst, CUstream src) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamCopyAttributes(dst, src) + return cydriver._cuStreamCopyAttributes(dst, src) {{endif}} {{if 'cuStreamGetAttribute' in found_functions}} cdef CUresult cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue* value_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamGetAttribute(hStream, attr, value_out) + return cydriver._cuStreamGetAttribute(hStream, attr, value_out) {{endif}} {{if 'cuStreamSetAttribute' in found_functions}} cdef CUresult cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue* value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamSetAttribute(hStream, attr, value) + return cydriver._cuStreamSetAttribute(hStream, attr, value) {{endif}} {{if 'cuEventCreate' in found_functions}} cdef CUresult cuEventCreate(CUevent* phEvent, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuEventCreate(phEvent, Flags) + return cydriver._cuEventCreate(phEvent, Flags) {{endif}} {{if 'cuEventRecord' in found_functions}} cdef CUresult cuEventRecord(CUevent hEvent, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuEventRecord(hEvent, hStream) + return cydriver._cuEventRecord(hEvent, hStream) {{endif}} {{if 'cuEventRecordWithFlags' in found_functions}} cdef CUresult cuEventRecordWithFlags(CUevent hEvent, CUstream hStream, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuEventRecordWithFlags(hEvent, hStream, flags) + return cydriver._cuEventRecordWithFlags(hEvent, hStream, flags) {{endif}} {{if 'cuEventQuery' in found_functions}} cdef CUresult cuEventQuery(CUevent hEvent) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuEventQuery(hEvent) + return cydriver._cuEventQuery(hEvent) {{endif}} {{if 'cuEventSynchronize' in found_functions}} cdef CUresult cuEventSynchronize(CUevent hEvent) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuEventSynchronize(hEvent) + return cydriver._cuEventSynchronize(hEvent) {{endif}} {{if 'cuEventDestroy_v2' in found_functions}} cdef CUresult cuEventDestroy(CUevent hEvent) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuEventDestroy_v2(hEvent) + return cydriver._cuEventDestroy_v2(hEvent) {{endif}} {{if 'cuEventElapsedTime' in found_functions}} cdef CUresult cuEventElapsedTime(float* pMilliseconds, CUevent hStart, CUevent hEnd) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuEventElapsedTime(pMilliseconds, hStart, hEnd) + return cydriver._cuEventElapsedTime(pMilliseconds, hStart, hEnd) {{endif}} {{if 'cuImportExternalMemory' in found_functions}} cdef CUresult cuImportExternalMemory(CUexternalMemory* extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC* memHandleDesc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuImportExternalMemory(extMem_out, memHandleDesc) + return cydriver._cuImportExternalMemory(extMem_out, memHandleDesc) {{endif}} {{if 'cuExternalMemoryGetMappedBuffer' in found_functions}} cdef CUresult cuExternalMemoryGetMappedBuffer(CUdeviceptr* devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC* bufferDesc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuExternalMemoryGetMappedBuffer(devPtr, extMem, bufferDesc) + return cydriver._cuExternalMemoryGetMappedBuffer(devPtr, extMem, bufferDesc) {{endif}} {{if 'cuExternalMemoryGetMappedMipmappedArray' in found_functions}} cdef CUresult 
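The event wrappers are the usual timing primitive. A sketch bracketing stream work with a pair of events (stream assumed):

    from cuda.bindings import driver

    err, start = driver.cuEventCreate(0)
    err, stop = driver.cuEventCreate(0)

    err, = driver.cuEventRecord(start, stream)
    # ... enqueue the work to be timed ...
    err, = driver.cuEventRecord(stop, stream)
    err, = driver.cuEventSynchronize(stop)

    err, ms = driver.cuEventElapsedTime(start, stop)
    err, = driver.cuEventDestroy(start)
    err, = driver.cuEventDestroy(stop)
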
cuExternalMemoryGetMappedMipmappedArray(CUmipmappedArray* mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC* mipmapDesc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuExternalMemoryGetMappedMipmappedArray(mipmap, extMem, mipmapDesc) + return cydriver._cuExternalMemoryGetMappedMipmappedArray(mipmap, extMem, mipmapDesc) {{endif}} {{if 'cuDestroyExternalMemory' in found_functions}} cdef CUresult cuDestroyExternalMemory(CUexternalMemory extMem) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDestroyExternalMemory(extMem) + return cydriver._cuDestroyExternalMemory(extMem) {{endif}} {{if 'cuImportExternalSemaphore' in found_functions}} cdef CUresult cuImportExternalSemaphore(CUexternalSemaphore* extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC* semHandleDesc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuImportExternalSemaphore(extSem_out, semHandleDesc) + return cydriver._cuImportExternalSemaphore(extSem_out, semHandleDesc) {{endif}} {{if 'cuSignalExternalSemaphoresAsync' in found_functions}} cdef CUresult cuSignalExternalSemaphoresAsync(const CUexternalSemaphore* extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* paramsArray, unsigned int numExtSems, CUstream stream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuSignalExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) + return cydriver._cuSignalExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) {{endif}} {{if 'cuWaitExternalSemaphoresAsync' in found_functions}} cdef CUresult cuWaitExternalSemaphoresAsync(const CUexternalSemaphore* extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* paramsArray, unsigned int numExtSems, CUstream stream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuWaitExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) + return cydriver._cuWaitExternalSemaphoresAsync(extSemArray, paramsArray, numExtSems, stream) {{endif}} {{if 'cuDestroyExternalSemaphore' in found_functions}} cdef CUresult cuDestroyExternalSemaphore(CUexternalSemaphore extSem) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDestroyExternalSemaphore(extSem) + return cydriver._cuDestroyExternalSemaphore(extSem) {{endif}} {{if 'cuStreamWaitValue32_v2' in found_functions}} cdef CUresult cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamWaitValue32_v2(stream, addr, value, flags) + return cydriver._cuStreamWaitValue32_v2(stream, addr, value, flags) {{endif}} {{if 'cuStreamWaitValue64_v2' in found_functions}} cdef CUresult cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamWaitValue64_v2(stream, addr, value, flags) + return cydriver._cuStreamWaitValue64_v2(stream, addr, value, flags) {{endif}} {{if 'cuStreamWriteValue32_v2' in found_functions}} cdef CUresult cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamWriteValue32_v2(stream, addr, value, flags) + return cydriver._cuStreamWriteValue32_v2(stream, addr, value, flags) {{endif}} {{if 'cuStreamWriteValue64_v2' in found_functions}} cdef CUresult cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamWriteValue64_v2(stream, addr, value, flags) + return 
cydriver._cuStreamWriteValue64_v2(stream, addr, value, flags) {{endif}} {{if 'cuStreamBatchMemOp_v2' in found_functions}} cdef CUresult cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams* paramArray, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuStreamBatchMemOp_v2(stream, count, paramArray, flags) + return cydriver._cuStreamBatchMemOp_v2(stream, count, paramArray, flags) {{endif}} {{if 'cuFuncGetAttribute' in found_functions}} cdef CUresult cuFuncGetAttribute(int* pi, CUfunction_attribute attrib, CUfunction hfunc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncGetAttribute(pi, attrib, hfunc) + return cydriver._cuFuncGetAttribute(pi, attrib, hfunc) {{endif}} {{if 'cuFuncSetAttribute' in found_functions}} cdef CUresult cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncSetAttribute(hfunc, attrib, value) + return cydriver._cuFuncSetAttribute(hfunc, attrib, value) {{endif}} {{if 'cuFuncSetCacheConfig' in found_functions}} cdef CUresult cuFuncSetCacheConfig(CUfunction hfunc, CUfunc_cache config) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncSetCacheConfig(hfunc, config) + return cydriver._cuFuncSetCacheConfig(hfunc, config) {{endif}} {{if 'cuFuncGetModule' in found_functions}} cdef CUresult cuFuncGetModule(CUmodule* hmod, CUfunction hfunc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncGetModule(hmod, hfunc) + return cydriver._cuFuncGetModule(hmod, hfunc) {{endif}} {{if 'cuFuncGetName' in found_functions}} cdef CUresult cuFuncGetName(const char** name, CUfunction hfunc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncGetName(name, hfunc) + return cydriver._cuFuncGetName(name, hfunc) {{endif}} {{if 'cuFuncGetParamInfo' in found_functions}} cdef CUresult cuFuncGetParamInfo(CUfunction func, size_t paramIndex, size_t* paramOffset, size_t* paramSize) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncGetParamInfo(func, paramIndex, paramOffset, paramSize) + return cydriver._cuFuncGetParamInfo(func, paramIndex, paramOffset, paramSize) {{endif}} {{if 'cuFuncIsLoaded' in found_functions}} cdef CUresult cuFuncIsLoaded(CUfunctionLoadingState* state, CUfunction function) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncIsLoaded(state, function) + return cydriver._cuFuncIsLoaded(state, function) {{endif}} {{if 'cuFuncLoad' in found_functions}} cdef CUresult cuFuncLoad(CUfunction function) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncLoad(function) + return cydriver._cuFuncLoad(function) {{endif}} {{if 'cuLaunchKernel' in found_functions}} cdef CUresult cuLaunchKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void** kernelParams, void** extra) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunchKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams, extra) + return cydriver._cuLaunchKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams, extra) {{endif}} {{if 'cuLaunchKernelEx' in found_functions}} cdef CUresult cuLaunchKernelEx(const CUlaunchConfig* config, CUfunction f, void** kernelParams, void** extra) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunchKernelEx(config, f, kernelParams, extra) + return 
cydriver._cuLaunchKernelEx(config, f, kernelParams, extra) {{endif}} {{if 'cuLaunchCooperativeKernel' in found_functions}} cdef CUresult cuLaunchCooperativeKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void** kernelParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunchCooperativeKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams) + return cydriver._cuLaunchCooperativeKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams) {{endif}} {{if 'cuLaunchCooperativeKernelMultiDevice' in found_functions}} cdef CUresult cuLaunchCooperativeKernelMultiDevice(CUDA_LAUNCH_PARAMS* launchParamsList, unsigned int numDevices, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunchCooperativeKernelMultiDevice(launchParamsList, numDevices, flags) + return cydriver._cuLaunchCooperativeKernelMultiDevice(launchParamsList, numDevices, flags) {{endif}} {{if 'cuLaunchHostFunc' in found_functions}} cdef CUresult cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void* userData) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunchHostFunc(hStream, fn, userData) + return cydriver._cuLaunchHostFunc(hStream, fn, userData) {{endif}} {{if 'cuFuncSetBlockShape' in found_functions}} cdef CUresult cuFuncSetBlockShape(CUfunction hfunc, int x, int y, int z) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncSetBlockShape(hfunc, x, y, z) + return cydriver._cuFuncSetBlockShape(hfunc, x, y, z) {{endif}} {{if 'cuFuncSetSharedSize' in found_functions}} cdef CUresult cuFuncSetSharedSize(CUfunction hfunc, unsigned int numbytes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncSetSharedSize(hfunc, numbytes) + return cydriver._cuFuncSetSharedSize(hfunc, numbytes) {{endif}} {{if 'cuParamSetSize' in found_functions}} cdef CUresult cuParamSetSize(CUfunction hfunc, unsigned int numbytes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuParamSetSize(hfunc, numbytes) + return cydriver._cuParamSetSize(hfunc, numbytes) {{endif}} {{if 'cuParamSeti' in found_functions}} cdef CUresult cuParamSeti(CUfunction hfunc, int offset, unsigned int value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuParamSeti(hfunc, offset, value) + return cydriver._cuParamSeti(hfunc, offset, value) {{endif}} {{if 'cuParamSetf' in found_functions}} cdef CUresult cuParamSetf(CUfunction hfunc, int offset, float value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuParamSetf(hfunc, offset, value) + return cydriver._cuParamSetf(hfunc, offset, value) {{endif}} {{if 'cuParamSetv' in found_functions}} cdef CUresult cuParamSetv(CUfunction hfunc, int offset, void* ptr, unsigned int numbytes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuParamSetv(hfunc, offset, ptr, numbytes) + return cydriver._cuParamSetv(hfunc, offset, ptr, numbytes) {{endif}} {{if 'cuLaunch' in found_functions}} cdef CUresult cuLaunch(CUfunction f) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunch(f) + return cydriver._cuLaunch(f) {{endif}} {{if 'cuLaunchGrid' in found_functions}} cdef CUresult cuLaunchGrid(CUfunction f, int grid_width, int grid_height) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunchGrid(f, grid_width, grid_height) + return cydriver._cuLaunchGrid(f, grid_width, grid_height) {{endif}} {{if 'cuLaunchGridAsync' in 
found_functions}} cdef CUresult cuLaunchGridAsync(CUfunction f, int grid_width, int grid_height, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuLaunchGridAsync(f, grid_width, grid_height, hStream) + return cydriver._cuLaunchGridAsync(f, grid_width, grid_height, hStream) {{endif}} {{if 'cuParamSetTexRef' in found_functions}} cdef CUresult cuParamSetTexRef(CUfunction hfunc, int texunit, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuParamSetTexRef(hfunc, texunit, hTexRef) + return cydriver._cuParamSetTexRef(hfunc, texunit, hTexRef) {{endif}} {{if 'cuFuncSetSharedMemConfig' in found_functions}} cdef CUresult cuFuncSetSharedMemConfig(CUfunction hfunc, CUsharedconfig config) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuFuncSetSharedMemConfig(hfunc, config) + return cydriver._cuFuncSetSharedMemConfig(hfunc, config) {{endif}} {{if 'cuGraphCreate' in found_functions}} cdef CUresult cuGraphCreate(CUgraph* phGraph, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphCreate(phGraph, flags) + return cydriver._cuGraphCreate(phGraph, flags) {{endif}} {{if 'cuGraphAddKernelNode_v2' in found_functions}} cdef CUresult cuGraphAddKernelNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddKernelNode_v2(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) + return cydriver._cuGraphAddKernelNode_v2(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) {{endif}} {{if 'cuGraphKernelNodeGetParams_v2' in found_functions}} cdef CUresult cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphKernelNodeGetParams_v2(hNode, nodeParams) + return cydriver._cuGraphKernelNodeGetParams_v2(hNode, nodeParams) {{endif}} {{if 'cuGraphKernelNodeSetParams_v2' in found_functions}} cdef CUresult cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphKernelNodeSetParams_v2(hNode, nodeParams) + return cydriver._cuGraphKernelNodeSetParams_v2(hNode, nodeParams) {{endif}} {{if 'cuGraphAddMemcpyNode' in found_functions}} cdef CUresult cuGraphAddMemcpyNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, const CUDA_MEMCPY3D* copyParams, CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddMemcpyNode(phGraphNode, hGraph, dependencies, numDependencies, copyParams, ctx) + return cydriver._cuGraphAddMemcpyNode(phGraphNode, hGraph, dependencies, numDependencies, copyParams, ctx) {{endif}} {{if 'cuGraphMemcpyNodeGetParams' in found_functions}} cdef CUresult cuGraphMemcpyNodeGetParams(CUgraphNode hNode, CUDA_MEMCPY3D* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphMemcpyNodeGetParams(hNode, nodeParams) + return cydriver._cuGraphMemcpyNodeGetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphMemcpyNodeSetParams' in found_functions}} cdef CUresult cuGraphMemcpyNodeSetParams(CUgraphNode hNode, const CUDA_MEMCPY3D* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphMemcpyNodeSetParams(hNode, nodeParams) + return cydriver._cuGraphMemcpyNodeSetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphAddMemsetNode' in found_functions}} cdef CUresult cuGraphAddMemsetNode(CUgraphNode* phGraphNode, CUgraph hGraph, 
const CUgraphNode* dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS* memsetParams, CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddMemsetNode(phGraphNode, hGraph, dependencies, numDependencies, memsetParams, ctx) + return cydriver._cuGraphAddMemsetNode(phGraphNode, hGraph, dependencies, numDependencies, memsetParams, ctx) {{endif}} {{if 'cuGraphMemsetNodeGetParams' in found_functions}} cdef CUresult cuGraphMemsetNodeGetParams(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphMemsetNodeGetParams(hNode, nodeParams) + return cydriver._cuGraphMemsetNodeGetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphMemsetNodeSetParams' in found_functions}} cdef CUresult cuGraphMemsetNodeSetParams(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphMemsetNodeSetParams(hNode, nodeParams) + return cydriver._cuGraphMemsetNodeSetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphAddHostNode' in found_functions}} cdef CUresult cuGraphAddHostNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddHostNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) + return cydriver._cuGraphAddHostNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) {{endif}} {{if 'cuGraphHostNodeGetParams' in found_functions}} cdef CUresult cuGraphHostNodeGetParams(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphHostNodeGetParams(hNode, nodeParams) + return cydriver._cuGraphHostNodeGetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphHostNodeSetParams' in found_functions}} cdef CUresult cuGraphHostNodeSetParams(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphHostNodeSetParams(hNode, nodeParams) + return cydriver._cuGraphHostNodeSetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphAddChildGraphNode' in found_functions}} cdef CUresult cuGraphAddChildGraphNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, CUgraph childGraph) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddChildGraphNode(phGraphNode, hGraph, dependencies, numDependencies, childGraph) + return cydriver._cuGraphAddChildGraphNode(phGraphNode, hGraph, dependencies, numDependencies, childGraph) {{endif}} {{if 'cuGraphChildGraphNodeGetGraph' in found_functions}} cdef CUresult cuGraphChildGraphNodeGetGraph(CUgraphNode hNode, CUgraph* phGraph) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphChildGraphNodeGetGraph(hNode, phGraph) + return cydriver._cuGraphChildGraphNodeGetGraph(hNode, phGraph) {{endif}} {{if 'cuGraphAddEmptyNode' in found_functions}} cdef CUresult cuGraphAddEmptyNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddEmptyNode(phGraphNode, hGraph, dependencies, numDependencies) + return cydriver._cuGraphAddEmptyNode(phGraphNode, hGraph, dependencies, numDependencies) {{endif}} {{if 'cuGraphAddEventRecordNode' in found_functions}} cdef CUresult cuGraphAddEventRecordNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, CUevent event) except 
?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddEventRecordNode(phGraphNode, hGraph, dependencies, numDependencies, event) + return cydriver._cuGraphAddEventRecordNode(phGraphNode, hGraph, dependencies, numDependencies, event) {{endif}} {{if 'cuGraphEventRecordNodeGetEvent' in found_functions}} cdef CUresult cuGraphEventRecordNodeGetEvent(CUgraphNode hNode, CUevent* event_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphEventRecordNodeGetEvent(hNode, event_out) + return cydriver._cuGraphEventRecordNodeGetEvent(hNode, event_out) {{endif}} {{if 'cuGraphEventRecordNodeSetEvent' in found_functions}} cdef CUresult cuGraphEventRecordNodeSetEvent(CUgraphNode hNode, CUevent event) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphEventRecordNodeSetEvent(hNode, event) + return cydriver._cuGraphEventRecordNodeSetEvent(hNode, event) {{endif}} {{if 'cuGraphAddEventWaitNode' in found_functions}} cdef CUresult cuGraphAddEventWaitNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, CUevent event) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddEventWaitNode(phGraphNode, hGraph, dependencies, numDependencies, event) + return cydriver._cuGraphAddEventWaitNode(phGraphNode, hGraph, dependencies, numDependencies, event) {{endif}} {{if 'cuGraphEventWaitNodeGetEvent' in found_functions}} cdef CUresult cuGraphEventWaitNodeGetEvent(CUgraphNode hNode, CUevent* event_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphEventWaitNodeGetEvent(hNode, event_out) + return cydriver._cuGraphEventWaitNodeGetEvent(hNode, event_out) {{endif}} {{if 'cuGraphEventWaitNodeSetEvent' in found_functions}} cdef CUresult cuGraphEventWaitNodeSetEvent(CUgraphNode hNode, CUevent event) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphEventWaitNodeSetEvent(hNode, event) + return cydriver._cuGraphEventWaitNodeSetEvent(hNode, event) {{endif}} {{if 'cuGraphAddExternalSemaphoresSignalNode' in found_functions}} cdef CUresult cuGraphAddExternalSemaphoresSignalNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddExternalSemaphoresSignalNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) + return cydriver._cuGraphAddExternalSemaphoresSignalNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) {{endif}} {{if 'cuGraphExternalSemaphoresSignalNodeGetParams' in found_functions}} cdef CUresult cuGraphExternalSemaphoresSignalNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* params_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExternalSemaphoresSignalNodeGetParams(hNode, params_out) + return cydriver._cuGraphExternalSemaphoresSignalNodeGetParams(hNode, params_out) {{endif}} {{if 'cuGraphExternalSemaphoresSignalNodeSetParams' in found_functions}} cdef CUresult cuGraphExternalSemaphoresSignalNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExternalSemaphoresSignalNodeSetParams(hNode, nodeParams) + return cydriver._cuGraphExternalSemaphoresSignalNodeSetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphAddExternalSemaphoresWaitNode' in found_functions}} cdef CUresult cuGraphAddExternalSemaphoresWaitNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, const 
CUDA_EXT_SEM_WAIT_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddExternalSemaphoresWaitNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) + return cydriver._cuGraphAddExternalSemaphoresWaitNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) {{endif}} {{if 'cuGraphExternalSemaphoresWaitNodeGetParams' in found_functions}} cdef CUresult cuGraphExternalSemaphoresWaitNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS* params_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExternalSemaphoresWaitNodeGetParams(hNode, params_out) + return cydriver._cuGraphExternalSemaphoresWaitNodeGetParams(hNode, params_out) {{endif}} {{if 'cuGraphExternalSemaphoresWaitNodeSetParams' in found_functions}} cdef CUresult cuGraphExternalSemaphoresWaitNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExternalSemaphoresWaitNodeSetParams(hNode, nodeParams) + return cydriver._cuGraphExternalSemaphoresWaitNodeSetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphAddBatchMemOpNode' in found_functions}} cdef CUresult cuGraphAddBatchMemOpNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, const CUDA_BATCH_MEM_OP_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddBatchMemOpNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) + return cydriver._cuGraphAddBatchMemOpNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) {{endif}} {{if 'cuGraphBatchMemOpNodeGetParams' in found_functions}} cdef CUresult cuGraphBatchMemOpNodeGetParams(CUgraphNode hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS* nodeParams_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphBatchMemOpNodeGetParams(hNode, nodeParams_out) + return cydriver._cuGraphBatchMemOpNodeGetParams(hNode, nodeParams_out) {{endif}} {{if 'cuGraphBatchMemOpNodeSetParams' in found_functions}} cdef CUresult cuGraphBatchMemOpNodeSetParams(CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphBatchMemOpNodeSetParams(hNode, nodeParams) + return cydriver._cuGraphBatchMemOpNodeSetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphExecBatchMemOpNodeSetParams' in found_functions}} cdef CUresult cuGraphExecBatchMemOpNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecBatchMemOpNodeSetParams(hGraphExec, hNode, nodeParams) + return cydriver._cuGraphExecBatchMemOpNodeSetParams(hGraphExec, hNode, nodeParams) {{endif}} {{if 'cuGraphAddMemAllocNode' in found_functions}} cdef CUresult cuGraphAddMemAllocNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddMemAllocNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) + return cydriver._cuGraphAddMemAllocNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) {{endif}} {{if 'cuGraphMemAllocNodeGetParams' in found_functions}} cdef CUresult cuGraphMemAllocNodeGetParams(CUgraphNode hNode, CUDA_MEM_ALLOC_NODE_PARAMS* params_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphMemAllocNodeGetParams(hNode, params_out) + return cydriver._cuGraphMemAllocNodeGetParams(hNode, 
params_out) {{endif}} {{if 'cuGraphAddMemFreeNode' in found_functions}} cdef CUresult cuGraphAddMemFreeNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, CUdeviceptr dptr) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddMemFreeNode(phGraphNode, hGraph, dependencies, numDependencies, dptr) + return cydriver._cuGraphAddMemFreeNode(phGraphNode, hGraph, dependencies, numDependencies, dptr) {{endif}} {{if 'cuGraphMemFreeNodeGetParams' in found_functions}} cdef CUresult cuGraphMemFreeNodeGetParams(CUgraphNode hNode, CUdeviceptr* dptr_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphMemFreeNodeGetParams(hNode, dptr_out) + return cydriver._cuGraphMemFreeNodeGetParams(hNode, dptr_out) {{endif}} {{if 'cuDeviceGraphMemTrim' in found_functions}} cdef CUresult cuDeviceGraphMemTrim(CUdevice device) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGraphMemTrim(device) + return cydriver._cuDeviceGraphMemTrim(device) {{endif}} {{if 'cuDeviceGetGraphMemAttribute' in found_functions}} cdef CUresult cuDeviceGetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceGetGraphMemAttribute(device, attr, value) + return cydriver._cuDeviceGetGraphMemAttribute(device, attr, value) {{endif}} {{if 'cuDeviceSetGraphMemAttribute' in found_functions}} cdef CUresult cuDeviceSetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuDeviceSetGraphMemAttribute(device, attr, value) + return cydriver._cuDeviceSetGraphMemAttribute(device, attr, value) {{endif}} {{if 'cuGraphClone' in found_functions}} cdef CUresult cuGraphClone(CUgraph* phGraphClone, CUgraph originalGraph) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphClone(phGraphClone, originalGraph) + return cydriver._cuGraphClone(phGraphClone, originalGraph) {{endif}} {{if 'cuGraphNodeFindInClone' in found_functions}} cdef CUresult cuGraphNodeFindInClone(CUgraphNode* phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeFindInClone(phNode, hOriginalNode, hClonedGraph) + return cydriver._cuGraphNodeFindInClone(phNode, hOriginalNode, hClonedGraph) {{endif}} {{if 'cuGraphNodeGetType' in found_functions}} cdef CUresult cuGraphNodeGetType(CUgraphNode hNode, CUgraphNodeType* typename) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeGetType(hNode, typename) + return cydriver._cuGraphNodeGetType(hNode, typename) {{endif}} {{if 'cuGraphGetNodes' in found_functions}} cdef CUresult cuGraphGetNodes(CUgraph hGraph, CUgraphNode* nodes, size_t* numNodes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphGetNodes(hGraph, nodes, numNodes) + return cydriver._cuGraphGetNodes(hGraph, nodes, numNodes) {{endif}} {{if 'cuGraphGetRootNodes' in found_functions}} cdef CUresult cuGraphGetRootNodes(CUgraph hGraph, CUgraphNode* rootNodes, size_t* numRootNodes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphGetRootNodes(hGraph, rootNodes, numRootNodes) + return cydriver._cuGraphGetRootNodes(hGraph, rootNodes, numRootNodes) {{endif}} {{if 'cuGraphGetEdges' in found_functions}} cdef CUresult cuGraphGetEdges(CUgraph hGraph, CUgraphNode* from_, CUgraphNode* to, size_t* numEdges) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphGetEdges(hGraph, from_, to, numEdges) + return cydriver._cuGraphGetEdges(hGraph, from_, to, numEdges) {{endif}} {{if 
'cuGraphGetEdges_v2' in found_functions}} cdef CUresult cuGraphGetEdges_v2(CUgraph hGraph, CUgraphNode* from_, CUgraphNode* to, CUgraphEdgeData* edgeData, size_t* numEdges) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphGetEdges_v2(hGraph, from_, to, edgeData, numEdges) + return cydriver._cuGraphGetEdges_v2(hGraph, from_, to, edgeData, numEdges) {{endif}} {{if 'cuGraphNodeGetDependencies' in found_functions}} cdef CUresult cuGraphNodeGetDependencies(CUgraphNode hNode, CUgraphNode* dependencies, size_t* numDependencies) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeGetDependencies(hNode, dependencies, numDependencies) + return cydriver._cuGraphNodeGetDependencies(hNode, dependencies, numDependencies) {{endif}} {{if 'cuGraphNodeGetDependencies_v2' in found_functions}} cdef CUresult cuGraphNodeGetDependencies_v2(CUgraphNode hNode, CUgraphNode* dependencies, CUgraphEdgeData* edgeData, size_t* numDependencies) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeGetDependencies_v2(hNode, dependencies, edgeData, numDependencies) + return cydriver._cuGraphNodeGetDependencies_v2(hNode, dependencies, edgeData, numDependencies) {{endif}} {{if 'cuGraphNodeGetDependentNodes' in found_functions}} cdef CUresult cuGraphNodeGetDependentNodes(CUgraphNode hNode, CUgraphNode* dependentNodes, size_t* numDependentNodes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeGetDependentNodes(hNode, dependentNodes, numDependentNodes) + return cydriver._cuGraphNodeGetDependentNodes(hNode, dependentNodes, numDependentNodes) {{endif}} {{if 'cuGraphNodeGetDependentNodes_v2' in found_functions}} cdef CUresult cuGraphNodeGetDependentNodes_v2(CUgraphNode hNode, CUgraphNode* dependentNodes, CUgraphEdgeData* edgeData, size_t* numDependentNodes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeGetDependentNodes_v2(hNode, dependentNodes, edgeData, numDependentNodes) + return cydriver._cuGraphNodeGetDependentNodes_v2(hNode, dependentNodes, edgeData, numDependentNodes) {{endif}} {{if 'cuGraphAddDependencies' in found_functions}} cdef CUresult cuGraphAddDependencies(CUgraph hGraph, const CUgraphNode* from_, const CUgraphNode* to, size_t numDependencies) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddDependencies(hGraph, from_, to, numDependencies) + return cydriver._cuGraphAddDependencies(hGraph, from_, to, numDependencies) {{endif}} {{if 'cuGraphAddDependencies_v2' in found_functions}} cdef CUresult cuGraphAddDependencies_v2(CUgraph hGraph, const CUgraphNode* from_, const CUgraphNode* to, const CUgraphEdgeData* edgeData, size_t numDependencies) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddDependencies_v2(hGraph, from_, to, edgeData, numDependencies) + return cydriver._cuGraphAddDependencies_v2(hGraph, from_, to, edgeData, numDependencies) {{endif}} {{if 'cuGraphRemoveDependencies' in found_functions}} cdef CUresult cuGraphRemoveDependencies(CUgraph hGraph, const CUgraphNode* from_, const CUgraphNode* to, size_t numDependencies) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphRemoveDependencies(hGraph, from_, to, numDependencies) + return cydriver._cuGraphRemoveDependencies(hGraph, from_, to, numDependencies) {{endif}} {{if 'cuGraphRemoveDependencies_v2' in found_functions}} cdef CUresult cuGraphRemoveDependencies_v2(CUgraph hGraph, const CUgraphNode* from_, const CUgraphNode* to, const CUgraphEdgeData* edgeData, size_t numDependencies) except ?CUDA_ERROR_NOT_FOUND nogil: - return 
ccuda._cuGraphRemoveDependencies_v2(hGraph, from_, to, edgeData, numDependencies) + return cydriver._cuGraphRemoveDependencies_v2(hGraph, from_, to, edgeData, numDependencies) {{endif}} {{if 'cuGraphDestroyNode' in found_functions}} cdef CUresult cuGraphDestroyNode(CUgraphNode hNode) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphDestroyNode(hNode) + return cydriver._cuGraphDestroyNode(hNode) {{endif}} {{if 'cuGraphInstantiateWithFlags' in found_functions}} cdef CUresult cuGraphInstantiate(CUgraphExec* phGraphExec, CUgraph hGraph, unsigned long long flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphInstantiateWithFlags(phGraphExec, hGraph, flags) + return cydriver._cuGraphInstantiateWithFlags(phGraphExec, hGraph, flags) {{endif}} {{if 'cuGraphInstantiateWithParams' in found_functions}} cdef CUresult cuGraphInstantiateWithParams(CUgraphExec* phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS* instantiateParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphInstantiateWithParams(phGraphExec, hGraph, instantiateParams) + return cydriver._cuGraphInstantiateWithParams(phGraphExec, hGraph, instantiateParams) {{endif}} {{if 'cuGraphExecGetFlags' in found_functions}} cdef CUresult cuGraphExecGetFlags(CUgraphExec hGraphExec, cuuint64_t* flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecGetFlags(hGraphExec, flags) + return cydriver._cuGraphExecGetFlags(hGraphExec, flags) {{endif}} {{if 'cuGraphExecKernelNodeSetParams_v2' in found_functions}} cdef CUresult cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecKernelNodeSetParams_v2(hGraphExec, hNode, nodeParams) + return cydriver._cuGraphExecKernelNodeSetParams_v2(hGraphExec, hNode, nodeParams) {{endif}} {{if 'cuGraphExecMemcpyNodeSetParams' in found_functions}} cdef CUresult cuGraphExecMemcpyNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMCPY3D* copyParams, CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecMemcpyNodeSetParams(hGraphExec, hNode, copyParams, ctx) + return cydriver._cuGraphExecMemcpyNodeSetParams(hGraphExec, hNode, copyParams, ctx) {{endif}} {{if 'cuGraphExecMemsetNodeSetParams' in found_functions}} cdef CUresult cuGraphExecMemsetNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS* memsetParams, CUcontext ctx) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecMemsetNodeSetParams(hGraphExec, hNode, memsetParams, ctx) + return cydriver._cuGraphExecMemsetNodeSetParams(hGraphExec, hNode, memsetParams, ctx) {{endif}} {{if 'cuGraphExecHostNodeSetParams' in found_functions}} cdef CUresult cuGraphExecHostNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecHostNodeSetParams(hGraphExec, hNode, nodeParams) + return cydriver._cuGraphExecHostNodeSetParams(hGraphExec, hNode, nodeParams) {{endif}} {{if 'cuGraphExecChildGraphNodeSetParams' in found_functions}} cdef CUresult cuGraphExecChildGraphNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecChildGraphNodeSetParams(hGraphExec, hNode, childGraph) + return cydriver._cuGraphExecChildGraphNodeSetParams(hGraphExec, hNode, childGraph) {{endif}} {{if 'cuGraphExecEventRecordNodeSetEvent' in found_functions}} cdef CUresult 
cuGraphExecEventRecordNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event) + return cydriver._cuGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event) {{endif}} {{if 'cuGraphExecEventWaitNodeSetEvent' in found_functions}} cdef CUresult cuGraphExecEventWaitNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event) + return cydriver._cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event) {{endif}} {{if 'cuGraphExecExternalSemaphoresSignalNodeSetParams' in found_functions}} cdef CUresult cuGraphExecExternalSemaphoresSignalNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, nodeParams) + return cydriver._cuGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, nodeParams) {{endif}} {{if 'cuGraphExecExternalSemaphoresWaitNodeSetParams' in found_functions}} cdef CUresult cuGraphExecExternalSemaphoresWaitNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, nodeParams) + return cydriver._cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, nodeParams) {{endif}} {{if 'cuGraphNodeSetEnabled' in found_functions}} cdef CUresult cuGraphNodeSetEnabled(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int isEnabled) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeSetEnabled(hGraphExec, hNode, isEnabled) + return cydriver._cuGraphNodeSetEnabled(hGraphExec, hNode, isEnabled) {{endif}} {{if 'cuGraphNodeGetEnabled' in found_functions}} cdef CUresult cuGraphNodeGetEnabled(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int* isEnabled) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeGetEnabled(hGraphExec, hNode, isEnabled) + return cydriver._cuGraphNodeGetEnabled(hGraphExec, hNode, isEnabled) {{endif}} {{if 'cuGraphUpload' in found_functions}} cdef CUresult cuGraphUpload(CUgraphExec hGraphExec, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphUpload(hGraphExec, hStream) + return cydriver._cuGraphUpload(hGraphExec, hStream) {{endif}} {{if 'cuGraphLaunch' in found_functions}} cdef CUresult cuGraphLaunch(CUgraphExec hGraphExec, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphLaunch(hGraphExec, hStream) + return cydriver._cuGraphLaunch(hGraphExec, hStream) {{endif}} {{if 'cuGraphExecDestroy' in found_functions}} cdef CUresult cuGraphExecDestroy(CUgraphExec hGraphExec) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecDestroy(hGraphExec) + return cydriver._cuGraphExecDestroy(hGraphExec) {{endif}} {{if 'cuGraphDestroy' in found_functions}} cdef CUresult cuGraphDestroy(CUgraph hGraph) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphDestroy(hGraph) + return cydriver._cuGraphDestroy(hGraph) {{endif}} {{if 'cuGraphExecUpdate_v2' in found_functions}} cdef CUresult cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphExecUpdateResultInfo* resultInfo) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecUpdate_v2(hGraphExec, hGraph, resultInfo) + return 
cydriver._cuGraphExecUpdate_v2(hGraphExec, hGraph, resultInfo) {{endif}} {{if 'cuGraphKernelNodeCopyAttributes' in found_functions}} cdef CUresult cuGraphKernelNodeCopyAttributes(CUgraphNode dst, CUgraphNode src) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphKernelNodeCopyAttributes(dst, src) + return cydriver._cuGraphKernelNodeCopyAttributes(dst, src) {{endif}} {{if 'cuGraphKernelNodeGetAttribute' in found_functions}} cdef CUresult cuGraphKernelNodeGetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr, CUkernelNodeAttrValue* value_out) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphKernelNodeGetAttribute(hNode, attr, value_out) + return cydriver._cuGraphKernelNodeGetAttribute(hNode, attr, value_out) {{endif}} {{if 'cuGraphKernelNodeSetAttribute' in found_functions}} cdef CUresult cuGraphKernelNodeSetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr, const CUkernelNodeAttrValue* value) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphKernelNodeSetAttribute(hNode, attr, value) + return cydriver._cuGraphKernelNodeSetAttribute(hNode, attr, value) {{endif}} {{if 'cuGraphDebugDotPrint' in found_functions}} cdef CUresult cuGraphDebugDotPrint(CUgraph hGraph, const char* path, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphDebugDotPrint(hGraph, path, flags) + return cydriver._cuGraphDebugDotPrint(hGraph, path, flags) {{endif}} {{if 'cuUserObjectCreate' in found_functions}} cdef CUresult cuUserObjectCreate(CUuserObject* object_out, void* ptr, CUhostFn destroy, unsigned int initialRefcount, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuUserObjectCreate(object_out, ptr, destroy, initialRefcount, flags) + return cydriver._cuUserObjectCreate(object_out, ptr, destroy, initialRefcount, flags) {{endif}} {{if 'cuUserObjectRetain' in found_functions}} cdef CUresult cuUserObjectRetain(CUuserObject object, unsigned int count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuUserObjectRetain(object, count) + return cydriver._cuUserObjectRetain(object, count) {{endif}} {{if 'cuUserObjectRelease' in found_functions}} cdef CUresult cuUserObjectRelease(CUuserObject object, unsigned int count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuUserObjectRelease(object, count) + return cydriver._cuUserObjectRelease(object, count) {{endif}} {{if 'cuGraphRetainUserObject' in found_functions}} cdef CUresult cuGraphRetainUserObject(CUgraph graph, CUuserObject object, unsigned int count, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphRetainUserObject(graph, object, count, flags) + return cydriver._cuGraphRetainUserObject(graph, object, count, flags) {{endif}} {{if 'cuGraphReleaseUserObject' in found_functions}} cdef CUresult cuGraphReleaseUserObject(CUgraph graph, CUuserObject object, unsigned int count) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphReleaseUserObject(graph, object, count) + return cydriver._cuGraphReleaseUserObject(graph, object, count) {{endif}} {{if 'cuGraphAddNode' in found_functions}} cdef CUresult cuGraphAddNode(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, size_t numDependencies, CUgraphNodeParams* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) + return cydriver._cuGraphAddNode(phGraphNode, hGraph, dependencies, numDependencies, nodeParams) {{endif}} {{if 'cuGraphAddNode_v2' in found_functions}} cdef CUresult 
cuGraphAddNode_v2(CUgraphNode* phGraphNode, CUgraph hGraph, const CUgraphNode* dependencies, const CUgraphEdgeData* dependencyData, size_t numDependencies, CUgraphNodeParams* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphAddNode_v2(phGraphNode, hGraph, dependencies, dependencyData, numDependencies, nodeParams) + return cydriver._cuGraphAddNode_v2(phGraphNode, hGraph, dependencies, dependencyData, numDependencies, nodeParams) {{endif}} {{if 'cuGraphNodeSetParams' in found_functions}} cdef CUresult cuGraphNodeSetParams(CUgraphNode hNode, CUgraphNodeParams* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphNodeSetParams(hNode, nodeParams) + return cydriver._cuGraphNodeSetParams(hNode, nodeParams) {{endif}} {{if 'cuGraphExecNodeSetParams' in found_functions}} cdef CUresult cuGraphExecNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraphNodeParams* nodeParams) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphExecNodeSetParams(hGraphExec, hNode, nodeParams) + return cydriver._cuGraphExecNodeSetParams(hGraphExec, hNode, nodeParams) {{endif}} {{if 'cuGraphConditionalHandleCreate' in found_functions}} cdef CUresult cuGraphConditionalHandleCreate(CUgraphConditionalHandle* pHandle_out, CUgraph hGraph, CUcontext ctx, unsigned int defaultLaunchValue, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuGraphConditionalHandleCreate(pHandle_out, hGraph, ctx, defaultLaunchValue, flags) + return cydriver._cuGraphConditionalHandleCreate(pHandle_out, hGraph, ctx, defaultLaunchValue, flags) {{endif}} {{if 'cuOccupancyMaxActiveBlocksPerMultiprocessor' in found_functions}} cdef CUresult cuOccupancyMaxActiveBlocksPerMultiprocessor(int* numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSMemSize) + return cydriver._cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSMemSize) {{endif}} {{if 'cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags' in found_functions}} cdef CUresult cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int* numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSMemSize, flags) + return cydriver._cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSMemSize, flags) {{endif}} {{if 'cuOccupancyMaxPotentialBlockSize' in found_functions}} cdef CUresult cuOccupancyMaxPotentialBlockSize(int* minGridSize, int* blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuOccupancyMaxPotentialBlockSize(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit) + return cydriver._cuOccupancyMaxPotentialBlockSize(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit) {{endif}} {{if 'cuOccupancyMaxPotentialBlockSizeWithFlags' in found_functions}} cdef CUresult cuOccupancyMaxPotentialBlockSizeWithFlags(int* minGridSize, int* blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return 
ccuda._cuOccupancyMaxPotentialBlockSizeWithFlags(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit, flags) + return cydriver._cuOccupancyMaxPotentialBlockSizeWithFlags(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit, flags) {{endif}} {{if 'cuOccupancyAvailableDynamicSMemPerBlock' in found_functions}} cdef CUresult cuOccupancyAvailableDynamicSMemPerBlock(size_t* dynamicSmemSize, CUfunction func, int numBlocks, int blockSize) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, func, numBlocks, blockSize) + return cydriver._cuOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, func, numBlocks, blockSize) {{endif}} {{if 'cuOccupancyMaxPotentialClusterSize' in found_functions}} cdef CUresult cuOccupancyMaxPotentialClusterSize(int* clusterSize, CUfunction func, const CUlaunchConfig* config) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuOccupancyMaxPotentialClusterSize(clusterSize, func, config) + return cydriver._cuOccupancyMaxPotentialClusterSize(clusterSize, func, config) {{endif}} {{if 'cuOccupancyMaxActiveClusters' in found_functions}} cdef CUresult cuOccupancyMaxActiveClusters(int* numClusters, CUfunction func, const CUlaunchConfig* config) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuOccupancyMaxActiveClusters(numClusters, func, config) + return cydriver._cuOccupancyMaxActiveClusters(numClusters, func, config) {{endif}} {{if 'cuTexRefSetArray' in found_functions}} cdef CUresult cuTexRefSetArray(CUtexref hTexRef, CUarray hArray, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetArray(hTexRef, hArray, Flags) + return cydriver._cuTexRefSetArray(hTexRef, hArray, Flags) {{endif}} {{if 'cuTexRefSetMipmappedArray' in found_functions}} cdef CUresult cuTexRefSetMipmappedArray(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetMipmappedArray(hTexRef, hMipmappedArray, Flags) + return cydriver._cuTexRefSetMipmappedArray(hTexRef, hMipmappedArray, Flags) {{endif}} {{if 'cuTexRefSetAddress_v2' in found_functions}} cdef CUresult cuTexRefSetAddress(size_t* ByteOffset, CUtexref hTexRef, CUdeviceptr dptr, size_t numbytes) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetAddress_v2(ByteOffset, hTexRef, dptr, numbytes) + return cydriver._cuTexRefSetAddress_v2(ByteOffset, hTexRef, dptr, numbytes) {{endif}} {{if 'cuTexRefSetAddress2D_v3' in found_functions}} cdef CUresult cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR* desc, CUdeviceptr dptr, size_t Pitch) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetAddress2D_v3(hTexRef, desc, dptr, Pitch) + return cydriver._cuTexRefSetAddress2D_v3(hTexRef, desc, dptr, Pitch) {{endif}} {{if 'cuTexRefSetFormat' in found_functions}} cdef CUresult cuTexRefSetFormat(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetFormat(hTexRef, fmt, NumPackedComponents) + return cydriver._cuTexRefSetFormat(hTexRef, fmt, NumPackedComponents) {{endif}} {{if 'cuTexRefSetAddressMode' in found_functions}} cdef CUresult cuTexRefSetAddressMode(CUtexref hTexRef, int dim, CUaddress_mode am) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetAddressMode(hTexRef, dim, am) + return cydriver._cuTexRefSetAddressMode(hTexRef, dim, am) {{endif}} {{if 'cuTexRefSetFilterMode' in found_functions}} cdef 
CUresult cuTexRefSetFilterMode(CUtexref hTexRef, CUfilter_mode fm) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetFilterMode(hTexRef, fm) + return cydriver._cuTexRefSetFilterMode(hTexRef, fm) {{endif}} {{if 'cuTexRefSetMipmapFilterMode' in found_functions}} cdef CUresult cuTexRefSetMipmapFilterMode(CUtexref hTexRef, CUfilter_mode fm) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetMipmapFilterMode(hTexRef, fm) + return cydriver._cuTexRefSetMipmapFilterMode(hTexRef, fm) {{endif}} {{if 'cuTexRefSetMipmapLevelBias' in found_functions}} cdef CUresult cuTexRefSetMipmapLevelBias(CUtexref hTexRef, float bias) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetMipmapLevelBias(hTexRef, bias) + return cydriver._cuTexRefSetMipmapLevelBias(hTexRef, bias) {{endif}} {{if 'cuTexRefSetMipmapLevelClamp' in found_functions}} cdef CUresult cuTexRefSetMipmapLevelClamp(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetMipmapLevelClamp(hTexRef, minMipmapLevelClamp, maxMipmapLevelClamp) + return cydriver._cuTexRefSetMipmapLevelClamp(hTexRef, minMipmapLevelClamp, maxMipmapLevelClamp) {{endif}} {{if 'cuTexRefSetMaxAnisotropy' in found_functions}} cdef CUresult cuTexRefSetMaxAnisotropy(CUtexref hTexRef, unsigned int maxAniso) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetMaxAnisotropy(hTexRef, maxAniso) + return cydriver._cuTexRefSetMaxAnisotropy(hTexRef, maxAniso) {{endif}} {{if 'cuTexRefSetBorderColor' in found_functions}} cdef CUresult cuTexRefSetBorderColor(CUtexref hTexRef, float* pBorderColor) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetBorderColor(hTexRef, pBorderColor) + return cydriver._cuTexRefSetBorderColor(hTexRef, pBorderColor) {{endif}} {{if 'cuTexRefSetFlags' in found_functions}} cdef CUresult cuTexRefSetFlags(CUtexref hTexRef, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefSetFlags(hTexRef, Flags) + return cydriver._cuTexRefSetFlags(hTexRef, Flags) {{endif}} {{if 'cuTexRefGetAddress_v2' in found_functions}} cdef CUresult cuTexRefGetAddress(CUdeviceptr* pdptr, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetAddress_v2(pdptr, hTexRef) + return cydriver._cuTexRefGetAddress_v2(pdptr, hTexRef) {{endif}} {{if 'cuTexRefGetArray' in found_functions}} cdef CUresult cuTexRefGetArray(CUarray* phArray, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetArray(phArray, hTexRef) + return cydriver._cuTexRefGetArray(phArray, hTexRef) {{endif}} {{if 'cuTexRefGetMipmappedArray' in found_functions}} cdef CUresult cuTexRefGetMipmappedArray(CUmipmappedArray* phMipmappedArray, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetMipmappedArray(phMipmappedArray, hTexRef) + return cydriver._cuTexRefGetMipmappedArray(phMipmappedArray, hTexRef) {{endif}} {{if 'cuTexRefGetAddressMode' in found_functions}} cdef CUresult cuTexRefGetAddressMode(CUaddress_mode* pam, CUtexref hTexRef, int dim) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetAddressMode(pam, hTexRef, dim) + return cydriver._cuTexRefGetAddressMode(pam, hTexRef, dim) {{endif}} {{if 'cuTexRefGetFilterMode' in found_functions}} cdef CUresult cuTexRefGetFilterMode(CUfilter_mode* pfm, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetFilterMode(pfm, hTexRef) + return cydriver._cuTexRefGetFilterMode(pfm, hTexRef) {{endif}} {{if 
'cuTexRefGetFormat' in found_functions}} cdef CUresult cuTexRefGetFormat(CUarray_format* pFormat, int* pNumChannels, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetFormat(pFormat, pNumChannels, hTexRef) + return cydriver._cuTexRefGetFormat(pFormat, pNumChannels, hTexRef) {{endif}} {{if 'cuTexRefGetMipmapFilterMode' in found_functions}} cdef CUresult cuTexRefGetMipmapFilterMode(CUfilter_mode* pfm, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetMipmapFilterMode(pfm, hTexRef) + return cydriver._cuTexRefGetMipmapFilterMode(pfm, hTexRef) {{endif}} {{if 'cuTexRefGetMipmapLevelBias' in found_functions}} cdef CUresult cuTexRefGetMipmapLevelBias(float* pbias, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetMipmapLevelBias(pbias, hTexRef) + return cydriver._cuTexRefGetMipmapLevelBias(pbias, hTexRef) {{endif}} {{if 'cuTexRefGetMipmapLevelClamp' in found_functions}} cdef CUresult cuTexRefGetMipmapLevelClamp(float* pminMipmapLevelClamp, float* pmaxMipmapLevelClamp, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetMipmapLevelClamp(pminMipmapLevelClamp, pmaxMipmapLevelClamp, hTexRef) + return cydriver._cuTexRefGetMipmapLevelClamp(pminMipmapLevelClamp, pmaxMipmapLevelClamp, hTexRef) {{endif}} {{if 'cuTexRefGetMaxAnisotropy' in found_functions}} cdef CUresult cuTexRefGetMaxAnisotropy(int* pmaxAniso, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetMaxAnisotropy(pmaxAniso, hTexRef) + return cydriver._cuTexRefGetMaxAnisotropy(pmaxAniso, hTexRef) {{endif}} {{if 'cuTexRefGetBorderColor' in found_functions}} cdef CUresult cuTexRefGetBorderColor(float* pBorderColor, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetBorderColor(pBorderColor, hTexRef) + return cydriver._cuTexRefGetBorderColor(pBorderColor, hTexRef) {{endif}} {{if 'cuTexRefGetFlags' in found_functions}} cdef CUresult cuTexRefGetFlags(unsigned int* pFlags, CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefGetFlags(pFlags, hTexRef) + return cydriver._cuTexRefGetFlags(pFlags, hTexRef) {{endif}} {{if 'cuTexRefCreate' in found_functions}} cdef CUresult cuTexRefCreate(CUtexref* pTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefCreate(pTexRef) + return cydriver._cuTexRefCreate(pTexRef) {{endif}} {{if 'cuTexRefDestroy' in found_functions}} cdef CUresult cuTexRefDestroy(CUtexref hTexRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexRefDestroy(hTexRef) + return cydriver._cuTexRefDestroy(hTexRef) {{endif}} {{if 'cuSurfRefSetArray' in found_functions}} cdef CUresult cuSurfRefSetArray(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuSurfRefSetArray(hSurfRef, hArray, Flags) + return cydriver._cuSurfRefSetArray(hSurfRef, hArray, Flags) {{endif}} {{if 'cuSurfRefGetArray' in found_functions}} cdef CUresult cuSurfRefGetArray(CUarray* phArray, CUsurfref hSurfRef) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuSurfRefGetArray(phArray, hSurfRef) + return cydriver._cuSurfRefGetArray(phArray, hSurfRef) {{endif}} {{if 'cuTexObjectCreate' in found_functions}} cdef CUresult cuTexObjectCreate(CUtexObject* pTexObject, const CUDA_RESOURCE_DESC* pResDesc, const CUDA_TEXTURE_DESC* pTexDesc, const CUDA_RESOURCE_VIEW_DESC* pResViewDesc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexObjectCreate(pTexObject, pResDesc, pTexDesc, pResViewDesc) + return 
cydriver._cuTexObjectCreate(pTexObject, pResDesc, pTexDesc, pResViewDesc) {{endif}} {{if 'cuTexObjectDestroy' in found_functions}} cdef CUresult cuTexObjectDestroy(CUtexObject texObject) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexObjectDestroy(texObject) + return cydriver._cuTexObjectDestroy(texObject) {{endif}} {{if 'cuTexObjectGetResourceDesc' in found_functions}} cdef CUresult cuTexObjectGetResourceDesc(CUDA_RESOURCE_DESC* pResDesc, CUtexObject texObject) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexObjectGetResourceDesc(pResDesc, texObject) + return cydriver._cuTexObjectGetResourceDesc(pResDesc, texObject) {{endif}} {{if 'cuTexObjectGetTextureDesc' in found_functions}} cdef CUresult cuTexObjectGetTextureDesc(CUDA_TEXTURE_DESC* pTexDesc, CUtexObject texObject) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexObjectGetTextureDesc(pTexDesc, texObject) + return cydriver._cuTexObjectGetTextureDesc(pTexDesc, texObject) {{endif}} {{if 'cuTexObjectGetResourceViewDesc' in found_functions}} cdef CUresult cuTexObjectGetResourceViewDesc(CUDA_RESOURCE_VIEW_DESC* pResViewDesc, CUtexObject texObject) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTexObjectGetResourceViewDesc(pResViewDesc, texObject) + return cydriver._cuTexObjectGetResourceViewDesc(pResViewDesc, texObject) {{endif}} {{if 'cuSurfObjectCreate' in found_functions}} cdef CUresult cuSurfObjectCreate(CUsurfObject* pSurfObject, const CUDA_RESOURCE_DESC* pResDesc) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuSurfObjectCreate(pSurfObject, pResDesc) + return cydriver._cuSurfObjectCreate(pSurfObject, pResDesc) {{endif}} {{if 'cuSurfObjectDestroy' in found_functions}} cdef CUresult cuSurfObjectDestroy(CUsurfObject surfObject) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuSurfObjectDestroy(surfObject) + return cydriver._cuSurfObjectDestroy(surfObject) {{endif}} {{if 'cuSurfObjectGetResourceDesc' in found_functions}} cdef CUresult cuSurfObjectGetResourceDesc(CUDA_RESOURCE_DESC* pResDesc, CUsurfObject surfObject) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuSurfObjectGetResourceDesc(pResDesc, surfObject) + return cydriver._cuSurfObjectGetResourceDesc(pResDesc, surfObject) {{endif}} {{if 'cuTensorMapEncodeTiled' in found_functions}} cdef CUresult cuTensorMapEncodeTiled(CUtensorMap* tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void* globalAddress, const cuuint64_t* globalDim, const cuuint64_t* globalStrides, const cuuint32_t* boxDim, const cuuint32_t* elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill) except ?CUDA_ERROR_NOT_FOUND nogil: - return ccuda._cuTensorMapEncodeTiled(tensorMap, tensorDataType, tensorRank, globalAddress, globalDim, globalStrides, boxDim, elementStrides, interleave, swizzle, l2Promotion, oobFill) + return cydriver._cuTensorMapEncodeTiled(tensorMap, tensorDataType, tensorRank, globalAddress, globalDim, globalStrides, boxDim, elementStrides, interleave, swizzle, l2Promotion, oobFill) {{endif}} {{if 'cuTensorMapEncodeIm2col' in found_functions}} cdef CUresult cuTensorMapEncodeIm2col(CUtensorMap* tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void* globalAddress, const cuuint64_t* globalDim, const cuuint64_t* globalStrides, const int* pixelBoxLowerCorner, const int* pixelBoxUpperCorner, cuuint32_t channelsPerPixel, cuuint32_t pixelsPerColumn, const cuuint32_t* elementStrides, CUtensorMapInterleave interleave, 
CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuTensorMapEncodeIm2col(tensorMap, tensorDataType, tensorRank, globalAddress, globalDim, globalStrides, pixelBoxLowerCorner, pixelBoxUpperCorner, channelsPerPixel, pixelsPerColumn, elementStrides, interleave, swizzle, l2Promotion, oobFill)
+    return cydriver._cuTensorMapEncodeIm2col(tensorMap, tensorDataType, tensorRank, globalAddress, globalDim, globalStrides, pixelBoxLowerCorner, pixelBoxUpperCorner, channelsPerPixel, pixelsPerColumn, elementStrides, interleave, swizzle, l2Promotion, oobFill)
{{endif}}
{{if 'cuTensorMapReplaceAddress' in found_functions}}
cdef CUresult cuTensorMapReplaceAddress(CUtensorMap* tensorMap, void* globalAddress) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuTensorMapReplaceAddress(tensorMap, globalAddress)
+    return cydriver._cuTensorMapReplaceAddress(tensorMap, globalAddress)
{{endif}}
{{if 'cuDeviceCanAccessPeer' in found_functions}}
cdef CUresult cuDeviceCanAccessPeer(int* canAccessPeer, CUdevice dev, CUdevice peerDev) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuDeviceCanAccessPeer(canAccessPeer, dev, peerDev)
+    return cydriver._cuDeviceCanAccessPeer(canAccessPeer, dev, peerDev)
{{endif}}
{{if 'cuCtxEnablePeerAccess' in found_functions}}
cdef CUresult cuCtxEnablePeerAccess(CUcontext peerContext, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCtxEnablePeerAccess(peerContext, Flags)
+    return cydriver._cuCtxEnablePeerAccess(peerContext, Flags)
{{endif}}
{{if 'cuCtxDisablePeerAccess' in found_functions}}
cdef CUresult cuCtxDisablePeerAccess(CUcontext peerContext) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCtxDisablePeerAccess(peerContext)
+    return cydriver._cuCtxDisablePeerAccess(peerContext)
{{endif}}
{{if 'cuDeviceGetP2PAttribute' in found_functions}}
cdef CUresult cuDeviceGetP2PAttribute(int* value, CUdevice_P2PAttribute attrib, CUdevice srcDevice, CUdevice dstDevice) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuDeviceGetP2PAttribute(value, attrib, srcDevice, dstDevice)
+    return cydriver._cuDeviceGetP2PAttribute(value, attrib, srcDevice, dstDevice)
{{endif}}
{{if 'cuGraphicsUnregisterResource' in found_functions}}
cdef CUresult cuGraphicsUnregisterResource(CUgraphicsResource resource) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsUnregisterResource(resource)
+    return cydriver._cuGraphicsUnregisterResource(resource)
{{endif}}
{{if 'cuGraphicsSubResourceGetMappedArray' in found_functions}}
cdef CUresult cuGraphicsSubResourceGetMappedArray(CUarray* pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsSubResourceGetMappedArray(pArray, resource, arrayIndex, mipLevel)
+    return cydriver._cuGraphicsSubResourceGetMappedArray(pArray, resource, arrayIndex, mipLevel)
{{endif}}
{{if 'cuGraphicsResourceGetMappedMipmappedArray' in found_functions}}
cdef CUresult cuGraphicsResourceGetMappedMipmappedArray(CUmipmappedArray* pMipmappedArray, CUgraphicsResource resource) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsResourceGetMappedMipmappedArray(pMipmappedArray, resource)
+    return cydriver._cuGraphicsResourceGetMappedMipmappedArray(pMipmappedArray, resource)
{{endif}}
{{if 'cuGraphicsResourceGetMappedPointer_v2' in found_functions}}
cdef CUresult cuGraphicsResourceGetMappedPointer(CUdeviceptr* pDevPtr, size_t* pSize, CUgraphicsResource resource) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsResourceGetMappedPointer_v2(pDevPtr, pSize, resource)
+    return cydriver._cuGraphicsResourceGetMappedPointer_v2(pDevPtr, pSize, resource)
{{endif}}
{{if 'cuGraphicsResourceSetMapFlags_v2' in found_functions}}
cdef CUresult cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsResourceSetMapFlags_v2(resource, flags)
+    return cydriver._cuGraphicsResourceSetMapFlags_v2(resource, flags)
{{endif}}
{{if 'cuGraphicsMapResources' in found_functions}}
cdef CUresult cuGraphicsMapResources(unsigned int count, CUgraphicsResource* resources, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsMapResources(count, resources, hStream)
+    return cydriver._cuGraphicsMapResources(count, resources, hStream)
{{endif}}
{{if 'cuGraphicsUnmapResources' in found_functions}}
cdef CUresult cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource* resources, CUstream hStream) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsUnmapResources(count, resources, hStream)
+    return cydriver._cuGraphicsUnmapResources(count, resources, hStream)
{{endif}}
{{if 'cuGetProcAddress_v2' in found_functions}}
cdef CUresult cuGetProcAddress(const char* symbol, void** pfn, int cudaVersion, cuuint64_t flags, CUdriverProcAddressQueryResult* symbolStatus) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGetProcAddress_v2(symbol, pfn, cudaVersion, flags, symbolStatus)
+    return cydriver._cuGetProcAddress_v2(symbol, pfn, cudaVersion, flags, symbolStatus)
{{endif}}
{{if 'cuCoredumpGetAttribute' in found_functions}}
cdef CUresult cuCoredumpGetAttribute(CUcoredumpSettings attrib, void* value, size_t* size) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCoredumpGetAttribute(attrib, value, size)
+    return cydriver._cuCoredumpGetAttribute(attrib, value, size)
{{endif}}
{{if 'cuCoredumpGetAttributeGlobal' in found_functions}}
cdef CUresult cuCoredumpGetAttributeGlobal(CUcoredumpSettings attrib, void* value, size_t* size) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCoredumpGetAttributeGlobal(attrib, value, size)
+    return cydriver._cuCoredumpGetAttributeGlobal(attrib, value, size)
{{endif}}
{{if 'cuCoredumpSetAttribute' in found_functions}}
cdef CUresult cuCoredumpSetAttribute(CUcoredumpSettings attrib, void* value, size_t* size) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCoredumpSetAttribute(attrib, value, size)
+    return cydriver._cuCoredumpSetAttribute(attrib, value, size)
{{endif}}
{{if 'cuCoredumpSetAttributeGlobal' in found_functions}}
cdef CUresult cuCoredumpSetAttributeGlobal(CUcoredumpSettings attrib, void* value, size_t* size) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCoredumpSetAttributeGlobal(attrib, value, size)
+    return cydriver._cuCoredumpSetAttributeGlobal(attrib, value, size)
{{endif}}
{{if 'cuGetExportTable' in found_functions}}
cdef CUresult cuGetExportTable(const void** ppExportTable, const CUuuid* pExportTableId) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGetExportTable(ppExportTable, pExportTableId)
+    return cydriver._cuGetExportTable(ppExportTable, pExportTableId)
{{endif}}
{{if 'cuGreenCtxCreate' in found_functions}}
cdef CUresult cuGreenCtxCreate(CUgreenCtx* phCtx, CUdevResourceDesc desc, CUdevice dev, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGreenCtxCreate(phCtx, desc, dev, flags)
+    return cydriver._cuGreenCtxCreate(phCtx, desc, dev, flags)
{{endif}}
{{if 'cuGreenCtxDestroy' in found_functions}}
cdef CUresult cuGreenCtxDestroy(CUgreenCtx hCtx) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGreenCtxDestroy(hCtx)
+    return cydriver._cuGreenCtxDestroy(hCtx)
{{endif}}
{{if 'cuCtxFromGreenCtx' in found_functions}}
cdef CUresult cuCtxFromGreenCtx(CUcontext* pContext, CUgreenCtx hCtx) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCtxFromGreenCtx(pContext, hCtx)
+    return cydriver._cuCtxFromGreenCtx(pContext, hCtx)
{{endif}}
{{if 'cuDeviceGetDevResource' in found_functions}}
cdef CUresult cuDeviceGetDevResource(CUdevice device, CUdevResource* resource, CUdevResourceType typename) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuDeviceGetDevResource(device, resource, typename)
+    return cydriver._cuDeviceGetDevResource(device, resource, typename)
{{endif}}
{{if 'cuCtxGetDevResource' in found_functions}}
cdef CUresult cuCtxGetDevResource(CUcontext hCtx, CUdevResource* resource, CUdevResourceType typename) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuCtxGetDevResource(hCtx, resource, typename)
+    return cydriver._cuCtxGetDevResource(hCtx, resource, typename)
{{endif}}
{{if 'cuGreenCtxGetDevResource' in found_functions}}
cdef CUresult cuGreenCtxGetDevResource(CUgreenCtx hCtx, CUdevResource* resource, CUdevResourceType typename) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGreenCtxGetDevResource(hCtx, resource, typename)
+    return cydriver._cuGreenCtxGetDevResource(hCtx, resource, typename)
{{endif}}
{{if 'cuDevSmResourceSplitByCount' in found_functions}}
cdef CUresult cuDevSmResourceSplitByCount(CUdevResource* result, unsigned int* nbGroups, const CUdevResource* input, CUdevResource* remaining, unsigned int useFlags, unsigned int minCount) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuDevSmResourceSplitByCount(result, nbGroups, input, remaining, useFlags, minCount)
+    return cydriver._cuDevSmResourceSplitByCount(result, nbGroups, input, remaining, useFlags, minCount)
{{endif}}
{{if 'cuDevResourceGenerateDesc' in found_functions}}
cdef CUresult cuDevResourceGenerateDesc(CUdevResourceDesc* phDesc, CUdevResource* resources, unsigned int nbResources) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuDevResourceGenerateDesc(phDesc, resources, nbResources)
+    return cydriver._cuDevResourceGenerateDesc(phDesc, resources, nbResources)
{{endif}}
{{if 'cuGreenCtxRecordEvent' in found_functions}}
cdef CUresult cuGreenCtxRecordEvent(CUgreenCtx hCtx, CUevent hEvent) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGreenCtxRecordEvent(hCtx, hEvent)
+    return cydriver._cuGreenCtxRecordEvent(hCtx, hEvent)
{{endif}}
{{if 'cuGreenCtxWaitEvent' in found_functions}}
cdef CUresult cuGreenCtxWaitEvent(CUgreenCtx hCtx, CUevent hEvent) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGreenCtxWaitEvent(hCtx, hEvent)
+    return cydriver._cuGreenCtxWaitEvent(hCtx, hEvent)
{{endif}}
{{if 'cuStreamGetGreenCtx' in found_functions}}
cdef CUresult cuStreamGetGreenCtx(CUstream hStream, CUgreenCtx* phCtx) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuStreamGetGreenCtx(hStream, phCtx)
+    return cydriver._cuStreamGetGreenCtx(hStream, phCtx)
{{endif}}
{{if 'cuGreenCtxStreamCreate' in found_functions}}
cdef CUresult cuGreenCtxStreamCreate(CUstream* phStream, CUgreenCtx greenCtx, unsigned int flags, int priority) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGreenCtxStreamCreate(phStream, greenCtx, flags, priority)
+    return cydriver._cuGreenCtxStreamCreate(phStream, greenCtx, flags, priority)
{{endif}}
{{if 'cuProfilerStart' in found_functions}}
cdef CUresult cuProfilerStart() except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuProfilerStart()
+    return cydriver._cuProfilerStart()
{{endif}}
{{if 'cuProfilerStop' in found_functions}}
cdef CUresult cuProfilerStop() except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuProfilerStop()
+    return cydriver._cuProfilerStop()
{{endif}}
{{if True}}
cdef CUresult cuGraphicsEGLRegisterImage(CUgraphicsResource* pCudaResource, EGLImageKHR image, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsEGLRegisterImage(pCudaResource, image, flags)
+    return cydriver._cuGraphicsEGLRegisterImage(pCudaResource, image, flags)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamConsumerConnect(CUeglStreamConnection* conn, EGLStreamKHR stream) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamConsumerConnect(conn, stream)
+    return cydriver._cuEGLStreamConsumerConnect(conn, stream)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamConsumerConnectWithFlags(CUeglStreamConnection* conn, EGLStreamKHR stream, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamConsumerConnectWithFlags(conn, stream, flags)
+    return cydriver._cuEGLStreamConsumerConnectWithFlags(conn, stream, flags)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamConsumerDisconnect(CUeglStreamConnection* conn) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamConsumerDisconnect(conn)
+    return cydriver._cuEGLStreamConsumerDisconnect(conn)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamConsumerAcquireFrame(CUeglStreamConnection* conn, CUgraphicsResource* pCudaResource, CUstream* pStream, unsigned int timeout) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, timeout)
+    return cydriver._cuEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, timeout)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamConsumerReleaseFrame(CUeglStreamConnection* conn, CUgraphicsResource pCudaResource, CUstream* pStream) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream)
+    return cydriver._cuEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamProducerConnect(CUeglStreamConnection* conn, EGLStreamKHR stream, EGLint width, EGLint height) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamProducerConnect(conn, stream, width, height)
+    return cydriver._cuEGLStreamProducerConnect(conn, stream, width, height)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamProducerDisconnect(CUeglStreamConnection* conn) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamProducerDisconnect(conn)
+    return cydriver._cuEGLStreamProducerDisconnect(conn)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamProducerPresentFrame(CUeglStreamConnection* conn, CUeglFrame eglframe, CUstream* pStream) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamProducerPresentFrame(conn, eglframe, pStream)
+    return cydriver._cuEGLStreamProducerPresentFrame(conn, eglframe, pStream)
{{endif}}
{{if True}}
cdef CUresult cuEGLStreamProducerReturnFrame(CUeglStreamConnection* conn, CUeglFrame* eglframe, CUstream* pStream) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEGLStreamProducerReturnFrame(conn, eglframe, pStream)
+    return cydriver._cuEGLStreamProducerReturnFrame(conn, eglframe, pStream)
{{endif}}
{{if True}}
cdef CUresult cuGraphicsResourceGetMappedEglFrame(CUeglFrame* eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsResourceGetMappedEglFrame(eglFrame, resource, index, mipLevel)
+    return cydriver._cuGraphicsResourceGetMappedEglFrame(eglFrame, resource, index, mipLevel)
{{endif}}
{{if True}}
cdef CUresult cuEventCreateFromEGLSync(CUevent* phEvent, EGLSyncKHR eglSync, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuEventCreateFromEGLSync(phEvent, eglSync, flags)
+    return cydriver._cuEventCreateFromEGLSync(phEvent, eglSync, flags)
{{endif}}
{{if True}}
cdef CUresult cuGraphicsGLRegisterBuffer(CUgraphicsResource* pCudaResource, GLuint buffer, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsGLRegisterBuffer(pCudaResource, buffer, Flags)
+    return cydriver._cuGraphicsGLRegisterBuffer(pCudaResource, buffer, Flags)
{{endif}}
{{if True}}
cdef CUresult cuGraphicsGLRegisterImage(CUgraphicsResource* pCudaResource, GLuint image, GLenum target, unsigned int Flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsGLRegisterImage(pCudaResource, image, target, Flags)
+    return cydriver._cuGraphicsGLRegisterImage(pCudaResource, image, target, Flags)
{{endif}}
{{if True}}
cdef CUresult cuGLGetDevices(unsigned int* pCudaDeviceCount, CUdevice* pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGLGetDevices_v2(pCudaDeviceCount, pCudaDevices, cudaDeviceCount, deviceList)
+    return cydriver._cuGLGetDevices_v2(pCudaDeviceCount, pCudaDevices, cudaDeviceCount, deviceList)
{{endif}}
{{if True}}
cdef CUresult cuVDPAUGetDevice(CUdevice* pDevice, VdpDevice vdpDevice, VdpGetProcAddress* vdpGetProcAddress) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuVDPAUGetDevice(pDevice, vdpDevice, vdpGetProcAddress)
+    return cydriver._cuVDPAUGetDevice(pDevice, vdpDevice, vdpGetProcAddress)
{{endif}}
{{if True}}
cdef CUresult cuVDPAUCtxCreate(CUcontext* pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress* vdpGetProcAddress) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuVDPAUCtxCreate_v2(pCtx, flags, device, vdpDevice, vdpGetProcAddress)
+    return cydriver._cuVDPAUCtxCreate_v2(pCtx, flags, device, vdpDevice, vdpGetProcAddress)
{{endif}}
{{if True}}
cdef CUresult cuGraphicsVDPAURegisterVideoSurface(CUgraphicsResource* pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsVDPAURegisterVideoSurface(pCudaResource, vdpSurface, flags)
+    return cydriver._cuGraphicsVDPAURegisterVideoSurface(pCudaResource, vdpSurface, flags)
{{endif}}
{{if True}}
cdef CUresult cuGraphicsVDPAURegisterOutputSurface(CUgraphicsResource* pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags) except ?CUDA_ERROR_NOT_FOUND nogil:
-    return ccuda._cuGraphicsVDPAURegisterOutputSurface(pCudaResource, vdpSurface, flags)
+    return cydriver._cuGraphicsVDPAURegisterOutputSurface(pCudaResource, vdpSurface, flags)
{{endif}}
diff --git a/cuda/cnvrtc.pxd.in b/cuda/cuda/bindings/cynvrtc.pxd.in
similarity index 100%
rename from cuda/cnvrtc.pxd.in
rename to cuda/cuda/bindings/cynvrtc.pxd.in
diff --git a/cuda/cnvrtc.pyx.in b/cuda/cuda/bindings/cynvrtc.pyx.in
similarity index 76%
rename from cuda/cnvrtc.pyx.in
rename to cuda/cuda/bindings/cynvrtc.pyx.in
index 9781b65f..cf02cce9 100644
--- a/cuda/cnvrtc.pyx.in
+++ b/cuda/cuda/bindings/cynvrtc.pyx.in
@@ -5,130 +5,130 @@
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
-cimport cuda._cuda.cnvrtc as cnvrtc
+cimport cuda.bindings._bindings.cynvrtc as cynvrtc
{{if 'nvrtcGetErrorString' in found_functions}}
cdef const char* nvrtcGetErrorString(nvrtcResult result) except ?NULL nogil:
-    return cnvrtc._nvrtcGetErrorString(result)
+    return cynvrtc._nvrtcGetErrorString(result)
{{endif}}
{{if 'nvrtcVersion' in found_functions}}
cdef nvrtcResult nvrtcVersion(int* major, int* minor) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcVersion(major, minor)
+    return cynvrtc._nvrtcVersion(major, minor)
{{endif}}
{{if 'nvrtcGetNumSupportedArchs' in found_functions}}
cdef nvrtcResult nvrtcGetNumSupportedArchs(int* numArchs) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetNumSupportedArchs(numArchs)
+    return cynvrtc._nvrtcGetNumSupportedArchs(numArchs)
{{endif}}
{{if 'nvrtcGetSupportedArchs' in found_functions}}
cdef nvrtcResult nvrtcGetSupportedArchs(int* supportedArchs) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetSupportedArchs(supportedArchs)
+    return cynvrtc._nvrtcGetSupportedArchs(supportedArchs)
{{endif}}
{{if 'nvrtcCreateProgram' in found_functions}}
cdef nvrtcResult nvrtcCreateProgram(nvrtcProgram* prog, const char* src, const char* name, int numHeaders, const char** headers, const char** includeNames) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcCreateProgram(prog, src, name, numHeaders, headers, includeNames)
+    return cynvrtc._nvrtcCreateProgram(prog, src, name, numHeaders, headers, includeNames)
{{endif}}
{{if 'nvrtcDestroyProgram' in found_functions}}
cdef nvrtcResult nvrtcDestroyProgram(nvrtcProgram* prog) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcDestroyProgram(prog)
+    return cynvrtc._nvrtcDestroyProgram(prog)
{{endif}}
{{if 'nvrtcCompileProgram' in found_functions}}
cdef nvrtcResult nvrtcCompileProgram(nvrtcProgram prog, int numOptions, const char** options) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcCompileProgram(prog, numOptions, options)
+    return cynvrtc._nvrtcCompileProgram(prog, numOptions, options)
{{endif}}
{{if 'nvrtcGetPTXSize' in found_functions}}
cdef nvrtcResult nvrtcGetPTXSize(nvrtcProgram prog, size_t* ptxSizeRet) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetPTXSize(prog, ptxSizeRet)
+    return cynvrtc._nvrtcGetPTXSize(prog, ptxSizeRet)
{{endif}}
{{if 'nvrtcGetPTX' in found_functions}}
cdef nvrtcResult nvrtcGetPTX(nvrtcProgram prog, char* ptx) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetPTX(prog, ptx)
+    return cynvrtc._nvrtcGetPTX(prog, ptx)
{{endif}}
{{if 'nvrtcGetCUBINSize' in found_functions}}
cdef nvrtcResult nvrtcGetCUBINSize(nvrtcProgram prog, size_t* cubinSizeRet) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetCUBINSize(prog, cubinSizeRet)
+    return cynvrtc._nvrtcGetCUBINSize(prog, cubinSizeRet)
{{endif}}
{{if 'nvrtcGetCUBIN' in found_functions}}
cdef nvrtcResult nvrtcGetCUBIN(nvrtcProgram prog, char* cubin) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetCUBIN(prog, cubin)
+    return cynvrtc._nvrtcGetCUBIN(prog, cubin)
{{endif}}
{{if 'nvrtcGetNVVMSize' in found_functions}}
cdef nvrtcResult nvrtcGetNVVMSize(nvrtcProgram prog, size_t* nvvmSizeRet) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetNVVMSize(prog, nvvmSizeRet)
+    return cynvrtc._nvrtcGetNVVMSize(prog, nvvmSizeRet)
{{endif}}
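# A minimal usage sketch (illustrative only, not part of this patch),
# assuming the relocated cuda.bindings.cynvrtc module path introduced by the
# rename above; the cimport form, variable names, and version-query flow
# below are hypothetical consumer code, and nvrtcResult is assumed to be
# cimportable from the same declaration file:
#
#     from cuda.bindings.cynvrtc cimport nvrtcResult, nvrtcVersion
#
#     cdef int major = 0
#     cdef int minor = 0
#     cdef nvrtcResult err
#     with nogil:
#         err = nvrtcVersion(&major, &minor)  # nogil wrapper declared above
#     print(major, minor, <int>err)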
{{if 'nvrtcGetNVVM' in found_functions}}
cdef nvrtcResult nvrtcGetNVVM(nvrtcProgram prog, char* nvvm) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetNVVM(prog, nvvm)
+    return cynvrtc._nvrtcGetNVVM(prog, nvvm)
{{endif}}
{{if 'nvrtcGetLTOIRSize' in found_functions}}
cdef nvrtcResult nvrtcGetLTOIRSize(nvrtcProgram prog, size_t* LTOIRSizeRet) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetLTOIRSize(prog, LTOIRSizeRet)
+    return cynvrtc._nvrtcGetLTOIRSize(prog, LTOIRSizeRet)
{{endif}}
{{if 'nvrtcGetLTOIR' in found_functions}}
cdef nvrtcResult nvrtcGetLTOIR(nvrtcProgram prog, char* LTOIR) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetLTOIR(prog, LTOIR)
+    return cynvrtc._nvrtcGetLTOIR(prog, LTOIR)
{{endif}}
{{if 'nvrtcGetOptiXIRSize' in found_functions}}
cdef nvrtcResult nvrtcGetOptiXIRSize(nvrtcProgram prog, size_t* optixirSizeRet) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetOptiXIRSize(prog, optixirSizeRet)
+    return cynvrtc._nvrtcGetOptiXIRSize(prog, optixirSizeRet)
{{endif}}
{{if 'nvrtcGetOptiXIR' in found_functions}}
cdef nvrtcResult nvrtcGetOptiXIR(nvrtcProgram prog, char* optixir) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetOptiXIR(prog, optixir)
+    return cynvrtc._nvrtcGetOptiXIR(prog, optixir)
{{endif}}
{{if 'nvrtcGetProgramLogSize' in found_functions}}
cdef nvrtcResult nvrtcGetProgramLogSize(nvrtcProgram prog, size_t* logSizeRet) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetProgramLogSize(prog, logSizeRet)
+    return cynvrtc._nvrtcGetProgramLogSize(prog, logSizeRet)
{{endif}}
{{if 'nvrtcGetProgramLog' in found_functions}}
cdef nvrtcResult nvrtcGetProgramLog(nvrtcProgram prog, char* log) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetProgramLog(prog, log)
+    return cynvrtc._nvrtcGetProgramLog(prog, log)
{{endif}}
{{if 'nvrtcAddNameExpression' in found_functions}}
cdef nvrtcResult nvrtcAddNameExpression(nvrtcProgram prog, const char* name_expression) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcAddNameExpression(prog, name_expression)
+    return cynvrtc._nvrtcAddNameExpression(prog, name_expression)
{{endif}}
{{if 'nvrtcGetLoweredName' in found_functions}}
cdef nvrtcResult nvrtcGetLoweredName(nvrtcProgram prog, const char* name_expression, const char** lowered_name) except ?NVRTC_ERROR_INVALID_INPUT nogil:
-    return cnvrtc._nvrtcGetLoweredName(prog, name_expression, lowered_name)
+    return cynvrtc._nvrtcGetLoweredName(prog, name_expression, lowered_name)
{{endif}}
diff --git a/cuda/ccudart.pxd.in b/cuda/cuda/bindings/cyruntime.pxd.in
similarity index 100%
rename from cuda/ccudart.pxd.in
rename to cuda/cuda/bindings/cyruntime.pxd.in
diff --git a/cuda/ccudart.pyx.in b/cuda/cuda/bindings/cyruntime.pyx.in
similarity index 99%
rename from cuda/ccudart.pyx.in
rename to cuda/cuda/bindings/cyruntime.pyx.in
index 85acc771..18b9fe40 100644
--- a/cuda/ccudart.pyx.in
+++ b/cuda/cuda/bindings/cyruntime.pyx.in
@@ -8,9 +8,9 @@
# cython: show_performance_hints=False
-cimport cuda._cuda.ccuda as ccuda
-from cuda._lib.ccudart.ccudart cimport *
-from cuda._lib.ccudart.utils cimport *
+cimport cuda.bindings._bindings.cydriver as cydriver
+from cuda.bindings._lib.cyruntime.cyruntime cimport *
+from cuda.bindings._lib.cyruntime.utils cimport *
from libc.stdlib cimport malloc, free, calloc
from libc cimport string
from libcpp cimport bool
diff --git a/cuda/cuda.pxd.in b/cuda/cuda/bindings/driver.pxd.in
similarity index 92%
rename from cuda/cuda.pxd.in
rename to cuda/cuda/bindings/driver.pxd.in
index 265b6dd3..9be46cb2 100644
--- a/cuda/cuda.pxd.in
+++ b/cuda/cuda/bindings/driver.pxd.in
@@ -5,8 +5,8 @@
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
-cimport cuda.ccuda as ccuda
-cimport cuda._lib.utils as utils
+cimport cuda.bindings.cydriver as cydriver
+cimport cuda.bindings._lib.utils as utils
{{if 'CUcontext' in found_types}}
@@ -21,8 +21,8 @@ cdef class CUcontext:
        Get memory address of class instance
    """
-    cdef ccuda.CUcontext __val
-    cdef ccuda.CUcontext* _ptr
+    cdef cydriver.CUcontext __val
+    cdef cydriver.CUcontext* _ptr
{{endif}}
{{if 'CUmodule' in found_types}}
@@ -38,8 +38,8 @@ cdef class CUmodule:
        Get memory address of class instance
    """
-    cdef ccuda.CUmodule __val
-    cdef ccuda.CUmodule* _ptr
+    cdef cydriver.CUmodule __val
+    cdef cydriver.CUmodule* _ptr
{{endif}}
{{if 'CUfunction' in found_types}}
@@ -55,8 +55,8 @@ cdef class CUfunction:
        Get memory address of class instance
    """
-    cdef ccuda.CUfunction __val
-    cdef ccuda.CUfunction* _ptr
+    cdef cydriver.CUfunction __val
+    cdef cydriver.CUfunction* _ptr
{{endif}}
{{if 'CUlibrary' in found_types}}
@@ -72,8 +72,8 @@ cdef class CUlibrary:
        Get memory address of class instance
    """
-    cdef ccuda.CUlibrary __val
-    cdef ccuda.CUlibrary* _ptr
+    cdef cydriver.CUlibrary __val
+    cdef cydriver.CUlibrary* _ptr
{{endif}}
{{if 'CUkernel' in found_types}}
@@ -89,8 +89,8 @@ cdef class CUkernel:
        Get memory address of class instance
    """
-    cdef ccuda.CUkernel __val
-    cdef ccuda.CUkernel* _ptr
+    cdef cydriver.CUkernel __val
+    cdef cydriver.CUkernel* _ptr
{{endif}}
{{if 'CUarray' in found_types}}
@@ -106,8 +106,8 @@ cdef class CUarray:
        Get memory address of class instance
    """
-    cdef ccuda.CUarray __val
-    cdef ccuda.CUarray* _ptr
+    cdef cydriver.CUarray __val
+    cdef cydriver.CUarray* _ptr
{{endif}}
{{if 'CUmipmappedArray' in found_types}}
@@ -123,8 +123,8 @@ cdef class CUmipmappedArray:
        Get memory address of class instance
    """
-    cdef ccuda.CUmipmappedArray __val
-    cdef ccuda.CUmipmappedArray* _ptr
+    cdef cydriver.CUmipmappedArray __val
+    cdef cydriver.CUmipmappedArray* _ptr
{{endif}}
{{if 'CUtexref' in found_types}}
@@ -140,8 +140,8 @@ cdef class CUtexref:
        Get memory address of class instance
    """
-    cdef ccuda.CUtexref __val
-    cdef ccuda.CUtexref* _ptr
+    cdef cydriver.CUtexref __val
+    cdef cydriver.CUtexref* _ptr
{{endif}}
{{if 'CUsurfref' in found_types}}
@@ -157,8 +157,8 @@ cdef class CUsurfref:
        Get memory address of class instance
    """
-    cdef ccuda.CUsurfref __val
-    cdef ccuda.CUsurfref* _ptr
+    cdef cydriver.CUsurfref __val
+    cdef cydriver.CUsurfref* _ptr
{{endif}}
{{if 'CUevent' in found_types}}
@@ -174,8 +174,8 @@ cdef class CUevent:
        Get memory address of class instance
    """
-    cdef ccuda.CUevent __val
-    cdef ccuda.CUevent* _ptr
+    cdef cydriver.CUevent __val
+    cdef cydriver.CUevent* _ptr
{{endif}}
{{if 'CUstream' in found_types}}
@@ -191,8 +191,8 @@ cdef class CUstream:
        Get memory address of class instance
    """
-    cdef ccuda.CUstream __val
-    cdef ccuda.CUstream* _ptr
+    cdef cydriver.CUstream __val
+    cdef cydriver.CUstream* _ptr
{{endif}}
{{if 'CUgraphicsResource' in found_types}}
@@ -208,8 +208,8 @@ cdef class CUgraphicsResource:
        Get memory address of class instance
    """
-    cdef ccuda.CUgraphicsResource __val
-    cdef ccuda.CUgraphicsResource* _ptr
+    cdef cydriver.CUgraphicsResource __val
+    cdef cydriver.CUgraphicsResource* _ptr
{{endif}}
{{if 'CUexternalMemory' in found_types}}
@@ -225,8 +225,8 @@
cdef class CUexternalMemory: Get memory address of class instance """ - cdef ccuda.CUexternalMemory __val - cdef ccuda.CUexternalMemory* _ptr + cdef cydriver.CUexternalMemory __val + cdef cydriver.CUexternalMemory* _ptr {{endif}} {{if 'CUexternalSemaphore' in found_types}} @@ -242,8 +242,8 @@ cdef class CUexternalSemaphore: Get memory address of class instance """ - cdef ccuda.CUexternalSemaphore __val - cdef ccuda.CUexternalSemaphore* _ptr + cdef cydriver.CUexternalSemaphore __val + cdef cydriver.CUexternalSemaphore* _ptr {{endif}} {{if 'CUgraph' in found_types}} @@ -259,8 +259,8 @@ cdef class CUgraph: Get memory address of class instance """ - cdef ccuda.CUgraph __val - cdef ccuda.CUgraph* _ptr + cdef cydriver.CUgraph __val + cdef cydriver.CUgraph* _ptr {{endif}} {{if 'CUgraphNode' in found_types}} @@ -276,8 +276,8 @@ cdef class CUgraphNode: Get memory address of class instance """ - cdef ccuda.CUgraphNode __val - cdef ccuda.CUgraphNode* _ptr + cdef cydriver.CUgraphNode __val + cdef cydriver.CUgraphNode* _ptr {{endif}} {{if 'CUgraphExec' in found_types}} @@ -293,8 +293,8 @@ cdef class CUgraphExec: Get memory address of class instance """ - cdef ccuda.CUgraphExec __val - cdef ccuda.CUgraphExec* _ptr + cdef cydriver.CUgraphExec __val + cdef cydriver.CUgraphExec* _ptr {{endif}} {{if 'CUmemoryPool' in found_types}} @@ -310,8 +310,8 @@ cdef class CUmemoryPool: Get memory address of class instance """ - cdef ccuda.CUmemoryPool __val - cdef ccuda.CUmemoryPool* _ptr + cdef cydriver.CUmemoryPool __val + cdef cydriver.CUmemoryPool* _ptr {{endif}} {{if 'CUuserObject' in found_types}} @@ -327,8 +327,8 @@ cdef class CUuserObject: Get memory address of class instance """ - cdef ccuda.CUuserObject __val - cdef ccuda.CUuserObject* _ptr + cdef cydriver.CUuserObject __val + cdef cydriver.CUuserObject* _ptr {{endif}} {{if 'CUgraphDeviceNode' in found_types}} @@ -344,8 +344,8 @@ cdef class CUgraphDeviceNode: Get memory address of class instance """ - cdef ccuda.CUgraphDeviceNode __val - cdef ccuda.CUgraphDeviceNode* _ptr + cdef cydriver.CUgraphDeviceNode __val + cdef cydriver.CUgraphDeviceNode* _ptr {{endif}} {{if 'CUasyncCallbackHandle' in found_types}} @@ -361,8 +361,8 @@ cdef class CUasyncCallbackHandle: Get memory address of class instance """ - cdef ccuda.CUasyncCallbackHandle __val - cdef ccuda.CUasyncCallbackHandle* _ptr + cdef cydriver.CUasyncCallbackHandle __val + cdef cydriver.CUasyncCallbackHandle* _ptr {{endif}} {{if 'CUgreenCtx' in found_types}} @@ -378,8 +378,8 @@ cdef class CUgreenCtx: Get memory address of class instance """ - cdef ccuda.CUgreenCtx __val - cdef ccuda.CUgreenCtx* _ptr + cdef cydriver.CUgreenCtx __val + cdef cydriver.CUgreenCtx* _ptr {{endif}} {{if 'CUlinkState' in found_types}} @@ -393,8 +393,8 @@ cdef class CUlinkState: Get memory address of class instance """ - cdef ccuda.CUlinkState __val - cdef ccuda.CUlinkState* _ptr + cdef cydriver.CUlinkState __val + cdef cydriver.CUlinkState* _ptr cdef list _keepalive {{endif}} @@ -411,8 +411,8 @@ cdef class CUdevResourceDesc: Get memory address of class instance """ - cdef ccuda.CUdevResourceDesc __val - cdef ccuda.CUdevResourceDesc* _ptr + cdef cydriver.CUdevResourceDesc __val + cdef cydriver.CUdevResourceDesc* _ptr {{endif}} {{if True}} @@ -428,8 +428,8 @@ cdef class CUeglStreamConnection: Get memory address of class instance """ - cdef ccuda.CUeglStreamConnection __val - cdef ccuda.CUeglStreamConnection* _ptr + cdef cydriver.CUeglStreamConnection __val + cdef cydriver.CUeglStreamConnection* _ptr {{endif}} {{if True}} @@ -443,8 
+443,8 @@ cdef class EGLImageKHR: Get memory address of class instance """ - cdef ccuda.EGLImageKHR __val - cdef ccuda.EGLImageKHR* _ptr + cdef cydriver.EGLImageKHR __val + cdef cydriver.EGLImageKHR* _ptr {{endif}} {{if True}} @@ -458,8 +458,8 @@ cdef class EGLStreamKHR: Get memory address of class instance """ - cdef ccuda.EGLStreamKHR __val - cdef ccuda.EGLStreamKHR* _ptr + cdef cydriver.EGLStreamKHR __val + cdef cydriver.EGLStreamKHR* _ptr {{endif}} {{if True}} @@ -473,8 +473,8 @@ cdef class EGLSyncKHR: Get memory address of class instance """ - cdef ccuda.EGLSyncKHR __val - cdef ccuda.EGLSyncKHR* _ptr + cdef cydriver.EGLSyncKHR __val + cdef cydriver.EGLSyncKHR* _ptr {{endif}} {{if 'CUasyncCallback' in found_types}} @@ -488,8 +488,8 @@ cdef class CUasyncCallback: Get memory address of class instance """ - cdef ccuda.CUasyncCallback __val - cdef ccuda.CUasyncCallback* _ptr + cdef cydriver.CUasyncCallback __val + cdef cydriver.CUasyncCallback* _ptr {{endif}} {{if 'CUhostFn' in found_types}} @@ -503,8 +503,8 @@ cdef class CUhostFn: Get memory address of class instance """ - cdef ccuda.CUhostFn __val - cdef ccuda.CUhostFn* _ptr + cdef cydriver.CUhostFn __val + cdef cydriver.CUhostFn* _ptr {{endif}} {{if 'CUstreamCallback' in found_types}} @@ -518,8 +518,8 @@ cdef class CUstreamCallback: Get memory address of class instance """ - cdef ccuda.CUstreamCallback __val - cdef ccuda.CUstreamCallback* _ptr + cdef cydriver.CUstreamCallback __val + cdef cydriver.CUstreamCallback* _ptr {{endif}} {{if 'CUoccupancyB2DSize' in found_types}} @@ -533,8 +533,8 @@ cdef class CUoccupancyB2DSize: Get memory address of class instance """ - cdef ccuda.CUoccupancyB2DSize __val - cdef ccuda.CUoccupancyB2DSize* _ptr + cdef cydriver.CUoccupancyB2DSize __val + cdef cydriver.CUoccupancyB2DSize* _ptr {{endif}} {{if 'struct CUuuid_st' in found_types}} @@ -552,8 +552,8 @@ cdef class CUuuid_st: Get memory address of class instance """ - cdef ccuda.CUuuid_st __val - cdef ccuda.CUuuid_st* _ptr + cdef cydriver.CUuuid_st __val + cdef cydriver.CUuuid_st* _ptr {{endif}} {{if 'struct CUmemFabricHandle_st' in found_types}} @@ -575,8 +575,8 @@ cdef class CUmemFabricHandle_st: Get memory address of class instance """ - cdef ccuda.CUmemFabricHandle_st __val - cdef ccuda.CUmemFabricHandle_st* _ptr + cdef cydriver.CUmemFabricHandle_st __val + cdef cydriver.CUmemFabricHandle_st* _ptr {{endif}} {{if 'struct CUipcEventHandle_st' in found_types}} @@ -595,8 +595,8 @@ cdef class CUipcEventHandle_st: Get memory address of class instance """ - cdef ccuda.CUipcEventHandle_st __val - cdef ccuda.CUipcEventHandle_st* _ptr + cdef cydriver.CUipcEventHandle_st __val + cdef cydriver.CUipcEventHandle_st* _ptr {{endif}} {{if 'struct CUipcMemHandle_st' in found_types}} @@ -615,8 +615,8 @@ cdef class CUipcMemHandle_st: Get memory address of class instance """ - cdef ccuda.CUipcMemHandle_st __val - cdef ccuda.CUipcMemHandle_st* _ptr + cdef cydriver.CUipcMemHandle_st __val + cdef cydriver.CUipcMemHandle_st* _ptr {{endif}} {{if 'union CUstreamBatchMemOpParams_union' in found_types}} @@ -643,7 +643,7 @@ cdef class CUstreamMemOpWaitValueParams_st: Get memory address of class instance """ - cdef ccuda.CUstreamBatchMemOpParams_union* _ptr + cdef cydriver.CUstreamBatchMemOpParams_union* _ptr cdef CUdeviceptr _address cdef cuuint32_t _value cdef cuuint64_t _value64 @@ -674,7 +674,7 @@ cdef class CUstreamMemOpWriteValueParams_st: Get memory address of class instance """ - cdef ccuda.CUstreamBatchMemOpParams_union* _ptr + cdef 
cydriver.CUstreamBatchMemOpParams_union* _ptr cdef CUdeviceptr _address cdef cuuint32_t _value cdef cuuint64_t _value64 @@ -697,7 +697,7 @@ cdef class CUstreamMemOpFlushRemoteWritesParams_st: Get memory address of class instance """ - cdef ccuda.CUstreamBatchMemOpParams_union* _ptr + cdef cydriver.CUstreamBatchMemOpParams_union* _ptr {{endif}} {{if 'union CUstreamBatchMemOpParams_union' in found_types}} @@ -716,7 +716,7 @@ cdef class CUstreamMemOpMemoryBarrierParams_st: Get memory address of class instance """ - cdef ccuda.CUstreamBatchMemOpParams_union* _ptr + cdef cydriver.CUstreamBatchMemOpParams_union* _ptr {{endif}} {{if 'union CUstreamBatchMemOpParams_union' in found_types}} @@ -745,8 +745,8 @@ cdef class CUstreamBatchMemOpParams_union: Get memory address of class instance """ - cdef ccuda.CUstreamBatchMemOpParams_union __val - cdef ccuda.CUstreamBatchMemOpParams_union* _ptr + cdef cydriver.CUstreamBatchMemOpParams_union __val + cdef cydriver.CUstreamBatchMemOpParams_union* _ptr cdef CUstreamMemOpWaitValueParams_st _waitValue cdef CUstreamMemOpWriteValueParams_st _writeValue cdef CUstreamMemOpFlushRemoteWritesParams_st _flushRemoteWrites @@ -773,11 +773,11 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st: Get memory address of class instance """ - cdef ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st __val - cdef ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st* _ptr + cdef cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st __val + cdef cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st* _ptr cdef CUcontext _ctx cdef size_t _paramArray_length - cdef ccuda.CUstreamBatchMemOpParams* _paramArray + cdef cydriver.CUstreamBatchMemOpParams* _paramArray {{endif}} {{if 'struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st' in found_types}} @@ -803,11 +803,11 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st: Get memory address of class instance """ - cdef ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st __val - cdef ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st* _ptr + cdef cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st __val + cdef cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st* _ptr cdef CUcontext _ctx cdef size_t _paramArray_length - cdef ccuda.CUstreamBatchMemOpParams* _paramArray + cdef cydriver.CUstreamBatchMemOpParams* _paramArray {{endif}} {{if 'struct CUasyncNotificationInfo_st' in found_types}} @@ -825,7 +825,7 @@ cdef class anon_struct0: Get memory address of class instance """ - cdef ccuda.CUasyncNotificationInfo_st* _ptr + cdef cydriver.CUasyncNotificationInfo_st* _ptr {{endif}} {{if 'struct CUasyncNotificationInfo_st' in found_types}} @@ -842,7 +842,7 @@ cdef class anon_union2: Get memory address of class instance """ - cdef ccuda.CUasyncNotificationInfo_st* _ptr + cdef cydriver.CUasyncNotificationInfo_st* _ptr cdef anon_struct0 _overBudget {{endif}} {{if 'struct CUasyncNotificationInfo_st' in found_types}} @@ -864,8 +864,8 @@ cdef class CUasyncNotificationInfo_st: Get memory address of class instance """ - cdef ccuda.CUasyncNotificationInfo_st* _val_ptr - cdef ccuda.CUasyncNotificationInfo_st* _ptr + cdef cydriver.CUasyncNotificationInfo_st* _val_ptr + cdef cydriver.CUasyncNotificationInfo_st* _ptr cdef anon_union2 _info {{endif}} {{if 'struct CUdevprop_st' in found_types}} @@ -903,8 +903,8 @@ cdef class CUdevprop_st: Get memory address of class instance """ - cdef ccuda.CUdevprop_st __val - cdef ccuda.CUdevprop_st* _ptr + cdef cydriver.CUdevprop_st __val + cdef cydriver.CUdevprop_st* _ptr {{endif}} {{if 'struct CUaccessPolicyWindow_st' in found_types}} @@ -943,8 +943,8 @@ cdef class CUaccessPolicyWindow_st: Get memory address 
of class instance """ - cdef ccuda.CUaccessPolicyWindow_st __val - cdef ccuda.CUaccessPolicyWindow_st* _ptr + cdef cydriver.CUaccessPolicyWindow_st __val + cdef cydriver.CUaccessPolicyWindow_st* _ptr {{endif}} {{if 'struct CUDA_KERNEL_NODE_PARAMS_st' in found_types}} @@ -981,10 +981,10 @@ cdef class CUDA_KERNEL_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_KERNEL_NODE_PARAMS_st __val - cdef ccuda.CUDA_KERNEL_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_KERNEL_NODE_PARAMS_st __val + cdef cydriver.CUDA_KERNEL_NODE_PARAMS_st* _ptr cdef CUfunction _func - cdef utils.HelperKernelParams _ckernelParams + cdef utils.HelperKernelParams _cykernelParams {{endif}} {{if 'struct CUDA_KERNEL_NODE_PARAMS_v2_st' in found_types}} @@ -1027,10 +1027,10 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v2_st: Get memory address of class instance """ - cdef ccuda.CUDA_KERNEL_NODE_PARAMS_v2_st __val - cdef ccuda.CUDA_KERNEL_NODE_PARAMS_v2_st* _ptr + cdef cydriver.CUDA_KERNEL_NODE_PARAMS_v2_st __val + cdef cydriver.CUDA_KERNEL_NODE_PARAMS_v2_st* _ptr cdef CUfunction _func - cdef utils.HelperKernelParams _ckernelParams + cdef utils.HelperKernelParams _cykernelParams cdef CUkernel _kern cdef CUcontext _ctx {{endif}} @@ -1075,10 +1075,10 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v3_st: Get memory address of class instance """ - cdef ccuda.CUDA_KERNEL_NODE_PARAMS_v3_st __val - cdef ccuda.CUDA_KERNEL_NODE_PARAMS_v3_st* _ptr + cdef cydriver.CUDA_KERNEL_NODE_PARAMS_v3_st __val + cdef cydriver.CUDA_KERNEL_NODE_PARAMS_v3_st* _ptr cdef CUfunction _func - cdef utils.HelperKernelParams _ckernelParams + cdef utils.HelperKernelParams _cykernelParams cdef CUkernel _kern cdef CUcontext _ctx {{endif}} @@ -1109,8 +1109,8 @@ cdef class CUDA_MEMSET_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEMSET_NODE_PARAMS_st __val - cdef ccuda.CUDA_MEMSET_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_MEMSET_NODE_PARAMS_st __val + cdef cydriver.CUDA_MEMSET_NODE_PARAMS_st* _ptr cdef CUdeviceptr _dst {{endif}} {{if 'struct CUDA_MEMSET_NODE_PARAMS_v2_st' in found_types}} @@ -1142,8 +1142,8 @@ cdef class CUDA_MEMSET_NODE_PARAMS_v2_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEMSET_NODE_PARAMS_v2_st __val - cdef ccuda.CUDA_MEMSET_NODE_PARAMS_v2_st* _ptr + cdef cydriver.CUDA_MEMSET_NODE_PARAMS_v2_st __val + cdef cydriver.CUDA_MEMSET_NODE_PARAMS_v2_st* _ptr cdef CUdeviceptr _dst cdef CUcontext _ctx {{endif}} @@ -1166,8 +1166,8 @@ cdef class CUDA_HOST_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_HOST_NODE_PARAMS_st __val - cdef ccuda.CUDA_HOST_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_HOST_NODE_PARAMS_st __val + cdef cydriver.CUDA_HOST_NODE_PARAMS_st* _ptr cdef CUhostFn _fn {{endif}} {{if 'struct CUDA_HOST_NODE_PARAMS_v2_st' in found_types}} @@ -1189,8 +1189,8 @@ cdef class CUDA_HOST_NODE_PARAMS_v2_st: Get memory address of class instance """ - cdef ccuda.CUDA_HOST_NODE_PARAMS_v2_st __val - cdef ccuda.CUDA_HOST_NODE_PARAMS_v2_st* _ptr + cdef cydriver.CUDA_HOST_NODE_PARAMS_v2_st __val + cdef cydriver.CUDA_HOST_NODE_PARAMS_v2_st* _ptr cdef CUhostFn _fn {{endif}} {{if 'struct CUDA_CONDITIONAL_NODE_PARAMS' in found_types}} @@ -1229,11 +1229,11 @@ cdef class CUDA_CONDITIONAL_NODE_PARAMS: Get memory address of class instance """ - cdef ccuda.CUDA_CONDITIONAL_NODE_PARAMS __val - cdef ccuda.CUDA_CONDITIONAL_NODE_PARAMS* _ptr + cdef cydriver.CUDA_CONDITIONAL_NODE_PARAMS __val + cdef cydriver.CUDA_CONDITIONAL_NODE_PARAMS* _ptr cdef CUgraphConditionalHandle _handle cdef size_t 
_phGraph_out_length - cdef ccuda.CUgraph* _phGraph_out + cdef cydriver.CUgraph* _phGraph_out cdef CUcontext _ctx {{endif}} @@ -1279,8 +1279,8 @@ cdef class CUgraphEdgeData_st: Get memory address of class instance """ - cdef ccuda.CUgraphEdgeData_st __val - cdef ccuda.CUgraphEdgeData_st* _ptr + cdef cydriver.CUgraphEdgeData_st __val + cdef cydriver.CUgraphEdgeData_st* _ptr {{endif}} {{if 'struct CUDA_GRAPH_INSTANTIATE_PARAMS_st' in found_types}} @@ -1305,8 +1305,8 @@ cdef class CUDA_GRAPH_INSTANTIATE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_GRAPH_INSTANTIATE_PARAMS_st __val - cdef ccuda.CUDA_GRAPH_INSTANTIATE_PARAMS_st* _ptr + cdef cydriver.CUDA_GRAPH_INSTANTIATE_PARAMS_st __val + cdef cydriver.CUDA_GRAPH_INSTANTIATE_PARAMS_st* _ptr cdef cuuint64_t _flags cdef CUstream _hUploadStream cdef CUgraphNode _hErrNode_out @@ -1336,8 +1336,8 @@ cdef class CUlaunchMemSyncDomainMap_st: Get memory address of class instance """ - cdef ccuda.CUlaunchMemSyncDomainMap_st __val - cdef ccuda.CUlaunchMemSyncDomainMap_st* _ptr + cdef cydriver.CUlaunchMemSyncDomainMap_st __val + cdef cydriver.CUlaunchMemSyncDomainMap_st* _ptr {{endif}} {{if 'union CUlaunchAttributeValue_union' in found_types}} @@ -1358,7 +1358,7 @@ cdef class anon_struct1: Get memory address of class instance """ - cdef ccuda.CUlaunchAttributeValue_union* _ptr + cdef cydriver.CUlaunchAttributeValue_union* _ptr {{endif}} {{if 'union CUlaunchAttributeValue_union' in found_types}} @@ -1379,7 +1379,7 @@ cdef class anon_struct2: Get memory address of class instance """ - cdef ccuda.CUlaunchAttributeValue_union* _ptr + cdef cydriver.CUlaunchAttributeValue_union* _ptr cdef CUevent _event {{endif}} {{if 'union CUlaunchAttributeValue_union' in found_types}} @@ -1399,7 +1399,7 @@ cdef class anon_struct3: Get memory address of class instance """ - cdef ccuda.CUlaunchAttributeValue_union* _ptr + cdef cydriver.CUlaunchAttributeValue_union* _ptr cdef CUevent _event {{endif}} {{if 'union CUlaunchAttributeValue_union' in found_types}} @@ -1419,7 +1419,7 @@ cdef class anon_struct4: Get memory address of class instance """ - cdef ccuda.CUlaunchAttributeValue_union* _ptr + cdef cydriver.CUlaunchAttributeValue_union* _ptr cdef CUgraphDeviceNode _devNode {{endif}} {{if 'union CUlaunchAttributeValue_union' in found_types}} @@ -1495,8 +1495,8 @@ cdef class CUlaunchAttributeValue_union: Get memory address of class instance """ - cdef ccuda.CUlaunchAttributeValue_union __val - cdef ccuda.CUlaunchAttributeValue_union* _ptr + cdef cydriver.CUlaunchAttributeValue_union __val + cdef cydriver.CUlaunchAttributeValue_union* _ptr cdef CUaccessPolicyWindow _accessPolicyWindow cdef anon_struct1 _clusterDim cdef anon_struct2 _programmaticEvent @@ -1523,8 +1523,8 @@ cdef class CUlaunchAttribute_st: Get memory address of class instance """ - cdef ccuda.CUlaunchAttribute_st __val - cdef ccuda.CUlaunchAttribute_st* _ptr + cdef cydriver.CUlaunchAttribute_st __val + cdef cydriver.CUlaunchAttribute_st* _ptr cdef CUlaunchAttributeValue _value {{endif}} {{if 'struct CUlaunchConfig_st' in found_types}} @@ -1562,11 +1562,11 @@ cdef class CUlaunchConfig_st: Get memory address of class instance """ - cdef ccuda.CUlaunchConfig_st __val - cdef ccuda.CUlaunchConfig_st* _ptr + cdef cydriver.CUlaunchConfig_st __val + cdef cydriver.CUlaunchConfig_st* _ptr cdef CUstream _hStream cdef size_t _attrs_length - cdef ccuda.CUlaunchAttribute* _attrs + cdef cydriver.CUlaunchAttribute* _attrs {{endif}} {{if 'struct CUexecAffinitySmCount_st' in found_types}} @@ -1586,8 +1586,8 @@ 
cdef class CUexecAffinitySmCount_st: Get memory address of class instance """ - cdef ccuda.CUexecAffinitySmCount_st __val - cdef ccuda.CUexecAffinitySmCount_st* _ptr + cdef cydriver.CUexecAffinitySmCount_st __val + cdef cydriver.CUexecAffinitySmCount_st* _ptr {{endif}} {{if 'struct CUexecAffinityParam_st' in found_types}} @@ -1604,7 +1604,7 @@ cdef class anon_union3: Get memory address of class instance """ - cdef ccuda.CUexecAffinityParam_st* _ptr + cdef cydriver.CUexecAffinityParam_st* _ptr cdef CUexecAffinitySmCount _smCount {{endif}} {{if 'struct CUexecAffinityParam_st' in found_types}} @@ -1626,8 +1626,8 @@ cdef class CUexecAffinityParam_st: Get memory address of class instance """ - cdef ccuda.CUexecAffinityParam_st* _val_ptr - cdef ccuda.CUexecAffinityParam_st* _ptr + cdef cydriver.CUexecAffinityParam_st* _val_ptr + cdef cydriver.CUexecAffinityParam_st* _ptr cdef anon_union3 _param {{endif}} {{if 'struct CUctxCigParam_st' in found_types}} @@ -1649,8 +1649,8 @@ cdef class CUctxCigParam_st: Get memory address of class instance """ - cdef ccuda.CUctxCigParam_st __val - cdef ccuda.CUctxCigParam_st* _ptr + cdef cydriver.CUctxCigParam_st __val + cdef cydriver.CUctxCigParam_st* _ptr {{endif}} {{if 'struct CUctxCreateParams_st' in found_types}} @@ -1674,13 +1674,13 @@ cdef class CUctxCreateParams_st: Get memory address of class instance """ - cdef ccuda.CUctxCreateParams_st __val - cdef ccuda.CUctxCreateParams_st* _ptr + cdef cydriver.CUctxCreateParams_st __val + cdef cydriver.CUctxCreateParams_st* _ptr cdef size_t _execAffinityParams_length - cdef ccuda.CUexecAffinityParam* _execAffinityParams + cdef cydriver.CUexecAffinityParam* _execAffinityParams cdef size_t _cigParams_length - cdef ccuda.CUctxCigParam* _cigParams + cdef cydriver.CUctxCigParam* _cigParams {{endif}} {{if 'struct CUlibraryHostUniversalFunctionAndDataTable_st' in found_types}} @@ -1704,8 +1704,8 @@ cdef class CUlibraryHostUniversalFunctionAndDataTable_st: Get memory address of class instance """ - cdef ccuda.CUlibraryHostUniversalFunctionAndDataTable_st __val - cdef ccuda.CUlibraryHostUniversalFunctionAndDataTable_st* _ptr + cdef cydriver.CUlibraryHostUniversalFunctionAndDataTable_st __val + cdef cydriver.CUlibraryHostUniversalFunctionAndDataTable_st* _ptr {{endif}} {{if 'struct CUDA_MEMCPY2D_st' in found_types}} @@ -1754,8 +1754,8 @@ cdef class CUDA_MEMCPY2D_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEMCPY2D_st __val - cdef ccuda.CUDA_MEMCPY2D_st* _ptr + cdef cydriver.CUDA_MEMCPY2D_st __val + cdef cydriver.CUDA_MEMCPY2D_st* _ptr cdef CUdeviceptr _srcDevice cdef CUarray _srcArray cdef CUdeviceptr _dstDevice @@ -1827,8 +1827,8 @@ cdef class CUDA_MEMCPY3D_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEMCPY3D_st __val - cdef ccuda.CUDA_MEMCPY3D_st* _ptr + cdef cydriver.CUDA_MEMCPY3D_st __val + cdef cydriver.CUDA_MEMCPY3D_st* _ptr cdef CUdeviceptr _srcDevice cdef CUarray _srcArray cdef CUdeviceptr _dstDevice @@ -1901,8 +1901,8 @@ cdef class CUDA_MEMCPY3D_PEER_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEMCPY3D_PEER_st __val - cdef ccuda.CUDA_MEMCPY3D_PEER_st* _ptr + cdef cydriver.CUDA_MEMCPY3D_PEER_st __val + cdef cydriver.CUDA_MEMCPY3D_PEER_st* _ptr cdef CUdeviceptr _srcDevice cdef CUarray _srcArray cdef CUcontext _srcContext @@ -1933,8 +1933,8 @@ cdef class CUDA_MEMCPY_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEMCPY_NODE_PARAMS_st __val - cdef ccuda.CUDA_MEMCPY_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_MEMCPY_NODE_PARAMS_st __val + 
cdef cydriver.CUDA_MEMCPY_NODE_PARAMS_st* _ptr cdef CUcontext _copyCtx cdef CUDA_MEMCPY3D _copyParams {{endif}} @@ -1961,8 +1961,8 @@ cdef class CUDA_ARRAY_DESCRIPTOR_st: Get memory address of class instance """ - cdef ccuda.CUDA_ARRAY_DESCRIPTOR_st __val - cdef ccuda.CUDA_ARRAY_DESCRIPTOR_st* _ptr + cdef cydriver.CUDA_ARRAY_DESCRIPTOR_st __val + cdef cydriver.CUDA_ARRAY_DESCRIPTOR_st* _ptr {{endif}} {{if 'struct CUDA_ARRAY3D_DESCRIPTOR_st' in found_types}} @@ -1991,8 +1991,8 @@ cdef class CUDA_ARRAY3D_DESCRIPTOR_st: Get memory address of class instance """ - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_st __val - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR_st* _ptr + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_st __val + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR_st* _ptr {{endif}} {{if 'struct CUDA_ARRAY_SPARSE_PROPERTIES_st' in found_types}} @@ -2013,7 +2013,7 @@ cdef class anon_struct5: Get memory address of class instance """ - cdef ccuda.CUDA_ARRAY_SPARSE_PROPERTIES_st* _ptr + cdef cydriver.CUDA_ARRAY_SPARSE_PROPERTIES_st* _ptr {{endif}} {{if 'struct CUDA_ARRAY_SPARSE_PROPERTIES_st' in found_types}} @@ -2041,8 +2041,8 @@ cdef class CUDA_ARRAY_SPARSE_PROPERTIES_st: Get memory address of class instance """ - cdef ccuda.CUDA_ARRAY_SPARSE_PROPERTIES_st __val - cdef ccuda.CUDA_ARRAY_SPARSE_PROPERTIES_st* _ptr + cdef cydriver.CUDA_ARRAY_SPARSE_PROPERTIES_st __val + cdef cydriver.CUDA_ARRAY_SPARSE_PROPERTIES_st* _ptr cdef anon_struct5 _tileExtent {{endif}} {{if 'struct CUDA_ARRAY_MEMORY_REQUIREMENTS_st' in found_types}} @@ -2066,8 +2066,8 @@ cdef class CUDA_ARRAY_MEMORY_REQUIREMENTS_st: Get memory address of class instance """ - cdef ccuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_st __val - cdef ccuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_st* _ptr + cdef cydriver.CUDA_ARRAY_MEMORY_REQUIREMENTS_st __val + cdef cydriver.CUDA_ARRAY_MEMORY_REQUIREMENTS_st* _ptr {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -2084,7 +2084,7 @@ cdef class anon_struct6: Get memory address of class instance """ - cdef ccuda.CUDA_RESOURCE_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _ptr cdef CUarray _hArray {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -2102,7 +2102,7 @@ cdef class anon_struct7: Get memory address of class instance """ - cdef ccuda.CUDA_RESOURCE_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _ptr cdef CUmipmappedArray _hMipmappedArray {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -2126,7 +2126,7 @@ cdef class anon_struct8: Get memory address of class instance """ - cdef ccuda.CUDA_RESOURCE_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _ptr cdef CUdeviceptr _devPtr {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -2154,7 +2154,7 @@ cdef class anon_struct9: Get memory address of class instance """ - cdef ccuda.CUDA_RESOURCE_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _ptr cdef CUdeviceptr _devPtr {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -2172,7 +2172,7 @@ cdef class anon_struct10: Get memory address of class instance """ - cdef ccuda.CUDA_RESOURCE_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _ptr {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -2197,7 +2197,7 @@ cdef class anon_union4: Get memory address of class instance """ - cdef ccuda.CUDA_RESOURCE_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _ptr cdef anon_struct6 _array cdef anon_struct7 _mipmap cdef anon_struct8 _linear @@ -2225,8 +2225,8 @@ cdef class CUDA_RESOURCE_DESC_st: Get memory address of class instance """ - cdef 
ccuda.CUDA_RESOURCE_DESC_st* _val_ptr - cdef ccuda.CUDA_RESOURCE_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _val_ptr + cdef cydriver.CUDA_RESOURCE_DESC_st* _ptr cdef anon_union4 _res {{endif}} {{if 'struct CUDA_TEXTURE_DESC_st' in found_types}} @@ -2264,8 +2264,8 @@ cdef class CUDA_TEXTURE_DESC_st: Get memory address of class instance """ - cdef ccuda.CUDA_TEXTURE_DESC_st __val - cdef ccuda.CUDA_TEXTURE_DESC_st* _ptr + cdef cydriver.CUDA_TEXTURE_DESC_st __val + cdef cydriver.CUDA_TEXTURE_DESC_st* _ptr {{endif}} {{if 'struct CUDA_RESOURCE_VIEW_DESC_st' in found_types}} @@ -2300,8 +2300,8 @@ cdef class CUDA_RESOURCE_VIEW_DESC_st: Get memory address of class instance """ - cdef ccuda.CUDA_RESOURCE_VIEW_DESC_st __val - cdef ccuda.CUDA_RESOURCE_VIEW_DESC_st* _ptr + cdef cydriver.CUDA_RESOURCE_VIEW_DESC_st __val + cdef cydriver.CUDA_RESOURCE_VIEW_DESC_st* _ptr {{endif}} {{if 'struct CUtensorMap_st' in found_types}} @@ -2321,8 +2321,8 @@ cdef class CUtensorMap_st: Get memory address of class instance """ - cdef ccuda.CUtensorMap_st __val - cdef ccuda.CUtensorMap_st* _ptr + cdef cydriver.CUtensorMap_st __val + cdef cydriver.CUtensorMap_st* _ptr {{endif}} {{if 'struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st' in found_types}} @@ -2343,8 +2343,8 @@ cdef class CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st: Get memory address of class instance """ - cdef ccuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st __val - cdef ccuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st* _ptr + cdef cydriver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st __val + cdef cydriver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st* _ptr {{endif}} {{if 'struct CUDA_LAUNCH_PARAMS_st' in found_types}} @@ -2381,11 +2381,11 @@ cdef class CUDA_LAUNCH_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_LAUNCH_PARAMS_st __val - cdef ccuda.CUDA_LAUNCH_PARAMS_st* _ptr + cdef cydriver.CUDA_LAUNCH_PARAMS_st __val + cdef cydriver.CUDA_LAUNCH_PARAMS_st* _ptr cdef CUfunction _function cdef CUstream _hStream - cdef utils.HelperKernelParams _ckernelParams + cdef utils.HelperKernelParams _cykernelParams {{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st' in found_types}} @@ -2404,7 +2404,7 @@ cdef class anon_struct11: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st' in found_types}} @@ -2425,7 +2425,7 @@ cdef class anon_union5: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _ptr cdef anon_struct11 _win32 {{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st' in found_types}} @@ -2453,8 +2453,8 @@ cdef class CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _val_ptr - cdef ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _val_ptr + cdef cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st* _ptr cdef anon_union5 _handle {{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st' in found_types}} @@ -2480,8 +2480,8 @@ cdef class CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st __val - cdef ccuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st __val + cdef cydriver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st* _ptr {{endif}} {{if 'struct 
CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st' in found_types}} @@ -2507,8 +2507,8 @@ cdef class CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st __val - cdef ccuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st __val + cdef cydriver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st* _ptr cdef CUDA_ARRAY3D_DESCRIPTOR _arrayDesc {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st' in found_types}} @@ -2528,7 +2528,7 @@ cdef class anon_struct12: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st' in found_types}} @@ -2549,7 +2549,7 @@ cdef class anon_union6: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _ptr cdef anon_struct12 _win32 {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st' in found_types}} @@ -2575,8 +2575,8 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _val_ptr - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _val_ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st* _ptr cdef anon_union6 _handle {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st' in found_types}} @@ -2594,7 +2594,7 @@ cdef class anon_struct13: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st' in found_types}} @@ -2613,7 +2613,7 @@ cdef class anon_union7: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st' in found_types}} @@ -2630,7 +2630,7 @@ cdef class anon_struct14: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st' in found_types}} @@ -2653,7 +2653,7 @@ cdef class anon_struct15: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr cdef anon_struct13 _fence cdef anon_union7 _nvSciSync cdef anon_struct14 _keyedMutex @@ -2686,8 +2686,8 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st __val - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st __val + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st* _ptr cdef anon_struct15 _params {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st' in found_types}} @@ -2705,7 +2705,7 @@ cdef class anon_struct16: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st' in 
found_types}} @@ -2724,7 +2724,7 @@ cdef class anon_union8: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st' in found_types}} @@ -2743,7 +2743,7 @@ cdef class anon_struct17: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st' in found_types}} @@ -2766,7 +2766,7 @@ cdef class anon_struct18: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr cdef anon_struct16 _fence cdef anon_union8 _nvSciSync cdef anon_struct17 _keyedMutex @@ -2799,8 +2799,8 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st __val - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st __val + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st* _ptr cdef anon_struct18 _params {{endif}} {{if 'struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st' in found_types}} @@ -2825,13 +2825,13 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st __val - cdef ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st __val + cdef cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st* _ptr cdef size_t _extSemArray_length - cdef ccuda.CUexternalSemaphore* _extSemArray + cdef cydriver.CUexternalSemaphore* _extSemArray cdef size_t _paramsArray_length - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* _paramsArray + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* _paramsArray {{endif}} {{if 'struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st' in found_types}} @@ -2856,13 +2856,13 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st __val - cdef ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st* _ptr + cdef cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st __val + cdef cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st* _ptr cdef size_t _extSemArray_length - cdef ccuda.CUexternalSemaphore* _extSemArray + cdef cydriver.CUexternalSemaphore* _extSemArray cdef size_t _paramsArray_length - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* _paramsArray + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* _paramsArray {{endif}} {{if 'struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st' in found_types}} @@ -2887,13 +2887,13 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st __val - cdef ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st __val + cdef cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st* _ptr cdef size_t _extSemArray_length - cdef ccuda.CUexternalSemaphore* _extSemArray + cdef cydriver.CUexternalSemaphore* _extSemArray cdef size_t _paramsArray_length - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* _paramsArray + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* _paramsArray {{endif}} {{if 'struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st' in found_types}} @@ -2918,13 +2918,13 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st: Get memory address of class 
instance """ - cdef ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st __val - cdef ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st* _ptr + cdef cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st __val + cdef cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st* _ptr cdef size_t _extSemArray_length - cdef ccuda.CUexternalSemaphore* _extSemArray + cdef cydriver.CUexternalSemaphore* _extSemArray cdef size_t _paramsArray_length - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* _paramsArray + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* _paramsArray {{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} @@ -2944,7 +2944,7 @@ cdef class anon_union9: Get memory address of class instance """ - cdef ccuda.CUarrayMapInfo_st* _ptr + cdef cydriver.CUarrayMapInfo_st* _ptr cdef CUmipmappedArray _mipmap cdef CUarray _array {{endif}} @@ -2977,7 +2977,7 @@ cdef class anon_struct19: Get memory address of class instance """ - cdef ccuda.CUarrayMapInfo_st* _ptr + cdef cydriver.CUarrayMapInfo_st* _ptr {{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} @@ -2998,7 +2998,7 @@ cdef class anon_struct20: Get memory address of class instance """ - cdef ccuda.CUarrayMapInfo_st* _ptr + cdef cydriver.CUarrayMapInfo_st* _ptr {{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} @@ -3017,7 +3017,7 @@ cdef class anon_union10: Get memory address of class instance """ - cdef ccuda.CUarrayMapInfo_st* _ptr + cdef cydriver.CUarrayMapInfo_st* _ptr cdef anon_struct19 _sparseLevel cdef anon_struct20 _miptail {{endif}} @@ -3036,7 +3036,7 @@ cdef class anon_union11: Get memory address of class instance """ - cdef ccuda.CUarrayMapInfo_st* _ptr + cdef cydriver.CUarrayMapInfo_st* _ptr cdef CUmemGenericAllocationHandle _memHandle {{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} @@ -3077,8 +3077,8 @@ cdef class CUarrayMapInfo_st: Get memory address of class instance """ - cdef ccuda.CUarrayMapInfo_st* _val_ptr - cdef ccuda.CUarrayMapInfo_st* _ptr + cdef cydriver.CUarrayMapInfo_st* _val_ptr + cdef cydriver.CUarrayMapInfo_st* _ptr cdef anon_union9 _resource cdef anon_union10 _subresource cdef anon_union11 _memHandle @@ -3102,8 +3102,8 @@ cdef class CUmemLocation_st: Get memory address of class instance """ - cdef ccuda.CUmemLocation_st __val - cdef ccuda.CUmemLocation_st* _ptr + cdef cydriver.CUmemLocation_st __val + cdef cydriver.CUmemLocation_st* _ptr {{endif}} {{if 'struct CUmemAllocationProp_st' in found_types}} @@ -3126,7 +3126,7 @@ cdef class anon_struct21: Get memory address of class instance """ - cdef ccuda.CUmemAllocationProp_st* _ptr + cdef cydriver.CUmemAllocationProp_st* _ptr {{endif}} {{if 'struct CUmemAllocationProp_st' in found_types}} @@ -3157,8 +3157,8 @@ cdef class CUmemAllocationProp_st: Get memory address of class instance """ - cdef ccuda.CUmemAllocationProp_st __val - cdef ccuda.CUmemAllocationProp_st* _ptr + cdef cydriver.CUmemAllocationProp_st __val + cdef cydriver.CUmemAllocationProp_st* _ptr cdef CUmemLocation _location cdef anon_struct21 _allocFlags {{endif}} @@ -3188,8 +3188,8 @@ cdef class CUmulticastObjectProp_st: Get memory address of class instance """ - cdef ccuda.CUmulticastObjectProp_st __val - cdef ccuda.CUmulticastObjectProp_st* _ptr + cdef cydriver.CUmulticastObjectProp_st __val + cdef cydriver.CUmulticastObjectProp_st* _ptr {{endif}} {{if 'struct CUmemAccessDesc_st' in found_types}} @@ -3210,8 +3210,8 @@ cdef class CUmemAccessDesc_st: Get memory address of class instance """ - cdef ccuda.CUmemAccessDesc_st __val - cdef ccuda.CUmemAccessDesc_st* _ptr + cdef cydriver.CUmemAccessDesc_st __val + 
cdef cydriver.CUmemAccessDesc_st* _ptr cdef CUmemLocation _location {{endif}} {{if 'struct CUgraphExecUpdateResultInfo_st' in found_types}} @@ -3238,8 +3238,8 @@ cdef class CUgraphExecUpdateResultInfo_st: Get memory address of class instance """ - cdef ccuda.CUgraphExecUpdateResultInfo_st __val - cdef ccuda.CUgraphExecUpdateResultInfo_st* _ptr + cdef cydriver.CUgraphExecUpdateResultInfo_st __val + cdef cydriver.CUgraphExecUpdateResultInfo_st* _ptr cdef CUgraphNode _errorNode cdef CUgraphNode _errorFromNode {{endif}} @@ -3278,8 +3278,8 @@ cdef class CUmemPoolProps_st: Get memory address of class instance """ - cdef ccuda.CUmemPoolProps_st __val - cdef ccuda.CUmemPoolProps_st* _ptr + cdef cydriver.CUmemPoolProps_st __val + cdef cydriver.CUmemPoolProps_st* _ptr cdef CUmemLocation _location {{endif}} {{if 'struct CUmemPoolPtrExportData_st' in found_types}} @@ -3299,8 +3299,8 @@ cdef class CUmemPoolPtrExportData_st: Get memory address of class instance """ - cdef ccuda.CUmemPoolPtrExportData_st __val - cdef ccuda.CUmemPoolPtrExportData_st* _ptr + cdef cydriver.CUmemPoolPtrExportData_st __val + cdef cydriver.CUmemPoolPtrExportData_st* _ptr {{endif}} {{if 'struct CUDA_MEM_ALLOC_NODE_PARAMS_v1_st' in found_types}} @@ -3331,11 +3331,11 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v1_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st __val - cdef ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st* _ptr + cdef cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st __val + cdef cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st* _ptr cdef CUmemPoolProps _poolProps cdef size_t _accessDescs_length - cdef ccuda.CUmemAccessDesc* _accessDescs + cdef cydriver.CUmemAccessDesc* _accessDescs cdef CUdeviceptr _dptr {{endif}} @@ -3368,11 +3368,11 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v2_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st __val - cdef ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st* _ptr + cdef cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st __val + cdef cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st* _ptr cdef CUmemPoolProps _poolProps cdef size_t _accessDescs_length - cdef ccuda.CUmemAccessDesc* _accessDescs + cdef cydriver.CUmemAccessDesc* _accessDescs cdef CUdeviceptr _dptr {{endif}} @@ -3393,8 +3393,8 @@ cdef class CUDA_MEM_FREE_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_MEM_FREE_NODE_PARAMS_st __val - cdef ccuda.CUDA_MEM_FREE_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_MEM_FREE_NODE_PARAMS_st __val + cdef cydriver.CUDA_MEM_FREE_NODE_PARAMS_st* _ptr cdef CUdeviceptr _dptr {{endif}} {{if 'struct CUDA_CHILD_GRAPH_NODE_PARAMS_st' in found_types}} @@ -3415,8 +3415,8 @@ cdef class CUDA_CHILD_GRAPH_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_CHILD_GRAPH_NODE_PARAMS_st __val - cdef ccuda.CUDA_CHILD_GRAPH_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_CHILD_GRAPH_NODE_PARAMS_st __val + cdef cydriver.CUDA_CHILD_GRAPH_NODE_PARAMS_st* _ptr cdef CUgraph _graph {{endif}} {{if 'struct CUDA_EVENT_RECORD_NODE_PARAMS_st' in found_types}} @@ -3436,8 +3436,8 @@ cdef class CUDA_EVENT_RECORD_NODE_PARAMS_st: Get memory address of class instance """ - cdef ccuda.CUDA_EVENT_RECORD_NODE_PARAMS_st __val - cdef ccuda.CUDA_EVENT_RECORD_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_EVENT_RECORD_NODE_PARAMS_st __val + cdef cydriver.CUDA_EVENT_RECORD_NODE_PARAMS_st* _ptr cdef CUevent _event {{endif}} {{if 'struct CUDA_EVENT_WAIT_NODE_PARAMS_st' in found_types}} @@ -3457,8 +3457,8 @@ cdef class CUDA_EVENT_WAIT_NODE_PARAMS_st: Get memory address of 
class instance """ - cdef ccuda.CUDA_EVENT_WAIT_NODE_PARAMS_st __val - cdef ccuda.CUDA_EVENT_WAIT_NODE_PARAMS_st* _ptr + cdef cydriver.CUDA_EVENT_WAIT_NODE_PARAMS_st __val + cdef cydriver.CUDA_EVENT_WAIT_NODE_PARAMS_st* _ptr cdef CUevent _event {{endif}} {{if 'struct CUgraphNodeParams_st' in found_types}} @@ -3510,8 +3510,8 @@ cdef class CUgraphNodeParams_st: Get memory address of class instance """ - cdef ccuda.CUgraphNodeParams_st* _val_ptr - cdef ccuda.CUgraphNodeParams_st* _ptr + cdef cydriver.CUgraphNodeParams_st* _val_ptr + cdef cydriver.CUgraphNodeParams_st* _ptr cdef CUDA_KERNEL_NODE_PARAMS_v3 _kernel cdef CUDA_MEMCPY_NODE_PARAMS _memcpy cdef CUDA_MEMSET_NODE_PARAMS_v2 _memset @@ -3542,8 +3542,8 @@ cdef class CUdevSmResource_st: Get memory address of class instance """ - cdef ccuda.CUdevSmResource_st __val - cdef ccuda.CUdevSmResource_st* _ptr + cdef cydriver.CUdevSmResource_st __val + cdef cydriver.CUdevSmResource_st* _ptr {{endif}} {{if 'struct CUdevResource_st' in found_types}} @@ -3566,8 +3566,8 @@ cdef class CUdevResource_st: Get memory address of class instance """ - cdef ccuda.CUdevResource_st* _val_ptr - cdef ccuda.CUdevResource_st* _ptr + cdef cydriver.CUdevResource_st* _val_ptr + cdef cydriver.CUdevResource_st* _ptr cdef CUdevSmResource _sm {{endif}} {{if True}} @@ -3587,7 +3587,7 @@ cdef class anon_union14: Get memory address of class instance """ - cdef ccuda.CUeglFrame_st* _ptr + cdef cydriver.CUeglFrame_st* _ptr {{endif}} {{if True}} @@ -3626,8 +3626,8 @@ cdef class CUeglFrame_st: Get memory address of class instance """ - cdef ccuda.CUeglFrame_st* _val_ptr - cdef ccuda.CUeglFrame_st* _ptr + cdef cydriver.CUeglFrame_st* _val_ptr + cdef cydriver.CUeglFrame_st* _ptr cdef anon_union14 _frame {{endif}} {{if 'CUdeviceptr' in found_types}} @@ -3643,8 +3643,8 @@ cdef class CUdeviceptr: Get memory address of class instance """ - cdef ccuda.CUdeviceptr __val - cdef ccuda.CUdeviceptr* _ptr + cdef cydriver.CUdeviceptr __val + cdef cydriver.CUdeviceptr* _ptr {{endif}} {{if 'CUdevice' in found_types}} @@ -3659,8 +3659,8 @@ cdef class CUdevice: Get memory address of class instance """ - cdef ccuda.CUdevice __val - cdef ccuda.CUdevice* _ptr + cdef cydriver.CUdevice __val + cdef cydriver.CUdevice* _ptr {{endif}} {{if 'CUtexObject' in found_types}} @@ -3675,8 +3675,8 @@ cdef class CUtexObject: Get memory address of class instance """ - cdef ccuda.CUtexObject __val - cdef ccuda.CUtexObject* _ptr + cdef cydriver.CUtexObject __val + cdef cydriver.CUtexObject* _ptr {{endif}} {{if 'CUsurfObject' in found_types}} @@ -3691,8 +3691,8 @@ cdef class CUsurfObject: Get memory address of class instance """ - cdef ccuda.CUsurfObject __val - cdef ccuda.CUsurfObject* _ptr + cdef cydriver.CUsurfObject __val + cdef cydriver.CUsurfObject* _ptr {{endif}} {{if 'CUgraphConditionalHandle' in found_types}} @@ -3705,8 +3705,8 @@ cdef class CUgraphConditionalHandle: Get memory address of class instance """ - cdef ccuda.CUgraphConditionalHandle __val - cdef ccuda.CUgraphConditionalHandle* _ptr + cdef cydriver.CUgraphConditionalHandle __val + cdef cydriver.CUgraphConditionalHandle* _ptr {{endif}} {{if 'CUuuid' in found_types}} @@ -6556,8 +6556,8 @@ cdef class CUmemGenericAllocationHandle: Get memory address of class instance """ - cdef ccuda.CUmemGenericAllocationHandle __val - cdef ccuda.CUmemGenericAllocationHandle* _ptr + cdef cydriver.CUmemGenericAllocationHandle __val + cdef cydriver.CUmemGenericAllocationHandle* _ptr {{endif}} {{if 'CUarrayMapInfo_v1' in found_types}} @@ -7379,8 +7379,8 @@ cdef class 
cuuint32_t: Get memory address of class instance """ - cdef ccuda.cuuint32_t __val - cdef ccuda.cuuint32_t* _ptr + cdef cydriver.cuuint32_t __val + cdef cydriver.cuuint32_t* _ptr {{endif}} {{if 'cuuint64_t' in found_types}} @@ -7394,8 +7394,8 @@ cdef class cuuint64_t: Get memory address of class instance """ - cdef ccuda.cuuint64_t __val - cdef ccuda.cuuint64_t* _ptr + cdef cydriver.cuuint64_t __val + cdef cydriver.cuuint64_t* _ptr {{endif}} {{if 'CUdeviceptr_v2' in found_types}} @@ -7411,8 +7411,8 @@ cdef class CUdeviceptr_v2: Get memory address of class instance """ - cdef ccuda.CUdeviceptr_v2 __val - cdef ccuda.CUdeviceptr_v2* _ptr + cdef cydriver.CUdeviceptr_v2 __val + cdef cydriver.CUdeviceptr_v2* _ptr {{endif}} {{if 'CUdevice_v1' in found_types}} @@ -7428,8 +7428,8 @@ cdef class CUdevice_v1: Get memory address of class instance """ - cdef ccuda.CUdevice_v1 __val - cdef ccuda.CUdevice_v1* _ptr + cdef cydriver.CUdevice_v1 __val + cdef cydriver.CUdevice_v1* _ptr {{endif}} {{if 'CUtexObject_v1' in found_types}} @@ -7445,8 +7445,8 @@ cdef class CUtexObject_v1: Get memory address of class instance """ - cdef ccuda.CUtexObject_v1 __val - cdef ccuda.CUtexObject_v1* _ptr + cdef cydriver.CUtexObject_v1 __val + cdef cydriver.CUtexObject_v1* _ptr {{endif}} {{if 'CUsurfObject_v1' in found_types}} @@ -7462,8 +7462,8 @@ cdef class CUsurfObject_v1: Get memory address of class instance """ - cdef ccuda.CUsurfObject_v1 __val - cdef ccuda.CUsurfObject_v1* _ptr + cdef cydriver.CUsurfObject_v1 __val + cdef cydriver.CUsurfObject_v1* _ptr {{endif}} {{if 'CUmemGenericAllocationHandle_v1' in found_types}} @@ -7477,8 +7477,8 @@ cdef class CUmemGenericAllocationHandle_v1: Get memory address of class instance """ - cdef ccuda.CUmemGenericAllocationHandle_v1 __val - cdef ccuda.CUmemGenericAllocationHandle_v1* _ptr + cdef cydriver.CUmemGenericAllocationHandle_v1 __val + cdef cydriver.CUmemGenericAllocationHandle_v1* _ptr {{endif}} {{if True}} @@ -7492,8 +7492,8 @@ cdef class GLenum: Get memory address of class instance """ - cdef ccuda.GLenum __val - cdef ccuda.GLenum* _ptr + cdef cydriver.GLenum __val + cdef cydriver.GLenum* _ptr {{endif}} {{if True}} @@ -7507,8 +7507,8 @@ cdef class GLuint: Get memory address of class instance """ - cdef ccuda.GLuint __val - cdef ccuda.GLuint* _ptr + cdef cydriver.GLuint __val + cdef cydriver.GLuint* _ptr {{endif}} {{if True}} @@ -7522,8 +7522,8 @@ cdef class EGLint: Get memory address of class instance """ - cdef ccuda.EGLint __val - cdef ccuda.EGLint* _ptr + cdef cydriver.EGLint __val + cdef cydriver.EGLint* _ptr {{endif}} {{if True}} @@ -7537,8 +7537,8 @@ cdef class VdpDevice: Get memory address of class instance """ - cdef ccuda.VdpDevice __val - cdef ccuda.VdpDevice* _ptr + cdef cydriver.VdpDevice __val + cdef cydriver.VdpDevice* _ptr {{endif}} {{if True}} @@ -7552,8 +7552,8 @@ cdef class VdpGetProcAddress: Get memory address of class instance """ - cdef ccuda.VdpGetProcAddress __val - cdef ccuda.VdpGetProcAddress* _ptr + cdef cydriver.VdpGetProcAddress __val + cdef cydriver.VdpGetProcAddress* _ptr {{endif}} {{if True}} @@ -7567,8 +7567,8 @@ cdef class VdpVideoSurface: Get memory address of class instance """ - cdef ccuda.VdpVideoSurface __val - cdef ccuda.VdpVideoSurface* _ptr + cdef cydriver.VdpVideoSurface __val + cdef cydriver.VdpVideoSurface* _ptr {{endif}} {{if True}} @@ -7582,6 +7582,6 @@ cdef class VdpOutputSurface: Get memory address of class instance """ - cdef ccuda.VdpOutputSurface __val - cdef ccuda.VdpOutputSurface* _ptr + cdef cydriver.VdpOutputSurface __val 
+    cdef cydriver.VdpOutputSurface* _ptr
 {{endif}}
diff --git a/cuda/cuda.pyx.in b/cuda/cuda/bindings/driver.pyx.in
similarity index 82%
rename from cuda/cuda.pyx.in
rename to cuda/cuda/bindings/driver.pyx.in
index f091e479..8bbdd980 100644
--- a/cuda/cuda.pyx.in
+++ b/cuda/cuda/bindings/driver.pyx.in
@@ -42,10 +42,10 @@ ctypedef unsigned long long double_ptr
 ctypedef unsigned long long void_ptr

 #: CUDA API version number
-CUDA_VERSION = ccuda.CUDA_VERSION
+CUDA_VERSION = cydriver.CUDA_VERSION

 #: CUDA IPC handle size
-CU_IPC_HANDLE_SIZE = ccuda.CU_IPC_HANDLE_SIZE
+CU_IPC_HANDLE_SIZE = cydriver.CU_IPC_HANDLE_SIZE

 #: Legacy stream handle
 #:
@@ -53,7 +53,7 @@ CU_IPC_HANDLE_SIZE = ccuda.CU_IPC_HANDLE_SIZE
 #: with legacy synchronization behavior.
 #:
 #: See details of the \link_sync_behavior
-CU_STREAM_LEGACY = ccuda.CU_STREAM_LEGACY
+CU_STREAM_LEGACY = cydriver.CU_STREAM_LEGACY

 #: Per-thread stream handle
 #:
@@ -61,77 +61,77 @@ CU_STREAM_LEGACY = cydriver.CU_STREAM_LEGACY
 #: with per-thread synchronization behavior.
 #:
 #: See details of the \link_sync_behavior
-CU_STREAM_PER_THREAD = ccuda.CU_STREAM_PER_THREAD
+CU_STREAM_PER_THREAD = cydriver.CU_STREAM_PER_THREAD

-CU_COMPUTE_ACCELERATED_TARGET_BASE = ccuda.CU_COMPUTE_ACCELERATED_TARGET_BASE
+CU_COMPUTE_ACCELERATED_TARGET_BASE = cydriver.CU_COMPUTE_ACCELERATED_TARGET_BASE

 #: Conditional node handle flags Default value is applied when graph is
 #: launched.
-CU_GRAPH_COND_ASSIGN_DEFAULT = ccuda.CU_GRAPH_COND_ASSIGN_DEFAULT
+CU_GRAPH_COND_ASSIGN_DEFAULT = cydriver.CU_GRAPH_COND_ASSIGN_DEFAULT

 #: This port activates when the kernel has finished executing.
-CU_GRAPH_KERNEL_NODE_PORT_DEFAULT = ccuda.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT
+CU_GRAPH_KERNEL_NODE_PORT_DEFAULT = cydriver.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT

 #: This port activates when all blocks of the kernel have performed
 #: cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be
 #: used with edge type :py:obj:`~.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC`.
 #: See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT`.
-CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC = ccuda.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC
+CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC = cydriver.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC

 #: This port activates when all blocks of the kernel have begun execution.
 #: See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT`.
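These hunks change only the cimport target (ccuda becomes cydriver); the public
constant names survive the move to cuda.bindings.driver unchanged. A minimal
usage sketch of the new import path follows; the legacy `from cuda import cuda`
spelling is an assumption here (this patch is taken to keep it importable as a
compatibility shim), not something shown in these hunks:

    # Hypothetical sketch of the relocated driver module (12.6.1 layout).
    from cuda.bindings import driver

    err, = driver.cuInit(0)            # functions return (CUresult, ...) tuples
    print(driver.CUDA_VERSION)         # re-exported from cydriver.CUDA_VERSION
    print(int(driver.CU_STREAM_PER_THREAD))  # per-thread default stream handle

    # Assumed backward-compatible spelling, kept by the shim layer (not shown here):
    from cuda import cuda as legacy
    assert legacy.CUDA_VERSION == driver.CUDA_VERSION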
-CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER = ccuda.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER +CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER = cydriver.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER -CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW = ccuda.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW +CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW = cydriver.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW -CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE = ccuda.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE +CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE = cydriver.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE -CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION = ccuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION +CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION = cydriver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION -CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = ccuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE +CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = cydriver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE -CU_KERNEL_NODE_ATTRIBUTE_PRIORITY = ccuda.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY +CU_KERNEL_NODE_ATTRIBUTE_PRIORITY = cydriver.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY -CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = ccuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = cydriver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP -CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN = ccuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN +CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN = cydriver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN -CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = ccuda.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE +CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = cydriver.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE -CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = ccuda.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT +CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = cydriver.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT -CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW = ccuda.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW +CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW = cydriver.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW -CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY = ccuda.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY +CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY = cydriver.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY -CU_STREAM_ATTRIBUTE_PRIORITY = ccuda.CU_STREAM_ATTRIBUTE_PRIORITY +CU_STREAM_ATTRIBUTE_PRIORITY = cydriver.CU_STREAM_ATTRIBUTE_PRIORITY -CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = ccuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = cydriver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP -CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN = ccuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN +CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN = cydriver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN #: If set, host memory is portable between CUDA contexts. Flag for #: :py:obj:`~.cuMemHostAlloc()` -CU_MEMHOSTALLOC_PORTABLE = ccuda.CU_MEMHOSTALLOC_PORTABLE +CU_MEMHOSTALLOC_PORTABLE = cydriver.CU_MEMHOSTALLOC_PORTABLE #: If set, host memory is mapped into CUDA address space and #: :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host #: pointer. 
Flag for :py:obj:`~.cuMemHostAlloc()` -CU_MEMHOSTALLOC_DEVICEMAP = ccuda.CU_MEMHOSTALLOC_DEVICEMAP +CU_MEMHOSTALLOC_DEVICEMAP = cydriver.CU_MEMHOSTALLOC_DEVICEMAP #: If set, host memory is allocated as write-combined - fast to write, #: faster to DMA, slow to read except via SSE4 streaming load instruction #: (MOVNTDQA). Flag for :py:obj:`~.cuMemHostAlloc()` -CU_MEMHOSTALLOC_WRITECOMBINED = ccuda.CU_MEMHOSTALLOC_WRITECOMBINED +CU_MEMHOSTALLOC_WRITECOMBINED = cydriver.CU_MEMHOSTALLOC_WRITECOMBINED #: If set, host memory is portable between CUDA contexts. Flag for #: :py:obj:`~.cuMemHostRegister()` -CU_MEMHOSTREGISTER_PORTABLE = ccuda.CU_MEMHOSTREGISTER_PORTABLE +CU_MEMHOSTREGISTER_PORTABLE = cydriver.CU_MEMHOSTREGISTER_PORTABLE #: If set, host memory is mapped into CUDA address space and #: :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host #: pointer. Flag for :py:obj:`~.cuMemHostRegister()` -CU_MEMHOSTREGISTER_DEVICEMAP = ccuda.CU_MEMHOSTREGISTER_DEVICEMAP +CU_MEMHOSTREGISTER_DEVICEMAP = cydriver.CU_MEMHOSTREGISTER_DEVICEMAP #: If set, the passed memory pointer is treated as pointing to some memory- #: mapped I/O space, e.g. belonging to a third-party PCIe device. On @@ -142,7 +142,7 @@ CU_MEMHOSTREGISTER_DEVICEMAP = ccuda.CU_MEMHOSTREGISTER_DEVICEMAP #: kernel versions. On all other platforms, it is not supported and #: :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` is returned. Flag for #: :py:obj:`~.cuMemHostRegister()` -CU_MEMHOSTREGISTER_IOMEMORY = ccuda.CU_MEMHOSTREGISTER_IOMEMORY +CU_MEMHOSTREGISTER_IOMEMORY = cydriver.CU_MEMHOSTREGISTER_IOMEMORY #: If set, the passed memory pointer is treated as pointing to memory that #: is considered read-only by the device. On platforms without @@ -154,17 +154,17 @@ CU_MEMHOSTREGISTER_IOMEMORY = ccuda.CU_MEMHOSTREGISTER_IOMEMORY #: this flag with a current context associated with a device that does not #: have this attribute set will cause :py:obj:`~.cuMemHostRegister` to #: error with :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED`. -CU_MEMHOSTREGISTER_READ_ONLY = ccuda.CU_MEMHOSTREGISTER_READ_ONLY +CU_MEMHOSTREGISTER_READ_ONLY = cydriver.CU_MEMHOSTREGISTER_READ_ONLY #: Indicates that the layered sparse CUDA array or CUDA mipmapped array has #: a single mip tail region for all layers -CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL = ccuda.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL +CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL = cydriver.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL #: Size of tensor map descriptor -CU_TENSOR_MAP_NUM_QWORDS = ccuda.CU_TENSOR_MAP_NUM_QWORDS +CU_TENSOR_MAP_NUM_QWORDS = cydriver.CU_TENSOR_MAP_NUM_QWORDS #: Indicates that the external memory object is a dedicated resource -CUDA_EXTERNAL_MEMORY_DEDICATED = ccuda.CUDA_EXTERNAL_MEMORY_DEDICATED +CUDA_EXTERNAL_MEMORY_DEDICATED = cydriver.CUDA_EXTERNAL_MEMORY_DEDICATED #: When the `flags` parameter of #: :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS` contains this flag, it @@ -174,7 +174,7 @@ CUDA_EXTERNAL_MEMORY_DEDICATED = ccuda.CUDA_EXTERNAL_MEMORY_DEDICATED #: :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are #: performed by default to ensure data coherency with other importers of #: the same NvSciBuf memory objects. 
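Because the host-allocation flags above are exported as plain integers, they
compose with `|` exactly as in the C API. A hedged sketch of a pinned,
device-mapped allocation through the relocated module (error checking elided;
assumes device 0 is present and the 12.6.1 module layout):

    from cuda.bindings import driver

    err, = driver.cuInit(0)
    err, dev = driver.cuDeviceGet(0)
    err, ctx = driver.cuCtxCreate(0, dev)

    # Portable + device-mapped pinned host memory, per the flag docs above.
    flags = driver.CU_MEMHOSTALLOC_PORTABLE | driver.CU_MEMHOSTALLOC_DEVICEMAP
    err, host_ptr = driver.cuMemHostAlloc(1 << 20, flags)

    # DEVICEMAP is what makes this call legal on the returned pointer.
    err, dev_ptr = driver.cuMemHostGetDevicePointer(host_ptr, 0)

    err, = driver.cuMemFreeHost(host_ptr)
    err, = driver.cuCtxDestroy(ctx)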
-CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC = ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC +CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC = cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC #: When the `flags` parameter of #: :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS` contains this flag, it @@ -184,112 +184,112 @@ CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC = ccuda.CUDA_EXTERNAL_SEMAP #: :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are #: performed by default to ensure data coherency with other importers of #: the same NvSciBuf memory objects. -CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC = ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC +CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC = cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC #: When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to #: this, it indicates that application needs signaler specific #: NvSciSyncAttr to be filled by #: :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. -CUDA_NVSCISYNC_ATTR_SIGNAL = ccuda.CUDA_NVSCISYNC_ATTR_SIGNAL +CUDA_NVSCISYNC_ATTR_SIGNAL = cydriver.CUDA_NVSCISYNC_ATTR_SIGNAL #: When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to #: this, it indicates that application needs waiter specific NvSciSyncAttr #: to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. -CUDA_NVSCISYNC_ATTR_WAIT = ccuda.CUDA_NVSCISYNC_ATTR_WAIT +CUDA_NVSCISYNC_ATTR_WAIT = cydriver.CUDA_NVSCISYNC_ATTR_WAIT #: This flag if set indicates that the memory will be used as a tile pool. -CU_MEM_CREATE_USAGE_TILE_POOL = ccuda.CU_MEM_CREATE_USAGE_TILE_POOL +CU_MEM_CREATE_USAGE_TILE_POOL = cydriver.CU_MEM_CREATE_USAGE_TILE_POOL #: If set, each kernel launched as part of #: :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` only waits for prior #: work in the stream corresponding to that GPU to complete before the #: kernel begins execution. -CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = ccuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC +CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = cydriver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC #: If set, any subsequent work pushed in a stream that participated in a #: call to :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` will only wait #: for the kernel launched on the GPU corresponding to that stream to #: complete before it begins execution. -CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = ccuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC +CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = cydriver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC #: If set, the CUDA array is a collection of layers, where each layer is #: either a 1D or a 2D array and the Depth member of #: CUDA_ARRAY3D_DESCRIPTOR specifies the number of layers, not the depth of #: a 3D array. -CUDA_ARRAY3D_LAYERED = ccuda.CUDA_ARRAY3D_LAYERED +CUDA_ARRAY3D_LAYERED = cydriver.CUDA_ARRAY3D_LAYERED #: Deprecated, use CUDA_ARRAY3D_LAYERED -CUDA_ARRAY3D_2DARRAY = ccuda.CUDA_ARRAY3D_2DARRAY +CUDA_ARRAY3D_2DARRAY = cydriver.CUDA_ARRAY3D_2DARRAY #: This flag must be set in order to bind a surface reference to the CUDA #: array -CUDA_ARRAY3D_SURFACE_LDST = ccuda.CUDA_ARRAY3D_SURFACE_LDST +CUDA_ARRAY3D_SURFACE_LDST = cydriver.CUDA_ARRAY3D_SURFACE_LDST #: If set, the CUDA array is a collection of six 2D arrays, representing #: faces of a cube. 
The width of such a CUDA array must be equal to its #: height, and Depth must be six. If :py:obj:`~.CUDA_ARRAY3D_LAYERED` flag #: is also set, then the CUDA array is a collection of cubemaps and Depth #: must be a multiple of six. -CUDA_ARRAY3D_CUBEMAP = ccuda.CUDA_ARRAY3D_CUBEMAP +CUDA_ARRAY3D_CUBEMAP = cydriver.CUDA_ARRAY3D_CUBEMAP #: This flag must be set in order to perform texture gather operations on a #: CUDA array. -CUDA_ARRAY3D_TEXTURE_GATHER = ccuda.CUDA_ARRAY3D_TEXTURE_GATHER +CUDA_ARRAY3D_TEXTURE_GATHER = cydriver.CUDA_ARRAY3D_TEXTURE_GATHER #: This flag if set indicates that the CUDA array is a DEPTH_TEXTURE. -CUDA_ARRAY3D_DEPTH_TEXTURE = ccuda.CUDA_ARRAY3D_DEPTH_TEXTURE +CUDA_ARRAY3D_DEPTH_TEXTURE = cydriver.CUDA_ARRAY3D_DEPTH_TEXTURE #: This flag indicates that the CUDA array may be bound as a color target #: in an external graphics API -CUDA_ARRAY3D_COLOR_ATTACHMENT = ccuda.CUDA_ARRAY3D_COLOR_ATTACHMENT +CUDA_ARRAY3D_COLOR_ATTACHMENT = cydriver.CUDA_ARRAY3D_COLOR_ATTACHMENT #: This flag if set indicates that the CUDA array or CUDA mipmapped array #: is a sparse CUDA array or CUDA mipmapped array respectively -CUDA_ARRAY3D_SPARSE = ccuda.CUDA_ARRAY3D_SPARSE +CUDA_ARRAY3D_SPARSE = cydriver.CUDA_ARRAY3D_SPARSE #: This flag if set indicates that the CUDA array or CUDA mipmapped array #: will allow deferred memory mapping -CUDA_ARRAY3D_DEFERRED_MAPPING = ccuda.CUDA_ARRAY3D_DEFERRED_MAPPING +CUDA_ARRAY3D_DEFERRED_MAPPING = cydriver.CUDA_ARRAY3D_DEFERRED_MAPPING #: This flag indicates that the CUDA array will be used for hardware #: accelerated video encode/decode operations. -CUDA_ARRAY3D_VIDEO_ENCODE_DECODE = ccuda.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE +CUDA_ARRAY3D_VIDEO_ENCODE_DECODE = cydriver.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE #: Override the texref format with a format inferred from the array. Flag #: for :py:obj:`~.cuTexRefSetArray()` -CU_TRSA_OVERRIDE_FORMAT = ccuda.CU_TRSA_OVERRIDE_FORMAT +CU_TRSA_OVERRIDE_FORMAT = cydriver.CU_TRSA_OVERRIDE_FORMAT #: Read the texture as integers rather than promoting the values to floats #: in the range [0,1]. Flag for :py:obj:`~.cuTexRefSetFlags()` and #: :py:obj:`~.cuTexObjectCreate()` -CU_TRSF_READ_AS_INTEGER = ccuda.CU_TRSF_READ_AS_INTEGER +CU_TRSF_READ_AS_INTEGER = cydriver.CU_TRSF_READ_AS_INTEGER #: Use normalized texture coordinates in the range [0,1) instead of #: [0,dim). Flag for :py:obj:`~.cuTexRefSetFlags()` and #: :py:obj:`~.cuTexObjectCreate()` -CU_TRSF_NORMALIZED_COORDINATES = ccuda.CU_TRSF_NORMALIZED_COORDINATES +CU_TRSF_NORMALIZED_COORDINATES = cydriver.CU_TRSF_NORMALIZED_COORDINATES #: Perform sRGB->linear conversion during texture read. Flag for #: :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` -CU_TRSF_SRGB = ccuda.CU_TRSF_SRGB +CU_TRSF_SRGB = cydriver.CU_TRSF_SRGB #: Disable any trilinear filtering optimizations. Flag for #: :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` -CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION = ccuda.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION +CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION = cydriver.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION #: Enable seamless cube map filtering. 
Flag for #: :py:obj:`~.cuTexObjectCreate()` -CU_TRSF_SEAMLESS_CUBEMAP = ccuda.CU_TRSF_SEAMLESS_CUBEMAP +CU_TRSF_SEAMLESS_CUBEMAP = cydriver.CU_TRSF_SEAMLESS_CUBEMAP #: C++ compile time constant for CU_LAUNCH_PARAM_END -CU_LAUNCH_PARAM_END_AS_INT = ccuda.CU_LAUNCH_PARAM_END_AS_INT +CU_LAUNCH_PARAM_END_AS_INT = cydriver.CU_LAUNCH_PARAM_END_AS_INT #: End of array terminator for the `extra` parameter to #: :py:obj:`~.cuLaunchKernel` -CU_LAUNCH_PARAM_END = ccuda.CU_LAUNCH_PARAM_END +CU_LAUNCH_PARAM_END = cydriver.CU_LAUNCH_PARAM_END #: C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER -CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT = ccuda.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT +CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT = cydriver.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT #: Indicator that the next value in the `extra` parameter to #: :py:obj:`~.cuLaunchKernel` will be a pointer to a buffer containing all @@ -298,10 +298,10 @@ CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT = ccuda.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_ #: If :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not also specified in the #: `extra` array, then :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` will have #: no effect. -CU_LAUNCH_PARAM_BUFFER_POINTER = ccuda.CU_LAUNCH_PARAM_BUFFER_POINTER +CU_LAUNCH_PARAM_BUFFER_POINTER = cydriver.CU_LAUNCH_PARAM_BUFFER_POINTER #: C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE -CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT = ccuda.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT +CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT = cydriver.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT #: Indicator that the next value in the `extra` parameter to #: :py:obj:`~.cuLaunchKernel` will be a pointer to a size_t which contains @@ -310,28 +310,28 @@ CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT = ccuda.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT #: :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` also be specified in the #: `extra` array if the value associated with #: :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not zero. -CU_LAUNCH_PARAM_BUFFER_SIZE = ccuda.CU_LAUNCH_PARAM_BUFFER_SIZE +CU_LAUNCH_PARAM_BUFFER_SIZE = cydriver.CU_LAUNCH_PARAM_BUFFER_SIZE #: For texture references loaded into the module, use default texunit from #: texture reference. -CU_PARAM_TR_DEFAULT = ccuda.CU_PARAM_TR_DEFAULT +CU_PARAM_TR_DEFAULT = cydriver.CU_PARAM_TR_DEFAULT #: Device that represents the CPU -CU_DEVICE_CPU = ccuda.CU_DEVICE_CPU +CU_DEVICE_CPU = cydriver.CU_DEVICE_CPU #: Device that represents an invalid device -CU_DEVICE_INVALID = ccuda.CU_DEVICE_INVALID +CU_DEVICE_INVALID = cydriver.CU_DEVICE_INVALID -RESOURCE_ABI_VERSION = ccuda.RESOURCE_ABI_VERSION +RESOURCE_ABI_VERSION = cydriver.RESOURCE_ABI_VERSION -RESOURCE_ABI_EXTERNAL_BYTES = ccuda.RESOURCE_ABI_EXTERNAL_BYTES +RESOURCE_ABI_EXTERNAL_BYTES = cydriver.RESOURCE_ABI_EXTERNAL_BYTES #: Maximum number of planes per frame -MAX_PLANES = ccuda.MAX_PLANES +MAX_PLANES = cydriver.MAX_PLANES #: Indicates that timeout for :py:obj:`~.cuEGLStreamConsumerAcquireFrame` #: is infinite. 
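The CU_TRSF_* bits above are consumed through the `flags` field of
CUDA_TEXTURE_DESC when building a texture object. A fragment sketching that
wiring under stated assumptions (the resource descriptor `res_desc` is assumed
to be built elsewhere; this is not a complete program):

    from cuda.bindings import driver

    tex_desc = driver.CUDA_TEXTURE_DESC()
    tex_desc.filterMode = driver.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR
    # Normalized [0,1) coordinates plus sRGB->linear conversion on read.
    tex_desc.flags = driver.CU_TRSF_NORMALIZED_COORDINATES | driver.CU_TRSF_SRGB
    # With a CUDA_RESOURCE_DESC res_desc prepared elsewhere (assumption):
    #   err, tex = driver.cuTexObjectCreate(res_desc, tex_desc, None)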
-CUDA_EGL_INFINITE_TIMEOUT = ccuda.CUDA_EGL_INFINITE_TIMEOUT +CUDA_EGL_INFINITE_TIMEOUT = cydriver.CUDA_EGL_INFINITE_TIMEOUT {{if 'CUipcMem_flags_enum' in found_types}} @@ -342,7 +342,7 @@ class CUipcMem_flags(IntEnum): {{if 'CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS' in found_values}} #: Automatically enable peer access between remote devices as needed - CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = ccuda.CUipcMem_flags_enum.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS{{endif}} + CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = cydriver.CUipcMem_flags_enum.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS{{endif}} {{endif}} {{if 'CUmemAttach_flags_enum' in found_types}} @@ -353,16 +353,16 @@ class CUmemAttach_flags(IntEnum): {{if 'CU_MEM_ATTACH_GLOBAL' in found_values}} #: Memory can be accessed by any stream on any device - CU_MEM_ATTACH_GLOBAL = ccuda.CUmemAttach_flags_enum.CU_MEM_ATTACH_GLOBAL{{endif}} + CU_MEM_ATTACH_GLOBAL = cydriver.CUmemAttach_flags_enum.CU_MEM_ATTACH_GLOBAL{{endif}} {{if 'CU_MEM_ATTACH_HOST' in found_values}} #: Memory cannot be accessed by any stream on any device - CU_MEM_ATTACH_HOST = ccuda.CUmemAttach_flags_enum.CU_MEM_ATTACH_HOST{{endif}} + CU_MEM_ATTACH_HOST = cydriver.CUmemAttach_flags_enum.CU_MEM_ATTACH_HOST{{endif}} {{if 'CU_MEM_ATTACH_SINGLE' in found_values}} #: Memory can only be accessed by a single stream on the associated #: device - CU_MEM_ATTACH_SINGLE = ccuda.CUmemAttach_flags_enum.CU_MEM_ATTACH_SINGLE{{endif}} + CU_MEM_ATTACH_SINGLE = cydriver.CUmemAttach_flags_enum.CU_MEM_ATTACH_SINGLE{{endif}} {{endif}} {{if 'CUctx_flags_enum' in found_types}} @@ -373,48 +373,48 @@ class CUctx_flags(IntEnum): {{if 'CU_CTX_SCHED_AUTO' in found_values}} #: Automatic scheduling - CU_CTX_SCHED_AUTO = ccuda.CUctx_flags_enum.CU_CTX_SCHED_AUTO{{endif}} + CU_CTX_SCHED_AUTO = cydriver.CUctx_flags_enum.CU_CTX_SCHED_AUTO{{endif}} {{if 'CU_CTX_SCHED_SPIN' in found_values}} #: Set spin as default scheduling - CU_CTX_SCHED_SPIN = ccuda.CUctx_flags_enum.CU_CTX_SCHED_SPIN{{endif}} + CU_CTX_SCHED_SPIN = cydriver.CUctx_flags_enum.CU_CTX_SCHED_SPIN{{endif}} {{if 'CU_CTX_SCHED_YIELD' in found_values}} #: Set yield as default scheduling - CU_CTX_SCHED_YIELD = ccuda.CUctx_flags_enum.CU_CTX_SCHED_YIELD{{endif}} + CU_CTX_SCHED_YIELD = cydriver.CUctx_flags_enum.CU_CTX_SCHED_YIELD{{endif}} {{if 'CU_CTX_SCHED_BLOCKING_SYNC' in found_values}} #: Set blocking synchronization as default scheduling - CU_CTX_SCHED_BLOCKING_SYNC = ccuda.CUctx_flags_enum.CU_CTX_SCHED_BLOCKING_SYNC{{endif}} + CU_CTX_SCHED_BLOCKING_SYNC = cydriver.CUctx_flags_enum.CU_CTX_SCHED_BLOCKING_SYNC{{endif}} {{if 'CU_CTX_BLOCKING_SYNC' in found_values}} #: Set blocking synchronization as default scheduling [Deprecated] - CU_CTX_BLOCKING_SYNC = ccuda.CUctx_flags_enum.CU_CTX_BLOCKING_SYNC{{endif}} + CU_CTX_BLOCKING_SYNC = cydriver.CUctx_flags_enum.CU_CTX_BLOCKING_SYNC{{endif}} {{if 'CU_CTX_SCHED_MASK' in found_values}} - CU_CTX_SCHED_MASK = ccuda.CUctx_flags_enum.CU_CTX_SCHED_MASK{{endif}} + CU_CTX_SCHED_MASK = cydriver.CUctx_flags_enum.CU_CTX_SCHED_MASK{{endif}} {{if 'CU_CTX_MAP_HOST' in found_values}} #: [Deprecated] - CU_CTX_MAP_HOST = ccuda.CUctx_flags_enum.CU_CTX_MAP_HOST{{endif}} + CU_CTX_MAP_HOST = cydriver.CUctx_flags_enum.CU_CTX_MAP_HOST{{endif}} {{if 'CU_CTX_LMEM_RESIZE_TO_MAX' in found_values}} #: Keep local memory allocation after launch - CU_CTX_LMEM_RESIZE_TO_MAX = ccuda.CUctx_flags_enum.CU_CTX_LMEM_RESIZE_TO_MAX{{endif}} + CU_CTX_LMEM_RESIZE_TO_MAX = cydriver.CUctx_flags_enum.CU_CTX_LMEM_RESIZE_TO_MAX{{endif}} {{if 'CU_CTX_COREDUMP_ENABLE' in found_values}} #: 
Trigger coredumps from exceptions in this context - CU_CTX_COREDUMP_ENABLE = ccuda.CUctx_flags_enum.CU_CTX_COREDUMP_ENABLE{{endif}} + CU_CTX_COREDUMP_ENABLE = cydriver.CUctx_flags_enum.CU_CTX_COREDUMP_ENABLE{{endif}} {{if 'CU_CTX_USER_COREDUMP_ENABLE' in found_values}} #: Enable user pipe to trigger coredumps in this context - CU_CTX_USER_COREDUMP_ENABLE = ccuda.CUctx_flags_enum.CU_CTX_USER_COREDUMP_ENABLE{{endif}} + CU_CTX_USER_COREDUMP_ENABLE = cydriver.CUctx_flags_enum.CU_CTX_USER_COREDUMP_ENABLE{{endif}} {{if 'CU_CTX_SYNC_MEMOPS' in found_values}} #: Ensure synchronous memory operations on this context will #: synchronize - CU_CTX_SYNC_MEMOPS = ccuda.CUctx_flags_enum.CU_CTX_SYNC_MEMOPS{{endif}} + CU_CTX_SYNC_MEMOPS = cydriver.CUctx_flags_enum.CU_CTX_SYNC_MEMOPS{{endif}} {{if 'CU_CTX_FLAGS_MASK' in found_values}} - CU_CTX_FLAGS_MASK = ccuda.CUctx_flags_enum.CU_CTX_FLAGS_MASK{{endif}} + CU_CTX_FLAGS_MASK = cydriver.CUctx_flags_enum.CU_CTX_FLAGS_MASK{{endif}} {{endif}} {{if 'CUevent_sched_flags_enum' in found_types}} @@ -425,19 +425,19 @@ class CUevent_sched_flags(IntEnum): {{if 'CU_EVENT_SCHED_AUTO' in found_values}} #: Automatic scheduling - CU_EVENT_SCHED_AUTO = ccuda.CUevent_sched_flags_enum.CU_EVENT_SCHED_AUTO{{endif}} + CU_EVENT_SCHED_AUTO = cydriver.CUevent_sched_flags_enum.CU_EVENT_SCHED_AUTO{{endif}} {{if 'CU_EVENT_SCHED_SPIN' in found_values}} #: Set spin as default scheduling - CU_EVENT_SCHED_SPIN = ccuda.CUevent_sched_flags_enum.CU_EVENT_SCHED_SPIN{{endif}} + CU_EVENT_SCHED_SPIN = cydriver.CUevent_sched_flags_enum.CU_EVENT_SCHED_SPIN{{endif}} {{if 'CU_EVENT_SCHED_YIELD' in found_values}} #: Set yield as default scheduling - CU_EVENT_SCHED_YIELD = ccuda.CUevent_sched_flags_enum.CU_EVENT_SCHED_YIELD{{endif}} + CU_EVENT_SCHED_YIELD = cydriver.CUevent_sched_flags_enum.CU_EVENT_SCHED_YIELD{{endif}} {{if 'CU_EVENT_SCHED_BLOCKING_SYNC' in found_values}} #: Set blocking synchronization as default scheduling - CU_EVENT_SCHED_BLOCKING_SYNC = ccuda.CUevent_sched_flags_enum.CU_EVENT_SCHED_BLOCKING_SYNC{{endif}} + CU_EVENT_SCHED_BLOCKING_SYNC = cydriver.CUevent_sched_flags_enum.CU_EVENT_SCHED_BLOCKING_SYNC{{endif}} {{endif}} {{if 'cl_event_flags_enum' in found_types}} @@ -448,19 +448,19 @@ class cl_event_flags(IntEnum): {{if 'NVCL_EVENT_SCHED_AUTO' in found_values}} #: Automatic scheduling - NVCL_EVENT_SCHED_AUTO = ccuda.cl_event_flags_enum.NVCL_EVENT_SCHED_AUTO{{endif}} + NVCL_EVENT_SCHED_AUTO = cydriver.cl_event_flags_enum.NVCL_EVENT_SCHED_AUTO{{endif}} {{if 'NVCL_EVENT_SCHED_SPIN' in found_values}} #: Set spin as default scheduling - NVCL_EVENT_SCHED_SPIN = ccuda.cl_event_flags_enum.NVCL_EVENT_SCHED_SPIN{{endif}} + NVCL_EVENT_SCHED_SPIN = cydriver.cl_event_flags_enum.NVCL_EVENT_SCHED_SPIN{{endif}} {{if 'NVCL_EVENT_SCHED_YIELD' in found_values}} #: Set yield as default scheduling - NVCL_EVENT_SCHED_YIELD = ccuda.cl_event_flags_enum.NVCL_EVENT_SCHED_YIELD{{endif}} + NVCL_EVENT_SCHED_YIELD = cydriver.cl_event_flags_enum.NVCL_EVENT_SCHED_YIELD{{endif}} {{if 'NVCL_EVENT_SCHED_BLOCKING_SYNC' in found_values}} #: Set blocking synchronization as default scheduling - NVCL_EVENT_SCHED_BLOCKING_SYNC = ccuda.cl_event_flags_enum.NVCL_EVENT_SCHED_BLOCKING_SYNC{{endif}} + NVCL_EVENT_SCHED_BLOCKING_SYNC = cydriver.cl_event_flags_enum.NVCL_EVENT_SCHED_BLOCKING_SYNC{{endif}} {{endif}} {{if 'cl_context_flags_enum' in found_types}} @@ -471,19 +471,19 @@ class cl_context_flags(IntEnum): {{if 'NVCL_CTX_SCHED_AUTO' in found_values}} #: Automatic scheduling - NVCL_CTX_SCHED_AUTO = 
ccuda.cl_context_flags_enum.NVCL_CTX_SCHED_AUTO{{endif}} + NVCL_CTX_SCHED_AUTO = cydriver.cl_context_flags_enum.NVCL_CTX_SCHED_AUTO{{endif}} {{if 'NVCL_CTX_SCHED_SPIN' in found_values}} #: Set spin as default scheduling - NVCL_CTX_SCHED_SPIN = ccuda.cl_context_flags_enum.NVCL_CTX_SCHED_SPIN{{endif}} + NVCL_CTX_SCHED_SPIN = cydriver.cl_context_flags_enum.NVCL_CTX_SCHED_SPIN{{endif}} {{if 'NVCL_CTX_SCHED_YIELD' in found_values}} #: Set yield as default scheduling - NVCL_CTX_SCHED_YIELD = ccuda.cl_context_flags_enum.NVCL_CTX_SCHED_YIELD{{endif}} + NVCL_CTX_SCHED_YIELD = cydriver.cl_context_flags_enum.NVCL_CTX_SCHED_YIELD{{endif}} {{if 'NVCL_CTX_SCHED_BLOCKING_SYNC' in found_values}} #: Set blocking synchronization as default scheduling - NVCL_CTX_SCHED_BLOCKING_SYNC = ccuda.cl_context_flags_enum.NVCL_CTX_SCHED_BLOCKING_SYNC{{endif}} + NVCL_CTX_SCHED_BLOCKING_SYNC = cydriver.cl_context_flags_enum.NVCL_CTX_SCHED_BLOCKING_SYNC{{endif}} {{endif}} {{if 'CUstream_flags_enum' in found_types}} @@ -494,11 +494,11 @@ class CUstream_flags(IntEnum): {{if 'CU_STREAM_DEFAULT' in found_values}} #: Default stream flag - CU_STREAM_DEFAULT = ccuda.CUstream_flags_enum.CU_STREAM_DEFAULT{{endif}} + CU_STREAM_DEFAULT = cydriver.CUstream_flags_enum.CU_STREAM_DEFAULT{{endif}} {{if 'CU_STREAM_NON_BLOCKING' in found_values}} #: Stream does not synchronize with stream 0 (the NULL stream) - CU_STREAM_NON_BLOCKING = ccuda.CUstream_flags_enum.CU_STREAM_NON_BLOCKING{{endif}} + CU_STREAM_NON_BLOCKING = cydriver.CUstream_flags_enum.CU_STREAM_NON_BLOCKING{{endif}} {{endif}} {{if 'CUevent_flags_enum' in found_types}} @@ -509,20 +509,20 @@ class CUevent_flags(IntEnum): {{if 'CU_EVENT_DEFAULT' in found_values}} #: Default event flag - CU_EVENT_DEFAULT = ccuda.CUevent_flags_enum.CU_EVENT_DEFAULT{{endif}} + CU_EVENT_DEFAULT = cydriver.CUevent_flags_enum.CU_EVENT_DEFAULT{{endif}} {{if 'CU_EVENT_BLOCKING_SYNC' in found_values}} #: Event uses blocking synchronization - CU_EVENT_BLOCKING_SYNC = ccuda.CUevent_flags_enum.CU_EVENT_BLOCKING_SYNC{{endif}} + CU_EVENT_BLOCKING_SYNC = cydriver.CUevent_flags_enum.CU_EVENT_BLOCKING_SYNC{{endif}} {{if 'CU_EVENT_DISABLE_TIMING' in found_values}} #: Event will not record timing data - CU_EVENT_DISABLE_TIMING = ccuda.CUevent_flags_enum.CU_EVENT_DISABLE_TIMING{{endif}} + CU_EVENT_DISABLE_TIMING = cydriver.CUevent_flags_enum.CU_EVENT_DISABLE_TIMING{{endif}} {{if 'CU_EVENT_INTERPROCESS' in found_values}} #: Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must #: be set - CU_EVENT_INTERPROCESS = ccuda.CUevent_flags_enum.CU_EVENT_INTERPROCESS{{endif}} + CU_EVENT_INTERPROCESS = cydriver.CUevent_flags_enum.CU_EVENT_INTERPROCESS{{endif}} {{endif}} {{if 'CUevent_record_flags_enum' in found_types}} @@ -533,13 +533,13 @@ class CUevent_record_flags(IntEnum): {{if 'CU_EVENT_RECORD_DEFAULT' in found_values}} #: Default event record flag - CU_EVENT_RECORD_DEFAULT = ccuda.CUevent_record_flags_enum.CU_EVENT_RECORD_DEFAULT{{endif}} + CU_EVENT_RECORD_DEFAULT = cydriver.CUevent_record_flags_enum.CU_EVENT_RECORD_DEFAULT{{endif}} {{if 'CU_EVENT_RECORD_EXTERNAL' in found_values}} #: When using stream capture, create an event record node instead of #: the default behavior. This flag is invalid when used outside of #: capture. 
- CU_EVENT_RECORD_EXTERNAL = ccuda.CUevent_record_flags_enum.CU_EVENT_RECORD_EXTERNAL{{endif}} + CU_EVENT_RECORD_EXTERNAL = cydriver.CUevent_record_flags_enum.CU_EVENT_RECORD_EXTERNAL{{endif}} {{endif}} {{if 'CUevent_wait_flags_enum' in found_types}} @@ -550,12 +550,12 @@ class CUevent_wait_flags(IntEnum): {{if 'CU_EVENT_WAIT_DEFAULT' in found_values}} #: Default event wait flag - CU_EVENT_WAIT_DEFAULT = ccuda.CUevent_wait_flags_enum.CU_EVENT_WAIT_DEFAULT{{endif}} + CU_EVENT_WAIT_DEFAULT = cydriver.CUevent_wait_flags_enum.CU_EVENT_WAIT_DEFAULT{{endif}} {{if 'CU_EVENT_WAIT_EXTERNAL' in found_values}} #: When using stream capture, create an event wait node instead of the #: default behavior. This flag is invalid when used outside of capture. - CU_EVENT_WAIT_EXTERNAL = ccuda.CUevent_wait_flags_enum.CU_EVENT_WAIT_EXTERNAL{{endif}} + CU_EVENT_WAIT_EXTERNAL = cydriver.CUevent_wait_flags_enum.CU_EVENT_WAIT_EXTERNAL{{endif}} {{endif}} {{if 'CUstreamWaitValue_flags_enum' in found_types}} @@ -569,21 +569,21 @@ class CUstreamWaitValue_flags(IntEnum): #: Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit #: values). Note this is a cyclic comparison which ignores wraparound. #: (Default behavior.) - CU_STREAM_WAIT_VALUE_GEQ = ccuda.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_GEQ{{endif}} + CU_STREAM_WAIT_VALUE_GEQ = cydriver.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_GEQ{{endif}} {{if 'CU_STREAM_WAIT_VALUE_EQ' in found_values}} #: Wait until *addr == value. - CU_STREAM_WAIT_VALUE_EQ = ccuda.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_EQ{{endif}} + CU_STREAM_WAIT_VALUE_EQ = cydriver.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_EQ{{endif}} {{if 'CU_STREAM_WAIT_VALUE_AND' in found_values}} #: Wait until (*addr & value) != 0. - CU_STREAM_WAIT_VALUE_AND = ccuda.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_AND{{endif}} + CU_STREAM_WAIT_VALUE_AND = cydriver.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_AND{{endif}} {{if 'CU_STREAM_WAIT_VALUE_NOR' in found_values}} #: Wait until ~(*addr | value) != 0. Support for this operation can be #: queried with :py:obj:`~.cuDeviceGetAttribute()` and #: :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR`. - CU_STREAM_WAIT_VALUE_NOR = ccuda.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_NOR{{endif}} + CU_STREAM_WAIT_VALUE_NOR = cydriver.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_NOR{{endif}} {{if 'CU_STREAM_WAIT_VALUE_FLUSH' in found_values}} #: Follow the wait operation with a flush of outstanding remote writes. @@ -596,7 +596,7 @@ class CUstreamWaitValue_flags(IntEnum): #: work needs to observe the first write. Support for this operation is #: restricted to selected platforms and can be queried with #: :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES`. 
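The GEQ docstring's "cyclic comparison which ignores wraparound" is simply the
sign test of the 32-bit difference. A pure-Python restatement of the documented
wait predicates, for illustration only (these helpers are not part of the
bindings):

    MASK32 = 0xFFFFFFFF

    def wait_geq(mem, value):
        # (int32_t)(*addr - value) >= 0: the difference is reduced to signed
        # 32 bits, so a monotonically increasing counter that wraps past
        # 0xFFFFFFFF still satisfies the wait.
        return ((mem - value) & MASK32) < 0x80000000

    def wait_and(mem, value):
        return (mem & value) != 0               # CU_STREAM_WAIT_VALUE_AND

    def wait_nor(mem, value):
        return (~(mem | value) & MASK32) != 0   # CU_STREAM_WAIT_VALUE_NOR

    assert wait_geq(0x00000005, 0xFFFFFFF0)  # wrapped counter: diff is +21, not negative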
- CU_STREAM_WAIT_VALUE_FLUSH = ccuda.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_FLUSH{{endif}} + CU_STREAM_WAIT_VALUE_FLUSH = cydriver.CUstreamWaitValue_flags_enum.CU_STREAM_WAIT_VALUE_FLUSH{{endif}} {{endif}} {{if 'CUstreamWriteValue_flags_enum' in found_types}} @@ -607,7 +607,7 @@ class CUstreamWriteValue_flags(IntEnum): {{if 'CU_STREAM_WRITE_VALUE_DEFAULT' in found_values}} #: Default behavior - CU_STREAM_WRITE_VALUE_DEFAULT = ccuda.CUstreamWriteValue_flags_enum.CU_STREAM_WRITE_VALUE_DEFAULT{{endif}} + CU_STREAM_WRITE_VALUE_DEFAULT = cydriver.CUstreamWriteValue_flags_enum.CU_STREAM_WRITE_VALUE_DEFAULT{{endif}} {{if 'CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER' in found_values}} #: Permits the write to be reordered with writes which were issued @@ -616,7 +616,7 @@ class CUstreamWriteValue_flags(IntEnum): #: the write, which has similar semantics to __threadfence_system() but #: is scoped to the stream rather than a CUDA thread. This flag is not #: supported in the v2 API. - CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = ccuda.CUstreamWriteValue_flags_enum.CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER{{endif}} + CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = cydriver.CUstreamWriteValue_flags_enum.CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER{{endif}} {{endif}} {{if 'CUstreamBatchMemOpType_enum' in found_types}} @@ -627,28 +627,28 @@ class CUstreamBatchMemOpType(IntEnum): {{if 'CU_STREAM_MEM_OP_WAIT_VALUE_32' in found_values}} #: Represents a :py:obj:`~.cuStreamWaitValue32` operation - CU_STREAM_MEM_OP_WAIT_VALUE_32 = ccuda.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WAIT_VALUE_32{{endif}} + CU_STREAM_MEM_OP_WAIT_VALUE_32 = cydriver.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WAIT_VALUE_32{{endif}} {{if 'CU_STREAM_MEM_OP_WRITE_VALUE_32' in found_values}} #: Represents a :py:obj:`~.cuStreamWriteValue32` operation - CU_STREAM_MEM_OP_WRITE_VALUE_32 = ccuda.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WRITE_VALUE_32{{endif}} + CU_STREAM_MEM_OP_WRITE_VALUE_32 = cydriver.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WRITE_VALUE_32{{endif}} {{if 'CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES' in found_values}} #: This has the same effect as :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH`, #: but as a standalone operation. 
- CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = ccuda.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES{{endif}} + CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = cydriver.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES{{endif}} {{if 'CU_STREAM_MEM_OP_WAIT_VALUE_64' in found_values}} #: Represents a :py:obj:`~.cuStreamWaitValue64` operation - CU_STREAM_MEM_OP_WAIT_VALUE_64 = ccuda.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WAIT_VALUE_64{{endif}} + CU_STREAM_MEM_OP_WAIT_VALUE_64 = cydriver.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WAIT_VALUE_64{{endif}} {{if 'CU_STREAM_MEM_OP_WRITE_VALUE_64' in found_values}} #: Represents a :py:obj:`~.cuStreamWriteValue64` operation - CU_STREAM_MEM_OP_WRITE_VALUE_64 = ccuda.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WRITE_VALUE_64{{endif}} + CU_STREAM_MEM_OP_WRITE_VALUE_64 = cydriver.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_WRITE_VALUE_64{{endif}} {{if 'CU_STREAM_MEM_OP_BARRIER' in found_values}} #: Insert a memory barrier of the specified type - CU_STREAM_MEM_OP_BARRIER = ccuda.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_BARRIER{{endif}} + CU_STREAM_MEM_OP_BARRIER = cydriver.CUstreamBatchMemOpType_enum.CU_STREAM_MEM_OP_BARRIER{{endif}} {{endif}} {{if 'CUstreamMemoryBarrier_flags_enum' in found_types}} @@ -659,11 +659,11 @@ class CUstreamMemoryBarrier_flags(IntEnum): {{if 'CU_STREAM_MEMORY_BARRIER_TYPE_SYS' in found_values}} #: System-wide memory barrier. - CU_STREAM_MEMORY_BARRIER_TYPE_SYS = ccuda.CUstreamMemoryBarrier_flags_enum.CU_STREAM_MEMORY_BARRIER_TYPE_SYS{{endif}} + CU_STREAM_MEMORY_BARRIER_TYPE_SYS = cydriver.CUstreamMemoryBarrier_flags_enum.CU_STREAM_MEMORY_BARRIER_TYPE_SYS{{endif}} {{if 'CU_STREAM_MEMORY_BARRIER_TYPE_GPU' in found_values}} #: Limit memory barrier scope to the GPU. 
- CU_STREAM_MEMORY_BARRIER_TYPE_GPU = ccuda.CUstreamMemoryBarrier_flags_enum.CU_STREAM_MEMORY_BARRIER_TYPE_GPU{{endif}} + CU_STREAM_MEMORY_BARRIER_TYPE_GPU = cydriver.CUstreamMemoryBarrier_flags_enum.CU_STREAM_MEMORY_BARRIER_TYPE_GPU{{endif}} {{endif}} {{if 'CUoccupancy_flags_enum' in found_types}} @@ -674,12 +674,12 @@ class CUoccupancy_flags(IntEnum): {{if 'CU_OCCUPANCY_DEFAULT' in found_values}} #: Default behavior - CU_OCCUPANCY_DEFAULT = ccuda.CUoccupancy_flags_enum.CU_OCCUPANCY_DEFAULT{{endif}} + CU_OCCUPANCY_DEFAULT = cydriver.CUoccupancy_flags_enum.CU_OCCUPANCY_DEFAULT{{endif}} {{if 'CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE' in found_values}} #: Assume global caching is enabled and cannot be automatically turned #: off - CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = ccuda.CUoccupancy_flags_enum.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE{{endif}} + CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = cydriver.CUoccupancy_flags_enum.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE{{endif}} {{endif}} {{if 'CUstreamUpdateCaptureDependencies_flags_enum' in found_types}} @@ -690,11 +690,11 @@ class CUstreamUpdateCaptureDependencies_flags(IntEnum): {{if 'CU_STREAM_ADD_CAPTURE_DEPENDENCIES' in found_values}} #: Add new nodes to the dependency set - CU_STREAM_ADD_CAPTURE_DEPENDENCIES = ccuda.CUstreamUpdateCaptureDependencies_flags_enum.CU_STREAM_ADD_CAPTURE_DEPENDENCIES{{endif}} + CU_STREAM_ADD_CAPTURE_DEPENDENCIES = cydriver.CUstreamUpdateCaptureDependencies_flags_enum.CU_STREAM_ADD_CAPTURE_DEPENDENCIES{{endif}} {{if 'CU_STREAM_SET_CAPTURE_DEPENDENCIES' in found_values}} #: Replace the dependency set with the new nodes - CU_STREAM_SET_CAPTURE_DEPENDENCIES = ccuda.CUstreamUpdateCaptureDependencies_flags_enum.CU_STREAM_SET_CAPTURE_DEPENDENCIES{{endif}} + CU_STREAM_SET_CAPTURE_DEPENDENCIES = cydriver.CUstreamUpdateCaptureDependencies_flags_enum.CU_STREAM_SET_CAPTURE_DEPENDENCIES{{endif}} {{endif}} {{if 'CUasyncNotificationType_enum' in found_types}} @@ -703,7 +703,7 @@ class CUasyncNotificationType(IntEnum): Types of async notification that can be sent """ {{if 'CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET' in found_values}} - CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET = ccuda.CUasyncNotificationType_enum.CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET{{endif}} + CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET = cydriver.CUasyncNotificationType_enum.CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET{{endif}} {{endif}} {{if 'CUarray_format_enum' in found_types}} @@ -714,211 +714,211 @@ class CUarray_format(IntEnum): {{if 'CU_AD_FORMAT_UNSIGNED_INT8' in found_values}} #: Unsigned 8-bit integers - CU_AD_FORMAT_UNSIGNED_INT8 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8{{endif}} + CU_AD_FORMAT_UNSIGNED_INT8 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT8{{endif}} {{if 'CU_AD_FORMAT_UNSIGNED_INT16' in found_values}} #: Unsigned 16-bit integers - CU_AD_FORMAT_UNSIGNED_INT16 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16{{endif}} + CU_AD_FORMAT_UNSIGNED_INT16 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT16{{endif}} {{if 'CU_AD_FORMAT_UNSIGNED_INT32' in found_values}} #: Unsigned 32-bit integers - CU_AD_FORMAT_UNSIGNED_INT32 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32{{endif}} + CU_AD_FORMAT_UNSIGNED_INT32 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNSIGNED_INT32{{endif}} {{if 'CU_AD_FORMAT_SIGNED_INT8' in found_values}} #: Signed 8-bit integers - CU_AD_FORMAT_SIGNED_INT8 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8{{endif}} + CU_AD_FORMAT_SIGNED_INT8 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT8{{endif}} 
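The CUarray_format members that follow are IntEnum wrappers over the cydriver
enum values, so they store directly into the descriptor structs. A short
sketch, assuming the numeric value from the CUDA driver header and noting that
actually creating the array needs an active context:

    from cuda.bindings import driver

    fmt = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    assert int(fmt) == 0x20               # value fixed by the CUDA driver header

    desc = driver.CUDA_ARRAY3D_DESCRIPTOR()
    desc.Width, desc.Height, desc.Depth = 64, 64, 0   # Depth 0: plain 2D array
    desc.Format = fmt
    desc.NumChannels = 1
    desc.Flags = driver.CUDA_ARRAY3D_SURFACE_LDST     # allow surface load/store
    # err, arr = driver.cuArray3DCreate(desc)         # requires an active context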
     {{if 'CU_AD_FORMAT_SIGNED_INT16' in found_values}}

     #: Signed 16-bit integers
-    CU_AD_FORMAT_SIGNED_INT16 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16{{endif}}
+    CU_AD_FORMAT_SIGNED_INT16 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT16{{endif}}
     {{if 'CU_AD_FORMAT_SIGNED_INT32' in found_values}}

     #: Signed 32-bit integers
-    CU_AD_FORMAT_SIGNED_INT32 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32{{endif}}
+    CU_AD_FORMAT_SIGNED_INT32 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SIGNED_INT32{{endif}}
     {{if 'CU_AD_FORMAT_HALF' in found_values}}

     #: 16-bit floating point
-    CU_AD_FORMAT_HALF = ccuda.CUarray_format_enum.CU_AD_FORMAT_HALF{{endif}}
+    CU_AD_FORMAT_HALF = cydriver.CUarray_format_enum.CU_AD_FORMAT_HALF{{endif}}
     {{if 'CU_AD_FORMAT_FLOAT' in found_values}}

     #: 32-bit floating point
-    CU_AD_FORMAT_FLOAT = ccuda.CUarray_format_enum.CU_AD_FORMAT_FLOAT{{endif}}
+    CU_AD_FORMAT_FLOAT = cydriver.CUarray_format_enum.CU_AD_FORMAT_FLOAT{{endif}}
     {{if 'CU_AD_FORMAT_BC1_UNORM' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC1 compression)
     #: format
-    CU_AD_FORMAT_BC1_UNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM{{endif}}
+    CU_AD_FORMAT_BC1_UNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC1_UNORM_SRGB' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC1 compression)
     #: format with sRGB encoding
-    CU_AD_FORMAT_BC1_UNORM_SRGB = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM_SRGB{{endif}}
+    CU_AD_FORMAT_BC1_UNORM_SRGB = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC1_UNORM_SRGB{{endif}}
     {{if 'CU_AD_FORMAT_BC2_UNORM' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC2 compression)
     #: format
-    CU_AD_FORMAT_BC2_UNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM{{endif}}
+    CU_AD_FORMAT_BC2_UNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC2_UNORM_SRGB' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC2 compression)
     #: format with sRGB encoding
-    CU_AD_FORMAT_BC2_UNORM_SRGB = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM_SRGB{{endif}}
+    CU_AD_FORMAT_BC2_UNORM_SRGB = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC2_UNORM_SRGB{{endif}}
     {{if 'CU_AD_FORMAT_BC3_UNORM' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC3 compression)
     #: format
-    CU_AD_FORMAT_BC3_UNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM{{endif}}
+    CU_AD_FORMAT_BC3_UNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC3_UNORM_SRGB' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC3 compression)
     #: format with sRGB encoding
-    CU_AD_FORMAT_BC3_UNORM_SRGB = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM_SRGB{{endif}}
+    CU_AD_FORMAT_BC3_UNORM_SRGB = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC3_UNORM_SRGB{{endif}}
     {{if 'CU_AD_FORMAT_BC4_UNORM' in found_values}}

     #: 1 channel unsigned normalized block-compressed (BC4 compression)
     #: format
-    CU_AD_FORMAT_BC4_UNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC4_UNORM{{endif}}
+    CU_AD_FORMAT_BC4_UNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC4_UNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC4_SNORM' in found_values}}

     #: 1 channel signed normalized block-compressed (BC4 compression)
     #: format
-    CU_AD_FORMAT_BC4_SNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC4_SNORM{{endif}}
+    CU_AD_FORMAT_BC4_SNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC4_SNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC5_UNORM' in found_values}}

     #: 2 channel unsigned normalized block-compressed (BC5 compression)
     #: format
-    CU_AD_FORMAT_BC5_UNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC5_UNORM{{endif}}
+    CU_AD_FORMAT_BC5_UNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC5_UNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC5_SNORM' in found_values}}

     #: 2 channel signed normalized block-compressed (BC5 compression)
     #: format
-    CU_AD_FORMAT_BC5_SNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC5_SNORM{{endif}}
+    CU_AD_FORMAT_BC5_SNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC5_SNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC6H_UF16' in found_values}}

     #: 3 channel unsigned half-float block-compressed (BC6H compression)
     #: format
-    CU_AD_FORMAT_BC6H_UF16 = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC6H_UF16{{endif}}
+    CU_AD_FORMAT_BC6H_UF16 = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC6H_UF16{{endif}}
     {{if 'CU_AD_FORMAT_BC6H_SF16' in found_values}}

     #: 3 channel signed half-float block-compressed (BC6H compression)
     #: format
-    CU_AD_FORMAT_BC6H_SF16 = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC6H_SF16{{endif}}
+    CU_AD_FORMAT_BC6H_SF16 = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC6H_SF16{{endif}}
     {{if 'CU_AD_FORMAT_BC7_UNORM' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC7 compression)
     #: format
-    CU_AD_FORMAT_BC7_UNORM = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM{{endif}}
+    CU_AD_FORMAT_BC7_UNORM = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM{{endif}}
     {{if 'CU_AD_FORMAT_BC7_UNORM_SRGB' in found_values}}

     #: 4 channel unsigned normalized block-compressed (BC7 compression)
     #: format with sRGB encoding
-    CU_AD_FORMAT_BC7_UNORM_SRGB = ccuda.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM_SRGB{{endif}}
+    CU_AD_FORMAT_BC7_UNORM_SRGB = cydriver.CUarray_format_enum.CU_AD_FORMAT_BC7_UNORM_SRGB{{endif}}
     {{if 'CU_AD_FORMAT_P010' in found_values}}

     #: 10-bit YUV planar format, with 4:2:0 sampling
-    CU_AD_FORMAT_P010 = ccuda.CUarray_format_enum.CU_AD_FORMAT_P010{{endif}}
+    CU_AD_FORMAT_P010 = cydriver.CUarray_format_enum.CU_AD_FORMAT_P010{{endif}}
     {{if 'CU_AD_FORMAT_P016' in found_values}}

     #: 16-bit YUV planar format, with 4:2:0 sampling
-    CU_AD_FORMAT_P016 = ccuda.CUarray_format_enum.CU_AD_FORMAT_P016{{endif}}
+    CU_AD_FORMAT_P016 = cydriver.CUarray_format_enum.CU_AD_FORMAT_P016{{endif}}
     {{if 'CU_AD_FORMAT_NV16' in found_values}}

     #: 8-bit YUV planar format, with 4:2:2 sampling
-    CU_AD_FORMAT_NV16 = ccuda.CUarray_format_enum.CU_AD_FORMAT_NV16{{endif}}
+    CU_AD_FORMAT_NV16 = cydriver.CUarray_format_enum.CU_AD_FORMAT_NV16{{endif}}
     {{if 'CU_AD_FORMAT_P210' in found_values}}

     #: 10-bit YUV planar format, with 4:2:2 sampling
-    CU_AD_FORMAT_P210 = ccuda.CUarray_format_enum.CU_AD_FORMAT_P210{{endif}}
+    CU_AD_FORMAT_P210 = cydriver.CUarray_format_enum.CU_AD_FORMAT_P210{{endif}}
     {{if 'CU_AD_FORMAT_P216' in found_values}}

     #: 16-bit YUV planar format, with 4:2:2 sampling
-    CU_AD_FORMAT_P216 = ccuda.CUarray_format_enum.CU_AD_FORMAT_P216{{endif}}
+    CU_AD_FORMAT_P216 = cydriver.CUarray_format_enum.CU_AD_FORMAT_P216{{endif}}
     {{if 'CU_AD_FORMAT_YUY2' in found_values}}

     #: 2 channel, 8-bit YUV packed planar format, with 4:2:2 sampling
-    CU_AD_FORMAT_YUY2 = ccuda.CUarray_format_enum.CU_AD_FORMAT_YUY2{{endif}}
+    CU_AD_FORMAT_YUY2 = cydriver.CUarray_format_enum.CU_AD_FORMAT_YUY2{{endif}}
     {{if 'CU_AD_FORMAT_Y210' in found_values}}

     #: 2 channel, 10-bit YUV packed planar format, with 4:2:2 sampling
-    CU_AD_FORMAT_Y210 = ccuda.CUarray_format_enum.CU_AD_FORMAT_Y210{{endif}}
+    CU_AD_FORMAT_Y210 = cydriver.CUarray_format_enum.CU_AD_FORMAT_Y210{{endif}}
     {{if 'CU_AD_FORMAT_Y216' in found_values}}

     #: 2 channel, 16-bit YUV packed planar format, with 4:2:2 sampling
-    CU_AD_FORMAT_Y216 = ccuda.CUarray_format_enum.CU_AD_FORMAT_Y216{{endif}}
+    CU_AD_FORMAT_Y216 = cydriver.CUarray_format_enum.CU_AD_FORMAT_Y216{{endif}}
     {{if 'CU_AD_FORMAT_AYUV' in found_values}}

     #: 4 channel, 8-bit YUV packed planar format, with 4:4:4 sampling
-    CU_AD_FORMAT_AYUV = ccuda.CUarray_format_enum.CU_AD_FORMAT_AYUV{{endif}}
+    CU_AD_FORMAT_AYUV = cydriver.CUarray_format_enum.CU_AD_FORMAT_AYUV{{endif}}
     {{if 'CU_AD_FORMAT_Y410' in found_values}}

     #: 10-bit YUV packed planar format, with 4:4:4 sampling
-    CU_AD_FORMAT_Y410 = ccuda.CUarray_format_enum.CU_AD_FORMAT_Y410{{endif}}
+    CU_AD_FORMAT_Y410 = cydriver.CUarray_format_enum.CU_AD_FORMAT_Y410{{endif}}
     {{if 'CU_AD_FORMAT_NV12' in found_values}}

     #: 8-bit YUV planar format, with 4:2:0 sampling
-    CU_AD_FORMAT_NV12 = ccuda.CUarray_format_enum.CU_AD_FORMAT_NV12{{endif}}
+    CU_AD_FORMAT_NV12 = cydriver.CUarray_format_enum.CU_AD_FORMAT_NV12{{endif}}
     {{if 'CU_AD_FORMAT_Y416' in found_values}}

     #: 4 channel, 12-bit YUV packed planar format, with 4:4:4 sampling
-    CU_AD_FORMAT_Y416 = ccuda.CUarray_format_enum.CU_AD_FORMAT_Y416{{endif}}
+    CU_AD_FORMAT_Y416 = cydriver.CUarray_format_enum.CU_AD_FORMAT_Y416{{endif}}
     {{if 'CU_AD_FORMAT_Y444_PLANAR8' in found_values}}

     #: 3 channel 8-bit YUV planar format, with 4:4:4 sampling
-    CU_AD_FORMAT_Y444_PLANAR8 = ccuda.CUarray_format_enum.CU_AD_FORMAT_Y444_PLANAR8{{endif}}
+    CU_AD_FORMAT_Y444_PLANAR8 = cydriver.CUarray_format_enum.CU_AD_FORMAT_Y444_PLANAR8{{endif}}
     {{if 'CU_AD_FORMAT_Y444_PLANAR10' in found_values}}

     #: 3 channel 10-bit YUV planar format, with 4:4:4 sampling
-    CU_AD_FORMAT_Y444_PLANAR10 = ccuda.CUarray_format_enum.CU_AD_FORMAT_Y444_PLANAR10{{endif}}
+    CU_AD_FORMAT_Y444_PLANAR10 = cydriver.CUarray_format_enum.CU_AD_FORMAT_Y444_PLANAR10{{endif}}
     {{if 'CU_AD_FORMAT_UNORM_INT8X1' in found_values}}

     #: 1 channel unsigned 8-bit normalized integer
-    CU_AD_FORMAT_UNORM_INT8X1 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT8X1{{endif}}
+    CU_AD_FORMAT_UNORM_INT8X1 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT8X1{{endif}}
     {{if 'CU_AD_FORMAT_UNORM_INT8X2' in found_values}}

     #: 2 channel unsigned 8-bit normalized integer
-    CU_AD_FORMAT_UNORM_INT8X2 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT8X2{{endif}}
+    CU_AD_FORMAT_UNORM_INT8X2 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT8X2{{endif}}
     {{if 'CU_AD_FORMAT_UNORM_INT8X4' in found_values}}

     #: 4 channel unsigned 8-bit normalized integer
-    CU_AD_FORMAT_UNORM_INT8X4 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT8X4{{endif}}
+    CU_AD_FORMAT_UNORM_INT8X4 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT8X4{{endif}}
     {{if 'CU_AD_FORMAT_UNORM_INT16X1' in found_values}}

     #: 1 channel unsigned 16-bit normalized integer
-    CU_AD_FORMAT_UNORM_INT16X1 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT16X1{{endif}}
+    CU_AD_FORMAT_UNORM_INT16X1 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT16X1{{endif}}
     {{if 'CU_AD_FORMAT_UNORM_INT16X2' in found_values}}

     #: 2 channel unsigned 16-bit normalized integer
-    CU_AD_FORMAT_UNORM_INT16X2 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT16X2{{endif}}
+    CU_AD_FORMAT_UNORM_INT16X2 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT16X2{{endif}}
     {{if 'CU_AD_FORMAT_UNORM_INT16X4' in found_values}}

     #: 4 channel unsigned 16-bit normalized integer
-    CU_AD_FORMAT_UNORM_INT16X4 = ccuda.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT16X4{{endif}}
+    CU_AD_FORMAT_UNORM_INT16X4 = cydriver.CUarray_format_enum.CU_AD_FORMAT_UNORM_INT16X4{{endif}}
     {{if 'CU_AD_FORMAT_SNORM_INT8X1' in found_values}}

     #: 1 channel signed 8-bit normalized integer
-    CU_AD_FORMAT_SNORM_INT8X1 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT8X1{{endif}}
+    CU_AD_FORMAT_SNORM_INT8X1 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT8X1{{endif}}
     {{if 'CU_AD_FORMAT_SNORM_INT8X2' in found_values}}

     #: 2 channel signed 8-bit normalized integer
-    CU_AD_FORMAT_SNORM_INT8X2 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT8X2{{endif}}
+    CU_AD_FORMAT_SNORM_INT8X2 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT8X2{{endif}}
     {{if 'CU_AD_FORMAT_SNORM_INT8X4' in found_values}}

     #: 4 channel signed 8-bit normalized integer
-    CU_AD_FORMAT_SNORM_INT8X4 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT8X4{{endif}}
+    CU_AD_FORMAT_SNORM_INT8X4 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT8X4{{endif}}
     {{if 'CU_AD_FORMAT_SNORM_INT16X1' in found_values}}

     #: 1 channel signed 16-bit normalized integer
-    CU_AD_FORMAT_SNORM_INT16X1 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT16X1{{endif}}
+    CU_AD_FORMAT_SNORM_INT16X1 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT16X1{{endif}}
     {{if 'CU_AD_FORMAT_SNORM_INT16X2' in found_values}}

     #: 2 channel signed 16-bit normalized integer
-    CU_AD_FORMAT_SNORM_INT16X2 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT16X2{{endif}}
+    CU_AD_FORMAT_SNORM_INT16X2 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT16X2{{endif}}
     {{if 'CU_AD_FORMAT_SNORM_INT16X4' in found_values}}

     #: 4 channel signed 16-bit normalized integer
-    CU_AD_FORMAT_SNORM_INT16X4 = ccuda.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT16X4{{endif}}
+    CU_AD_FORMAT_SNORM_INT16X4 = cydriver.CUarray_format_enum.CU_AD_FORMAT_SNORM_INT16X4{{endif}}
     {{if 'CU_AD_FORMAT_MAX' in found_values}}
-    CU_AD_FORMAT_MAX = ccuda.CUarray_format_enum.CU_AD_FORMAT_MAX{{endif}}
+    CU_AD_FORMAT_MAX = cydriver.CUarray_format_enum.CU_AD_FORMAT_MAX{{endif}}
 {{endif}}

 {{if 'CUaddress_mode_enum' in found_types}}
@@ -929,19 +929,19 @@ class CUaddress_mode(IntEnum):
     {{if 'CU_TR_ADDRESS_MODE_WRAP' in found_values}}

     #: Wrapping address mode
-    CU_TR_ADDRESS_MODE_WRAP = ccuda.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_WRAP{{endif}}
+    CU_TR_ADDRESS_MODE_WRAP = cydriver.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_WRAP{{endif}}
     {{if 'CU_TR_ADDRESS_MODE_CLAMP' in found_values}}

     #: Clamp to edge address mode
-    CU_TR_ADDRESS_MODE_CLAMP = ccuda.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_CLAMP{{endif}}
+    CU_TR_ADDRESS_MODE_CLAMP = cydriver.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_CLAMP{{endif}}
     {{if 'CU_TR_ADDRESS_MODE_MIRROR' in found_values}}

     #: Mirror address mode
-    CU_TR_ADDRESS_MODE_MIRROR = ccuda.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_MIRROR{{endif}}
+    CU_TR_ADDRESS_MODE_MIRROR = cydriver.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_MIRROR{{endif}}
     {{if 'CU_TR_ADDRESS_MODE_BORDER' in found_values}}

     #: Border address mode
-    CU_TR_ADDRESS_MODE_BORDER = ccuda.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_BORDER{{endif}}
+    CU_TR_ADDRESS_MODE_BORDER = cydriver.CUaddress_mode_enum.CU_TR_ADDRESS_MODE_BORDER{{endif}}
 {{endif}}

 {{if 'CUfilter_mode_enum' in found_types}}
@@ -952,11 +952,11 @@ class CUfilter_mode(IntEnum):
     {{if 'CU_TR_FILTER_MODE_POINT' in found_values}}

     #: Point filter mode
-    CU_TR_FILTER_MODE_POINT = ccuda.CUfilter_mode_enum.CU_TR_FILTER_MODE_POINT{{endif}}
+    CU_TR_FILTER_MODE_POINT = cydriver.CUfilter_mode_enum.CU_TR_FILTER_MODE_POINT{{endif}}
     {{if 'CU_TR_FILTER_MODE_LINEAR' in found_values}}

     #: Linear filter mode
-    CU_TR_FILTER_MODE_LINEAR = ccuda.CUfilter_mode_enum.CU_TR_FILTER_MODE_LINEAR{{endif}}
+    CU_TR_FILTER_MODE_LINEAR = cydriver.CUfilter_mode_enum.CU_TR_FILTER_MODE_LINEAR{{endif}}
 {{endif}}

 {{if 'CUdevice_attribute_enum' in found_types}}
@@ -967,613 +967,613 @@ class CUdevice_attribute(IntEnum):
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK' in found_values}}

     #: Maximum number of threads per block
-    CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X' in found_values}}

     #: Maximum block dimension X
-    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y' in found_values}}

     #: Maximum block dimension Y
-    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z' in found_values}}

     #: Maximum block dimension Z
-    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X' in found_values}}

     #: Maximum grid dimension X
-    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y' in found_values}}

     #: Maximum grid dimension Y
-    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z' in found_values}}

     #: Maximum grid dimension Z
-    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK' in found_values}}

     #: Maximum shared memory available per block in bytes
-    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK' in found_values}}

     #: Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
-    CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK{{endif}}
+    CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY' in found_values}}

     #: Memory available on device for constant variables in a CUDA C kernel
     #: in bytes
-    CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY{{endif}}
+    CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_WARP_SIZE' in found_values}}

     #: Warp size in threads
-    CU_DEVICE_ATTRIBUTE_WARP_SIZE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_WARP_SIZE{{endif}}
+    CU_DEVICE_ATTRIBUTE_WARP_SIZE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_WARP_SIZE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_PITCH' in found_values}}

     #: Maximum pitch in bytes allowed by memory copies
-    CU_DEVICE_ATTRIBUTE_MAX_PITCH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_PITCH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_PITCH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_PITCH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK' in found_values}}

     #: Maximum number of 32-bit registers available per block
-    CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK' in found_values}}

     #: Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
-    CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK{{endif}}
+    CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CLOCK_RATE' in found_values}}

     #: Typical clock frequency in kilohertz
-    CU_DEVICE_ATTRIBUTE_CLOCK_RATE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CLOCK_RATE{{endif}}
+    CU_DEVICE_ATTRIBUTE_CLOCK_RATE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CLOCK_RATE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT' in found_values}}

     #: Alignment requirement for textures
-    CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT{{endif}}
+    CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GPU_OVERLAP' in found_values}}

     #: Device can possibly copy memory and execute a kernel concurrently.
     #: Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT.
-    CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP{{endif}}
+    CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT' in found_values}}

     #: Number of multiprocessors on device
-    CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT' in found_values}}

     #: Specifies whether there is a run time limit on kernels
-    CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT{{endif}}
+    CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_INTEGRATED' in found_values}}

     #: Device is integrated with host memory
-    CU_DEVICE_ATTRIBUTE_INTEGRATED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_INTEGRATED{{endif}}
+    CU_DEVICE_ATTRIBUTE_INTEGRATED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_INTEGRATED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY' in found_values}}

     #: Device can map host memory into CUDA address space
-    CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_COMPUTE_MODE' in found_values}}

     #: Compute mode (See :py:obj:`~.CUcomputemode` for details)
-    CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE{{endif}}
+    CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH' in found_values}}

     #: Maximum 1D texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH' in found_values}}

     #: Maximum 2D texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT' in found_values}}

     #: Maximum 2D texture height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH' in found_values}}

     #: Maximum 3D texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT' in found_values}}

     #: Maximum 3D texture height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH' in found_values}}

     #: Maximum 3D texture depth
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH' in found_values}}

     #: Maximum 2D layered texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH' in found_values}}

     #: Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT' in found_values}}

     #: Maximum 2D layered texture height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT' in found_values}}

     #: Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS' in found_values}}

     #: Maximum layers in a 2D layered texture
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES' in found_values}}

     #: Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT' in found_values}}

     #: Alignment requirement for surfaces
-    CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT{{endif}}
+    CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS' in found_values}}

     #: Device can possibly execute multiple kernels concurrently
-    CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS{{endif}}
+    CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_ECC_ENABLED' in found_values}}

     #: Device has ECC support enabled
-    CU_DEVICE_ATTRIBUTE_ECC_ENABLED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_ECC_ENABLED{{endif}}
+    CU_DEVICE_ATTRIBUTE_ECC_ENABLED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_ECC_ENABLED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_PCI_BUS_ID' in found_values}}

     #: PCI bus ID of the device
-    CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID{{endif}}
+    CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID' in found_values}}

     #: PCI device ID of the device
-    CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID{{endif}}
+    CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_TCC_DRIVER' in found_values}}

     #: Device is using TCC driver model
-    CU_DEVICE_ATTRIBUTE_TCC_DRIVER = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TCC_DRIVER{{endif}}
+    CU_DEVICE_ATTRIBUTE_TCC_DRIVER = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TCC_DRIVER{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE' in found_values}}

     #: Peak memory clock frequency in kilohertz
-    CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE{{endif}}
+    CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH' in found_values}}

     #: Global memory bus width in bits
-    CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE' in found_values}}

     #: Size of L2 cache in bytes
-    CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE{{endif}}
+    CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR' in found_values}}

     #: Maximum resident threads per multiprocessor
-    CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT' in found_values}}

     #: Number of asynchronous engines
-    CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT{{endif}}
+    CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING' in found_values}}

     #: Device shares a unified address space with the host
-    CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING{{endif}}
+    CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH' in found_values}}

     #: Maximum 1D layered texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS' in found_values}}

     #: Maximum layers in a 1D layered texture
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER' in found_values}}

     #: Deprecated, do not use.
-    CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH' in found_values}}

     #: Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT' in found_values}}

     #: Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE' in found_values}}

     #: Alternate maximum 3D texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE' in found_values}}

     #: Alternate maximum 3D texture height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE' in found_values}}

     #: Alternate maximum 3D texture depth
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID' in found_values}}

     #: PCI domain ID of the device
-    CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID{{endif}}
+    CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT' in found_values}}

     #: Pitch alignment requirement for textures
-    CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT{{endif}}
+    CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH' in found_values}}

     #: Maximum cubemap texture width/height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH' in found_values}}

     #: Maximum cubemap layered texture width/height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS' in found_values}}

     #: Maximum layers in a cubemap layered texture
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH' in found_values}}

     #: Maximum 1D surface width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH' in found_values}}

     #: Maximum 2D surface width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT' in found_values}}

     #: Maximum 2D surface height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH' in found_values}}

     #: Maximum 3D surface width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT' in found_values}}

     #: Maximum 3D surface height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH' in found_values}}

     #: Maximum 3D surface depth
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH' in found_values}}

     #: Maximum 1D layered surface width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS' in found_values}}

     #: Maximum layers in a 1D layered surface
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH' in found_values}}

     #: Maximum 2D layered surface width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT' in found_values}}

     #: Maximum 2D layered surface height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS' in found_values}}

     #: Maximum layers in a 2D layered surface
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH' in found_values}}

     #: Maximum cubemap surface width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH' in found_values}}

     #: Maximum cubemap layered surface width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS' in found_values}}

     #: Maximum layers in a cubemap layered surface
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH' in found_values}}

     #: Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth()
     #: or :py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth()` instead.
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH' in found_values}}

     #: Maximum 2D linear texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT' in found_values}}

     #: Maximum 2D linear texture height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH' in found_values}}

     #: Maximum 2D linear texture pitch in bytes
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH' in found_values}}

     #: Maximum mipmapped 2D texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT' in found_values}}

     #: Maximum mipmapped 2D texture height
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR' in found_values}}

     #: Major compute capability version number
-    CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR{{endif}}
+    CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR' in found_values}}

     #: Minor compute capability version number
-    CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR{{endif}}
+    CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH' in found_values}}

     #: Maximum mipmapped 1D texture width
-    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED' in found_values}}

     #: Device supports stream priorities
-    CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED' in found_values}}

     #: Device supports caching globals in L1
-    CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED' in found_values}}

     #: Device supports caching locals in L1
-    CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR' in found_values}}

     #: Maximum shared memory available per multiprocessor in bytes
-    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR' in found_values}}

     #: Maximum number of 32-bit registers available per multiprocessor
-    CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY' in found_values}}

     #: Device can allocate managed memory on this system
-    CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY{{endif}}
+    CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD' in found_values}}

     #: Device is on a multi-GPU board
-    CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD{{endif}}
+    CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID' in found_values}}

     #: Unique id for a group of devices on the same multi-GPU board
-    CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID{{endif}}
+    CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED' in found_values}}

     #: Link between the device and the host supports native atomic
     #: operations (this is a placeholder attribute, and is not supported on
     #: any current hardware)
-    CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO' in found_values}}

     #: Ratio of single precision performance (in floating-point operations
     #: per second) to double precision performance
-    CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO{{endif}}
+    CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS' in found_values}}

     #: Device supports coherently accessing pageable memory without calling
     #: cudaHostRegister on it
-    CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS{{endif}}
+    CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS' in found_values}}

     #: Device can coherently access managed memory concurrently with the
     #: CPU
-    CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS{{endif}}
+    CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED' in found_values}}

     #: Device supports compute preemption.
-    CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM' in found_values}}

     #: Device can access host registered memory at the same virtual address
     #: as the CPU
-    CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1' in found_values}}

     #: Deprecated, along with v1 MemOps API, :py:obj:`~.cuStreamBatchMemOp`
     #: and related APIs are supported.
-    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1' in found_values}}

     #: Deprecated, along with v1 MemOps API, 64-bit operations are
     #: supported in :py:obj:`~.cuStreamBatchMemOp` and related APIs.
-    CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1' in found_values}}

     #: Deprecated, along with v1 MemOps API,
     #: :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported.
-    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH' in found_values}}

     #: Device supports launching cooperative kernels via
     #: :py:obj:`~.cuLaunchCooperativeKernel`
-    CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH{{endif}}
+    CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH' in found_values}}

     #: Deprecated, :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` is
     #: deprecated.
-    CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH{{endif}}
+    CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN' in found_values}}

     #: Maximum optin shared memory per block
-    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES' in found_values}}

     #: The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the
     #: :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported
     #: on the device. See :py:obj:`~.Stream Memory Operations` for
     #: additional details.
-    CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED' in found_values}}

     #: Device supports host memory registration via
     #: :py:obj:`~.cudaHostRegister`.
-    CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES' in found_values}}

     #: Device accesses pageable memory via the host's page tables.
-    CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES{{endif}}
+    CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST' in found_values}}

     #: The host can directly access managed memory on the device without
     #: migration.
-    CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST{{endif}}
+    CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED' in found_values}}

     #: Deprecated, Use
     #: CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
-    CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED' in found_values}}

     #: Device supports virtual memory management APIs like
     #: :py:obj:`~.cuMemAddressReserve`, :py:obj:`~.cuMemCreate`,
     #: :py:obj:`~.cuMemMap` and related APIs
-    CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED' in found_values}}

     #: Device supports exporting memory to a posix file descriptor with
     #: :py:obj:`~.cuMemExportToShareableHandle`, if requested via
     #: :py:obj:`~.cuMemCreate`
-    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED' in found_values}}

     #: Device supports exporting memory to a Win32 NT handle with
     #: :py:obj:`~.cuMemExportToShareableHandle`, if requested via
     #: :py:obj:`~.cuMemCreate`
-    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED' in found_values}}

     #: Device supports exporting memory to a Win32 KMT handle with
     #: :py:obj:`~.cuMemExportToShareableHandle`, if requested via
     #: :py:obj:`~.cuMemCreate`
-    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR' in found_values}}

     #: Maximum number of blocks per multiprocessor
-    CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED' in found_values}}

     #: Device supports compression of memory
-    CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE' in found_values}}

     #: Maximum L2 persisting lines capacity setting in bytes.
-    CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE' in found_values}}

     #: Maximum value of :py:obj:`~.CUaccessPolicyWindow.num_bytes`.
-    CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE{{endif}}
+    CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED' in found_values}}

     #: Device supports specifying the GPUDirect RDMA flag with
     #: :py:obj:`~.cuMemCreate`
-    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK' in found_values}}

     #: Shared memory reserved by CUDA driver per block in bytes
-    CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK{{endif}}
+    CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED' in found_values}}

     #: Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays
-    CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED' in found_values}}

     #: Device supports using the :py:obj:`~.cuMemHostRegister` flag
     #: :py:obj:`~.CU_MEMHOSTERGISTER_READ_ONLY` to register memory that
     #: must be mapped as read-only to the GPU
-    CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED' in found_values}}

     #: External timeline semaphore interop is supported on the device
-    CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED' in found_values}}

     #: Device supports using the :py:obj:`~.cuMemAllocAsync` and
     #: :py:obj:`~.cuMemPool` family of APIs
-    CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED' in found_values}}

     #: Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see
     #: https://docs.nvidia.com/cuda/gpudirect-rdma for more information)
-    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS' in found_values}}

     #: The returned attribute shall be interpreted as a bitmask, where the
     #: individual bits are described by the
     #: :py:obj:`~.CUflushGPUDirectRDMAWritesOptions` enum
-    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS{{endif}}
+    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING' in found_values}}

     #: GPUDirect RDMA writes to the device do not need to be flushed for
     #: consumers within the scope indicated by the returned attribute. See
     #: :py:obj:`~.CUGPUDirectRDMAWritesOrdering` for the numerical values
     #: returned here.
-    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING{{endif}}
+    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES' in found_values}}

     #: Handle types supported with mempool based IPC
-    CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES{{endif}}
+    CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH' in found_values}}

     #: Indicates device supports cluster launch
-    CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH{{endif}}
+    CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED' in found_values}}

     #: Device supports deferred mapping CUDA arrays and CUDA mipmapped
     #: arrays
-    CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED{{endif}}
+    CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS' in found_values}}

     #: 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp`
     #: and related MemOp APIs.
-    CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR' in found_values}}

     #: :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported by MemOp APIs.
-    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR{{endif}}
+    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR{{endif}}
     {{if 'CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED' in found_values}}

     #: Device supports buffer sharing with dma_buf mechanism.
- CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED{{endif}} + CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED' in found_values}} #: Device supports IPC Events. - CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED{{endif}} + CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT' in found_values}} #: Number of memory domains the device supports. - CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT{{endif}} + CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED' in found_values}} #: Device supports accessing memory using Tensor Map. - CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED{{endif}} + CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED' in found_values}} #: Device supports exporting memory to a fabric handle with #: :py:obj:`~.cuMemExportToShareableHandle()` or requested with #: :py:obj:`~.cuMemCreate()` - CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED{{endif}} + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS' in found_values}} #: Device supports unified function pointers. - CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS{{endif}} + CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_NUMA_CONFIG' in found_values}} #: NUMA configuration of a device: value is of type #: :py:obj:`~.CUdeviceNumaConfig` enum - CU_DEVICE_ATTRIBUTE_NUMA_CONFIG = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG{{endif}} + CU_DEVICE_ATTRIBUTE_NUMA_CONFIG = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_NUMA_ID' in found_values}} #: NUMA node ID of the GPU memory - CU_DEVICE_ATTRIBUTE_NUMA_ID = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_NUMA_ID{{endif}} + CU_DEVICE_ATTRIBUTE_NUMA_ID = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_NUMA_ID{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED' in found_values}} #: Device supports switch multicast and reduction operations. 
- CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED{{endif}} + CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_MPS_ENABLED' in found_values}} #: Indicates if contexts created on this device will be shared via MPS - CU_DEVICE_ATTRIBUTE_MPS_ENABLED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MPS_ENABLED{{endif}} + CU_DEVICE_ATTRIBUTE_MPS_ENABLED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MPS_ENABLED{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID' in found_values}} #: NUMA ID of the host node closest to the device. Returns -1 when #: system does not support NUMA. - CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID{{endif}} + CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED' in found_values}} #: Device supports CIG with D3D12. - CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED{{endif}} + CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED{{endif}} {{if 'CU_DEVICE_ATTRIBUTE_MAX' in found_values}} - CU_DEVICE_ATTRIBUTE_MAX = ccuda.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX{{endif}} + CU_DEVICE_ATTRIBUTE_MAX = cydriver.CUdevice_attribute_enum.CU_DEVICE_ATTRIBUTE_MAX{{endif}} {{endif}} {{if 'CUpointer_attribute_enum' in found_types}} @@ -1585,95 +1585,95 @@ class CUpointer_attribute(IntEnum): #: The :py:obj:`~.CUcontext` on which a pointer was allocated or #: registered - CU_POINTER_ATTRIBUTE_CONTEXT = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT{{endif}} + CU_POINTER_ATTRIBUTE_CONTEXT = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_CONTEXT{{endif}} {{if 'CU_POINTER_ATTRIBUTE_MEMORY_TYPE' in found_values}} #: The :py:obj:`~.CUmemorytype` describing the physical location of a #: pointer - CU_POINTER_ATTRIBUTE_MEMORY_TYPE = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE{{endif}} + CU_POINTER_ATTRIBUTE_MEMORY_TYPE = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_TYPE{{endif}} {{if 'CU_POINTER_ATTRIBUTE_DEVICE_POINTER' in found_values}} #: The address at which a pointer's memory may be accessed on the #: device - CU_POINTER_ATTRIBUTE_DEVICE_POINTER = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER{{endif}} + CU_POINTER_ATTRIBUTE_DEVICE_POINTER = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_POINTER{{endif}} {{if 'CU_POINTER_ATTRIBUTE_HOST_POINTER' in found_values}} #: The address at which a pointer's memory may be accessed on the host - CU_POINTER_ATTRIBUTE_HOST_POINTER = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER{{endif}} + CU_POINTER_ATTRIBUTE_HOST_POINTER = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_HOST_POINTER{{endif}} {{if 'CU_POINTER_ATTRIBUTE_P2P_TOKENS' in found_values}} #: A pair of tokens for use with the nv-p2p.h Linux kernel interface - CU_POINTER_ATTRIBUTE_P2P_TOKENS = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_P2P_TOKENS{{endif}} + CU_POINTER_ATTRIBUTE_P2P_TOKENS = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_P2P_TOKENS{{endif}} {{if 'CU_POINTER_ATTRIBUTE_SYNC_MEMOPS' in found_values}} #: Synchronize every synchronous memory operation initiated on this #: region - 
CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS{{endif}} + CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS{{endif}} {{if 'CU_POINTER_ATTRIBUTE_BUFFER_ID' in found_values}} #: A process-wide unique ID for an allocated memory region - CU_POINTER_ATTRIBUTE_BUFFER_ID = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_BUFFER_ID{{endif}} + CU_POINTER_ATTRIBUTE_BUFFER_ID = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_BUFFER_ID{{endif}} {{if 'CU_POINTER_ATTRIBUTE_IS_MANAGED' in found_values}} #: Indicates if the pointer points to managed memory - CU_POINTER_ATTRIBUTE_IS_MANAGED = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED{{endif}} + CU_POINTER_ATTRIBUTE_IS_MANAGED = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_MANAGED{{endif}} {{if 'CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL' in found_values}} #: A device ordinal of a device on which a pointer was allocated or #: registered - CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL{{endif}} + CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL{{endif}} {{if 'CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE' in found_values}} #: 1 if this pointer maps to an allocation that is suitable for #: :py:obj:`~.cudaIpcGetMemHandle`, 0 otherwise - CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE{{endif}} + CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE{{endif}} {{if 'CU_POINTER_ATTRIBUTE_RANGE_START_ADDR' in found_values}} #: Starting address for this requested pointer - CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR{{endif}} + CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR{{endif}} {{if 'CU_POINTER_ATTRIBUTE_RANGE_SIZE' in found_values}} #: Size of the address range for this requested pointer - CU_POINTER_ATTRIBUTE_RANGE_SIZE = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_SIZE{{endif}} + CU_POINTER_ATTRIBUTE_RANGE_SIZE = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_RANGE_SIZE{{endif}} {{if 'CU_POINTER_ATTRIBUTE_MAPPED' in found_values}} #: 1 if this pointer is in a valid address range that is mapped to a #: backing allocation, 0 otherwise - CU_POINTER_ATTRIBUTE_MAPPED = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPED{{endif}} + CU_POINTER_ATTRIBUTE_MAPPED = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPED{{endif}} {{if 'CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES' in found_values}} #: Bitmask of allowed :py:obj:`~.CUmemAllocationHandleType` for this #: allocation - CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES{{endif}} + CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES{{endif}} {{if 'CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE' in found_values}} #: 1 if the memory this pointer is referencing can be used with the #: GPUDirect RDMA API - CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE{{endif}} + CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = 
cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE{{endif}} {{if 'CU_POINTER_ATTRIBUTE_ACCESS_FLAGS' in found_values}} #: Returns the access flags the device associated with the current #: context has on the corresponding memory referenced by the pointer #: given - CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS{{endif}} + CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS{{endif}} {{if 'CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE' in found_values}} #: Returns the mempool handle for the allocation if it was allocated #: from a mempool. Otherwise returns NULL. - CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE{{endif}} + CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE{{endif}} {{if 'CU_POINTER_ATTRIBUTE_MAPPING_SIZE' in found_values}} #: Size of the actual underlying mapping that the pointer belongs to - CU_POINTER_ATTRIBUTE_MAPPING_SIZE = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPING_SIZE{{endif}} + CU_POINTER_ATTRIBUTE_MAPPING_SIZE = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPING_SIZE{{endif}} {{if 'CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR' in found_values}} #: The start address of the mapping that the pointer belongs to - CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR{{endif}} + CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR{{endif}} {{if 'CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID' in found_values}} #: A process-wide unique id corresponding to the physical allocation #: the pointer belongs to - CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID = ccuda.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID{{endif}} + CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID = cydriver.CUpointer_attribute_enum.CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID{{endif}} {{endif}} {{if 'CUfunction_attribute_enum' in found_types}} @@ -1686,27 +1686,27 @@ class CUfunction_attribute(IntEnum): #: The maximum number of threads per block, beyond which a launch of #: the function would fail. This number depends on both the function #: and the device on which the function is currently loaded. - CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK{{endif}} + CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK{{endif}} {{if 'CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES' in found_values}} #: The size in bytes of statically-allocated shared memory required by #: this function. This does not include dynamically-allocated shared #: memory requested by the user at runtime. - CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES{{endif}} + CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES{{endif}} {{if 'CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES' in found_values}} #: The size in bytes of user-allocated constant memory required by this #: function. 
- CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES{{endif}} + CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES{{endif}} {{if 'CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES' in found_values}} #: The size in bytes of local memory used by each thread of this #: function. - CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES{{endif}} + CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES{{endif}} {{if 'CU_FUNC_ATTRIBUTE_NUM_REGS' in found_values}} #: The number of registers used by each thread of this function. - CU_FUNC_ATTRIBUTE_NUM_REGS = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_NUM_REGS{{endif}} + CU_FUNC_ATTRIBUTE_NUM_REGS = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_NUM_REGS{{endif}} {{if 'CU_FUNC_ATTRIBUTE_PTX_VERSION' in found_values}} #: The PTX virtual architecture version for which the function was @@ -1714,7 +1714,7 @@ class CUfunction_attribute(IntEnum): #: version, so a PTX version 1.3 function would return the value 13. #: Note that this may return the undefined value of 0 for cubins #: compiled prior to CUDA 3.0. - CU_FUNC_ATTRIBUTE_PTX_VERSION = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PTX_VERSION{{endif}} + CU_FUNC_ATTRIBUTE_PTX_VERSION = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PTX_VERSION{{endif}} {{if 'CU_FUNC_ATTRIBUTE_BINARY_VERSION' in found_values}} #: The binary architecture version for which the function was compiled. @@ -1722,19 +1722,19 @@ class CUfunction_attribute(IntEnum): #: version, so a binary version 1.3 function would return the value 13. #: Note that this will return a value of 10 for legacy cubins that do #: not have a properly-encoded binary architecture version. - CU_FUNC_ATTRIBUTE_BINARY_VERSION = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_BINARY_VERSION{{endif}} + CU_FUNC_ATTRIBUTE_BINARY_VERSION = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_BINARY_VERSION{{endif}} {{if 'CU_FUNC_ATTRIBUTE_CACHE_MODE_CA' in found_values}} #: The attribute to indicate whether the function has been compiled #: with user specified option "-Xptxas --dlcm=ca" set . - CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA{{endif}} + CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA{{endif}} {{if 'CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES' in found_values}} #: The maximum size in bytes of dynamically-allocated shared memory #: that can be used by this function. If the user-specified dynamic #: shared memory size is larger than this value, the launch will fail. #: See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES{{endif}} + CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES{{endif}} {{if 'CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT' in found_values}} #: On devices where the L1 cache and shared memory use the same @@ -1744,13 +1744,13 @@ class CUfunction_attribute(IntEnum): #: This is only a hint, and the driver can choose a different ratio if #: required to execute the function. 
See #: :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} + CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} {{if 'CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET' in found_values}} #: If this attribute is set, the kernel must launch with a valid #: cluster size specified. See :py:obj:`~.cuFuncSetAttribute`, #: :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET{{endif}} + CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET{{endif}} {{if 'CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH' in found_values}} #: The required cluster width in blocks. The values must either all be @@ -1760,7 +1760,7 @@ class CUfunction_attribute(IntEnum): #: If the value is set during compile time, it cannot be set at #: runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. #: See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH{{endif}} + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH{{endif}} {{if 'CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT' in found_values}} #: The required cluster height in blocks. The values must either all be @@ -1771,7 +1771,7 @@ class CUfunction_attribute(IntEnum): #: runtime. Setting it at runtime should return #: CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, #: :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT{{endif}} + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT{{endif}} {{if 'CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH' in found_values}} #: The required cluster depth in blocks. The values must either all be @@ -1782,7 +1782,7 @@ class CUfunction_attribute(IntEnum): #: runtime. Setting it at runtime should return #: CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, #: :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH{{endif}} + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH{{endif}} {{if 'CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED' in found_values}} #: Whether the function can be launched with non-portable cluster size. @@ -1804,15 +1804,15 @@ class CUfunction_attribute(IntEnum): #: The specific hardware unit may support higher cluster sizes that’s #: not guaranteed to be portable. See :py:obj:`~.cuFuncSetAttribute`, #: :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED{{endif}} + CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED{{endif}} {{if 'CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE' in found_values}} #: The block scheduling policy of a function. 
The value type is #: CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. See #: :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} + CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} {{if 'CU_FUNC_ATTRIBUTE_MAX' in found_values}} - CU_FUNC_ATTRIBUTE_MAX = ccuda.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX{{endif}} + CU_FUNC_ATTRIBUTE_MAX = cydriver.CUfunction_attribute_enum.CU_FUNC_ATTRIBUTE_MAX{{endif}} {{endif}} {{if 'CUfunc_cache_enum' in found_types}} @@ -1823,19 +1823,19 @@ class CUfunc_cache(IntEnum): {{if 'CU_FUNC_CACHE_PREFER_NONE' in found_values}} #: no preference for shared memory or L1 (default) - CU_FUNC_CACHE_PREFER_NONE = ccuda.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_NONE{{endif}} + CU_FUNC_CACHE_PREFER_NONE = cydriver.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_NONE{{endif}} {{if 'CU_FUNC_CACHE_PREFER_SHARED' in found_values}} #: prefer larger shared memory and smaller L1 cache - CU_FUNC_CACHE_PREFER_SHARED = ccuda.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_SHARED{{endif}} + CU_FUNC_CACHE_PREFER_SHARED = cydriver.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_SHARED{{endif}} {{if 'CU_FUNC_CACHE_PREFER_L1' in found_values}} #: prefer larger L1 cache and smaller shared memory - CU_FUNC_CACHE_PREFER_L1 = ccuda.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_L1{{endif}} + CU_FUNC_CACHE_PREFER_L1 = cydriver.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_L1{{endif}} {{if 'CU_FUNC_CACHE_PREFER_EQUAL' in found_values}} #: prefer equal sized L1 cache and shared memory - CU_FUNC_CACHE_PREFER_EQUAL = ccuda.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_EQUAL{{endif}} + CU_FUNC_CACHE_PREFER_EQUAL = cydriver.CUfunc_cache_enum.CU_FUNC_CACHE_PREFER_EQUAL{{endif}} {{endif}} {{if 'CUsharedconfig_enum' in found_types}} @@ -1846,15 +1846,15 @@ class CUsharedconfig(IntEnum): {{if 'CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE' in found_values}} #: set default shared memory bank size - CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = ccuda.CUsharedconfig_enum.CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE{{endif}} + CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = cydriver.CUsharedconfig_enum.CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE{{endif}} {{if 'CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE' in found_values}} #: set shared memory bank width to four bytes - CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = ccuda.CUsharedconfig_enum.CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE{{endif}} + CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = cydriver.CUsharedconfig_enum.CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE{{endif}} {{if 'CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE' in found_values}} #: set shared memory bank width to eight bytes - CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = ccuda.CUsharedconfig_enum.CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE{{endif}} + CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = cydriver.CUsharedconfig_enum.CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE{{endif}} {{endif}} {{if 'CUshared_carveout_enum' in found_types}} @@ -1866,15 +1866,15 @@ class CUshared_carveout(IntEnum): {{if 'CU_SHAREDMEM_CARVEOUT_DEFAULT' in found_values}} #: No preference for shared memory or L1 (default) - CU_SHAREDMEM_CARVEOUT_DEFAULT = ccuda.CUshared_carveout_enum.CU_SHAREDMEM_CARVEOUT_DEFAULT{{endif}} + CU_SHAREDMEM_CARVEOUT_DEFAULT = cydriver.CUshared_carveout_enum.CU_SHAREDMEM_CARVEOUT_DEFAULT{{endif}} {{if 
'CU_SHAREDMEM_CARVEOUT_MAX_L1' in found_values}} #: Prefer maximum available L1 cache, minimum shared memory - CU_SHAREDMEM_CARVEOUT_MAX_L1 = ccuda.CUshared_carveout_enum.CU_SHAREDMEM_CARVEOUT_MAX_L1{{endif}} + CU_SHAREDMEM_CARVEOUT_MAX_L1 = cydriver.CUshared_carveout_enum.CU_SHAREDMEM_CARVEOUT_MAX_L1{{endif}} {{if 'CU_SHAREDMEM_CARVEOUT_MAX_SHARED' in found_values}} #: Prefer maximum available shared memory, minimum L1 cache - CU_SHAREDMEM_CARVEOUT_MAX_SHARED = ccuda.CUshared_carveout_enum.CU_SHAREDMEM_CARVEOUT_MAX_SHARED{{endif}} + CU_SHAREDMEM_CARVEOUT_MAX_SHARED = cydriver.CUshared_carveout_enum.CU_SHAREDMEM_CARVEOUT_MAX_SHARED{{endif}} {{endif}} {{if 'CUmemorytype_enum' in found_types}} @@ -1885,19 +1885,19 @@ class CUmemorytype(IntEnum): {{if 'CU_MEMORYTYPE_HOST' in found_values}} #: Host memory - CU_MEMORYTYPE_HOST = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_HOST{{endif}} + CU_MEMORYTYPE_HOST = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_HOST{{endif}} {{if 'CU_MEMORYTYPE_DEVICE' in found_values}} #: Device memory - CU_MEMORYTYPE_DEVICE = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE{{endif}} + CU_MEMORYTYPE_DEVICE = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_DEVICE{{endif}} {{if 'CU_MEMORYTYPE_ARRAY' in found_values}} #: Array memory - CU_MEMORYTYPE_ARRAY = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY{{endif}} + CU_MEMORYTYPE_ARRAY = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_ARRAY{{endif}} {{if 'CU_MEMORYTYPE_UNIFIED' in found_values}} #: Unified device or host memory - CU_MEMORYTYPE_UNIFIED = ccuda.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED{{endif}} + CU_MEMORYTYPE_UNIFIED = cydriver.CUmemorytype_enum.CU_MEMORYTYPE_UNIFIED{{endif}} {{endif}} {{if 'CUcomputemode_enum' in found_types}} @@ -1908,17 +1908,17 @@ class CUcomputemode(IntEnum): {{if 'CU_COMPUTEMODE_DEFAULT' in found_values}} #: Default compute mode (Multiple contexts allowed per device) - CU_COMPUTEMODE_DEFAULT = ccuda.CUcomputemode_enum.CU_COMPUTEMODE_DEFAULT{{endif}} + CU_COMPUTEMODE_DEFAULT = cydriver.CUcomputemode_enum.CU_COMPUTEMODE_DEFAULT{{endif}} {{if 'CU_COMPUTEMODE_PROHIBITED' in found_values}} #: Compute-prohibited mode (No contexts can be created on this device #: at this time) - CU_COMPUTEMODE_PROHIBITED = ccuda.CUcomputemode_enum.CU_COMPUTEMODE_PROHIBITED{{endif}} + CU_COMPUTEMODE_PROHIBITED = cydriver.CUcomputemode_enum.CU_COMPUTEMODE_PROHIBITED{{endif}} {{if 'CU_COMPUTEMODE_EXCLUSIVE_PROCESS' in found_values}} #: Compute-exclusive-process mode (Only one context used by a single #: process can be present on this device at a time) - CU_COMPUTEMODE_EXCLUSIVE_PROCESS = ccuda.CUcomputemode_enum.CU_COMPUTEMODE_EXCLUSIVE_PROCESS{{endif}} + CU_COMPUTEMODE_EXCLUSIVE_PROCESS = cydriver.CUcomputemode_enum.CU_COMPUTEMODE_EXCLUSIVE_PROCESS{{endif}} {{endif}} {{if 'CUmem_advise_enum' in found_types}} @@ -1929,29 +1929,29 @@ class CUmem_advise(IntEnum): {{if 'CU_MEM_ADVISE_SET_READ_MOSTLY' in found_values}} #: Data will mostly be read and only occasionally be written to - CU_MEM_ADVISE_SET_READ_MOSTLY = ccuda.CUmem_advise_enum.CU_MEM_ADVISE_SET_READ_MOSTLY{{endif}} + CU_MEM_ADVISE_SET_READ_MOSTLY = cydriver.CUmem_advise_enum.CU_MEM_ADVISE_SET_READ_MOSTLY{{endif}} {{if 'CU_MEM_ADVISE_UNSET_READ_MOSTLY' in found_values}} #: Undo the effect of :py:obj:`~.CU_MEM_ADVISE_SET_READ_MOSTLY` - CU_MEM_ADVISE_UNSET_READ_MOSTLY = ccuda.CUmem_advise_enum.CU_MEM_ADVISE_UNSET_READ_MOSTLY{{endif}} + CU_MEM_ADVISE_UNSET_READ_MOSTLY = cydriver.CUmem_advise_enum.CU_MEM_ADVISE_UNSET_READ_MOSTLY{{endif}} {{if 'CU_MEM_ADVISE_SET_PREFERRED_LOCATION' in 
found_values}} #: Set the preferred location for the data as the specified device - CU_MEM_ADVISE_SET_PREFERRED_LOCATION = ccuda.CUmem_advise_enum.CU_MEM_ADVISE_SET_PREFERRED_LOCATION{{endif}} + CU_MEM_ADVISE_SET_PREFERRED_LOCATION = cydriver.CUmem_advise_enum.CU_MEM_ADVISE_SET_PREFERRED_LOCATION{{endif}} {{if 'CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION' in found_values}} #: Clear the preferred location for the data - CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = ccuda.CUmem_advise_enum.CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION{{endif}} + CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = cydriver.CUmem_advise_enum.CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION{{endif}} {{if 'CU_MEM_ADVISE_SET_ACCESSED_BY' in found_values}} #: Data will be accessed by the specified device, so prevent page #: faults as much as possible - CU_MEM_ADVISE_SET_ACCESSED_BY = ccuda.CUmem_advise_enum.CU_MEM_ADVISE_SET_ACCESSED_BY{{endif}} + CU_MEM_ADVISE_SET_ACCESSED_BY = cydriver.CUmem_advise_enum.CU_MEM_ADVISE_SET_ACCESSED_BY{{endif}} {{if 'CU_MEM_ADVISE_UNSET_ACCESSED_BY' in found_values}} #: Let the Unified Memory subsystem decide on the page faulting policy #: for the specified device - CU_MEM_ADVISE_UNSET_ACCESSED_BY = ccuda.CUmem_advise_enum.CU_MEM_ADVISE_UNSET_ACCESSED_BY{{endif}} + CU_MEM_ADVISE_UNSET_ACCESSED_BY = cydriver.CUmem_advise_enum.CU_MEM_ADVISE_UNSET_ACCESSED_BY{{endif}} {{endif}} {{if 'CUmem_range_attribute_enum' in found_types}} @@ -1963,36 +1963,36 @@ class CUmem_range_attribute(IntEnum): #: Whether the range will mostly be read and only occasionally be #: written to - CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY{{endif}} + CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY{{endif}} {{if 'CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION' in found_values}} #: The preferred location of the range - CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION{{endif}} + CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION{{endif}} {{if 'CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY' in found_values}} #: Memory range has :py:obj:`~.CU_MEM_ADVISE_SET_ACCESSED_BY` set for #: specified device - CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY{{endif}} + CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY{{endif}} {{if 'CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION' in found_values}} #: The last location to which the range was prefetched - CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION{{endif}} + CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION{{endif}} {{if 'CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE' in found_values}} #: The preferred location type of the range - CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE = ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE{{endif}} + CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE{{endif}} {{if 'CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID' in found_values}} #: The preferred location id of the range - CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID = 
ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID{{endif}} + CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID{{endif}} {{if 'CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE' in found_values}} #: The last location type to which the range was prefetched - CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE = ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE{{endif}} + CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE{{endif}} {{if 'CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID' in found_values}} #: The last location id to which the range was prefetched - CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID = ccuda.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID{{endif}} + CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID = cydriver.CUmem_range_attribute_enum.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID{{endif}} {{endif}} {{if 'CUjit_option_enum' in found_types}} @@ -2005,7 +2005,7 @@ class CUjit_option(IntEnum): #: Max number of registers that a thread may use. #: Option type: unsigned int #: Applies to: compiler only - CU_JIT_MAX_REGISTERS = ccuda.CUjit_option_enum.CU_JIT_MAX_REGISTERS{{endif}} + CU_JIT_MAX_REGISTERS = cydriver.CUjit_option_enum.CU_JIT_MAX_REGISTERS{{endif}} {{if 'CU_JIT_THREADS_PER_BLOCK' in found_values}} #: IN: Specifies minimum number of threads per block to target @@ -2019,14 +2019,14 @@ class CUjit_option(IntEnum): #: Cannot be combined with :py:obj:`~.CU_JIT_TARGET`. #: Option type: unsigned int #: Applies to: compiler only - CU_JIT_THREADS_PER_BLOCK = ccuda.CUjit_option_enum.CU_JIT_THREADS_PER_BLOCK{{endif}} + CU_JIT_THREADS_PER_BLOCK = cydriver.CUjit_option_enum.CU_JIT_THREADS_PER_BLOCK{{endif}} {{if 'CU_JIT_WALL_TIME' in found_values}} #: Overwrites the option value with the total wall clock time, in #: milliseconds, spent in the compiler and linker #: Option type: float #: Applies to: compiler and linker - CU_JIT_WALL_TIME = ccuda.CUjit_option_enum.CU_JIT_WALL_TIME{{endif}} + CU_JIT_WALL_TIME = cydriver.CUjit_option_enum.CU_JIT_WALL_TIME{{endif}} {{if 'CU_JIT_INFO_LOG_BUFFER' in found_values}} #: Pointer to a buffer in which to print any log messages that are @@ -2034,7 +2034,7 @@ class CUjit_option(IntEnum): #: :py:obj:`~.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES`) #: Option type: char * #: Applies to: compiler and linker - CU_JIT_INFO_LOG_BUFFER = ccuda.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER{{endif}} + CU_JIT_INFO_LOG_BUFFER = cydriver.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER{{endif}} {{if 'CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES' in found_values}} #: IN: Log buffer size in bytes. 
Log messages will be capped at this @@ -2042,7 +2042,7 @@ class CUjit_option(IntEnum): #: OUT: Amount of log buffer filled with messages #: Option type: unsigned int #: Applies to: compiler and linker - CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = ccuda.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES{{endif}} + CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = cydriver.CUjit_option_enum.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES{{endif}} {{if 'CU_JIT_ERROR_LOG_BUFFER' in found_values}} #: Pointer to a buffer in which to print any log messages that reflect @@ -2050,7 +2050,7 @@ class CUjit_option(IntEnum): #: :py:obj:`~.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES`) #: Option type: char * #: Applies to: compiler and linker - CU_JIT_ERROR_LOG_BUFFER = ccuda.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER{{endif}} + CU_JIT_ERROR_LOG_BUFFER = cydriver.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER{{endif}} {{if 'CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES' in found_values}} #: IN: Log buffer size in bytes. Log messages will be capped at this @@ -2058,21 +2058,21 @@ class CUjit_option(IntEnum): #: OUT: Amount of log buffer filled with messages #: Option type: unsigned int #: Applies to: compiler and linker - CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = ccuda.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES{{endif}} + CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = cydriver.CUjit_option_enum.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES{{endif}} {{if 'CU_JIT_OPTIMIZATION_LEVEL' in found_values}} #: Level of optimizations to apply to generated code (0 - 4), with 4 #: being the default and highest level of optimizations. #: Option type: unsigned int #: Applies to: compiler only - CU_JIT_OPTIMIZATION_LEVEL = ccuda.CUjit_option_enum.CU_JIT_OPTIMIZATION_LEVEL{{endif}} + CU_JIT_OPTIMIZATION_LEVEL = cydriver.CUjit_option_enum.CU_JIT_OPTIMIZATION_LEVEL{{endif}} {{if 'CU_JIT_TARGET_FROM_CUCONTEXT' in found_values}} #: No option value required. Determines the target based on the current #: attached context (default) #: Option type: No option value needed #: Applies to: compiler and linker - CU_JIT_TARGET_FROM_CUCONTEXT = ccuda.CUjit_option_enum.CU_JIT_TARGET_FROM_CUCONTEXT{{endif}} + CU_JIT_TARGET_FROM_CUCONTEXT = cydriver.CUjit_option_enum.CU_JIT_TARGET_FROM_CUCONTEXT{{endif}} {{if 'CU_JIT_TARGET' in found_values}} #: Target is chosen based on supplied :py:obj:`~.CUjit_target`. 
Cannot @@ -2080,7 +2080,7 @@ class CUjit_option(IntEnum): #: Option type: unsigned int for enumerated type #: :py:obj:`~.CUjit_target` #: Applies to: compiler and linker - CU_JIT_TARGET = ccuda.CUjit_option_enum.CU_JIT_TARGET{{endif}} + CU_JIT_TARGET = cydriver.CUjit_option_enum.CU_JIT_TARGET{{endif}} {{if 'CU_JIT_FALLBACK_STRATEGY' in found_values}} #: Specifies choice of fallback strategy if matching cubin is not @@ -2090,26 +2090,26 @@ class CUjit_option(IntEnum): #: Option type: unsigned int for enumerated type #: :py:obj:`~.CUjit_fallback` #: Applies to: compiler only - CU_JIT_FALLBACK_STRATEGY = ccuda.CUjit_option_enum.CU_JIT_FALLBACK_STRATEGY{{endif}} + CU_JIT_FALLBACK_STRATEGY = cydriver.CUjit_option_enum.CU_JIT_FALLBACK_STRATEGY{{endif}} {{if 'CU_JIT_GENERATE_DEBUG_INFO' in found_values}} #: Specifies whether to create debug information in output (-g) (0: #: false, default) #: Option type: int #: Applies to: compiler and linker - CU_JIT_GENERATE_DEBUG_INFO = ccuda.CUjit_option_enum.CU_JIT_GENERATE_DEBUG_INFO{{endif}} + CU_JIT_GENERATE_DEBUG_INFO = cydriver.CUjit_option_enum.CU_JIT_GENERATE_DEBUG_INFO{{endif}} {{if 'CU_JIT_LOG_VERBOSE' in found_values}} #: Generate verbose log messages (0: false, default) #: Option type: int #: Applies to: compiler and linker - CU_JIT_LOG_VERBOSE = ccuda.CUjit_option_enum.CU_JIT_LOG_VERBOSE{{endif}} + CU_JIT_LOG_VERBOSE = cydriver.CUjit_option_enum.CU_JIT_LOG_VERBOSE{{endif}} {{if 'CU_JIT_GENERATE_LINE_INFO' in found_values}} #: Generate line number information (-lineinfo) (0: false, default) #: Option type: int #: Applies to: compiler only - CU_JIT_GENERATE_LINE_INFO = ccuda.CUjit_option_enum.CU_JIT_GENERATE_LINE_INFO{{endif}} + CU_JIT_GENERATE_LINE_INFO = cydriver.CUjit_option_enum.CU_JIT_GENERATE_LINE_INFO{{endif}} {{if 'CU_JIT_CACHE_MODE' in found_values}} #: Specifies whether to enable caching explicitly (-dlcm) @@ -2117,15 +2117,15 @@ class CUjit_option(IntEnum): #: Option type: unsigned int for enumerated type #: :py:obj:`~.CUjit_cacheMode_enum` #: Applies to: compiler only - CU_JIT_CACHE_MODE = ccuda.CUjit_option_enum.CU_JIT_CACHE_MODE{{endif}} + CU_JIT_CACHE_MODE = cydriver.CUjit_option_enum.CU_JIT_CACHE_MODE{{endif}} {{if 'CU_JIT_NEW_SM3X_OPT' in found_values}} #: [Deprecated] - CU_JIT_NEW_SM3X_OPT = ccuda.CUjit_option_enum.CU_JIT_NEW_SM3X_OPT{{endif}} + CU_JIT_NEW_SM3X_OPT = cydriver.CUjit_option_enum.CU_JIT_NEW_SM3X_OPT{{endif}} {{if 'CU_JIT_FAST_COMPILE' in found_values}} #: This jit option is used for internal purpose only. - CU_JIT_FAST_COMPILE = ccuda.CUjit_option_enum.CU_JIT_FAST_COMPILE{{endif}} + CU_JIT_FAST_COMPILE = cydriver.CUjit_option_enum.CU_JIT_FAST_COMPILE{{endif}} {{if 'CU_JIT_GLOBAL_SYMBOL_NAMES' in found_values}} #: Array of device symbol names that will be relocated to the @@ -2140,7 +2140,7 @@ class CUjit_option(IntEnum): #: addresses. #: Option type: const char ** #: Applies to: dynamic linker only - CU_JIT_GLOBAL_SYMBOL_NAMES = ccuda.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_NAMES{{endif}} + CU_JIT_GLOBAL_SYMBOL_NAMES = cydriver.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_NAMES{{endif}} {{if 'CU_JIT_GLOBAL_SYMBOL_ADDRESSES' in found_values}} #: Array of host addresses that will be used to relocate corresponding @@ -2148,80 +2148,80 @@ class CUjit_option(IntEnum): #: Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries. 
#: Option type: void ** #: Applies to: dynamic linker only - CU_JIT_GLOBAL_SYMBOL_ADDRESSES = ccuda.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_ADDRESSES{{endif}} + CU_JIT_GLOBAL_SYMBOL_ADDRESSES = cydriver.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_ADDRESSES{{endif}} {{if 'CU_JIT_GLOBAL_SYMBOL_COUNT' in found_values}} #: Number of entries in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES` and #: :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES` arrays. #: Option type: unsigned int #: Applies to: dynamic linker only - CU_JIT_GLOBAL_SYMBOL_COUNT = ccuda.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_COUNT{{endif}} + CU_JIT_GLOBAL_SYMBOL_COUNT = cydriver.CUjit_option_enum.CU_JIT_GLOBAL_SYMBOL_COUNT{{endif}} {{if 'CU_JIT_LTO' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_LTO = ccuda.CUjit_option_enum.CU_JIT_LTO{{endif}} + CU_JIT_LTO = cydriver.CUjit_option_enum.CU_JIT_LTO{{endif}} {{if 'CU_JIT_FTZ' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_FTZ = ccuda.CUjit_option_enum.CU_JIT_FTZ{{endif}} + CU_JIT_FTZ = cydriver.CUjit_option_enum.CU_JIT_FTZ{{endif}} {{if 'CU_JIT_PREC_DIV' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_PREC_DIV = ccuda.CUjit_option_enum.CU_JIT_PREC_DIV{{endif}} + CU_JIT_PREC_DIV = cydriver.CUjit_option_enum.CU_JIT_PREC_DIV{{endif}} {{if 'CU_JIT_PREC_SQRT' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_PREC_SQRT = ccuda.CUjit_option_enum.CU_JIT_PREC_SQRT{{endif}} + CU_JIT_PREC_SQRT = cydriver.CUjit_option_enum.CU_JIT_PREC_SQRT{{endif}} {{if 'CU_JIT_FMA' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_FMA = ccuda.CUjit_option_enum.CU_JIT_FMA{{endif}} + CU_JIT_FMA = cydriver.CUjit_option_enum.CU_JIT_FMA{{endif}} {{if 'CU_JIT_REFERENCED_KERNEL_NAMES' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_REFERENCED_KERNEL_NAMES = ccuda.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_NAMES{{endif}} + CU_JIT_REFERENCED_KERNEL_NAMES = cydriver.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_NAMES{{endif}} {{if 'CU_JIT_REFERENCED_KERNEL_COUNT' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_REFERENCED_KERNEL_COUNT = ccuda.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_COUNT{{endif}} + CU_JIT_REFERENCED_KERNEL_COUNT = cydriver.CUjit_option_enum.CU_JIT_REFERENCED_KERNEL_COUNT{{endif}} {{if 'CU_JIT_REFERENCED_VARIABLE_NAMES' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_REFERENCED_VARIABLE_NAMES = ccuda.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_NAMES{{endif}} + CU_JIT_REFERENCED_VARIABLE_NAMES = cydriver.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_NAMES{{endif}} {{if 'CU_JIT_REFERENCED_VARIABLE_COUNT' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_REFERENCED_VARIABLE_COUNT = ccuda.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_COUNT{{endif}} + CU_JIT_REFERENCED_VARIABLE_COUNT = cydriver.CUjit_option_enum.CU_JIT_REFERENCED_VARIABLE_COUNT{{endif}} {{if 'CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES = 
ccuda.CUjit_option_enum.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES{{endif}} + CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES = cydriver.CUjit_option_enum.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES{{endif}} {{if 'CU_JIT_POSITION_INDEPENDENT_CODE' in found_values}} #: Generate position independent code (0: false) #: Option type: int #: Applies to: compiler only - CU_JIT_POSITION_INDEPENDENT_CODE = ccuda.CUjit_option_enum.CU_JIT_POSITION_INDEPENDENT_CODE{{endif}} + CU_JIT_POSITION_INDEPENDENT_CODE = cydriver.CUjit_option_enum.CU_JIT_POSITION_INDEPENDENT_CODE{{endif}} {{if 'CU_JIT_MIN_CTA_PER_SM' in found_values}} #: This option hints to the JIT compiler the minimum number of CTAs @@ -2234,7 +2234,7 @@ class CUjit_option(IntEnum): #: :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take #: precedence over the PTX directive. Option type: unsigned int #: Applies to: compiler only - CU_JIT_MIN_CTA_PER_SM = ccuda.CUjit_option_enum.CU_JIT_MIN_CTA_PER_SM{{endif}} + CU_JIT_MIN_CTA_PER_SM = cydriver.CUjit_option_enum.CU_JIT_MIN_CTA_PER_SM{{endif}} {{if 'CU_JIT_MAX_THREADS_PER_BLOCK' in found_values}} #: Maximum number threads in a thread block, computed as the product of @@ -2246,7 +2246,7 @@ class CUjit_option(IntEnum): #: :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take #: precedence over the PTX directive. Option type: int #: Applies to: compiler only - CU_JIT_MAX_THREADS_PER_BLOCK = ccuda.CUjit_option_enum.CU_JIT_MAX_THREADS_PER_BLOCK{{endif}} + CU_JIT_MAX_THREADS_PER_BLOCK = cydriver.CUjit_option_enum.CU_JIT_MAX_THREADS_PER_BLOCK{{endif}} {{if 'CU_JIT_OVERRIDE_DIRECTIVE_VALUES' in found_values}} #: This option lets the values specified using @@ -2256,9 +2256,9 @@ class CUjit_option(IntEnum): #: :py:obj:`~.CU_JIT_MIN_CTA_PER_SM` take precedence over any PTX #: directives. 
(0: Disable, default; 1: Enable) Option type: int #: Applies to: compiler only - CU_JIT_OVERRIDE_DIRECTIVE_VALUES = ccuda.CUjit_option_enum.CU_JIT_OVERRIDE_DIRECTIVE_VALUES{{endif}} + CU_JIT_OVERRIDE_DIRECTIVE_VALUES = cydriver.CUjit_option_enum.CU_JIT_OVERRIDE_DIRECTIVE_VALUES{{endif}} {{if 'CU_JIT_NUM_OPTIONS' in found_values}} - CU_JIT_NUM_OPTIONS = ccuda.CUjit_option_enum.CU_JIT_NUM_OPTIONS{{endif}} + CU_JIT_NUM_OPTIONS = cydriver.CUjit_option_enum.CU_JIT_NUM_OPTIONS{{endif}} {{endif}} {{if 'CUjit_target_enum' in found_types}} @@ -2269,78 +2269,78 @@ class CUjit_target(IntEnum): {{if 'CU_TARGET_COMPUTE_30' in found_values}} #: Compute device class 3.0 - CU_TARGET_COMPUTE_30 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_30{{endif}} + CU_TARGET_COMPUTE_30 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_30{{endif}} {{if 'CU_TARGET_COMPUTE_32' in found_values}} #: Compute device class 3.2 - CU_TARGET_COMPUTE_32 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_32{{endif}} + CU_TARGET_COMPUTE_32 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_32{{endif}} {{if 'CU_TARGET_COMPUTE_35' in found_values}} #: Compute device class 3.5 - CU_TARGET_COMPUTE_35 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_35{{endif}} + CU_TARGET_COMPUTE_35 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_35{{endif}} {{if 'CU_TARGET_COMPUTE_37' in found_values}} #: Compute device class 3.7 - CU_TARGET_COMPUTE_37 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_37{{endif}} + CU_TARGET_COMPUTE_37 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_37{{endif}} {{if 'CU_TARGET_COMPUTE_50' in found_values}} #: Compute device class 5.0 - CU_TARGET_COMPUTE_50 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_50{{endif}} + CU_TARGET_COMPUTE_50 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_50{{endif}} {{if 'CU_TARGET_COMPUTE_52' in found_values}} #: Compute device class 5.2 - CU_TARGET_COMPUTE_52 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_52{{endif}} + CU_TARGET_COMPUTE_52 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_52{{endif}} {{if 'CU_TARGET_COMPUTE_53' in found_values}} #: Compute device class 5.3 - CU_TARGET_COMPUTE_53 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_53{{endif}} + CU_TARGET_COMPUTE_53 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_53{{endif}} {{if 'CU_TARGET_COMPUTE_60' in found_values}} #: Compute device class 6.0. - CU_TARGET_COMPUTE_60 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_60{{endif}} + CU_TARGET_COMPUTE_60 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_60{{endif}} {{if 'CU_TARGET_COMPUTE_61' in found_values}} #: Compute device class 6.1. - CU_TARGET_COMPUTE_61 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_61{{endif}} + CU_TARGET_COMPUTE_61 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_61{{endif}} {{if 'CU_TARGET_COMPUTE_62' in found_values}} #: Compute device class 6.2. - CU_TARGET_COMPUTE_62 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_62{{endif}} + CU_TARGET_COMPUTE_62 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_62{{endif}} {{if 'CU_TARGET_COMPUTE_70' in found_values}} #: Compute device class 7.0. - CU_TARGET_COMPUTE_70 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_70{{endif}} + CU_TARGET_COMPUTE_70 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_70{{endif}} {{if 'CU_TARGET_COMPUTE_72' in found_values}} #: Compute device class 7.2. - CU_TARGET_COMPUTE_72 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_72{{endif}} + CU_TARGET_COMPUTE_72 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_72{{endif}} {{if 'CU_TARGET_COMPUTE_75' in found_values}} #: Compute device class 7.5. 
- CU_TARGET_COMPUTE_75 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_75{{endif}} + CU_TARGET_COMPUTE_75 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_75{{endif}} {{if 'CU_TARGET_COMPUTE_80' in found_values}} #: Compute device class 8.0. - CU_TARGET_COMPUTE_80 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_80{{endif}} + CU_TARGET_COMPUTE_80 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_80{{endif}} {{if 'CU_TARGET_COMPUTE_86' in found_values}} #: Compute device class 8.6. - CU_TARGET_COMPUTE_86 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_86{{endif}} + CU_TARGET_COMPUTE_86 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_86{{endif}} {{if 'CU_TARGET_COMPUTE_87' in found_values}} #: Compute device class 8.7. - CU_TARGET_COMPUTE_87 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_87{{endif}} + CU_TARGET_COMPUTE_87 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_87{{endif}} {{if 'CU_TARGET_COMPUTE_89' in found_values}} #: Compute device class 8.9. - CU_TARGET_COMPUTE_89 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_89{{endif}} + CU_TARGET_COMPUTE_89 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_89{{endif}} {{if 'CU_TARGET_COMPUTE_90' in found_values}} #: Compute device class 9.0. Compute device class 9.0. with accelerated #: features. - CU_TARGET_COMPUTE_90 = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_90{{endif}} + CU_TARGET_COMPUTE_90 = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_90{{endif}} {{if 'CU_TARGET_COMPUTE_90A' in found_values}} - CU_TARGET_COMPUTE_90A = ccuda.CUjit_target_enum.CU_TARGET_COMPUTE_90A{{endif}} + CU_TARGET_COMPUTE_90A = cydriver.CUjit_target_enum.CU_TARGET_COMPUTE_90A{{endif}} {{endif}} {{if 'CUjit_fallback_enum' in found_types}} @@ -2351,12 +2351,12 @@ class CUjit_fallback(IntEnum): {{if 'CU_PREFER_PTX' in found_values}} #: Prefer to compile ptx if exact binary match not found - CU_PREFER_PTX = ccuda.CUjit_fallback_enum.CU_PREFER_PTX{{endif}} + CU_PREFER_PTX = cydriver.CUjit_fallback_enum.CU_PREFER_PTX{{endif}} {{if 'CU_PREFER_BINARY' in found_values}} #: Prefer to fall back to compatible binary code if exact match not #: found - CU_PREFER_BINARY = ccuda.CUjit_fallback_enum.CU_PREFER_BINARY{{endif}} + CU_PREFER_BINARY = cydriver.CUjit_fallback_enum.CU_PREFER_BINARY{{endif}} {{endif}} {{if 'CUjit_cacheMode_enum' in found_types}} @@ -2367,15 +2367,15 @@ class CUjit_cacheMode(IntEnum): {{if 'CU_JIT_CACHE_OPTION_NONE' in found_values}} #: Compile with no -dlcm flag specified - CU_JIT_CACHE_OPTION_NONE = ccuda.CUjit_cacheMode_enum.CU_JIT_CACHE_OPTION_NONE{{endif}} + CU_JIT_CACHE_OPTION_NONE = cydriver.CUjit_cacheMode_enum.CU_JIT_CACHE_OPTION_NONE{{endif}} {{if 'CU_JIT_CACHE_OPTION_CG' in found_values}} #: Compile with L1 cache disabled - CU_JIT_CACHE_OPTION_CG = ccuda.CUjit_cacheMode_enum.CU_JIT_CACHE_OPTION_CG{{endif}} + CU_JIT_CACHE_OPTION_CG = cydriver.CUjit_cacheMode_enum.CU_JIT_CACHE_OPTION_CG{{endif}} {{if 'CU_JIT_CACHE_OPTION_CA' in found_values}} #: Compile with L1 cache enabled - CU_JIT_CACHE_OPTION_CA = ccuda.CUjit_cacheMode_enum.CU_JIT_CACHE_OPTION_CA{{endif}} + CU_JIT_CACHE_OPTION_CA = cydriver.CUjit_cacheMode_enum.CU_JIT_CACHE_OPTION_CA{{endif}} {{endif}} {{if 'CUjitInputType_enum' in found_types}} @@ -2387,38 +2387,38 @@ class CUjitInputType(IntEnum): #: Compiled device-class-specific device code #: Applicable options: none - CU_JIT_INPUT_CUBIN = ccuda.CUjitInputType_enum.CU_JIT_INPUT_CUBIN{{endif}} + CU_JIT_INPUT_CUBIN = cydriver.CUjitInputType_enum.CU_JIT_INPUT_CUBIN{{endif}} {{if 'CU_JIT_INPUT_PTX' in found_values}} #: PTX source code #: Applicable options: PTX compiler 
options - CU_JIT_INPUT_PTX = ccuda.CUjitInputType_enum.CU_JIT_INPUT_PTX{{endif}} + CU_JIT_INPUT_PTX = cydriver.CUjitInputType_enum.CU_JIT_INPUT_PTX{{endif}} {{if 'CU_JIT_INPUT_FATBINARY' in found_values}} #: Bundle of multiple cubins and/or PTX of some device code #: Applicable options: PTX compiler options, #: :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` - CU_JIT_INPUT_FATBINARY = ccuda.CUjitInputType_enum.CU_JIT_INPUT_FATBINARY{{endif}} + CU_JIT_INPUT_FATBINARY = cydriver.CUjitInputType_enum.CU_JIT_INPUT_FATBINARY{{endif}} {{if 'CU_JIT_INPUT_OBJECT' in found_values}} #: Host object with embedded device code #: Applicable options: PTX compiler options, #: :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` - CU_JIT_INPUT_OBJECT = ccuda.CUjitInputType_enum.CU_JIT_INPUT_OBJECT{{endif}} + CU_JIT_INPUT_OBJECT = cydriver.CUjitInputType_enum.CU_JIT_INPUT_OBJECT{{endif}} {{if 'CU_JIT_INPUT_LIBRARY' in found_values}} #: Archive of host objects with embedded device code #: Applicable options: PTX compiler options, #: :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` - CU_JIT_INPUT_LIBRARY = ccuda.CUjitInputType_enum.CU_JIT_INPUT_LIBRARY{{endif}} + CU_JIT_INPUT_LIBRARY = cydriver.CUjitInputType_enum.CU_JIT_INPUT_LIBRARY{{endif}} {{if 'CU_JIT_INPUT_NVVM' in found_values}} #: [Deprecated] #: #: Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - CU_JIT_INPUT_NVVM = ccuda.CUjitInputType_enum.CU_JIT_INPUT_NVVM{{endif}} + CU_JIT_INPUT_NVVM = cydriver.CUjitInputType_enum.CU_JIT_INPUT_NVVM{{endif}} {{if 'CU_JIT_NUM_INPUT_TYPES' in found_values}} - CU_JIT_NUM_INPUT_TYPES = ccuda.CUjitInputType_enum.CU_JIT_NUM_INPUT_TYPES{{endif}} + CU_JIT_NUM_INPUT_TYPES = cydriver.CUjitInputType_enum.CU_JIT_NUM_INPUT_TYPES{{endif}} {{endif}} {{if 'CUgraphicsRegisterFlags_enum' in found_types}} @@ -2427,15 +2427,15 @@ class CUgraphicsRegisterFlags(IntEnum): Flags to register a graphics resource """ {{if 'CU_GRAPHICS_REGISTER_FLAGS_NONE' in found_values}} - CU_GRAPHICS_REGISTER_FLAGS_NONE = ccuda.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_NONE{{endif}} + CU_GRAPHICS_REGISTER_FLAGS_NONE = cydriver.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_NONE{{endif}} {{if 'CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY' in found_values}} - CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = ccuda.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY{{endif}} + CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = cydriver.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY{{endif}} {{if 'CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD' in found_values}} - CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = ccuda.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD{{endif}} + CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = cydriver.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD{{endif}} {{if 'CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST' in found_values}} - CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = ccuda.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST{{endif}} + CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = cydriver.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST{{endif}} {{if 'CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER' in found_values}} - CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = ccuda.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER{{endif}} + CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = cydriver.CUgraphicsRegisterFlags_enum.CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER{{endif}} {{endif}} {{if 'CUgraphicsMapResourceFlags_enum' in found_types}} @@ -2444,11 +2444,11 @@ class 
CUgraphicsMapResourceFlags(IntEnum): Flags for mapping and unmapping interop resources """ {{if 'CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE' in found_values}} - CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = ccuda.CUgraphicsMapResourceFlags_enum.CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE{{endif}} + CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = cydriver.CUgraphicsMapResourceFlags_enum.CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE{{endif}} {{if 'CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY' in found_values}} - CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = ccuda.CUgraphicsMapResourceFlags_enum.CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY{{endif}} + CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = cydriver.CUgraphicsMapResourceFlags_enum.CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY{{endif}} {{if 'CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD' in found_values}} - CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = ccuda.CUgraphicsMapResourceFlags_enum.CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD{{endif}} + CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = cydriver.CUgraphicsMapResourceFlags_enum.CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD{{endif}} {{endif}} {{if 'CUarray_cubemap_face_enum' in found_types}} @@ -2459,27 +2459,27 @@ class CUarray_cubemap_face(IntEnum): {{if 'CU_CUBEMAP_FACE_POSITIVE_X' in found_values}} #: Positive X face of cubemap - CU_CUBEMAP_FACE_POSITIVE_X = ccuda.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_POSITIVE_X{{endif}} + CU_CUBEMAP_FACE_POSITIVE_X = cydriver.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_POSITIVE_X{{endif}} {{if 'CU_CUBEMAP_FACE_NEGATIVE_X' in found_values}} #: Negative X face of cubemap - CU_CUBEMAP_FACE_NEGATIVE_X = ccuda.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_NEGATIVE_X{{endif}} + CU_CUBEMAP_FACE_NEGATIVE_X = cydriver.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_NEGATIVE_X{{endif}} {{if 'CU_CUBEMAP_FACE_POSITIVE_Y' in found_values}} #: Positive Y face of cubemap - CU_CUBEMAP_FACE_POSITIVE_Y = ccuda.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_POSITIVE_Y{{endif}} + CU_CUBEMAP_FACE_POSITIVE_Y = cydriver.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_POSITIVE_Y{{endif}} {{if 'CU_CUBEMAP_FACE_NEGATIVE_Y' in found_values}} #: Negative Y face of cubemap - CU_CUBEMAP_FACE_NEGATIVE_Y = ccuda.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_NEGATIVE_Y{{endif}} + CU_CUBEMAP_FACE_NEGATIVE_Y = cydriver.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_NEGATIVE_Y{{endif}} {{if 'CU_CUBEMAP_FACE_POSITIVE_Z' in found_values}} #: Positive Z face of cubemap - CU_CUBEMAP_FACE_POSITIVE_Z = ccuda.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_POSITIVE_Z{{endif}} + CU_CUBEMAP_FACE_POSITIVE_Z = cydriver.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_POSITIVE_Z{{endif}} {{if 'CU_CUBEMAP_FACE_NEGATIVE_Z' in found_values}} #: Negative Z face of cubemap - CU_CUBEMAP_FACE_NEGATIVE_Z = ccuda.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_NEGATIVE_Z{{endif}} + CU_CUBEMAP_FACE_NEGATIVE_Z = cydriver.CUarray_cubemap_face_enum.CU_CUBEMAP_FACE_NEGATIVE_Z{{endif}} {{endif}} {{if 'CUlimit_enum' in found_types}} @@ -2490,50 +2490,50 @@ class CUlimit(IntEnum): {{if 'CU_LIMIT_STACK_SIZE' in found_values}} #: GPU thread stack size - CU_LIMIT_STACK_SIZE = ccuda.CUlimit_enum.CU_LIMIT_STACK_SIZE{{endif}} + CU_LIMIT_STACK_SIZE = cydriver.CUlimit_enum.CU_LIMIT_STACK_SIZE{{endif}} {{if 'CU_LIMIT_PRINTF_FIFO_SIZE' in found_values}} #: GPU printf FIFO size - CU_LIMIT_PRINTF_FIFO_SIZE = ccuda.CUlimit_enum.CU_LIMIT_PRINTF_FIFO_SIZE{{endif}} + CU_LIMIT_PRINTF_FIFO_SIZE = cydriver.CUlimit_enum.CU_LIMIT_PRINTF_FIFO_SIZE{{endif}} {{if 'CU_LIMIT_MALLOC_HEAP_SIZE' in found_values}} #: GPU malloc heap size - 
CU_LIMIT_MALLOC_HEAP_SIZE = ccuda.CUlimit_enum.CU_LIMIT_MALLOC_HEAP_SIZE{{endif}} + CU_LIMIT_MALLOC_HEAP_SIZE = cydriver.CUlimit_enum.CU_LIMIT_MALLOC_HEAP_SIZE{{endif}} {{if 'CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH' in found_values}} #: GPU device runtime launch synchronize depth - CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = ccuda.CUlimit_enum.CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH{{endif}} + CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = cydriver.CUlimit_enum.CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH{{endif}} {{if 'CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT' in found_values}} #: GPU device runtime pending launch count - CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = ccuda.CUlimit_enum.CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT{{endif}} + CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = cydriver.CUlimit_enum.CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT{{endif}} {{if 'CU_LIMIT_MAX_L2_FETCH_GRANULARITY' in found_values}} #: A value between 0 and 128 that indicates the maximum fetch #: granularity of L2 (in Bytes). This is a hint - CU_LIMIT_MAX_L2_FETCH_GRANULARITY = ccuda.CUlimit_enum.CU_LIMIT_MAX_L2_FETCH_GRANULARITY{{endif}} + CU_LIMIT_MAX_L2_FETCH_GRANULARITY = cydriver.CUlimit_enum.CU_LIMIT_MAX_L2_FETCH_GRANULARITY{{endif}} {{if 'CU_LIMIT_PERSISTING_L2_CACHE_SIZE' in found_values}} #: A size in bytes for L2 persisting lines cache size - CU_LIMIT_PERSISTING_L2_CACHE_SIZE = ccuda.CUlimit_enum.CU_LIMIT_PERSISTING_L2_CACHE_SIZE{{endif}} + CU_LIMIT_PERSISTING_L2_CACHE_SIZE = cydriver.CUlimit_enum.CU_LIMIT_PERSISTING_L2_CACHE_SIZE{{endif}} {{if 'CU_LIMIT_SHMEM_SIZE' in found_values}} #: A maximum size in bytes of shared memory available to CUDA kernels #: on a CIG context. Can only be queried, cannot be set - CU_LIMIT_SHMEM_SIZE = ccuda.CUlimit_enum.CU_LIMIT_SHMEM_SIZE{{endif}} + CU_LIMIT_SHMEM_SIZE = cydriver.CUlimit_enum.CU_LIMIT_SHMEM_SIZE{{endif}} {{if 'CU_LIMIT_CIG_ENABLED' in found_values}} #: A non-zero value indicates this CUDA context is a CIG-enabled #: context. 
Can only be queried, cannot be set - CU_LIMIT_CIG_ENABLED = ccuda.CUlimit_enum.CU_LIMIT_CIG_ENABLED{{endif}} + CU_LIMIT_CIG_ENABLED = cydriver.CUlimit_enum.CU_LIMIT_CIG_ENABLED{{endif}} {{if 'CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED' in found_values}} #: When set to a non-zero value, CUDA will fail to launch a kernel on a #: CIG context, instead of using the fallback path, if the kernel uses #: more shared memory than available - CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED = ccuda.CUlimit_enum.CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED{{endif}} + CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED = cydriver.CUlimit_enum.CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED{{endif}} {{if 'CU_LIMIT_MAX' in found_values}} - CU_LIMIT_MAX = ccuda.CUlimit_enum.CU_LIMIT_MAX{{endif}} + CU_LIMIT_MAX = cydriver.CUlimit_enum.CU_LIMIT_MAX{{endif}} {{endif}} {{if 'CUresourcetype_enum' in found_types}} @@ -2544,19 +2544,19 @@ class CUresourcetype(IntEnum): {{if 'CU_RESOURCE_TYPE_ARRAY' in found_values}} #: Array resource - CU_RESOURCE_TYPE_ARRAY = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY{{endif}} + CU_RESOURCE_TYPE_ARRAY = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_ARRAY{{endif}} {{if 'CU_RESOURCE_TYPE_MIPMAPPED_ARRAY' in found_values}} #: Mipmapped array resource - CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY{{endif}} + CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY{{endif}} {{if 'CU_RESOURCE_TYPE_LINEAR' in found_values}} #: Linear resource - CU_RESOURCE_TYPE_LINEAR = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR{{endif}} + CU_RESOURCE_TYPE_LINEAR = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_LINEAR{{endif}} {{if 'CU_RESOURCE_TYPE_PITCH2D' in found_values}} #: Pitch 2D resource - CU_RESOURCE_TYPE_PITCH2D = ccuda.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D{{endif}} + CU_RESOURCE_TYPE_PITCH2D = cydriver.CUresourcetype_enum.CU_RESOURCE_TYPE_PITCH2D{{endif}} {{endif}} {{if 'CUaccessProperty_enum' in found_types}} @@ -2568,15 +2568,15 @@ class CUaccessProperty(IntEnum): {{if 'CU_ACCESS_PROPERTY_NORMAL' in found_values}} #: Normal cache persistence. - CU_ACCESS_PROPERTY_NORMAL = ccuda.CUaccessProperty_enum.CU_ACCESS_PROPERTY_NORMAL{{endif}} + CU_ACCESS_PROPERTY_NORMAL = cydriver.CUaccessProperty_enum.CU_ACCESS_PROPERTY_NORMAL{{endif}} {{if 'CU_ACCESS_PROPERTY_STREAMING' in found_values}} #: Streaming access is less likely to persist in cache. - CU_ACCESS_PROPERTY_STREAMING = ccuda.CUaccessProperty_enum.CU_ACCESS_PROPERTY_STREAMING{{endif}} + CU_ACCESS_PROPERTY_STREAMING = cydriver.CUaccessProperty_enum.CU_ACCESS_PROPERTY_STREAMING{{endif}} {{if 'CU_ACCESS_PROPERTY_PERSISTING' in found_values}} #: Persisting access is more likely to persist in cache. - CU_ACCESS_PROPERTY_PERSISTING = ccuda.CUaccessProperty_enum.CU_ACCESS_PROPERTY_PERSISTING{{endif}} + CU_ACCESS_PROPERTY_PERSISTING = cydriver.CUaccessProperty_enum.CU_ACCESS_PROPERTY_PERSISTING{{endif}} {{endif}} {{if 'CUgraphConditionalNodeType_enum' in found_types}} @@ -2588,12 +2588,12 @@ class CUgraphConditionalNodeType(IntEnum): #: Conditional 'if' Node. Body executed once if condition value is non- #: zero. - CU_GRAPH_COND_TYPE_IF = ccuda.CUgraphConditionalNodeType_enum.CU_GRAPH_COND_TYPE_IF{{endif}} + CU_GRAPH_COND_TYPE_IF = cydriver.CUgraphConditionalNodeType_enum.CU_GRAPH_COND_TYPE_IF{{endif}} {{if 'CU_GRAPH_COND_TYPE_WHILE' in found_values}} #: Conditional 'while' Node. Body executed repeatedly while condition #: value is non-zero.
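For reference, the CUlimit wrappers above are plain IntEnums and can be passed straight to the context-limit bindings. A minimal sketch, assuming the relocated bindings import as cuda.bindings.driver (an assumption about the new layout, not something shown in this hunk):

    # Minimal sketch; module path cuda.bindings.driver is an assumption.
    from cuda.bindings import driver

    def check(err):
        # Every binding returns a CUresult as the first element of its result tuple.
        if err != driver.CUresult.CUDA_SUCCESS:
            raise RuntimeError(f"CUDA driver error: {err}")

    check(driver.cuInit(0)[0])
    err, dev = driver.cuDeviceGet(0)
    check(err)
    err, ctx = driver.cuCtxCreate(0, dev)
    check(err)

    # Query the GPU thread stack size, then resize the printf FIFO
    # (1 MiB is an arbitrary illustrative value).
    err, stack_size = driver.cuCtxGetLimit(driver.CUlimit.CU_LIMIT_STACK_SIZE)
    check(err)
    check(driver.cuCtxSetLimit(driver.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE, 1 << 20)[0])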
- CU_GRAPH_COND_TYPE_WHILE = ccuda.CUgraphConditionalNodeType_enum.CU_GRAPH_COND_TYPE_WHILE{{endif}} + CU_GRAPH_COND_TYPE_WHILE = cydriver.CUgraphConditionalNodeType_enum.CU_GRAPH_COND_TYPE_WHILE{{endif}} {{endif}} {{if 'CUgraphNodeType_enum' in found_types}} @@ -2604,55 +2604,55 @@ class CUgraphNodeType(IntEnum): {{if 'CU_GRAPH_NODE_TYPE_KERNEL' in found_values}} #: GPU kernel node - CU_GRAPH_NODE_TYPE_KERNEL = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_KERNEL{{endif}} + CU_GRAPH_NODE_TYPE_KERNEL = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_KERNEL{{endif}} {{if 'CU_GRAPH_NODE_TYPE_MEMCPY' in found_values}} #: Memcpy node - CU_GRAPH_NODE_TYPE_MEMCPY = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMCPY{{endif}} + CU_GRAPH_NODE_TYPE_MEMCPY = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMCPY{{endif}} {{if 'CU_GRAPH_NODE_TYPE_MEMSET' in found_values}} #: Memset node - CU_GRAPH_NODE_TYPE_MEMSET = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMSET{{endif}} + CU_GRAPH_NODE_TYPE_MEMSET = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEMSET{{endif}} {{if 'CU_GRAPH_NODE_TYPE_HOST' in found_values}} #: Host (executable) node - CU_GRAPH_NODE_TYPE_HOST = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_HOST{{endif}} + CU_GRAPH_NODE_TYPE_HOST = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_HOST{{endif}} {{if 'CU_GRAPH_NODE_TYPE_GRAPH' in found_values}} #: Node which executes an embedded graph - CU_GRAPH_NODE_TYPE_GRAPH = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_GRAPH{{endif}} + CU_GRAPH_NODE_TYPE_GRAPH = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_GRAPH{{endif}} {{if 'CU_GRAPH_NODE_TYPE_EMPTY' in found_values}} #: Empty (no-op) node - CU_GRAPH_NODE_TYPE_EMPTY = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EMPTY{{endif}} + CU_GRAPH_NODE_TYPE_EMPTY = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EMPTY{{endif}} {{if 'CU_GRAPH_NODE_TYPE_WAIT_EVENT' in found_values}} #: External event wait node - CU_GRAPH_NODE_TYPE_WAIT_EVENT = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_WAIT_EVENT{{endif}} + CU_GRAPH_NODE_TYPE_WAIT_EVENT = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_WAIT_EVENT{{endif}} {{if 'CU_GRAPH_NODE_TYPE_EVENT_RECORD' in found_values}} #: External event record node - CU_GRAPH_NODE_TYPE_EVENT_RECORD = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EVENT_RECORD{{endif}} + CU_GRAPH_NODE_TYPE_EVENT_RECORD = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EVENT_RECORD{{endif}} {{if 'CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL' in found_values}} #: External semaphore signal node - CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL{{endif}} + CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL{{endif}} {{if 'CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT' in found_values}} #: External semaphore wait node - CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT{{endif}} + CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT{{endif}} {{if 'CU_GRAPH_NODE_TYPE_MEM_ALLOC' in found_values}} #: Memory Allocation Node - CU_GRAPH_NODE_TYPE_MEM_ALLOC = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_ALLOC{{endif}} + CU_GRAPH_NODE_TYPE_MEM_ALLOC = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_ALLOC{{endif}} {{if 'CU_GRAPH_NODE_TYPE_MEM_FREE' in found_values}} #: Memory Free Node - CU_GRAPH_NODE_TYPE_MEM_FREE = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_FREE{{endif}} + 
CU_GRAPH_NODE_TYPE_MEM_FREE = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_MEM_FREE{{endif}} {{if 'CU_GRAPH_NODE_TYPE_BATCH_MEM_OP' in found_values}} #: Batch MemOp Node - CU_GRAPH_NODE_TYPE_BATCH_MEM_OP = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_BATCH_MEM_OP{{endif}} + CU_GRAPH_NODE_TYPE_BATCH_MEM_OP = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_BATCH_MEM_OP{{endif}} {{if 'CU_GRAPH_NODE_TYPE_CONDITIONAL' in found_values}} #: Conditional Node May be used @@ -2680,7 +2680,7 @@ class CUgraphNodeType(IntEnum): #: supply a default value when creating the handle and/or #: call #: :py:obj:`~.cudaGraphSetConditional` from device code. - CU_GRAPH_NODE_TYPE_CONDITIONAL = ccuda.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_CONDITIONAL{{endif}} + CU_GRAPH_NODE_TYPE_CONDITIONAL = cydriver.CUgraphNodeType_enum.CU_GRAPH_NODE_TYPE_CONDITIONAL{{endif}} {{endif}} {{if 'CUgraphDependencyType_enum' in found_types}} @@ -2692,7 +2692,7 @@ class CUgraphDependencyType(IntEnum): {{if 'CU_GRAPH_DEPENDENCY_TYPE_DEFAULT' in found_values}} #: This is an ordinary dependency. - CU_GRAPH_DEPENDENCY_TYPE_DEFAULT = ccuda.CUgraphDependencyType_enum.CU_GRAPH_DEPENDENCY_TYPE_DEFAULT{{endif}} + CU_GRAPH_DEPENDENCY_TYPE_DEFAULT = cydriver.CUgraphDependencyType_enum.CU_GRAPH_DEPENDENCY_TYPE_DEFAULT{{endif}} {{if 'CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC' in found_values}} #: This dependency type allows the downstream node to use @@ -2700,7 +2700,7 @@ class CUgraphDependencyType(IntEnum): #: kernel nodes, and must be used with either the #: :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC` or #: :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER` outgoing port. - CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC = ccuda.CUgraphDependencyType_enum.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC{{endif}} + CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC = cydriver.CUgraphDependencyType_enum.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC{{endif}} {{endif}} {{if 'CUgraphInstantiateResult_enum' in found_types}} @@ -2711,26 +2711,26 @@ class CUgraphInstantiateResult(IntEnum): {{if 'CUDA_GRAPH_INSTANTIATE_SUCCESS' in found_values}} #: Instantiation succeeded - CUDA_GRAPH_INSTANTIATE_SUCCESS = ccuda.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_SUCCESS{{endif}} + CUDA_GRAPH_INSTANTIATE_SUCCESS = cydriver.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_SUCCESS{{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_ERROR' in found_values}} #: Instantiation failed for an unexpected reason which is described in #: the return value of the function - CUDA_GRAPH_INSTANTIATE_ERROR = ccuda.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_ERROR{{endif}} + CUDA_GRAPH_INSTANTIATE_ERROR = cydriver.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_ERROR{{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE' in found_values}} #: Instantiation failed due to invalid structure, such as cycles - CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE = ccuda.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE{{endif}} + CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE = cydriver.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE{{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED' in found_values}} #: Instantiation for device launch failed because the graph contained #: an unsupported operation - CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED = ccuda.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED{{endif}} + CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED = 
cydriver.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED{{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED' in found_values}} #: Instantiation for device launch failed due to the nodes belonging to #: different contexts - CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED = ccuda.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED{{endif}} + CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED = cydriver.CUgraphInstantiateResult_enum.CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED{{endif}} {{endif}} {{if 'CUsynchronizationPolicy_enum' in found_types}} @@ -2739,13 +2739,13 @@ class CUsynchronizationPolicy(IntEnum): """ {{if 'CU_SYNC_POLICY_AUTO' in found_values}} - CU_SYNC_POLICY_AUTO = ccuda.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_AUTO{{endif}} + CU_SYNC_POLICY_AUTO = cydriver.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_AUTO{{endif}} {{if 'CU_SYNC_POLICY_SPIN' in found_values}} - CU_SYNC_POLICY_SPIN = ccuda.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_SPIN{{endif}} + CU_SYNC_POLICY_SPIN = cydriver.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_SPIN{{endif}} {{if 'CU_SYNC_POLICY_YIELD' in found_values}} - CU_SYNC_POLICY_YIELD = ccuda.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_YIELD{{endif}} + CU_SYNC_POLICY_YIELD = cydriver.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_YIELD{{endif}} {{if 'CU_SYNC_POLICY_BLOCKING_SYNC' in found_values}} - CU_SYNC_POLICY_BLOCKING_SYNC = ccuda.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_BLOCKING_SYNC{{endif}} + CU_SYNC_POLICY_BLOCKING_SYNC = cydriver.CUsynchronizationPolicy_enum.CU_SYNC_POLICY_BLOCKING_SYNC{{endif}} {{endif}} {{if 'CUclusterSchedulingPolicy_enum' in found_types}} @@ -2757,16 +2757,16 @@ class CUclusterSchedulingPolicy(IntEnum): {{if 'CU_CLUSTER_SCHEDULING_POLICY_DEFAULT' in found_values}} #: the default policy - CU_CLUSTER_SCHEDULING_POLICY_DEFAULT = ccuda.CUclusterSchedulingPolicy_enum.CU_CLUSTER_SCHEDULING_POLICY_DEFAULT{{endif}} + CU_CLUSTER_SCHEDULING_POLICY_DEFAULT = cydriver.CUclusterSchedulingPolicy_enum.CU_CLUSTER_SCHEDULING_POLICY_DEFAULT{{endif}} {{if 'CU_CLUSTER_SCHEDULING_POLICY_SPREAD' in found_values}} #: spread the blocks within a cluster to the SMs - CU_CLUSTER_SCHEDULING_POLICY_SPREAD = ccuda.CUclusterSchedulingPolicy_enum.CU_CLUSTER_SCHEDULING_POLICY_SPREAD{{endif}} + CU_CLUSTER_SCHEDULING_POLICY_SPREAD = cydriver.CUclusterSchedulingPolicy_enum.CU_CLUSTER_SCHEDULING_POLICY_SPREAD{{endif}} {{if 'CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING' in found_values}} #: allow the hardware to load-balance the blocks in a cluster to the #: SMs - CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING = ccuda.CUclusterSchedulingPolicy_enum.CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING{{endif}} + CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING = cydriver.CUclusterSchedulingPolicy_enum.CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING{{endif}} {{endif}} {{if 'CUlaunchMemSyncDomain_enum' in found_types}} @@ -2794,11 +2794,11 @@ class CUlaunchMemSyncDomain(IntEnum): {{if 'CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT' in found_values}} #: Launch kernels in the default domain - CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT = ccuda.CUlaunchMemSyncDomain_enum.CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT{{endif}} + CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT = cydriver.CUlaunchMemSyncDomain_enum.CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT{{endif}} {{if 'CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE' in found_values}} #: Launch kernels in the remote domain - CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE = 
ccuda.CUlaunchMemSyncDomain_enum.CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE{{endif}} + CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE = cydriver.CUlaunchMemSyncDomain_enum.CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE{{endif}} {{endif}} {{if 'CUlaunchAttributeID_enum' in found_types}} @@ -2810,32 +2810,32 @@ class CUlaunchAttributeID(IntEnum): {{if 'CU_LAUNCH_ATTRIBUTE_IGNORE' in found_values}} #: Ignored entry, for convenient composition - CU_LAUNCH_ATTRIBUTE_IGNORE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_IGNORE{{endif}} + CU_LAUNCH_ATTRIBUTE_IGNORE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_IGNORE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.accessPolicyWindow`. - CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW{{endif}} + CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_COOPERATIVE' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.cooperative`. - CU_LAUNCH_ATTRIBUTE_COOPERATIVE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_COOPERATIVE{{endif}} + CU_LAUNCH_ATTRIBUTE_COOPERATIVE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_COOPERATIVE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY' in found_values}} #: Valid for streams. See #: :py:obj:`~.CUlaunchAttributeValue.syncPolicy`. - CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY{{endif}} + CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.clusterDim`. - CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION{{endif}} + CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.clusterSchedulingPolicyPreference`. - CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} + CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION' in found_values}} #: Valid for launches. Setting @@ -2847,7 +2847,7 @@ class CUlaunchAttributeID(IntEnum): #: The dependent launches can choose to wait on the dependency using #: the programmatic sync (cudaGridDependencySynchronize() or equivalent #: PTX instructions). - CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION{{endif}} + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT' in found_values}} #: Valid for launches. 
Set @@ -2871,22 +2871,22 @@ class CUlaunchAttributeID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT{{endif}} + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PRIORITY' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.priority`. - CU_LAUNCH_ATTRIBUTE_PRIORITY = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PRIORITY{{endif}} + CU_LAUNCH_ATTRIBUTE_PRIORITY = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PRIORITY{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.memSyncDomainMap`. - CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP{{endif}} + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.memSyncDomain`. - CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN{{endif}} + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT' in found_values}} #: Valid for launches. Set @@ -2907,7 +2907,7 @@ class CUlaunchAttributeID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT{{endif}} + CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE' in found_values}} #: Valid for graph nodes, launches. This attribute is graphs-only, and @@ -2941,7 +2941,7 @@ class CUlaunchAttributeID(IntEnum): #: graph, if host-side executable graph updates are made to the device- #: updatable nodes, the graph must be uploaded before it is launched #: again. - CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE{{endif}} + CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT' in found_values}} #: Valid for launches. On devices where the L1 cache and shared memory @@ -2953,7 +2953,7 @@ class CUlaunchAttributeID(IntEnum): #: :py:obj:`~.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT`. This #: is only a hint, and the CUDA driver can choose a different #: configuration if required for the launch. 
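These launch attributes are consumed through a CUlaunchConfig when launching with cuLaunchKernelEx. A hedged sketch of attaching a priority attribute, assuming the same cuda.bindings.driver import path and that a stream, kernel, and its arguments exist elsewhere:

    # Illustrative sketch; stream/kernel/args are assumed to exist elsewhere.
    from cuda.bindings import driver

    # CUlaunchAttributeValue is a union, so only the member matching the
    # chosen id (here, priority) is meaningful.
    attr = driver.CUlaunchAttribute()
    attr.id = driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY
    attr.value.priority = 1

    config = driver.CUlaunchConfig()
    config.gridDimX = config.gridDimY = config.gridDimZ = 1
    config.blockDimX = 128
    config.blockDimY = config.blockDimZ = 1
    config.hStream = stream        # assumed: an existing CUstream
    config.attrs = [attr]
    config.numAttrs = 1
    # err, = driver.cuLaunchKernelEx(config, kernel, args, 0)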
- CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} + CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} {{endif}} {{if 'CUstreamCaptureStatus_enum' in found_types}} @@ -2965,16 +2965,16 @@ class CUstreamCaptureStatus(IntEnum): {{if 'CU_STREAM_CAPTURE_STATUS_NONE' in found_values}} #: Stream is not capturing - CU_STREAM_CAPTURE_STATUS_NONE = ccuda.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_NONE{{endif}} + CU_STREAM_CAPTURE_STATUS_NONE = cydriver.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_NONE{{endif}} {{if 'CU_STREAM_CAPTURE_STATUS_ACTIVE' in found_values}} #: Stream is actively capturing - CU_STREAM_CAPTURE_STATUS_ACTIVE = ccuda.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_ACTIVE{{endif}} + CU_STREAM_CAPTURE_STATUS_ACTIVE = cydriver.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_ACTIVE{{endif}} {{if 'CU_STREAM_CAPTURE_STATUS_INVALIDATED' in found_values}} #: Stream is part of a capture sequence that has been invalidated, but #: not terminated - CU_STREAM_CAPTURE_STATUS_INVALIDATED = ccuda.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_INVALIDATED{{endif}} + CU_STREAM_CAPTURE_STATUS_INVALIDATED = cydriver.CUstreamCaptureStatus_enum.CU_STREAM_CAPTURE_STATUS_INVALIDATED{{endif}} {{endif}} {{if 'CUstreamCaptureMode_enum' in found_types}} @@ -2985,11 +2985,11 @@ class CUstreamCaptureMode(IntEnum): :py:obj:`~.cuThreadExchangeStreamCaptureMode` """ {{if 'CU_STREAM_CAPTURE_MODE_GLOBAL' in found_values}} - CU_STREAM_CAPTURE_MODE_GLOBAL = ccuda.CUstreamCaptureMode_enum.CU_STREAM_CAPTURE_MODE_GLOBAL{{endif}} + CU_STREAM_CAPTURE_MODE_GLOBAL = cydriver.CUstreamCaptureMode_enum.CU_STREAM_CAPTURE_MODE_GLOBAL{{endif}} {{if 'CU_STREAM_CAPTURE_MODE_THREAD_LOCAL' in found_values}} - CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = ccuda.CUstreamCaptureMode_enum.CU_STREAM_CAPTURE_MODE_THREAD_LOCAL{{endif}} + CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = cydriver.CUstreamCaptureMode_enum.CU_STREAM_CAPTURE_MODE_THREAD_LOCAL{{endif}} {{if 'CU_STREAM_CAPTURE_MODE_RELAXED' in found_values}} - CU_STREAM_CAPTURE_MODE_RELAXED = ccuda.CUstreamCaptureMode_enum.CU_STREAM_CAPTURE_MODE_RELAXED{{endif}} + CU_STREAM_CAPTURE_MODE_RELAXED = cydriver.CUstreamCaptureMode_enum.CU_STREAM_CAPTURE_MODE_RELAXED{{endif}} {{endif}} {{if 'CUdriverProcAddress_flags_enum' in found_types}} @@ -3001,15 +3001,15 @@ class CUdriverProcAddress_flags(IntEnum): {{if 'CU_GET_PROC_ADDRESS_DEFAULT' in found_values}} #: Default search mode for driver symbols. - CU_GET_PROC_ADDRESS_DEFAULT = ccuda.CUdriverProcAddress_flags_enum.CU_GET_PROC_ADDRESS_DEFAULT{{endif}} + CU_GET_PROC_ADDRESS_DEFAULT = cydriver.CUdriverProcAddress_flags_enum.CU_GET_PROC_ADDRESS_DEFAULT{{endif}} {{if 'CU_GET_PROC_ADDRESS_LEGACY_STREAM' in found_values}} #: Search for legacy versions of driver symbols. - CU_GET_PROC_ADDRESS_LEGACY_STREAM = ccuda.CUdriverProcAddress_flags_enum.CU_GET_PROC_ADDRESS_LEGACY_STREAM{{endif}} + CU_GET_PROC_ADDRESS_LEGACY_STREAM = cydriver.CUdriverProcAddress_flags_enum.CU_GET_PROC_ADDRESS_LEGACY_STREAM{{endif}} {{if 'CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM' in found_values}} #: Search for per-thread versions of driver symbols. 
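The two capture enums above (CUstreamCaptureStatus, CUstreamCaptureMode) pair up in the usual begin/query/end sequence. A short sketch under the same cuda.bindings.driver import assumption, with error checking elided for brevity:

    # Sketch only; assumes an initialized context and cuda.bindings.driver.
    from cuda.bindings import driver

    err, stream = driver.cuStreamCreate(0)
    err, = driver.cuStreamBeginCapture(
        stream, driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL)

    # ... enqueue async work on `stream` here; it is recorded, not executed ...

    err, status = driver.cuStreamIsCapturing(stream)
    assert status == driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE

    err, graph = driver.cuStreamEndCapture(stream)  # capture yields a CUgraph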
- CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM = ccuda.CUdriverProcAddress_flags_enum.CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM{{endif}} + CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM = cydriver.CUdriverProcAddress_flags_enum.CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM{{endif}} {{endif}} {{if 'CUdriverProcAddressQueryResult_enum' in found_types}} @@ -3021,15 +3021,15 @@ class CUdriverProcAddressQueryResult(IntEnum): {{if 'CU_GET_PROC_ADDRESS_SUCCESS' in found_values}} #: Symbol was successfully found - CU_GET_PROC_ADDRESS_SUCCESS = ccuda.CUdriverProcAddressQueryResult_enum.CU_GET_PROC_ADDRESS_SUCCESS{{endif}} + CU_GET_PROC_ADDRESS_SUCCESS = cydriver.CUdriverProcAddressQueryResult_enum.CU_GET_PROC_ADDRESS_SUCCESS{{endif}} {{if 'CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND' in found_values}} #: Symbol was not found in search - CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND = ccuda.CUdriverProcAddressQueryResult_enum.CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND{{endif}} + CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND = cydriver.CUdriverProcAddressQueryResult_enum.CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND{{endif}} {{if 'CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT' in found_values}} #: Symbol was found but version supplied was not sufficient - CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT = ccuda.CUdriverProcAddressQueryResult_enum.CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT{{endif}} + CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT = cydriver.CUdriverProcAddressQueryResult_enum.CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT{{endif}} {{endif}} {{if 'CUexecAffinityType_enum' in found_types}} @@ -3040,9 +3040,9 @@ class CUexecAffinityType(IntEnum): {{if 'CU_EXEC_AFFINITY_TYPE_SM_COUNT' in found_values}} #: Create a context with limited SMs. - CU_EXEC_AFFINITY_TYPE_SM_COUNT = ccuda.CUexecAffinityType_enum.CU_EXEC_AFFINITY_TYPE_SM_COUNT{{endif}} + CU_EXEC_AFFINITY_TYPE_SM_COUNT = cydriver.CUexecAffinityType_enum.CU_EXEC_AFFINITY_TYPE_SM_COUNT{{endif}} {{if 'CU_EXEC_AFFINITY_TYPE_MAX' in found_values}} - CU_EXEC_AFFINITY_TYPE_MAX = ccuda.CUexecAffinityType_enum.CU_EXEC_AFFINITY_TYPE_MAX{{endif}} + CU_EXEC_AFFINITY_TYPE_MAX = cydriver.CUexecAffinityType_enum.CU_EXEC_AFFINITY_TYPE_MAX{{endif}} {{endif}} {{if 'CUcigDataType_enum' in found_types}} @@ -3051,7 +3051,7 @@ class CUcigDataType(IntEnum): """ {{if 'CIG_DATA_TYPE_D3D12_COMMAND_QUEUE' in found_values}} - CIG_DATA_TYPE_D3D12_COMMAND_QUEUE = ccuda.CUcigDataType_enum.CIG_DATA_TYPE_D3D12_COMMAND_QUEUE{{endif}} + CIG_DATA_TYPE_D3D12_COMMAND_QUEUE = cydriver.CUcigDataType_enum.CIG_DATA_TYPE_D3D12_COMMAND_QUEUE{{endif}} {{endif}} {{if 'CUlibraryOption_enum' in found_types}} @@ -3062,7 +3062,7 @@ class CUlibraryOption(IntEnum): :py:obj:`~.cuLibraryLoadFromFile()` """ {{if 'CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE' in found_values}} - CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE = ccuda.CUlibraryOption_enum.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE{{endif}} + CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE = cydriver.CUlibraryOption_enum.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE{{endif}} {{if 'CU_LIBRARY_BINARY_IS_PRESERVED' in found_values}} #: Specifies that the argument `code` passed to @@ -3074,9 +3074,9 @@ class CUlibraryOption(IntEnum): #: can choose to ignore it if required. Specifying this option with #: :py:obj:`~.cuLibraryLoadFromFile()` is invalid and will return #: :py:obj:`~.CUDA_ERROR_INVALID_VALUE`.
- CU_LIBRARY_BINARY_IS_PRESERVED = ccuda.CUlibraryOption_enum.CU_LIBRARY_BINARY_IS_PRESERVED{{endif}} + CU_LIBRARY_BINARY_IS_PRESERVED = cydriver.CUlibraryOption_enum.CU_LIBRARY_BINARY_IS_PRESERVED{{endif}} {{if 'CU_LIBRARY_NUM_OPTIONS' in found_values}} - CU_LIBRARY_NUM_OPTIONS = ccuda.CUlibraryOption_enum.CU_LIBRARY_NUM_OPTIONS{{endif}} + CU_LIBRARY_NUM_OPTIONS = cydriver.CUlibraryOption_enum.CU_LIBRARY_NUM_OPTIONS{{endif}} {{endif}} {{if 'cudaError_enum' in found_types}} @@ -3089,78 +3089,78 @@ class CUresult(IntEnum): #: The API call returned with no errors. In the case of query calls, #: this also means that the operation being queried is complete (see #: :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`). - CUDA_SUCCESS = ccuda.cudaError_enum.CUDA_SUCCESS{{endif}} + CUDA_SUCCESS = cydriver.cudaError_enum.CUDA_SUCCESS{{endif}} {{if 'CUDA_ERROR_INVALID_VALUE' in found_values}} #: This indicates that one or more of the parameters passed to the API #: call is not within an acceptable range of values. - CUDA_ERROR_INVALID_VALUE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_VALUE{{endif}} + CUDA_ERROR_INVALID_VALUE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_VALUE{{endif}} {{if 'CUDA_ERROR_OUT_OF_MEMORY' in found_values}} #: The API call failed because it was unable to allocate enough memory #: or other resources to perform the requested operation. - CUDA_ERROR_OUT_OF_MEMORY = ccuda.cudaError_enum.CUDA_ERROR_OUT_OF_MEMORY{{endif}} + CUDA_ERROR_OUT_OF_MEMORY = cydriver.cudaError_enum.CUDA_ERROR_OUT_OF_MEMORY{{endif}} {{if 'CUDA_ERROR_NOT_INITIALIZED' in found_values}} #: This indicates that the CUDA driver has not been initialized with #: :py:obj:`~.cuInit()` or that initialization has failed. - CUDA_ERROR_NOT_INITIALIZED = ccuda.cudaError_enum.CUDA_ERROR_NOT_INITIALIZED{{endif}} + CUDA_ERROR_NOT_INITIALIZED = cydriver.cudaError_enum.CUDA_ERROR_NOT_INITIALIZED{{endif}} {{if 'CUDA_ERROR_DEINITIALIZED' in found_values}} #: This indicates that the CUDA driver is in the process of shutting #: down. - CUDA_ERROR_DEINITIALIZED = ccuda.cudaError_enum.CUDA_ERROR_DEINITIALIZED{{endif}} + CUDA_ERROR_DEINITIALIZED = cydriver.cudaError_enum.CUDA_ERROR_DEINITIALIZED{{endif}} {{if 'CUDA_ERROR_PROFILER_DISABLED' in found_values}} #: This indicates profiler is not initialized for this run. This can #: happen when the application is running with external profiling tools #: like visual profiler. 
- CUDA_ERROR_PROFILER_DISABLED = ccuda.cudaError_enum.CUDA_ERROR_PROFILER_DISABLED{{endif}} + CUDA_ERROR_PROFILER_DISABLED = cydriver.cudaError_enum.CUDA_ERROR_PROFILER_DISABLED{{endif}} {{if 'CUDA_ERROR_PROFILER_NOT_INITIALIZED' in found_values}} #: [Deprecated] - CUDA_ERROR_PROFILER_NOT_INITIALIZED = ccuda.cudaError_enum.CUDA_ERROR_PROFILER_NOT_INITIALIZED{{endif}} + CUDA_ERROR_PROFILER_NOT_INITIALIZED = cydriver.cudaError_enum.CUDA_ERROR_PROFILER_NOT_INITIALIZED{{endif}} {{if 'CUDA_ERROR_PROFILER_ALREADY_STARTED' in found_values}} #: [Deprecated] - CUDA_ERROR_PROFILER_ALREADY_STARTED = ccuda.cudaError_enum.CUDA_ERROR_PROFILER_ALREADY_STARTED{{endif}} + CUDA_ERROR_PROFILER_ALREADY_STARTED = cydriver.cudaError_enum.CUDA_ERROR_PROFILER_ALREADY_STARTED{{endif}} {{if 'CUDA_ERROR_PROFILER_ALREADY_STOPPED' in found_values}} #: [Deprecated] - CUDA_ERROR_PROFILER_ALREADY_STOPPED = ccuda.cudaError_enum.CUDA_ERROR_PROFILER_ALREADY_STOPPED{{endif}} + CUDA_ERROR_PROFILER_ALREADY_STOPPED = cydriver.cudaError_enum.CUDA_ERROR_PROFILER_ALREADY_STOPPED{{endif}} {{if 'CUDA_ERROR_STUB_LIBRARY' in found_values}} #: This indicates that the CUDA driver that the application has loaded #: is a stub library. Applications that run with the stub rather than a #: real driver loaded will result in CUDA API returning this error. - CUDA_ERROR_STUB_LIBRARY = ccuda.cudaError_enum.CUDA_ERROR_STUB_LIBRARY{{endif}} + CUDA_ERROR_STUB_LIBRARY = cydriver.cudaError_enum.CUDA_ERROR_STUB_LIBRARY{{endif}} {{if 'CUDA_ERROR_DEVICE_UNAVAILABLE' in found_values}} #: This indicates that requested CUDA device is unavailable at the #: current time. Devices are often unavailable due to use of #: :py:obj:`~.CU_COMPUTEMODE_EXCLUSIVE_PROCESS` or #: :py:obj:`~.CU_COMPUTEMODE_PROHIBITED`. - CUDA_ERROR_DEVICE_UNAVAILABLE = ccuda.cudaError_enum.CUDA_ERROR_DEVICE_UNAVAILABLE{{endif}} + CUDA_ERROR_DEVICE_UNAVAILABLE = cydriver.cudaError_enum.CUDA_ERROR_DEVICE_UNAVAILABLE{{endif}} {{if 'CUDA_ERROR_NO_DEVICE' in found_values}} #: This indicates that no CUDA-capable devices were detected by the #: installed CUDA driver. - CUDA_ERROR_NO_DEVICE = ccuda.cudaError_enum.CUDA_ERROR_NO_DEVICE{{endif}} + CUDA_ERROR_NO_DEVICE = cydriver.cudaError_enum.CUDA_ERROR_NO_DEVICE{{endif}} {{if 'CUDA_ERROR_INVALID_DEVICE' in found_values}} #: This indicates that the device ordinal supplied by the user does not #: correspond to a valid CUDA device or that the action requested is #: invalid for the specified device. - CUDA_ERROR_INVALID_DEVICE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_DEVICE{{endif}} + CUDA_ERROR_INVALID_DEVICE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_DEVICE{{endif}} {{if 'CUDA_ERROR_DEVICE_NOT_LICENSED' in found_values}} #: This error indicates that the Grid license is not applied. - CUDA_ERROR_DEVICE_NOT_LICENSED = ccuda.cudaError_enum.CUDA_ERROR_DEVICE_NOT_LICENSED{{endif}} + CUDA_ERROR_DEVICE_NOT_LICENSED = cydriver.cudaError_enum.CUDA_ERROR_DEVICE_NOT_LICENSED{{endif}} {{if 'CUDA_ERROR_INVALID_IMAGE' in found_values}} #: This indicates that the device kernel image is invalid. This can #: also indicate an invalid CUDA module. 
- CUDA_ERROR_INVALID_IMAGE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_IMAGE{{endif}} + CUDA_ERROR_INVALID_IMAGE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_IMAGE{{endif}} {{if 'CUDA_ERROR_INVALID_CONTEXT' in found_values}} #: This most frequently indicates that there is no context bound to the @@ -3172,144 +3172,144 @@ class CUresult(IntEnum): #: This can also be returned if the green context passed to an API call #: was not converted to a :py:obj:`~.CUcontext` using #: :py:obj:`~.cuCtxFromGreenCtx` API. - CUDA_ERROR_INVALID_CONTEXT = ccuda.cudaError_enum.CUDA_ERROR_INVALID_CONTEXT{{endif}} + CUDA_ERROR_INVALID_CONTEXT = cydriver.cudaError_enum.CUDA_ERROR_INVALID_CONTEXT{{endif}} {{if 'CUDA_ERROR_CONTEXT_ALREADY_CURRENT' in found_values}} #: This indicated that the context being supplied as a parameter to the #: API call was already the active context. [Deprecated] - CUDA_ERROR_CONTEXT_ALREADY_CURRENT = ccuda.cudaError_enum.CUDA_ERROR_CONTEXT_ALREADY_CURRENT{{endif}} + CUDA_ERROR_CONTEXT_ALREADY_CURRENT = cydriver.cudaError_enum.CUDA_ERROR_CONTEXT_ALREADY_CURRENT{{endif}} {{if 'CUDA_ERROR_MAP_FAILED' in found_values}} #: This indicates that a map or register operation has failed. - CUDA_ERROR_MAP_FAILED = ccuda.cudaError_enum.CUDA_ERROR_MAP_FAILED{{endif}} + CUDA_ERROR_MAP_FAILED = cydriver.cudaError_enum.CUDA_ERROR_MAP_FAILED{{endif}} {{if 'CUDA_ERROR_UNMAP_FAILED' in found_values}} #: This indicates that an unmap or unregister operation has failed. - CUDA_ERROR_UNMAP_FAILED = ccuda.cudaError_enum.CUDA_ERROR_UNMAP_FAILED{{endif}} + CUDA_ERROR_UNMAP_FAILED = cydriver.cudaError_enum.CUDA_ERROR_UNMAP_FAILED{{endif}} {{if 'CUDA_ERROR_ARRAY_IS_MAPPED' in found_values}} #: This indicates that the specified array is currently mapped and thus #: cannot be destroyed. - CUDA_ERROR_ARRAY_IS_MAPPED = ccuda.cudaError_enum.CUDA_ERROR_ARRAY_IS_MAPPED{{endif}} + CUDA_ERROR_ARRAY_IS_MAPPED = cydriver.cudaError_enum.CUDA_ERROR_ARRAY_IS_MAPPED{{endif}} {{if 'CUDA_ERROR_ALREADY_MAPPED' in found_values}} #: This indicates that the resource is already mapped. - CUDA_ERROR_ALREADY_MAPPED = ccuda.cudaError_enum.CUDA_ERROR_ALREADY_MAPPED{{endif}} + CUDA_ERROR_ALREADY_MAPPED = cydriver.cudaError_enum.CUDA_ERROR_ALREADY_MAPPED{{endif}} {{if 'CUDA_ERROR_NO_BINARY_FOR_GPU' in found_values}} #: This indicates that there is no kernel image available that is #: suitable for the device. This can occur when a user specifies code #: generation options for a particular CUDA source file that do not #: include the corresponding device configuration. - CUDA_ERROR_NO_BINARY_FOR_GPU = ccuda.cudaError_enum.CUDA_ERROR_NO_BINARY_FOR_GPU{{endif}} + CUDA_ERROR_NO_BINARY_FOR_GPU = cydriver.cudaError_enum.CUDA_ERROR_NO_BINARY_FOR_GPU{{endif}} {{if 'CUDA_ERROR_ALREADY_ACQUIRED' in found_values}} #: This indicates that a resource has already been acquired. - CUDA_ERROR_ALREADY_ACQUIRED = ccuda.cudaError_enum.CUDA_ERROR_ALREADY_ACQUIRED{{endif}} + CUDA_ERROR_ALREADY_ACQUIRED = cydriver.cudaError_enum.CUDA_ERROR_ALREADY_ACQUIRED{{endif}} {{if 'CUDA_ERROR_NOT_MAPPED' in found_values}} #: This indicates that a resource is not mapped. - CUDA_ERROR_NOT_MAPPED = ccuda.cudaError_enum.CUDA_ERROR_NOT_MAPPED{{endif}} + CUDA_ERROR_NOT_MAPPED = cydriver.cudaError_enum.CUDA_ERROR_NOT_MAPPED{{endif}} {{if 'CUDA_ERROR_NOT_MAPPED_AS_ARRAY' in found_values}} #: This indicates that a mapped resource is not available for access as #: an array. 
- CUDA_ERROR_NOT_MAPPED_AS_ARRAY = ccuda.cudaError_enum.CUDA_ERROR_NOT_MAPPED_AS_ARRAY{{endif}} + CUDA_ERROR_NOT_MAPPED_AS_ARRAY = cydriver.cudaError_enum.CUDA_ERROR_NOT_MAPPED_AS_ARRAY{{endif}} {{if 'CUDA_ERROR_NOT_MAPPED_AS_POINTER' in found_values}} #: This indicates that a mapped resource is not available for access as #: a pointer. - CUDA_ERROR_NOT_MAPPED_AS_POINTER = ccuda.cudaError_enum.CUDA_ERROR_NOT_MAPPED_AS_POINTER{{endif}} + CUDA_ERROR_NOT_MAPPED_AS_POINTER = cydriver.cudaError_enum.CUDA_ERROR_NOT_MAPPED_AS_POINTER{{endif}} {{if 'CUDA_ERROR_ECC_UNCORRECTABLE' in found_values}} #: This indicates that an uncorrectable ECC error was detected during #: execution. - CUDA_ERROR_ECC_UNCORRECTABLE = ccuda.cudaError_enum.CUDA_ERROR_ECC_UNCORRECTABLE{{endif}} + CUDA_ERROR_ECC_UNCORRECTABLE = cydriver.cudaError_enum.CUDA_ERROR_ECC_UNCORRECTABLE{{endif}} {{if 'CUDA_ERROR_UNSUPPORTED_LIMIT' in found_values}} #: This indicates that the :py:obj:`~.CUlimit` passed to the API call #: is not supported by the active device. - CUDA_ERROR_UNSUPPORTED_LIMIT = ccuda.cudaError_enum.CUDA_ERROR_UNSUPPORTED_LIMIT{{endif}} + CUDA_ERROR_UNSUPPORTED_LIMIT = cydriver.cudaError_enum.CUDA_ERROR_UNSUPPORTED_LIMIT{{endif}} {{if 'CUDA_ERROR_CONTEXT_ALREADY_IN_USE' in found_values}} #: This indicates that the :py:obj:`~.CUcontext` passed to the API call #: can only be bound to a single CPU thread at a time but is already #: bound to a CPU thread. - CUDA_ERROR_CONTEXT_ALREADY_IN_USE = ccuda.cudaError_enum.CUDA_ERROR_CONTEXT_ALREADY_IN_USE{{endif}} + CUDA_ERROR_CONTEXT_ALREADY_IN_USE = cydriver.cudaError_enum.CUDA_ERROR_CONTEXT_ALREADY_IN_USE{{endif}} {{if 'CUDA_ERROR_PEER_ACCESS_UNSUPPORTED' in found_values}} #: This indicates that peer access is not supported across the given #: devices. - CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = ccuda.cudaError_enum.CUDA_ERROR_PEER_ACCESS_UNSUPPORTED{{endif}} + CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = cydriver.cudaError_enum.CUDA_ERROR_PEER_ACCESS_UNSUPPORTED{{endif}} {{if 'CUDA_ERROR_INVALID_PTX' in found_values}} #: This indicates that a PTX JIT compilation failed. - CUDA_ERROR_INVALID_PTX = ccuda.cudaError_enum.CUDA_ERROR_INVALID_PTX{{endif}} + CUDA_ERROR_INVALID_PTX = cydriver.cudaError_enum.CUDA_ERROR_INVALID_PTX{{endif}} {{if 'CUDA_ERROR_INVALID_GRAPHICS_CONTEXT' in found_values}} #: This indicates an error with OpenGL or DirectX context. - CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = ccuda.cudaError_enum.CUDA_ERROR_INVALID_GRAPHICS_CONTEXT{{endif}} + CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = cydriver.cudaError_enum.CUDA_ERROR_INVALID_GRAPHICS_CONTEXT{{endif}} {{if 'CUDA_ERROR_NVLINK_UNCORRECTABLE' in found_values}} #: This indicates that an uncorrectable NVLink error was detected #: during the execution. - CUDA_ERROR_NVLINK_UNCORRECTABLE = ccuda.cudaError_enum.CUDA_ERROR_NVLINK_UNCORRECTABLE{{endif}} + CUDA_ERROR_NVLINK_UNCORRECTABLE = cydriver.cudaError_enum.CUDA_ERROR_NVLINK_UNCORRECTABLE{{endif}} {{if 'CUDA_ERROR_JIT_COMPILER_NOT_FOUND' in found_values}} #: This indicates that the PTX JIT compiler library was not found. - CUDA_ERROR_JIT_COMPILER_NOT_FOUND = ccuda.cudaError_enum.CUDA_ERROR_JIT_COMPILER_NOT_FOUND{{endif}} + CUDA_ERROR_JIT_COMPILER_NOT_FOUND = cydriver.cudaError_enum.CUDA_ERROR_JIT_COMPILER_NOT_FOUND{{endif}} {{if 'CUDA_ERROR_UNSUPPORTED_PTX_VERSION' in found_values}} #: This indicates that the provided PTX was compiled with an #: unsupported toolchain. 
- CUDA_ERROR_UNSUPPORTED_PTX_VERSION = ccuda.cudaError_enum.CUDA_ERROR_UNSUPPORTED_PTX_VERSION{{endif}} + CUDA_ERROR_UNSUPPORTED_PTX_VERSION = cydriver.cudaError_enum.CUDA_ERROR_UNSUPPORTED_PTX_VERSION{{endif}} {{if 'CUDA_ERROR_JIT_COMPILATION_DISABLED' in found_values}} #: This indicates that the PTX JIT compilation was disabled. - CUDA_ERROR_JIT_COMPILATION_DISABLED = ccuda.cudaError_enum.CUDA_ERROR_JIT_COMPILATION_DISABLED{{endif}} + CUDA_ERROR_JIT_COMPILATION_DISABLED = cydriver.cudaError_enum.CUDA_ERROR_JIT_COMPILATION_DISABLED{{endif}} {{if 'CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY' in found_values}} #: This indicates that the :py:obj:`~.CUexecAffinityType` passed to the #: API call is not supported by the active device. - CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = ccuda.cudaError_enum.CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY{{endif}} + CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = cydriver.cudaError_enum.CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY{{endif}} {{if 'CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC' in found_values}} #: This indicates that the code to be compiled by the PTX JIT contains #: unsupported call to cudaDeviceSynchronize. - CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC = ccuda.cudaError_enum.CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC{{endif}} + CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC = cydriver.cudaError_enum.CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC{{endif}} {{if 'CUDA_ERROR_INVALID_SOURCE' in found_values}} #: This indicates that the device kernel source is invalid. This #: includes compilation/linker errors encountered in device code or #: user error. - CUDA_ERROR_INVALID_SOURCE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_SOURCE{{endif}} + CUDA_ERROR_INVALID_SOURCE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_SOURCE{{endif}} {{if 'CUDA_ERROR_FILE_NOT_FOUND' in found_values}} #: This indicates that the file specified was not found. - CUDA_ERROR_FILE_NOT_FOUND = ccuda.cudaError_enum.CUDA_ERROR_FILE_NOT_FOUND{{endif}} + CUDA_ERROR_FILE_NOT_FOUND = cydriver.cudaError_enum.CUDA_ERROR_FILE_NOT_FOUND{{endif}} {{if 'CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND' in found_values}} #: This indicates that a link to a shared object failed to resolve. - CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = ccuda.cudaError_enum.CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND{{endif}} + CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = cydriver.cudaError_enum.CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND{{endif}} {{if 'CUDA_ERROR_SHARED_OBJECT_INIT_FAILED' in found_values}} #: This indicates that initialization of a shared object failed. - CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = ccuda.cudaError_enum.CUDA_ERROR_SHARED_OBJECT_INIT_FAILED{{endif}} + CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = cydriver.cudaError_enum.CUDA_ERROR_SHARED_OBJECT_INIT_FAILED{{endif}} {{if 'CUDA_ERROR_OPERATING_SYSTEM' in found_values}} #: This indicates that an OS call failed. - CUDA_ERROR_OPERATING_SYSTEM = ccuda.cudaError_enum.CUDA_ERROR_OPERATING_SYSTEM{{endif}} + CUDA_ERROR_OPERATING_SYSTEM = cydriver.cudaError_enum.CUDA_ERROR_OPERATING_SYSTEM{{endif}} {{if 'CUDA_ERROR_INVALID_HANDLE' in found_values}} #: This indicates that a resource handle passed to the API call was not #: valid. Resource handles are opaque types like :py:obj:`~.CUstream` #: and :py:obj:`~.CUevent`. 
- CUDA_ERROR_INVALID_HANDLE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_HANDLE{{endif}} + CUDA_ERROR_INVALID_HANDLE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_HANDLE{{endif}} {{if 'CUDA_ERROR_ILLEGAL_STATE' in found_values}} #: This indicates that a resource required by the API call is not in a #: valid state to perform the requested operation. - CUDA_ERROR_ILLEGAL_STATE = ccuda.cudaError_enum.CUDA_ERROR_ILLEGAL_STATE{{endif}} + CUDA_ERROR_ILLEGAL_STATE = cydriver.cudaError_enum.CUDA_ERROR_ILLEGAL_STATE{{endif}} {{if 'CUDA_ERROR_LOSSY_QUERY' in found_values}} #: This indicates an attempt was made to introspect an object in a way @@ -3317,13 +3317,13 @@ class CUresult(IntEnum): #: either due to the object using functionality newer than the API #: version used to introspect it or omission of optional return #: arguments. - CUDA_ERROR_LOSSY_QUERY = ccuda.cudaError_enum.CUDA_ERROR_LOSSY_QUERY{{endif}} + CUDA_ERROR_LOSSY_QUERY = cydriver.cudaError_enum.CUDA_ERROR_LOSSY_QUERY{{endif}} {{if 'CUDA_ERROR_NOT_FOUND' in found_values}} #: This indicates that a named symbol was not found. Examples of #: symbols are global/constant variable names, driver function names, #: texture names, and surface names. - CUDA_ERROR_NOT_FOUND = ccuda.cudaError_enum.CUDA_ERROR_NOT_FOUND{{endif}} + CUDA_ERROR_NOT_FOUND = cydriver.cudaError_enum.CUDA_ERROR_NOT_FOUND{{endif}} {{if 'CUDA_ERROR_NOT_READY' in found_values}} #: This indicates that asynchronous operations issued previously have @@ -3331,7 +3331,7 @@ class CUresult(IntEnum): #: indicated differently than :py:obj:`~.CUDA_SUCCESS` (which indicates #: completion). Calls that may return this value include #: :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`. - CUDA_ERROR_NOT_READY = ccuda.cudaError_enum.CUDA_ERROR_NOT_READY{{endif}} + CUDA_ERROR_NOT_READY = cydriver.cudaError_enum.CUDA_ERROR_NOT_READY{{endif}} {{if 'CUDA_ERROR_ILLEGAL_ADDRESS' in found_values}} #: While executing a kernel, the device encountered a load or store @@ -3339,7 +3339,7 @@ class CUresult(IntEnum): #: an inconsistent state and any further CUDA work will return the same #: error. To continue using CUDA, the process must be terminated and #: relaunched. - CUDA_ERROR_ILLEGAL_ADDRESS = ccuda.cudaError_enum.CUDA_ERROR_ILLEGAL_ADDRESS{{endif}} + CUDA_ERROR_ILLEGAL_ADDRESS = cydriver.cudaError_enum.CUDA_ERROR_ILLEGAL_ADDRESS{{endif}} {{if 'CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES' in found_values}} #: This indicates that a launch did not occur because it did not have @@ -3349,7 +3349,7 @@ class CUresult(IntEnum): #: register count. Passing arguments of the wrong size (i.e. a 64-bit #: pointer when a 32-bit int is expected) is equivalent to passing too #: many arguments and can also result in this error. - CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = ccuda.cudaError_enum.CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES{{endif}} + CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = cydriver.cudaError_enum.CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES{{endif}} {{if 'CUDA_ERROR_LAUNCH_TIMEOUT' in found_values}} #: This indicates that the device kernel took too long to execute. This @@ -3358,59 +3358,59 @@ class CUresult(IntEnum): #: information. This leaves the process in an inconsistent state and #: any further CUDA work will return the same error. To continue using #: CUDA, the process must be terminated and relaunched.
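As the CUDA_ERROR_NOT_READY entry above notes, cuStreamQuery/cuEventQuery report in-flight work through the status code rather than through a failure path. A minimal polling sketch (stream assumed to exist; module path as assumed earlier):

    # Sketch only; `stream` is an existing CUstream.
    from cuda.bindings import driver

    err = driver.cuStreamQuery(stream)[0]
    if err == driver.CUresult.CUDA_ERROR_NOT_READY:
        pass  # previously issued async work is still running; not a failure
    elif err != driver.CUresult.CUDA_SUCCESS:
        raise RuntimeError(f"CUDA driver error: {err}")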
- CUDA_ERROR_LAUNCH_TIMEOUT = ccuda.cudaError_enum.CUDA_ERROR_LAUNCH_TIMEOUT{{endif}} + CUDA_ERROR_LAUNCH_TIMEOUT = cydriver.cudaError_enum.CUDA_ERROR_LAUNCH_TIMEOUT{{endif}} {{if 'CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING' in found_values}} #: This error indicates a kernel launch that uses an incompatible #: texturing mode. - CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = ccuda.cudaError_enum.CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING{{endif}} + CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = cydriver.cudaError_enum.CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING{{endif}} {{if 'CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED' in found_values}} #: This error indicates that a call to #: :py:obj:`~.cuCtxEnablePeerAccess()` is trying to re-enable peer #: access to a context which has already had peer access to it enabled. - CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = ccuda.cudaError_enum.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED{{endif}} + CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = cydriver.cudaError_enum.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED{{endif}} {{if 'CUDA_ERROR_PEER_ACCESS_NOT_ENABLED' in found_values}} #: This error indicates that :py:obj:`~.cuCtxDisablePeerAccess()` is #: trying to disable peer access which has not been enabled yet via #: :py:obj:`~.cuCtxEnablePeerAccess()`. - CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = ccuda.cudaError_enum.CUDA_ERROR_PEER_ACCESS_NOT_ENABLED{{endif}} + CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = cydriver.cudaError_enum.CUDA_ERROR_PEER_ACCESS_NOT_ENABLED{{endif}} {{if 'CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE' in found_values}} #: This error indicates that the primary context for the specified #: device has already been initialized. - CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = ccuda.cudaError_enum.CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE{{endif}} + CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = cydriver.cudaError_enum.CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE{{endif}} {{if 'CUDA_ERROR_CONTEXT_IS_DESTROYED' in found_values}} #: This error indicates that the context current to the calling thread #: has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary #: context which has not yet been initialized. - CUDA_ERROR_CONTEXT_IS_DESTROYED = ccuda.cudaError_enum.CUDA_ERROR_CONTEXT_IS_DESTROYED{{endif}} + CUDA_ERROR_CONTEXT_IS_DESTROYED = cydriver.cudaError_enum.CUDA_ERROR_CONTEXT_IS_DESTROYED{{endif}} {{if 'CUDA_ERROR_ASSERT' in found_values}} #: A device-side assert triggered during kernel execution. The context #: cannot be used anymore, and must be destroyed. All existing device #: memory allocations from this context are invalid and must be #: reconstructed if the program is to continue using CUDA. - CUDA_ERROR_ASSERT = ccuda.cudaError_enum.CUDA_ERROR_ASSERT{{endif}} + CUDA_ERROR_ASSERT = cydriver.cudaError_enum.CUDA_ERROR_ASSERT{{endif}} {{if 'CUDA_ERROR_TOO_MANY_PEERS' in found_values}} #: This error indicates that the hardware resources required to enable #: peer access have been exhausted for one or more of the devices #: passed to :py:obj:`~.cuCtxEnablePeerAccess()`. - CUDA_ERROR_TOO_MANY_PEERS = ccuda.cudaError_enum.CUDA_ERROR_TOO_MANY_PEERS{{endif}} + CUDA_ERROR_TOO_MANY_PEERS = cydriver.cudaError_enum.CUDA_ERROR_TOO_MANY_PEERS{{endif}} {{if 'CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED' in found_values}} #: This error indicates that the memory range passed to #: :py:obj:`~.cuMemHostRegister()` has already been registered. 
- CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = ccuda.cudaError_enum.CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED{{endif}} + CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = cydriver.cudaError_enum.CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED{{endif}} {{if 'CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED' in found_values}} #: This error indicates that the pointer passed to #: :py:obj:`~.cuMemHostUnregister()` does not correspond to any #: currently registered memory region. - CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = ccuda.cudaError_enum.CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED{{endif}} + CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = cydriver.cudaError_enum.CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED{{endif}} {{if 'CUDA_ERROR_HARDWARE_STACK_ERROR' in found_values}} #: While executing a kernel, the device encountered a stack error. This @@ -3418,14 +3418,14 @@ class CUresult(IntEnum): #: This leaves the process in an inconsistent state and any further #: CUDA work will return the same error. To continue using CUDA, the #: process must be terminated and relaunched. - CUDA_ERROR_HARDWARE_STACK_ERROR = ccuda.cudaError_enum.CUDA_ERROR_HARDWARE_STACK_ERROR{{endif}} + CUDA_ERROR_HARDWARE_STACK_ERROR = cydriver.cudaError_enum.CUDA_ERROR_HARDWARE_STACK_ERROR{{endif}} {{if 'CUDA_ERROR_ILLEGAL_INSTRUCTION' in found_values}} #: While executing a kernel, the device encountered an illegal #: instruction. This leaves the process in an inconsistent state and #: any further CUDA work will return the same error. To continue using #: CUDA, the process must be terminated and relaunched. - CUDA_ERROR_ILLEGAL_INSTRUCTION = ccuda.cudaError_enum.CUDA_ERROR_ILLEGAL_INSTRUCTION{{endif}} + CUDA_ERROR_ILLEGAL_INSTRUCTION = cydriver.cudaError_enum.CUDA_ERROR_ILLEGAL_INSTRUCTION{{endif}} {{if 'CUDA_ERROR_MISALIGNED_ADDRESS' in found_values}} #: While executing a kernel, the device encountered a load or store @@ -3433,7 +3433,7 @@ class CUresult(IntEnum): #: the process in an inconsistent state and any further CUDA work will #: return the same error. To continue using CUDA, the process must be #: terminated and relaunched. - CUDA_ERROR_MISALIGNED_ADDRESS = ccuda.cudaError_enum.CUDA_ERROR_MISALIGNED_ADDRESS{{endif}} + CUDA_ERROR_MISALIGNED_ADDRESS = cydriver.cudaError_enum.CUDA_ERROR_MISALIGNED_ADDRESS{{endif}} {{if 'CUDA_ERROR_INVALID_ADDRESS_SPACE' in found_values}} #: While executing a kernel, the device encountered an instruction @@ -3443,14 +3443,14 @@ class CUresult(IntEnum): #: inconsistent state and any further CUDA work will return the same #: error. To continue using CUDA, the process must be terminated and #: relaunched. - CUDA_ERROR_INVALID_ADDRESS_SPACE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_ADDRESS_SPACE{{endif}} + CUDA_ERROR_INVALID_ADDRESS_SPACE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_ADDRESS_SPACE{{endif}} {{if 'CUDA_ERROR_INVALID_PC' in found_values}} #: While executing a kernel, the device program counter wrapped its #: address space. This leaves the process in an inconsistent state and #: any further CUDA work will return the same error. To continue using #: CUDA, the process must be terminated and relaunched. - CUDA_ERROR_INVALID_PC = ccuda.cudaError_enum.CUDA_ERROR_INVALID_PC{{endif}} + CUDA_ERROR_INVALID_PC = cydriver.cudaError_enum.CUDA_ERROR_INVALID_PC{{endif}} {{if 'CUDA_ERROR_LAUNCH_FAILED' in found_values}} #: An exception occurred on the device while executing a kernel. Common @@ -3461,7 +3461,7 @@ class CUresult(IntEnum): #: inconsistent state and any further CUDA work will return the same #: error. 
To continue using CUDA, the process must be terminated and #: relaunched. - CUDA_ERROR_LAUNCH_FAILED = ccuda.cudaError_enum.CUDA_ERROR_LAUNCH_FAILED{{endif}} + CUDA_ERROR_LAUNCH_FAILED = cydriver.cudaError_enum.CUDA_ERROR_LAUNCH_FAILED{{endif}} {{if 'CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE' in found_values}} #: This error indicates that the number of blocks launched per grid for @@ -3473,16 +3473,16 @@ class CUresult(IntEnum): #: :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` #: times the number of multiprocessors as specified by the device #: attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT`. - CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = ccuda.cudaError_enum.CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE{{endif}} + CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = cydriver.cudaError_enum.CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE{{endif}} {{if 'CUDA_ERROR_NOT_PERMITTED' in found_values}} #: This error indicates that the attempted operation is not permitted. - CUDA_ERROR_NOT_PERMITTED = ccuda.cudaError_enum.CUDA_ERROR_NOT_PERMITTED{{endif}} + CUDA_ERROR_NOT_PERMITTED = cydriver.cudaError_enum.CUDA_ERROR_NOT_PERMITTED{{endif}} {{if 'CUDA_ERROR_NOT_SUPPORTED' in found_values}} #: This error indicates that the attempted operation is not supported #: on the current system or device. - CUDA_ERROR_NOT_SUPPORTED = ccuda.cudaError_enum.CUDA_ERROR_NOT_SUPPORTED{{endif}} + CUDA_ERROR_NOT_SUPPORTED = cydriver.cudaError_enum.CUDA_ERROR_NOT_SUPPORTED{{endif}} {{if 'CUDA_ERROR_SYSTEM_NOT_READY' in found_values}} #: This error indicates that the system is not yet ready to start any @@ -3490,13 +3490,13 @@ class CUresult(IntEnum): #: is in a valid state and all required driver daemons are actively #: running. More information about this error can be found in the #: system specific user guide. - CUDA_ERROR_SYSTEM_NOT_READY = ccuda.cudaError_enum.CUDA_ERROR_SYSTEM_NOT_READY{{endif}} + CUDA_ERROR_SYSTEM_NOT_READY = cydriver.cudaError_enum.CUDA_ERROR_SYSTEM_NOT_READY{{endif}} {{if 'CUDA_ERROR_SYSTEM_DRIVER_MISMATCH' in found_values}} #: This error indicates that there is a mismatch between the versions #: of the display driver and the CUDA driver. Refer to the #: compatibility documentation for supported versions. - CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = ccuda.cudaError_enum.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH{{endif}} + CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = cydriver.cudaError_enum.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH{{endif}} {{if 'CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE' in found_values}} #: This error indicates that the system was upgraded to run with @@ -3505,109 +3505,109 @@ class CUresult(IntEnum): #: documentation for the supported hardware matrix or ensure that only #: supported hardware is visible during initialization via the #: CUDA_VISIBLE_DEVICES environment variable. - CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = ccuda.cudaError_enum.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE{{endif}} + CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = cydriver.cudaError_enum.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE{{endif}} {{if 'CUDA_ERROR_MPS_CONNECTION_FAILED' in found_values}} #: This error indicates that the MPS client failed to connect to the #: MPS control daemon or the MPS server. 
- CUDA_ERROR_MPS_CONNECTION_FAILED = ccuda.cudaError_enum.CUDA_ERROR_MPS_CONNECTION_FAILED{{endif}} + CUDA_ERROR_MPS_CONNECTION_FAILED = cydriver.cudaError_enum.CUDA_ERROR_MPS_CONNECTION_FAILED{{endif}} {{if 'CUDA_ERROR_MPS_RPC_FAILURE' in found_values}} #: This error indicates that the remote procedure call between the MPS #: server and the MPS client failed. - CUDA_ERROR_MPS_RPC_FAILURE = ccuda.cudaError_enum.CUDA_ERROR_MPS_RPC_FAILURE{{endif}} + CUDA_ERROR_MPS_RPC_FAILURE = cydriver.cudaError_enum.CUDA_ERROR_MPS_RPC_FAILURE{{endif}} {{if 'CUDA_ERROR_MPS_SERVER_NOT_READY' in found_values}} #: This error indicates that the MPS server is not ready to accept new #: MPS client requests. This error can be returned when the MPS server #: is in the process of recovering from a fatal failure. - CUDA_ERROR_MPS_SERVER_NOT_READY = ccuda.cudaError_enum.CUDA_ERROR_MPS_SERVER_NOT_READY{{endif}} + CUDA_ERROR_MPS_SERVER_NOT_READY = cydriver.cudaError_enum.CUDA_ERROR_MPS_SERVER_NOT_READY{{endif}} {{if 'CUDA_ERROR_MPS_MAX_CLIENTS_REACHED' in found_values}} #: This error indicates that the hardware resources required to create #: an MPS client have been exhausted. - CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = ccuda.cudaError_enum.CUDA_ERROR_MPS_MAX_CLIENTS_REACHED{{endif}} + CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = cydriver.cudaError_enum.CUDA_ERROR_MPS_MAX_CLIENTS_REACHED{{endif}} {{if 'CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED' in found_values}} #: This error indicates that the hardware resources required to support #: device connections have been exhausted. - CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = ccuda.cudaError_enum.CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED{{endif}} + CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = cydriver.cudaError_enum.CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED{{endif}} {{if 'CUDA_ERROR_MPS_CLIENT_TERMINATED' in found_values}} #: This error indicates that the MPS client has been terminated by the #: server. To continue using CUDA, the process must be terminated and #: relaunched. - CUDA_ERROR_MPS_CLIENT_TERMINATED = ccuda.cudaError_enum.CUDA_ERROR_MPS_CLIENT_TERMINATED{{endif}} + CUDA_ERROR_MPS_CLIENT_TERMINATED = cydriver.cudaError_enum.CUDA_ERROR_MPS_CLIENT_TERMINATED{{endif}} {{if 'CUDA_ERROR_CDP_NOT_SUPPORTED' in found_values}} #: This error indicates that the module is using CUDA Dynamic #: Parallelism, but the current configuration, like MPS, does not #: support it. - CUDA_ERROR_CDP_NOT_SUPPORTED = ccuda.cudaError_enum.CUDA_ERROR_CDP_NOT_SUPPORTED{{endif}} + CUDA_ERROR_CDP_NOT_SUPPORTED = cydriver.cudaError_enum.CUDA_ERROR_CDP_NOT_SUPPORTED{{endif}} {{if 'CUDA_ERROR_CDP_VERSION_MISMATCH' in found_values}} #: This error indicates that a module contains an unsupported #: interaction between different versions of CUDA Dynamic Parallelism. - CUDA_ERROR_CDP_VERSION_MISMATCH = ccuda.cudaError_enum.CUDA_ERROR_CDP_VERSION_MISMATCH{{endif}} + CUDA_ERROR_CDP_VERSION_MISMATCH = cydriver.cudaError_enum.CUDA_ERROR_CDP_VERSION_MISMATCH{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED' in found_values}} #: This error indicates that the operation is not permitted when the #: stream is capturing. - CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED{{endif}} + CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_INVALIDATED' in found_values}} #: This error indicates that the current capture sequence on the stream #: has been invalidated due to a previous error.
- CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_INVALIDATED{{endif}} + CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_INVALIDATED{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_MERGE' in found_values}} #: This error indicates that the operation would have resulted in a #: merge of two independent capture sequences. - CUDA_ERROR_STREAM_CAPTURE_MERGE = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_MERGE{{endif}} + CUDA_ERROR_STREAM_CAPTURE_MERGE = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_MERGE{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_UNMATCHED' in found_values}} #: This error indicates that the capture was not initiated in this #: stream. - CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_UNMATCHED{{endif}} + CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_UNMATCHED{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_UNJOINED' in found_values}} #: This error indicates that the capture sequence contains a fork that #: was not joined to the primary stream. - CUDA_ERROR_STREAM_CAPTURE_UNJOINED = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_UNJOINED{{endif}} + CUDA_ERROR_STREAM_CAPTURE_UNJOINED = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_UNJOINED{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_ISOLATION' in found_values}} #: This error indicates that a dependency would have been created which #: crosses the capture sequence boundary. Only implicit in-stream #: ordering dependencies are allowed to cross the boundary. - CUDA_ERROR_STREAM_CAPTURE_ISOLATION = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_ISOLATION{{endif}} + CUDA_ERROR_STREAM_CAPTURE_ISOLATION = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_ISOLATION{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_IMPLICIT' in found_values}} #: This error indicates a disallowed implicit dependency on a current #: capture sequence from cudaStreamLegacy. - CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_IMPLICIT{{endif}} + CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_IMPLICIT{{endif}} {{if 'CUDA_ERROR_CAPTURED_EVENT' in found_values}} #: This error indicates that the operation is not permitted on an event #: which was last recorded in a capturing stream. - CUDA_ERROR_CAPTURED_EVENT = ccuda.cudaError_enum.CUDA_ERROR_CAPTURED_EVENT{{endif}} + CUDA_ERROR_CAPTURED_EVENT = cydriver.cudaError_enum.CUDA_ERROR_CAPTURED_EVENT{{endif}} {{if 'CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD' in found_values}} #: A stream capture sequence not initiated with the #: :py:obj:`~.CU_STREAM_CAPTURE_MODE_RELAXED` argument to #: :py:obj:`~.cuStreamBeginCapture` was passed to #: :py:obj:`~.cuStreamEndCapture` in a different thread. - CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = ccuda.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD{{endif}} + CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = cydriver.cudaError_enum.CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD{{endif}} {{if 'CUDA_ERROR_TIMEOUT' in found_values}} #: This error indicates that the timeout specified for the wait #: operation has lapsed. - CUDA_ERROR_TIMEOUT = ccuda.cudaError_enum.CUDA_ERROR_TIMEOUT{{endif}} + CUDA_ERROR_TIMEOUT = cydriver.cudaError_enum.CUDA_ERROR_TIMEOUT{{endif}} {{if 'CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE' in found_values}} #: This error indicates that the graph update was not performed because #: it included changes which violated constraints specific to #: instantiated graph update. 
- CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = ccuda.cudaError_enum.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE{{endif}} + CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = cydriver.cudaError_enum.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE{{endif}} {{if 'CUDA_ERROR_EXTERNAL_DEVICE' in found_values}} #: This indicates that an async error has occurred in a device outside @@ -3617,30 +3617,30 @@ class CUresult(IntEnum): #: the process in an inconsistent state and any further CUDA work will #: return the same error. To continue using CUDA, the process must be #: terminated and relaunched. - CUDA_ERROR_EXTERNAL_DEVICE = ccuda.cudaError_enum.CUDA_ERROR_EXTERNAL_DEVICE{{endif}} + CUDA_ERROR_EXTERNAL_DEVICE = cydriver.cudaError_enum.CUDA_ERROR_EXTERNAL_DEVICE{{endif}} {{if 'CUDA_ERROR_INVALID_CLUSTER_SIZE' in found_values}} #: Indicates a kernel launch error due to cluster misconfiguration. - CUDA_ERROR_INVALID_CLUSTER_SIZE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_CLUSTER_SIZE{{endif}} + CUDA_ERROR_INVALID_CLUSTER_SIZE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_CLUSTER_SIZE{{endif}} {{if 'CUDA_ERROR_FUNCTION_NOT_LOADED' in found_values}} #: Indicates a function handle is not loaded when calling an API that #: requires a loaded function. - CUDA_ERROR_FUNCTION_NOT_LOADED = ccuda.cudaError_enum.CUDA_ERROR_FUNCTION_NOT_LOADED{{endif}} + CUDA_ERROR_FUNCTION_NOT_LOADED = cydriver.cudaError_enum.CUDA_ERROR_FUNCTION_NOT_LOADED{{endif}} {{if 'CUDA_ERROR_INVALID_RESOURCE_TYPE' in found_values}} #: This error indicates one or more resources passed in are not valid #: resource types for the operation. - CUDA_ERROR_INVALID_RESOURCE_TYPE = ccuda.cudaError_enum.CUDA_ERROR_INVALID_RESOURCE_TYPE{{endif}} + CUDA_ERROR_INVALID_RESOURCE_TYPE = cydriver.cudaError_enum.CUDA_ERROR_INVALID_RESOURCE_TYPE{{endif}} {{if 'CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION' in found_values}} #: This error indicates one or more resources are insufficient or non- #: applicable for the operation. - CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION = ccuda.cudaError_enum.CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION{{endif}} + CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION = cydriver.cudaError_enum.CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION{{endif}} {{if 'CUDA_ERROR_UNKNOWN' in found_values}} #: This indicates that an unknown internal error has occurred.
- CUDA_ERROR_UNKNOWN = ccuda.cudaError_enum.CUDA_ERROR_UNKNOWN{{endif}} + CUDA_ERROR_UNKNOWN = cydriver.cudaError_enum.CUDA_ERROR_UNKNOWN{{endif}} {{endif}} {{if 'CUdevice_P2PAttribute_enum' in found_types}} @@ -3652,23 +3652,23 @@ class CUdevice_P2PAttribute(IntEnum): #: A relative value indicating the performance of the link between two #: devices - CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = ccuda.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK{{endif}} + CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = cydriver.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK{{endif}} {{if 'CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED' in found_values}} #: P2P Access is enabled - CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = ccuda.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED{{endif}} + CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = cydriver.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED{{endif}} {{if 'CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED' in found_values}} #: Atomic operation over the link supported - CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = ccuda.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED{{endif}} + CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = cydriver.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED{{endif}} {{if 'CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED' in found_values}} #: [Deprecated] - CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = ccuda.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED{{endif}} + CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = cydriver.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED{{endif}} {{if 'CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED' in found_values}} #: Accessing CUDA arrays over the link supported - CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = ccuda.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED{{endif}} + CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = cydriver.CUdevice_P2PAttribute_enum.CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED{{endif}} {{endif}} {{if 'CUresourceViewFormat_enum' in found_types}} @@ -3679,143 +3679,143 @@ class CUresourceViewFormat(IntEnum): {{if 'CU_RES_VIEW_FORMAT_NONE' in found_values}} #: No resource view format (use underlying resource format) - CU_RES_VIEW_FORMAT_NONE = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_NONE{{endif}} + CU_RES_VIEW_FORMAT_NONE = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_NONE{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_1X8' in found_values}} #: 1 channel unsigned 8-bit integers - CU_RES_VIEW_FORMAT_UINT_1X8 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_1X8{{endif}} + CU_RES_VIEW_FORMAT_UINT_1X8 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_1X8{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_2X8' in found_values}} #: 2 channel unsigned 8-bit integers - CU_RES_VIEW_FORMAT_UINT_2X8 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_2X8{{endif}} + CU_RES_VIEW_FORMAT_UINT_2X8 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_2X8{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_4X8' in found_values}} #: 4 channel unsigned 8-bit integers - CU_RES_VIEW_FORMAT_UINT_4X8 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_4X8{{endif}} + CU_RES_VIEW_FORMAT_UINT_4X8 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_4X8{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_1X8' in found_values}} #: 1 channel signed 8-bit
integers - CU_RES_VIEW_FORMAT_SINT_1X8 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_1X8{{endif}} + CU_RES_VIEW_FORMAT_SINT_1X8 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_1X8{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_2X8' in found_values}} #: 2 channel signed 8-bit integers - CU_RES_VIEW_FORMAT_SINT_2X8 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_2X8{{endif}} + CU_RES_VIEW_FORMAT_SINT_2X8 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_2X8{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_4X8' in found_values}} #: 4 channel signed 8-bit integers - CU_RES_VIEW_FORMAT_SINT_4X8 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_4X8{{endif}} + CU_RES_VIEW_FORMAT_SINT_4X8 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_4X8{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_1X16' in found_values}} #: 1 channel unsigned 16-bit integers - CU_RES_VIEW_FORMAT_UINT_1X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_1X16{{endif}} + CU_RES_VIEW_FORMAT_UINT_1X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_1X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_2X16' in found_values}} #: 2 channel unsigned 16-bit integers - CU_RES_VIEW_FORMAT_UINT_2X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_2X16{{endif}} + CU_RES_VIEW_FORMAT_UINT_2X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_2X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_4X16' in found_values}} #: 4 channel unsigned 16-bit integers - CU_RES_VIEW_FORMAT_UINT_4X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_4X16{{endif}} + CU_RES_VIEW_FORMAT_UINT_4X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_4X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_1X16' in found_values}} #: 1 channel signed 16-bit integers - CU_RES_VIEW_FORMAT_SINT_1X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_1X16{{endif}} + CU_RES_VIEW_FORMAT_SINT_1X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_1X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_2X16' in found_values}} #: 2 channel signed 16-bit integers - CU_RES_VIEW_FORMAT_SINT_2X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_2X16{{endif}} + CU_RES_VIEW_FORMAT_SINT_2X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_2X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_4X16' in found_values}} #: 4 channel signed 16-bit integers - CU_RES_VIEW_FORMAT_SINT_4X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_4X16{{endif}} + CU_RES_VIEW_FORMAT_SINT_4X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_4X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_1X32' in found_values}} #: 1 channel unsigned 32-bit integers - CU_RES_VIEW_FORMAT_UINT_1X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_1X32{{endif}} + CU_RES_VIEW_FORMAT_UINT_1X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_1X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_2X32' in found_values}} #: 2 channel unsigned 32-bit integers - CU_RES_VIEW_FORMAT_UINT_2X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_2X32{{endif}} + CU_RES_VIEW_FORMAT_UINT_2X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_2X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_UINT_4X32' in found_values}} #: 4 channel unsigned 32-bit integers - CU_RES_VIEW_FORMAT_UINT_4X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_4X32{{endif}} + CU_RES_VIEW_FORMAT_UINT_4X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UINT_4X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_1X32' in 
found_values}} #: 1 channel signed 32-bit integers - CU_RES_VIEW_FORMAT_SINT_1X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_1X32{{endif}} + CU_RES_VIEW_FORMAT_SINT_1X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_1X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_2X32' in found_values}} #: 2 channel signed 32-bit integers - CU_RES_VIEW_FORMAT_SINT_2X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_2X32{{endif}} + CU_RES_VIEW_FORMAT_SINT_2X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_2X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_SINT_4X32' in found_values}} #: 4 channel signed 32-bit integers - CU_RES_VIEW_FORMAT_SINT_4X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_4X32{{endif}} + CU_RES_VIEW_FORMAT_SINT_4X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SINT_4X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_FLOAT_1X16' in found_values}} #: 1 channel 16-bit floating point - CU_RES_VIEW_FORMAT_FLOAT_1X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_1X16{{endif}} + CU_RES_VIEW_FORMAT_FLOAT_1X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_1X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_FLOAT_2X16' in found_values}} #: 2 channel 16-bit floating point - CU_RES_VIEW_FORMAT_FLOAT_2X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_2X16{{endif}} + CU_RES_VIEW_FORMAT_FLOAT_2X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_2X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_FLOAT_4X16' in found_values}} #: 4 channel 16-bit floating point - CU_RES_VIEW_FORMAT_FLOAT_4X16 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_4X16{{endif}} + CU_RES_VIEW_FORMAT_FLOAT_4X16 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_4X16{{endif}} {{if 'CU_RES_VIEW_FORMAT_FLOAT_1X32' in found_values}} #: 1 channel 32-bit floating point - CU_RES_VIEW_FORMAT_FLOAT_1X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_1X32{{endif}} + CU_RES_VIEW_FORMAT_FLOAT_1X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_1X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_FLOAT_2X32' in found_values}} #: 2 channel 32-bit floating point - CU_RES_VIEW_FORMAT_FLOAT_2X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_2X32{{endif}} + CU_RES_VIEW_FORMAT_FLOAT_2X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_2X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_FLOAT_4X32' in found_values}} #: 4 channel 32-bit floating point - CU_RES_VIEW_FORMAT_FLOAT_4X32 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_4X32{{endif}} + CU_RES_VIEW_FORMAT_FLOAT_4X32 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_FLOAT_4X32{{endif}} {{if 'CU_RES_VIEW_FORMAT_UNSIGNED_BC1' in found_values}} #: Block compressed 1 - CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC1{{endif}} + CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC1{{endif}} {{if 'CU_RES_VIEW_FORMAT_UNSIGNED_BC2' in found_values}} #: Block compressed 2 - CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC2{{endif}} + CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC2{{endif}} {{if 'CU_RES_VIEW_FORMAT_UNSIGNED_BC3' in found_values}} #: Block compressed 3 - CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC3{{endif}} + CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = 
cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC3{{endif}} {{if 'CU_RES_VIEW_FORMAT_UNSIGNED_BC4' in found_values}} #: Block compressed 4 unsigned - CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC4{{endif}} + CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC4{{endif}} {{if 'CU_RES_VIEW_FORMAT_SIGNED_BC4' in found_values}} #: Block compressed 4 signed - CU_RES_VIEW_FORMAT_SIGNED_BC4 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SIGNED_BC4{{endif}} + CU_RES_VIEW_FORMAT_SIGNED_BC4 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SIGNED_BC4{{endif}} {{if 'CU_RES_VIEW_FORMAT_UNSIGNED_BC5' in found_values}} #: Block compressed 5 unsigned - CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC5{{endif}} + CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC5{{endif}} {{if 'CU_RES_VIEW_FORMAT_SIGNED_BC5' in found_values}} #: Block compressed 5 signed - CU_RES_VIEW_FORMAT_SIGNED_BC5 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SIGNED_BC5{{endif}} + CU_RES_VIEW_FORMAT_SIGNED_BC5 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SIGNED_BC5{{endif}} {{if 'CU_RES_VIEW_FORMAT_UNSIGNED_BC6H' in found_values}} #: Block compressed 6 unsigned half-float - CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC6H{{endif}} + CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC6H{{endif}} {{if 'CU_RES_VIEW_FORMAT_SIGNED_BC6H' in found_values}} #: Block compressed 6 signed half-float - CU_RES_VIEW_FORMAT_SIGNED_BC6H = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SIGNED_BC6H{{endif}} + CU_RES_VIEW_FORMAT_SIGNED_BC6H = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_SIGNED_BC6H{{endif}} {{if 'CU_RES_VIEW_FORMAT_UNSIGNED_BC7' in found_values}} #: Block compressed 7 - CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = ccuda.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC7{{endif}} + CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = cydriver.CUresourceViewFormat_enum.CU_RES_VIEW_FORMAT_UNSIGNED_BC7{{endif}} {{endif}} {{if 'CUtensorMapDataType_enum' in found_types}} @@ -3824,31 +3824,31 @@ class CUtensorMapDataType(IntEnum): Tensor map data type """ {{if 'CU_TENSOR_MAP_DATA_TYPE_UINT8' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_UINT8 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT8{{endif}} + CU_TENSOR_MAP_DATA_TYPE_UINT8 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT8{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_UINT16' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_UINT16 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT16{{endif}} + CU_TENSOR_MAP_DATA_TYPE_UINT16 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT16{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_UINT32' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_UINT32 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT32{{endif}} + CU_TENSOR_MAP_DATA_TYPE_UINT32 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT32{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_INT32' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_INT32 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_INT32{{endif}} + CU_TENSOR_MAP_DATA_TYPE_INT32 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_INT32{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_UINT64' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_UINT64 = 
ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT64{{endif}} + CU_TENSOR_MAP_DATA_TYPE_UINT64 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_UINT64{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_INT64' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_INT64 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_INT64{{endif}} + CU_TENSOR_MAP_DATA_TYPE_INT64 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_INT64{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_FLOAT16' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_FLOAT16 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT16{{endif}} + CU_TENSOR_MAP_DATA_TYPE_FLOAT16 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT16{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_FLOAT32' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_FLOAT32 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT32{{endif}} + CU_TENSOR_MAP_DATA_TYPE_FLOAT32 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT32{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_FLOAT64' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_FLOAT64 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT64{{endif}} + CU_TENSOR_MAP_DATA_TYPE_FLOAT64 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT64{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_BFLOAT16' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_BFLOAT16{{endif}} + CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_BFLOAT16{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ{{endif}} + CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_TFLOAT32' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32{{endif}} + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32{{endif}} {{if 'CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ' in found_values}} - CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ = ccuda.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ{{endif}} + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ = cydriver.CUtensorMapDataType_enum.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ{{endif}} {{endif}} {{if 'CUtensorMapInterleave_enum' in found_types}} @@ -3857,11 +3857,11 @@ class CUtensorMapInterleave(IntEnum): Tensor map interleave layout type """ {{if 'CU_TENSOR_MAP_INTERLEAVE_NONE' in found_values}} - CU_TENSOR_MAP_INTERLEAVE_NONE = ccuda.CUtensorMapInterleave_enum.CU_TENSOR_MAP_INTERLEAVE_NONE{{endif}} + CU_TENSOR_MAP_INTERLEAVE_NONE = cydriver.CUtensorMapInterleave_enum.CU_TENSOR_MAP_INTERLEAVE_NONE{{endif}} {{if 'CU_TENSOR_MAP_INTERLEAVE_16B' in found_values}} - CU_TENSOR_MAP_INTERLEAVE_16B = ccuda.CUtensorMapInterleave_enum.CU_TENSOR_MAP_INTERLEAVE_16B{{endif}} + CU_TENSOR_MAP_INTERLEAVE_16B = cydriver.CUtensorMapInterleave_enum.CU_TENSOR_MAP_INTERLEAVE_16B{{endif}} {{if 'CU_TENSOR_MAP_INTERLEAVE_32B' in found_values}} - CU_TENSOR_MAP_INTERLEAVE_32B = ccuda.CUtensorMapInterleave_enum.CU_TENSOR_MAP_INTERLEAVE_32B{{endif}} + CU_TENSOR_MAP_INTERLEAVE_32B = cydriver.CUtensorMapInterleave_enum.CU_TENSOR_MAP_INTERLEAVE_32B{{endif}} {{endif}} {{if 'CUtensorMapSwizzle_enum' in found_types}} @@ -3870,13 +3870,13 @@ class CUtensorMapSwizzle(IntEnum): Tensor map swizzling mode of shared memory 
banks """ {{if 'CU_TENSOR_MAP_SWIZZLE_NONE' in found_values}} - CU_TENSOR_MAP_SWIZZLE_NONE = ccuda.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_NONE{{endif}} + CU_TENSOR_MAP_SWIZZLE_NONE = cydriver.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_NONE{{endif}} {{if 'CU_TENSOR_MAP_SWIZZLE_32B' in found_values}} - CU_TENSOR_MAP_SWIZZLE_32B = ccuda.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_32B{{endif}} + CU_TENSOR_MAP_SWIZZLE_32B = cydriver.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_32B{{endif}} {{if 'CU_TENSOR_MAP_SWIZZLE_64B' in found_values}} - CU_TENSOR_MAP_SWIZZLE_64B = ccuda.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_64B{{endif}} + CU_TENSOR_MAP_SWIZZLE_64B = cydriver.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_64B{{endif}} {{if 'CU_TENSOR_MAP_SWIZZLE_128B' in found_values}} - CU_TENSOR_MAP_SWIZZLE_128B = ccuda.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_128B{{endif}} + CU_TENSOR_MAP_SWIZZLE_128B = cydriver.CUtensorMapSwizzle_enum.CU_TENSOR_MAP_SWIZZLE_128B{{endif}} {{endif}} {{if 'CUtensorMapL2promotion_enum' in found_types}} @@ -3885,13 +3885,13 @@ class CUtensorMapL2promotion(IntEnum): Tensor map L2 promotion type """ {{if 'CU_TENSOR_MAP_L2_PROMOTION_NONE' in found_values}} - CU_TENSOR_MAP_L2_PROMOTION_NONE = ccuda.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_NONE{{endif}} + CU_TENSOR_MAP_L2_PROMOTION_NONE = cydriver.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_NONE{{endif}} {{if 'CU_TENSOR_MAP_L2_PROMOTION_L2_64B' in found_values}} - CU_TENSOR_MAP_L2_PROMOTION_L2_64B = ccuda.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_L2_64B{{endif}} + CU_TENSOR_MAP_L2_PROMOTION_L2_64B = cydriver.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_L2_64B{{endif}} {{if 'CU_TENSOR_MAP_L2_PROMOTION_L2_128B' in found_values}} - CU_TENSOR_MAP_L2_PROMOTION_L2_128B = ccuda.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_L2_128B{{endif}} + CU_TENSOR_MAP_L2_PROMOTION_L2_128B = cydriver.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_L2_128B{{endif}} {{if 'CU_TENSOR_MAP_L2_PROMOTION_L2_256B' in found_values}} - CU_TENSOR_MAP_L2_PROMOTION_L2_256B = ccuda.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_L2_256B{{endif}} + CU_TENSOR_MAP_L2_PROMOTION_L2_256B = cydriver.CUtensorMapL2promotion_enum.CU_TENSOR_MAP_L2_PROMOTION_L2_256B{{endif}} {{endif}} {{if 'CUtensorMapFloatOOBfill_enum' in found_types}} @@ -3900,9 +3900,9 @@ class CUtensorMapFloatOOBfill(IntEnum): Tensor map out-of-bounds fill type """ {{if 'CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE' in found_values}} - CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = ccuda.CUtensorMapFloatOOBfill_enum.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE{{endif}} + CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = cydriver.CUtensorMapFloatOOBfill_enum.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE{{endif}} {{if 'CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA' in found_values}} - CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA = ccuda.CUtensorMapFloatOOBfill_enum.CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA{{endif}} + CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA = cydriver.CUtensorMapFloatOOBfill_enum.CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA{{endif}} {{endif}} {{if 'CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum' in found_types}} @@ -3916,17 +3916,17 @@ class CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS(IntEnum): #: No access, meaning the device cannot access this memory at all, thus #: must be staged through accessible memory in order to complete #: certain operations - CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE = 
ccuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE{{endif}} + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE = cydriver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE{{endif}} {{if 'CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ' in found_values}} #: Read-only access, meaning writes to this memory are considered #: invalid accesses and thus return error in that case. - CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ = ccuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ{{endif}} + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ = cydriver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ{{endif}} {{if 'CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE' in found_values}} #: Read-write access, the device has full read-write access to the #: memory - CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE = ccuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE{{endif}} + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE = cydriver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE{{endif}} {{endif}} {{if 'CUexternalMemoryHandleType_enum' in found_types}} @@ -3937,35 +3937,35 @@ class CUexternalMemoryHandleType(IntEnum): {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD' in found_values}} #: Handle is an opaque file descriptor - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD{{endif}} {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32' in found_values}} #: Handle is an opaque shared NT handle - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32{{endif}} {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT' in found_values}} #: Handle is an opaque, globally shared handle - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT{{endif}} {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP' in found_values}} #: Handle is a D3D12 heap object - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP{{endif}} {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE' in found_values}} #: Handle is a D3D12 committed resource - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE{{endif}} {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE' in found_values}} #: Handle is a shared NT handle to a D3D11 resource - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 
cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE{{endif}} {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT' in found_values}} #: Handle is a globally shared handle to a D3D11 resource - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT{{endif}} {{if 'CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF' in found_values}} #: Handle is an NvSciBuf object - CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = ccuda.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF{{endif}} + CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = cydriver.CUexternalMemoryHandleType_enum.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF{{endif}} {{endif}} {{if 'CUexternalSemaphoreHandleType_enum' in found_types}} @@ -3976,45 +3976,45 @@ class CUexternalSemaphoreHandleType(IntEnum): {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD' in found_values}} #: Handle is an opaque file descriptor - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32' in found_values}} #: Handle is an opaque shared NT handle - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT' in found_values}} #: Handle is an opaque, globally shared handle - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE' in found_values}} #: Handle is a shared NT handle referencing a D3D12 fence object - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE' in found_values}} #: Handle is a shared NT handle referencing a D3D11 fence object - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC' in found_values}} #: Opaque handle to NvSciSync Object - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC{{endif}} {{if 
'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX' in found_values}} #: Handle is a shared NT handle referencing a D3D11 keyed mutex object - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT' in found_values}} #: Handle is a globally shared handle referencing a D3D11 keyed mutex #: object - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD' in found_values}} #: Handle is an opaque file descriptor referencing a timeline semaphore - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD{{endif}} {{if 'CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32' in found_values}} #: Handle is an opaque shared NT handle referencing a timeline #: semaphore - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = ccuda.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32{{endif}} + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = cydriver.CUexternalSemaphoreHandleType_enum.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32{{endif}} {{endif}} {{if 'CUmemAllocationHandleType_enum' in found_types}} @@ -4025,26 +4025,26 @@ class CUmemAllocationHandleType(IntEnum): {{if 'CU_MEM_HANDLE_TYPE_NONE' in found_values}} #: Does not allow any export mechanism. > - CU_MEM_HANDLE_TYPE_NONE = ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_NONE{{endif}} + CU_MEM_HANDLE_TYPE_NONE = cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_NONE{{endif}} {{if 'CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR' in found_values}} #: Allows a file descriptor to be used for exporting. Permitted only on #: POSIX systems. (int) - CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR{{endif}} + CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR{{endif}} {{if 'CU_MEM_HANDLE_TYPE_WIN32' in found_values}} #: Allows a Win32 NT handle to be used for exporting. (HANDLE) - CU_MEM_HANDLE_TYPE_WIN32 = ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32{{endif}} + CU_MEM_HANDLE_TYPE_WIN32 = cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32{{endif}} {{if 'CU_MEM_HANDLE_TYPE_WIN32_KMT' in found_values}} #: Allows a Win32 KMT handle to be used for exporting. 
(D3DKMT_HANDLE) - CU_MEM_HANDLE_TYPE_WIN32_KMT = ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32_KMT{{endif}} + CU_MEM_HANDLE_TYPE_WIN32_KMT = cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_WIN32_KMT{{endif}} {{if 'CU_MEM_HANDLE_TYPE_FABRIC' in found_values}} #: Allows a fabric handle to be used for exporting. (CUmemFabricHandle) - CU_MEM_HANDLE_TYPE_FABRIC = ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_FABRIC{{endif}} + CU_MEM_HANDLE_TYPE_FABRIC = cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_FABRIC{{endif}} {{if 'CU_MEM_HANDLE_TYPE_MAX' in found_values}} - CU_MEM_HANDLE_TYPE_MAX = ccuda.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_MAX{{endif}} + CU_MEM_HANDLE_TYPE_MAX = cydriver.CUmemAllocationHandleType_enum.CU_MEM_HANDLE_TYPE_MAX{{endif}} {{endif}} {{if 'CUmemAccess_flags_enum' in found_types}} @@ -4055,17 +4055,17 @@ class CUmemAccess_flags(IntEnum): {{if 'CU_MEM_ACCESS_FLAGS_PROT_NONE' in found_values}} #: Default, make the address range not accessible - CU_MEM_ACCESS_FLAGS_PROT_NONE = ccuda.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_NONE{{endif}} + CU_MEM_ACCESS_FLAGS_PROT_NONE = cydriver.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_NONE{{endif}} {{if 'CU_MEM_ACCESS_FLAGS_PROT_READ' in found_values}} #: Make the address range read accessible - CU_MEM_ACCESS_FLAGS_PROT_READ = ccuda.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_READ{{endif}} + CU_MEM_ACCESS_FLAGS_PROT_READ = cydriver.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_READ{{endif}} {{if 'CU_MEM_ACCESS_FLAGS_PROT_READWRITE' in found_values}} #: Make the address range read-write accessible - CU_MEM_ACCESS_FLAGS_PROT_READWRITE = ccuda.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_READWRITE{{endif}} + CU_MEM_ACCESS_FLAGS_PROT_READWRITE = cydriver.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_READWRITE{{endif}} {{if 'CU_MEM_ACCESS_FLAGS_PROT_MAX' in found_values}} - CU_MEM_ACCESS_FLAGS_PROT_MAX = ccuda.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_MAX{{endif}} + CU_MEM_ACCESS_FLAGS_PROT_MAX = cydriver.CUmemAccess_flags_enum.CU_MEM_ACCESS_FLAGS_PROT_MAX{{endif}} {{endif}} {{if 'CUmemLocationType_enum' in found_types}} @@ -4074,25 +4074,25 @@ class CUmemLocationType(IntEnum): Specifies the type of location """ {{if 'CU_MEM_LOCATION_TYPE_INVALID' in found_values}} - CU_MEM_LOCATION_TYPE_INVALID = ccuda.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_INVALID{{endif}} + CU_MEM_LOCATION_TYPE_INVALID = cydriver.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_INVALID{{endif}} {{if 'CU_MEM_LOCATION_TYPE_DEVICE' in found_values}} #: Location is a device location, thus id is a device ordinal - CU_MEM_LOCATION_TYPE_DEVICE = ccuda.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_DEVICE{{endif}} + CU_MEM_LOCATION_TYPE_DEVICE = cydriver.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_DEVICE{{endif}} {{if 'CU_MEM_LOCATION_TYPE_HOST' in found_values}} #: Location is host, id is ignored - CU_MEM_LOCATION_TYPE_HOST = ccuda.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_HOST{{endif}} + CU_MEM_LOCATION_TYPE_HOST = cydriver.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_HOST{{endif}} {{if 'CU_MEM_LOCATION_TYPE_HOST_NUMA' in found_values}} #: Location is a host NUMA node, thus id is a host NUMA node id - CU_MEM_LOCATION_TYPE_HOST_NUMA = ccuda.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_HOST_NUMA{{endif}} + CU_MEM_LOCATION_TYPE_HOST_NUMA = cydriver.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_HOST_NUMA{{endif}} {{if 'CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT' in found_values}} #: Location is a host NUMA 
node of the current thread, id is ignored - CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT = ccuda.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT{{endif}} + CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT = cydriver.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT{{endif}} {{if 'CU_MEM_LOCATION_TYPE_MAX' in found_values}} - CU_MEM_LOCATION_TYPE_MAX = ccuda.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_MAX{{endif}} + CU_MEM_LOCATION_TYPE_MAX = cydriver.CUmemLocationType_enum.CU_MEM_LOCATION_TYPE_MAX{{endif}} {{endif}} {{if 'CUmemAllocationType_enum' in found_types}} @@ -4101,14 +4101,14 @@ class CUmemAllocationType(IntEnum): Defines the allocation types available """ {{if 'CU_MEM_ALLOCATION_TYPE_INVALID' in found_values}} - CU_MEM_ALLOCATION_TYPE_INVALID = ccuda.CUmemAllocationType_enum.CU_MEM_ALLOCATION_TYPE_INVALID{{endif}} + CU_MEM_ALLOCATION_TYPE_INVALID = cydriver.CUmemAllocationType_enum.CU_MEM_ALLOCATION_TYPE_INVALID{{endif}} {{if 'CU_MEM_ALLOCATION_TYPE_PINNED' in found_values}} #: This allocation type is 'pinned', i.e. cannot migrate from its #: current location while the application is actively using it - CU_MEM_ALLOCATION_TYPE_PINNED = ccuda.CUmemAllocationType_enum.CU_MEM_ALLOCATION_TYPE_PINNED{{endif}} + CU_MEM_ALLOCATION_TYPE_PINNED = cydriver.CUmemAllocationType_enum.CU_MEM_ALLOCATION_TYPE_PINNED{{endif}} {{if 'CU_MEM_ALLOCATION_TYPE_MAX' in found_values}} - CU_MEM_ALLOCATION_TYPE_MAX = ccuda.CUmemAllocationType_enum.CU_MEM_ALLOCATION_TYPE_MAX{{endif}} + CU_MEM_ALLOCATION_TYPE_MAX = cydriver.CUmemAllocationType_enum.CU_MEM_ALLOCATION_TYPE_MAX{{endif}} {{endif}} {{if 'CUmemAllocationGranularity_flags_enum' in found_types}} @@ -4120,11 +4120,11 @@ class CUmemAllocationGranularity_flags(IntEnum): {{if 'CU_MEM_ALLOC_GRANULARITY_MINIMUM' in found_values}} #: Minimum required granularity for allocation - CU_MEM_ALLOC_GRANULARITY_MINIMUM = ccuda.CUmemAllocationGranularity_flags_enum.CU_MEM_ALLOC_GRANULARITY_MINIMUM{{endif}} + CU_MEM_ALLOC_GRANULARITY_MINIMUM = cydriver.CUmemAllocationGranularity_flags_enum.CU_MEM_ALLOC_GRANULARITY_MINIMUM{{endif}} {{if 'CU_MEM_ALLOC_GRANULARITY_RECOMMENDED' in found_values}} #: Recommended granularity for allocation for best performance - CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = ccuda.CUmemAllocationGranularity_flags_enum.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED{{endif}} + CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = cydriver.CUmemAllocationGranularity_flags_enum.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED{{endif}} {{endif}} {{if 'CUmemRangeHandleType_enum' in found_types}} @@ -4133,9 +4133,9 @@ class CUmemRangeHandleType(IntEnum): Specifies the handle type for address range """ {{if 'CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD' in found_values}} - CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD = ccuda.CUmemRangeHandleType_enum.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD{{endif}} + CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD = cydriver.CUmemRangeHandleType_enum.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD{{endif}} {{if 'CU_MEM_RANGE_HANDLE_TYPE_MAX' in found_values}} - CU_MEM_RANGE_HANDLE_TYPE_MAX = ccuda.CUmemRangeHandleType_enum.CU_MEM_RANGE_HANDLE_TYPE_MAX{{endif}} + CU_MEM_RANGE_HANDLE_TYPE_MAX = cydriver.CUmemRangeHandleType_enum.CU_MEM_RANGE_HANDLE_TYPE_MAX{{endif}} {{endif}} {{if 'CUarraySparseSubresourceType_enum' in found_types}} @@ -4144,9 +4144,9 @@ class CUarraySparseSubresourceType(IntEnum): Sparse subresource types """ {{if 'CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL' in found_values}} - CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 
ccuda.CUarraySparseSubresourceType_enum.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL{{endif}} + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = cydriver.CUarraySparseSubresourceType_enum.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL{{endif}} {{if 'CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL' in found_values}} - CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = ccuda.CUarraySparseSubresourceType_enum.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL{{endif}} + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = cydriver.CUarraySparseSubresourceType_enum.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL{{endif}} {{endif}} {{if 'CUmemOperationType_enum' in found_types}} @@ -4155,9 +4155,9 @@ class CUmemOperationType(IntEnum): Memory operation types """ {{if 'CU_MEM_OPERATION_TYPE_MAP' in found_values}} - CU_MEM_OPERATION_TYPE_MAP = ccuda.CUmemOperationType_enum.CU_MEM_OPERATION_TYPE_MAP{{endif}} + CU_MEM_OPERATION_TYPE_MAP = cydriver.CUmemOperationType_enum.CU_MEM_OPERATION_TYPE_MAP{{endif}} {{if 'CU_MEM_OPERATION_TYPE_UNMAP' in found_values}} - CU_MEM_OPERATION_TYPE_UNMAP = ccuda.CUmemOperationType_enum.CU_MEM_OPERATION_TYPE_UNMAP{{endif}} + CU_MEM_OPERATION_TYPE_UNMAP = cydriver.CUmemOperationType_enum.CU_MEM_OPERATION_TYPE_UNMAP{{endif}} {{endif}} {{if 'CUmemHandleType_enum' in found_types}} @@ -4166,7 +4166,7 @@ class CUmemHandleType(IntEnum): Memory handle types """ {{if 'CU_MEM_HANDLE_TYPE_GENERIC' in found_values}} - CU_MEM_HANDLE_TYPE_GENERIC = ccuda.CUmemHandleType_enum.CU_MEM_HANDLE_TYPE_GENERIC{{endif}} + CU_MEM_HANDLE_TYPE_GENERIC = cydriver.CUmemHandleType_enum.CU_MEM_HANDLE_TYPE_GENERIC{{endif}} {{endif}} {{if 'CUmemAllocationCompType_enum' in found_types}} @@ -4177,11 +4177,11 @@ class CUmemAllocationCompType(IntEnum): {{if 'CU_MEM_ALLOCATION_COMP_NONE' in found_values}} #: Allocating non-compressible memory - CU_MEM_ALLOCATION_COMP_NONE = ccuda.CUmemAllocationCompType_enum.CU_MEM_ALLOCATION_COMP_NONE{{endif}} + CU_MEM_ALLOCATION_COMP_NONE = cydriver.CUmemAllocationCompType_enum.CU_MEM_ALLOCATION_COMP_NONE{{endif}} {{if 'CU_MEM_ALLOCATION_COMP_GENERIC' in found_values}} #: Allocating compressible memory - CU_MEM_ALLOCATION_COMP_GENERIC = ccuda.CUmemAllocationCompType_enum.CU_MEM_ALLOCATION_COMP_GENERIC{{endif}} + CU_MEM_ALLOCATION_COMP_GENERIC = cydriver.CUmemAllocationCompType_enum.CU_MEM_ALLOCATION_COMP_GENERIC{{endif}} {{endif}} {{if 'CUmulticastGranularity_flags_enum' in found_types}} @@ -4192,11 +4192,11 @@ class CUmulticastGranularity_flags(IntEnum): {{if 'CU_MULTICAST_GRANULARITY_MINIMUM' in found_values}} #: Minimum required granularity - CU_MULTICAST_GRANULARITY_MINIMUM = ccuda.CUmulticastGranularity_flags_enum.CU_MULTICAST_GRANULARITY_MINIMUM{{endif}} + CU_MULTICAST_GRANULARITY_MINIMUM = cydriver.CUmulticastGranularity_flags_enum.CU_MULTICAST_GRANULARITY_MINIMUM{{endif}} {{if 'CU_MULTICAST_GRANULARITY_RECOMMENDED' in found_values}} #: Recommended granularity for best performance - CU_MULTICAST_GRANULARITY_RECOMMENDED = ccuda.CUmulticastGranularity_flags_enum.CU_MULTICAST_GRANULARITY_RECOMMENDED{{endif}} + CU_MULTICAST_GRANULARITY_RECOMMENDED = cydriver.CUmulticastGranularity_flags_enum.CU_MULTICAST_GRANULARITY_RECOMMENDED{{endif}} {{endif}} {{if 'CUgraphExecUpdateResult_enum' in found_types}} @@ -4207,44 +4207,44 @@ class CUgraphExecUpdateResult(IntEnum): {{if 'CU_GRAPH_EXEC_UPDATE_SUCCESS' in found_values}} #: The update succeeded - CU_GRAPH_EXEC_UPDATE_SUCCESS = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_SUCCESS{{endif}} + CU_GRAPH_EXEC_UPDATE_SUCCESS = 
cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_SUCCESS{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR' in found_values}} #: The update failed for an unexpected reason which is described in the #: return value of the function - CU_GRAPH_EXEC_UPDATE_ERROR = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED' in found_values}} #: The update failed because the topology changed - CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED' in found_values}} #: The update failed because a node type changed - CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED' in found_values}} #: The update failed because the function of a kernel node changed #: (CUDA driver < 11.2) - CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED' in found_values}} #: The update failed because the parameters changed in a way that is #: not supported - CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED' in found_values}} #: The update failed because something about the node is not supported - CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE' in found_values}} #: The update failed because the function of a kernel node changed in #: an unsupported way - CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE{{endif}} {{if 'CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED' in found_values}} #: The update failed because the node attributes changed in a way that #: is not supported - CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED = ccuda.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED{{endif}} + CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED = cydriver.CUgraphExecUpdateResult_enum.CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED{{endif}} {{endif}} {{if 'CUmemPool_attribute_enum' in found_types}} @@ -4259,18 +4259,18 @@ class 
CUmemPool_attribute(IntEnum): #: dependency of the allocating stream on the free action exists. Cuda #: events and null stream interactions can create the required stream #: ordered dependencies. (default enabled) - CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES{{endif}} + CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES{{endif}} {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC' in found_values}} #: (value type = int) Allow reuse of already completed frees when there #: is no dependency between the free and allocation. (default enabled) - CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC{{endif}} + CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC{{endif}} {{if 'CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES' in found_values}} #: (value type = int) Allow cuMemAllocAsync to insert new stream #: dependencies in order to establish the stream ordering required to #: reuse a piece of memory released by cuFreeAsync (default enabled). - CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES{{endif}} + CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES{{endif}} {{if 'CU_MEMPOOL_ATTR_RELEASE_THRESHOLD' in found_values}} #: (value type = cuuint64_t) Amount of reserved memory in bytes to hold @@ -4278,29 +4278,29 @@ class CUmemPool_attribute(IntEnum): #: the release threshold bytes of memory are held by the memory pool, #: the allocator will try to release memory back to the OS on the next #: call to stream, event or context synchronize. (default 0) - CU_MEMPOOL_ATTR_RELEASE_THRESHOLD = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD{{endif}} + CU_MEMPOOL_ATTR_RELEASE_THRESHOLD = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD{{endif}} {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT' in found_values}} #: (value type = cuuint64_t) Amount of backing memory currently #: allocated for the mempool. - CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT{{endif}} + CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT{{endif}} {{if 'CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH' in found_values}} #: (value type = cuuint64_t) High watermark of backing memory allocated #: for the mempool since the last time it was reset. High watermark can #: only be reset to zero. - CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH{{endif}} + CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH{{endif}} {{if 'CU_MEMPOOL_ATTR_USED_MEM_CURRENT' in found_values}} #: (value type = cuuint64_t) Amount of memory from the pool that is #: currently in use by the application. 
- CU_MEMPOOL_ATTR_USED_MEM_CURRENT = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_CURRENT{{endif}} + CU_MEMPOOL_ATTR_USED_MEM_CURRENT = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_CURRENT{{endif}} {{if 'CU_MEMPOOL_ATTR_USED_MEM_HIGH' in found_values}} #: (value type = cuuint64_t) High watermark of the amount of memory #: from the pool that was in use by the application since the last time #: it was reset. High watermark can only be reset to zero. - CU_MEMPOOL_ATTR_USED_MEM_HIGH = ccuda.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_HIGH{{endif}} + CU_MEMPOOL_ATTR_USED_MEM_HIGH = cydriver.CUmemPool_attribute_enum.CU_MEMPOOL_ATTR_USED_MEM_HIGH{{endif}} {{endif}} {{if 'CUgraphMem_attribute_enum' in found_types}} @@ -4312,24 +4312,24 @@ class CUgraphMem_attribute(IntEnum): #: (value type = cuuint64_t) Amount of memory, in bytes, currently #: associated with graphs - CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT = ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT{{endif}} + CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT = cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT{{endif}} {{if 'CU_GRAPH_MEM_ATTR_USED_MEM_HIGH' in found_values}} #: (value type = cuuint64_t) High watermark of memory, in bytes, #: associated with graphs since the last time it was reset. High #: watermark can only be reset to zero. - CU_GRAPH_MEM_ATTR_USED_MEM_HIGH = ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH{{endif}} + CU_GRAPH_MEM_ATTR_USED_MEM_HIGH = cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH{{endif}} {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT' in found_values}} #: (value type = cuuint64_t) Amount of memory, in bytes, currently #: allocated for use by the CUDA graphs asynchronous allocator. - CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT = ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT{{endif}} + CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT = cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT{{endif}} {{if 'CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH' in found_values}} #: (value type = cuuint64_t) High watermark of memory, in bytes, #: currently allocated for use by the CUDA graphs asynchronous #: allocator. - CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH = ccuda.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH{{endif}} + CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH = cydriver.CUgraphMem_attribute_enum.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH{{endif}} {{endif}} {{if 'CUflushGPUDirectRDMAWritesOptions_enum' in found_types}} @@ -4342,13 +4342,13 @@ class CUflushGPUDirectRDMAWritesOptions(IntEnum): #: :py:obj:`~.cuFlushGPUDirectRDMAWrites()` and its CUDA Runtime API #: counterpart are supported on the device. - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST = ccuda.CUflushGPUDirectRDMAWritesOptions_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST{{endif}} + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST = cydriver.CUflushGPUDirectRDMAWritesOptions_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST{{endif}} {{if 'CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS' in found_values}} #: The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the #: :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported #: on the device. 
- CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS = ccuda.CUflushGPUDirectRDMAWritesOptions_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS{{endif}} + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS = cydriver.CUflushGPUDirectRDMAWritesOptions_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS{{endif}} {{endif}} {{if 'CUGPUDirectRDMAWritesOrdering_enum' in found_types}} @@ -4361,17 +4361,17 @@ class CUGPUDirectRDMAWritesOrdering(IntEnum): #: The device does not natively support ordering of remote writes. #: :py:obj:`~.cuFlushGPUDirectRDMAWrites()` can be leveraged if #: supported. - CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE = ccuda.CUGPUDirectRDMAWritesOrdering_enum.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE{{endif}} + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE = cydriver.CUGPUDirectRDMAWritesOrdering_enum.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE{{endif}} {{if 'CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER' in found_values}} #: Natively, the device can consistently consume remote writes, #: although other CUDA devices may not. - CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER = ccuda.CUGPUDirectRDMAWritesOrdering_enum.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER{{endif}} + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER = cydriver.CUGPUDirectRDMAWritesOrdering_enum.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER{{endif}} {{if 'CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES' in found_values}} #: Any CUDA device in the system can consistently consume remote writes #: to this device. - CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES = ccuda.CUGPUDirectRDMAWritesOrdering_enum.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES{{endif}} + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES = cydriver.CUGPUDirectRDMAWritesOrdering_enum.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES{{endif}} {{endif}} {{if 'CUflushGPUDirectRDMAWritesScope_enum' in found_types}} @@ -4383,11 +4383,11 @@ class CUflushGPUDirectRDMAWritesScope(IntEnum): #: Blocks until remote writes are visible to the CUDA device context #: owning the data. - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = ccuda.CUflushGPUDirectRDMAWritesScope_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER{{endif}} + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = cydriver.CUflushGPUDirectRDMAWritesScope_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER{{endif}} {{if 'CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES' in found_values}} #: Blocks until remote writes are visible to all CUDA device contexts. - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = ccuda.CUflushGPUDirectRDMAWritesScope_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES{{endif}} + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = cydriver.CUflushGPUDirectRDMAWritesScope_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES{{endif}} {{endif}} {{if 'CUflushGPUDirectRDMAWritesTarget_enum' in found_types}} @@ -4399,7 +4399,7 @@ class CUflushGPUDirectRDMAWritesTarget(IntEnum): #: Sets the target for :py:obj:`~.cuFlushGPUDirectRDMAWrites()` to the #: currently active CUDA device context. 
- CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = ccuda.CUflushGPUDirectRDMAWritesTarget_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX{{endif}} + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = cydriver.CUflushGPUDirectRDMAWritesTarget_enum.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX{{endif}} {{endif}} {{if 'CUgraphDebugDot_flags_enum' in found_types}} @@ -4410,67 +4410,67 @@ class CUgraphDebugDot_flags(IntEnum): {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE' in found_values}} #: Output all debug data as if every debug flag is enabled - CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES' in found_values}} #: Use CUDA Runtime structures for output - CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS' in found_values}} #: Adds CUDA_KERNEL_NODE_PARAMS values to output - CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS' in found_values}} #: Adds CUDA_MEMCPY3D values to output - CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS' in found_values}} #: Adds CUDA_MEMSET_NODE_PARAMS values to output - CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS' in found_values}} #: Adds CUDA_HOST_NODE_PARAMS values to output - CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS' in found_values}} #: Adds CUevent handle from record and wait nodes to output - CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS' in found_values}} #: Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output - CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS{{endif}} {{if 
'CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS' in found_values}} #: Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output - CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES' in found_values}} #: Adds CUkernelNodeAttrValue values to output - CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES' in found_values}} #: Adds node handles and every kernel function handle to output - CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS' in found_values}} #: Adds memory alloc node parameters to output - CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS' in found_values}} #: Adds memory free node parameters to output - CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS' in found_values}} #: Adds batch mem op node parameters to output - CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO' in found_values}} #: Adds edge numbering information - CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO{{endif}} {{if 'CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS' in found_values}} #: Adds conditional node parameters to output - CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS = ccuda.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS{{endif}} + CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS = cydriver.CUgraphDebugDot_flags_enum.CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS{{endif}} {{endif}} {{if 'CUuserObject_flags_enum' in found_types}} @@ -4482,7 +4482,7 @@ class CUuserObject_flags(IntEnum): #: Indicates the destructor execution is not synchronized by any CUDA #: handle. 
- CU_USER_OBJECT_NO_DESTRUCTOR_SYNC = ccuda.CUuserObject_flags_enum.CU_USER_OBJECT_NO_DESTRUCTOR_SYNC{{endif}} + CU_USER_OBJECT_NO_DESTRUCTOR_SYNC = cydriver.CUuserObject_flags_enum.CU_USER_OBJECT_NO_DESTRUCTOR_SYNC{{endif}} {{endif}} {{if 'CUuserObjectRetain_flags_enum' in found_types}} @@ -4494,7 +4494,7 @@ class CUuserObjectRetain_flags(IntEnum): #: Transfer references from the caller rather than creating new #: references. - CU_GRAPH_USER_OBJECT_MOVE = ccuda.CUuserObjectRetain_flags_enum.CU_GRAPH_USER_OBJECT_MOVE{{endif}} + CU_GRAPH_USER_OBJECT_MOVE = cydriver.CUuserObjectRetain_flags_enum.CU_GRAPH_USER_OBJECT_MOVE{{endif}} {{endif}} {{if 'CUgraphInstantiate_flags_enum' in found_types}} @@ -4505,25 +4505,25 @@ class CUgraphInstantiate_flags(IntEnum): {{if 'CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH' in found_values}} #: Automatically free memory allocated in a graph before relaunching. - CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH = ccuda.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH{{endif}} + CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH = cydriver.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH{{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD' in found_values}} #: Automatically upload the graph after instantiation. Only supported #: by :py:obj:`~.cuGraphInstantiateWithParams`. The upload will be #: performed using the stream provided in `instantiateParams`. - CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD = ccuda.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD{{endif}} + CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD = cydriver.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD{{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH' in found_values}} #: Instantiate the graph to be launchable from the device. This flag #: can only be used on platforms which support unified addressing. This #: flag cannot be used in conjunction with #: CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. - CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH = ccuda.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH{{endif}} + CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH = cydriver.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH{{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY' in found_values}} #: Run the graph using the per-node priority attributes rather than the #: priority of the stream it is launched into. 
- CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY = ccuda.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY{{endif}} + CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY = cydriver.CUgraphInstantiate_flags_enum.CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY{{endif}} {{endif}} {{if 'CUdeviceNumaConfig_enum' in found_types}} @@ -4534,12 +4534,12 @@ class CUdeviceNumaConfig(IntEnum): {{if 'CU_DEVICE_NUMA_CONFIG_NONE' in found_values}} #: The GPU is not a NUMA node - CU_DEVICE_NUMA_CONFIG_NONE = ccuda.CUdeviceNumaConfig_enum.CU_DEVICE_NUMA_CONFIG_NONE{{endif}} + CU_DEVICE_NUMA_CONFIG_NONE = cydriver.CUdeviceNumaConfig_enum.CU_DEVICE_NUMA_CONFIG_NONE{{endif}} {{if 'CU_DEVICE_NUMA_CONFIG_NUMA_NODE' in found_values}} #: The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its #: NUMA ID - CU_DEVICE_NUMA_CONFIG_NUMA_NODE = ccuda.CUdeviceNumaConfig_enum.CU_DEVICE_NUMA_CONFIG_NUMA_NODE{{endif}} + CU_DEVICE_NUMA_CONFIG_NUMA_NODE = cydriver.CUdeviceNumaConfig_enum.CU_DEVICE_NUMA_CONFIG_NUMA_NODE{{endif}} {{endif}} {{if 'CUmoduleLoadingMode_enum' in found_types}} @@ -4550,11 +4550,11 @@ class CUmoduleLoadingMode(IntEnum): {{if 'CU_MODULE_EAGER_LOADING' in found_values}} #: Lazy Kernel Loading is not enabled - CU_MODULE_EAGER_LOADING = ccuda.CUmoduleLoadingMode_enum.CU_MODULE_EAGER_LOADING{{endif}} + CU_MODULE_EAGER_LOADING = cydriver.CUmoduleLoadingMode_enum.CU_MODULE_EAGER_LOADING{{endif}} {{if 'CU_MODULE_LAZY_LOADING' in found_values}} #: Lazy Kernel Loading is enabled - CU_MODULE_LAZY_LOADING = ccuda.CUmoduleLoadingMode_enum.CU_MODULE_LAZY_LOADING{{endif}} + CU_MODULE_LAZY_LOADING = cydriver.CUmoduleLoadingMode_enum.CU_MODULE_LAZY_LOADING{{endif}} {{endif}} {{if 'CUfunctionLoadingState_enum' in found_types}} @@ -4563,11 +4563,11 @@ class CUfunctionLoadingState(IntEnum): """ {{if 'CU_FUNCTION_LOADING_STATE_UNLOADED' in found_values}} - CU_FUNCTION_LOADING_STATE_UNLOADED = ccuda.CUfunctionLoadingState_enum.CU_FUNCTION_LOADING_STATE_UNLOADED{{endif}} + CU_FUNCTION_LOADING_STATE_UNLOADED = cydriver.CUfunctionLoadingState_enum.CU_FUNCTION_LOADING_STATE_UNLOADED{{endif}} {{if 'CU_FUNCTION_LOADING_STATE_LOADED' in found_values}} - CU_FUNCTION_LOADING_STATE_LOADED = ccuda.CUfunctionLoadingState_enum.CU_FUNCTION_LOADING_STATE_LOADED{{endif}} + CU_FUNCTION_LOADING_STATE_LOADED = cydriver.CUfunctionLoadingState_enum.CU_FUNCTION_LOADING_STATE_LOADED{{endif}} {{if 'CU_FUNCTION_LOADING_STATE_MAX' in found_values}} - CU_FUNCTION_LOADING_STATE_MAX = ccuda.CUfunctionLoadingState_enum.CU_FUNCTION_LOADING_STATE_MAX{{endif}} + CU_FUNCTION_LOADING_STATE_MAX = cydriver.CUfunctionLoadingState_enum.CU_FUNCTION_LOADING_STATE_MAX{{endif}} {{endif}} {{if 'CUcoredumpSettings_enum' in found_types}} @@ -4576,21 +4576,21 @@ class CUcoredumpSettings(IntEnum): Flags for choosing a coredump attribute to get/set """ {{if 'CU_COREDUMP_ENABLE_ON_EXCEPTION' in found_values}} - CU_COREDUMP_ENABLE_ON_EXCEPTION = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_ON_EXCEPTION{{endif}} + CU_COREDUMP_ENABLE_ON_EXCEPTION = cydriver.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_ON_EXCEPTION{{endif}} {{if 'CU_COREDUMP_TRIGGER_HOST' in found_values}} - CU_COREDUMP_TRIGGER_HOST = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_TRIGGER_HOST{{endif}} + CU_COREDUMP_TRIGGER_HOST = cydriver.CUcoredumpSettings_enum.CU_COREDUMP_TRIGGER_HOST{{endif}} {{if 'CU_COREDUMP_LIGHTWEIGHT' in found_values}} - CU_COREDUMP_LIGHTWEIGHT = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_LIGHTWEIGHT{{endif}} + CU_COREDUMP_LIGHTWEIGHT = 
cydriver.CUcoredumpSettings_enum.CU_COREDUMP_LIGHTWEIGHT{{endif}} {{if 'CU_COREDUMP_ENABLE_USER_TRIGGER' in found_values}} - CU_COREDUMP_ENABLE_USER_TRIGGER = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_USER_TRIGGER{{endif}} + CU_COREDUMP_ENABLE_USER_TRIGGER = cydriver.CUcoredumpSettings_enum.CU_COREDUMP_ENABLE_USER_TRIGGER{{endif}} {{if 'CU_COREDUMP_FILE' in found_values}} - CU_COREDUMP_FILE = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_FILE{{endif}} + CU_COREDUMP_FILE = cydriver.CUcoredumpSettings_enum.CU_COREDUMP_FILE{{endif}} {{if 'CU_COREDUMP_PIPE' in found_values}} - CU_COREDUMP_PIPE = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_PIPE{{endif}} + CU_COREDUMP_PIPE = cydriver.CUcoredumpSettings_enum.CU_COREDUMP_PIPE{{endif}} {{if 'CU_COREDUMP_GENERATION_FLAGS' in found_values}} - CU_COREDUMP_GENERATION_FLAGS = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_GENERATION_FLAGS{{endif}} + CU_COREDUMP_GENERATION_FLAGS = cydriver.CUcoredumpSettings_enum.CU_COREDUMP_GENERATION_FLAGS{{endif}} {{if 'CU_COREDUMP_MAX' in found_values}} - CU_COREDUMP_MAX = ccuda.CUcoredumpSettings_enum.CU_COREDUMP_MAX{{endif}} + CU_COREDUMP_MAX = cydriver.CUcoredumpSettings_enum.CU_COREDUMP_MAX{{endif}} {{endif}} {{if 'CUCoredumpGenerationFlags' in found_types}} @@ -4599,21 +4599,21 @@ class CUCoredumpGenerationFlags(IntEnum): Flags for controlling coredump contents """ {{if 'CU_COREDUMP_DEFAULT_FLAGS' in found_values}} - CU_COREDUMP_DEFAULT_FLAGS = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_DEFAULT_FLAGS{{endif}} + CU_COREDUMP_DEFAULT_FLAGS = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_DEFAULT_FLAGS{{endif}} {{if 'CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES' in found_values}} - CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES{{endif}} + CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES{{endif}} {{if 'CU_COREDUMP_SKIP_GLOBAL_MEMORY' in found_values}} - CU_COREDUMP_SKIP_GLOBAL_MEMORY = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_GLOBAL_MEMORY{{endif}} + CU_COREDUMP_SKIP_GLOBAL_MEMORY = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_GLOBAL_MEMORY{{endif}} {{if 'CU_COREDUMP_SKIP_SHARED_MEMORY' in found_values}} - CU_COREDUMP_SKIP_SHARED_MEMORY = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_SHARED_MEMORY{{endif}} + CU_COREDUMP_SKIP_SHARED_MEMORY = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_SHARED_MEMORY{{endif}} {{if 'CU_COREDUMP_SKIP_LOCAL_MEMORY' in found_values}} - CU_COREDUMP_SKIP_LOCAL_MEMORY = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_LOCAL_MEMORY{{endif}} + CU_COREDUMP_SKIP_LOCAL_MEMORY = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_LOCAL_MEMORY{{endif}} {{if 'CU_COREDUMP_SKIP_ABORT' in found_values}} - CU_COREDUMP_SKIP_ABORT = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_ABORT{{endif}} + CU_COREDUMP_SKIP_ABORT = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_ABORT{{endif}} {{if 'CU_COREDUMP_SKIP_CONSTBANK_MEMORY' in found_values}} - CU_COREDUMP_SKIP_CONSTBANK_MEMORY = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_CONSTBANK_MEMORY{{endif}} + CU_COREDUMP_SKIP_CONSTBANK_MEMORY = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_CONSTBANK_MEMORY{{endif}} {{if 'CU_COREDUMP_LIGHTWEIGHT_FLAGS' in found_values}} - CU_COREDUMP_LIGHTWEIGHT_FLAGS = ccuda.CUCoredumpGenerationFlags.CU_COREDUMP_LIGHTWEIGHT_FLAGS{{endif}} + CU_COREDUMP_LIGHTWEIGHT_FLAGS = cydriver.CUCoredumpGenerationFlags.CU_COREDUMP_LIGHTWEIGHT_FLAGS{{endif}} {{endif}} {{if 
'CUgreenCtxCreate_flags' in found_types}} @@ -4624,7 +4624,7 @@ class CUgreenCtxCreate_flags(IntEnum): {{if 'CU_GREEN_CTX_DEFAULT_STREAM' in found_values}} #: Required. Creates a default stream to use inside the green context - CU_GREEN_CTX_DEFAULT_STREAM = ccuda.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM{{endif}} + CU_GREEN_CTX_DEFAULT_STREAM = cydriver.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM{{endif}} {{endif}} {{if 'CUdevSmResourceSplit_flags' in found_types}} @@ -4633,9 +4633,9 @@ class CUdevSmResourceSplit_flags(IntEnum): """ {{if 'CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING' in found_values}} - CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING = ccuda.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING{{endif}} + CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING = cydriver.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING{{endif}} {{if 'CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE' in found_values}} - CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE = ccuda.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE{{endif}} + CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE = cydriver.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE{{endif}} {{endif}} {{if 'CUdevResourceType' in found_types}} @@ -4644,11 +4644,11 @@ class CUdevResourceType(IntEnum): Type of resource """ {{if 'CU_DEV_RESOURCE_TYPE_INVALID' in found_values}} - CU_DEV_RESOURCE_TYPE_INVALID = ccuda.CUdevResourceType.CU_DEV_RESOURCE_TYPE_INVALID{{endif}} + CU_DEV_RESOURCE_TYPE_INVALID = cydriver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_INVALID{{endif}} {{if 'CU_DEV_RESOURCE_TYPE_SM' in found_values}} #: Streaming multiprocessors related information - CU_DEV_RESOURCE_TYPE_SM = ccuda.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM{{endif}} + CU_DEV_RESOURCE_TYPE_SM = cydriver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM{{endif}} {{endif}} {{if 'CUoutput_mode_enum' in found_types}} @@ -4659,11 +4659,11 @@ class CUoutput_mode(IntEnum): {{if 'CU_OUT_KEY_VALUE_PAIR' in found_values}} #: Output mode Key-Value pair format. - CU_OUT_KEY_VALUE_PAIR = ccuda.CUoutput_mode_enum.CU_OUT_KEY_VALUE_PAIR{{endif}} + CU_OUT_KEY_VALUE_PAIR = cydriver.CUoutput_mode_enum.CU_OUT_KEY_VALUE_PAIR{{endif}} {{if 'CU_OUT_CSV' in found_values}} #: Output mode Comma separated values format. 
- CU_OUT_CSV = ccuda.CUoutput_mode_enum.CU_OUT_CSV{{endif}} + CU_OUT_CSV = cydriver.CUoutput_mode_enum.CU_OUT_CSV{{endif}} {{endif}} {{if True}} @@ -4674,11 +4674,11 @@ class CUeglFrameType(IntEnum): {{if True}} #: Frame type CUDA array - CU_EGL_FRAME_TYPE_ARRAY = ccuda.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY{{endif}} + CU_EGL_FRAME_TYPE_ARRAY = cydriver.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_ARRAY{{endif}} {{if True}} #: Frame type pointer - CU_EGL_FRAME_TYPE_PITCH = ccuda.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_PITCH{{endif}} + CU_EGL_FRAME_TYPE_PITCH = cydriver.CUeglFrameType_enum.CU_EGL_FRAME_TYPE_PITCH{{endif}} {{endif}} {{if True}} @@ -4699,11 +4699,11 @@ class CUeglResourceLocationFlags(IntEnum): {{if True}} #: Resource location sysmem - CU_EGL_RESOURCE_LOCATION_SYSMEM = ccuda.CUeglResourceLocationFlags_enum.CU_EGL_RESOURCE_LOCATION_SYSMEM{{endif}} + CU_EGL_RESOURCE_LOCATION_SYSMEM = cydriver.CUeglResourceLocationFlags_enum.CU_EGL_RESOURCE_LOCATION_SYSMEM{{endif}} {{if True}} #: Resource location vidmem - CU_EGL_RESOURCE_LOCATION_VIDMEM = ccuda.CUeglResourceLocationFlags_enum.CU_EGL_RESOURCE_LOCATION_VIDMEM{{endif}} + CU_EGL_RESOURCE_LOCATION_VIDMEM = cydriver.CUeglResourceLocationFlags_enum.CU_EGL_RESOURCE_LOCATION_VIDMEM{{endif}} {{endif}} {{if True}} @@ -4718,557 +4718,557 @@ class CUeglColorFormat(IntEnum): #: Y, U, V in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YUV420_PLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_PLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) with VU byte ordering, #: width, height ratio same as YUV420Planar. - CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR{{endif}} {{if True}} #: Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V #: height = Y height. - CU_EGL_COLOR_FORMAT_YUV422_PLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YUV422_PLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR{{endif}} {{if True}} #: Y, UV in two surfaces with VU byte ordering, width, height ratio #: same as YUV422Planar. - CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR{{endif}} {{if True}} #: R/G/B three channels in one surface with BGR byte ordering. Only #: pitch linear format supported. - CU_EGL_COLOR_FORMAT_RGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGB{{endif}} + CU_EGL_COLOR_FORMAT_RGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGB{{endif}} {{if True}} #: R/G/B three channels in one surface with RGB byte ordering. Only #: pitch linear format supported. - CU_EGL_COLOR_FORMAT_BGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGR{{endif}} + CU_EGL_COLOR_FORMAT_BGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGR{{endif}} {{if True}} #: R/G/B/A four channels in one surface with BGRA byte ordering. 
- CU_EGL_COLOR_FORMAT_ARGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ARGB{{endif}} + CU_EGL_COLOR_FORMAT_ARGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ARGB{{endif}} {{if True}} #: R/G/B/A four channels in one surface with ABGR byte ordering. - CU_EGL_COLOR_FORMAT_RGBA = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGBA{{endif}} + CU_EGL_COLOR_FORMAT_RGBA = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RGBA{{endif}} {{if True}} #: single luminance channel in one surface. - CU_EGL_COLOR_FORMAT_L = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_L{{endif}} + CU_EGL_COLOR_FORMAT_L = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_L{{endif}} {{if True}} #: single color channel in one surface. - CU_EGL_COLOR_FORMAT_R = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_R{{endif}} + CU_EGL_COLOR_FORMAT_R = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_R{{endif}} {{if True}} #: Y, U, V in three surfaces, each in a separate surface, U/V width = Y #: width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YUV444_PLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YUV444_PLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) with VU byte ordering, #: width, height ratio same as YUV444Planar. - CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as UYVY in one channel. - CU_EGL_COLOR_FORMAT_YUYV_422 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_422{{endif}} + CU_EGL_COLOR_FORMAT_YUYV_422 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_422{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as YUYV in one channel. - CU_EGL_COLOR_FORMAT_UYVY_422 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_422{{endif}} + CU_EGL_COLOR_FORMAT_UYVY_422 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_422{{endif}} {{if True}} #: R/G/B/A four channels in one surface with RGBA byte ordering. - CU_EGL_COLOR_FORMAT_ABGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ABGR{{endif}} + CU_EGL_COLOR_FORMAT_ABGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_ABGR{{endif}} {{if True}} #: R/G/B/A four channels in one surface with ARGB byte ordering. - CU_EGL_COLOR_FORMAT_BGRA = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGRA{{endif}} + CU_EGL_COLOR_FORMAT_BGRA = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BGRA{{endif}} {{if True}} #: Alpha color format - one channel in one surface. - CU_EGL_COLOR_FORMAT_A = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_A{{endif}} + CU_EGL_COLOR_FORMAT_A = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_A{{endif}} {{if True}} #: R/G color format - two channels in one surface with GR byte ordering - CU_EGL_COLOR_FORMAT_RG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RG{{endif}} + CU_EGL_COLOR_FORMAT_RG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_RG{{endif}} {{if True}} #: Y, U, V, A four channels in one surface, interleaved as VUYA. 
- CU_EGL_COLOR_FORMAT_AYUV = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV{{endif}} + CU_EGL_COLOR_FORMAT_AYUV = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V #: width = Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V #: width = 1/2 Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR{{endif}} {{if True}} #: Y12, V12U12 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR{{endif}} {{if True}} #: Y12, V12U12 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as YVYU in one #: channel. - CU_EGL_COLOR_FORMAT_VYUY_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY_ER{{endif}} + CU_EGL_COLOR_FORMAT_VYUY_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY_ER{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as YUYV in one #: channel. 
- CU_EGL_COLOR_FORMAT_UYVY_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_ER{{endif}} + CU_EGL_COLOR_FORMAT_UYVY_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_UYVY_ER{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as UYVY in one #: channel. - CU_EGL_COLOR_FORMAT_YUYV_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUYV_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUYV_ER{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as VYUY in one #: channel. - CU_EGL_COLOR_FORMAT_YVYU_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU_ER{{endif}} + CU_EGL_COLOR_FORMAT_YVYU_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU_ER{{endif}} {{if True}} #: Extended Range Y, U, V three channels in one surface, interleaved as #: VUY. Only pitch linear format supported. - CU_EGL_COLOR_FORMAT_YUV_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUV_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV_ER{{endif}} {{if True}} #: Extended Range Y, U, V, A four channels in one surface, interleaved #: as AVUY. - CU_EGL_COLOR_FORMAT_YUVA_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUVA_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA_ER{{endif}} {{if True}} #: Extended Range Y, U, V, A four channels in one surface, interleaved #: as VUYA. - CU_EGL_COLOR_FORMAT_AYUV_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV_ER{{endif}} + CU_EGL_COLOR_FORMAT_AYUV_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_AYUV_ER{{endif}} {{if True}} #: Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V #: height = Y height. - CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER{{endif}} {{if True}} #: Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, #: U/V height = Y height. - CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER{{endif}} {{if True}} #: Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER{{endif}} {{if True}} #: Extended Range Y, UV in two surfaces (UV as one surface) with VU #: byte ordering, U/V width = Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y, UV in two surfaces (UV as one surface) with VU #: byte ordering, U/V width = 1/2 Y width, U/V height = Y height. 
- CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y, UV in two surfaces (UV as one surface) with VU #: byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V #: height = Y height. - CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER{{endif}} {{if True}} #: Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, #: U/V height = Y height. - CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER{{endif}} {{if True}} #: Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER{{endif}} {{if True}} #: Extended Range Y, VU in two surfaces (VU as one surface) with UV #: byte ordering, U/V width = Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y, VU in two surfaces (VU as one surface) with UV #: byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y, VU in two surfaces (VU as one surface) with UV #: byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved RGGB #: ordering. - CU_EGL_COLOR_FORMAT_BAYER_RGGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RGGB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_RGGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RGGB{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved BGGR #: ordering. - CU_EGL_COLOR_FORMAT_BAYER_BGGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BGGR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_BGGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BGGR{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved GRBG #: ordering. 
- CU_EGL_COLOR_FORMAT_BAYER_GRBG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GRBG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_GRBG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GRBG{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved GBRG #: ordering. - CU_EGL_COLOR_FORMAT_BAYER_GBRG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GBRG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_GBRG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_GBRG{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved RGGB #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER10_RGGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_RGGB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER10_RGGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_RGGB{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved BGGR #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER10_BGGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_BGGR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER10_BGGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_BGGR{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved GRBG #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER10_GRBG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GRBG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER10_GRBG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GRBG{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved GBRG #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER10_GBRG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GBRG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER10_GBRG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_GBRG{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved RGGB #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_RGGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RGGB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_RGGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RGGB{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved BGGR #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_BGGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BGGR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_BGGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BGGR{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved GRBG #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_GRBG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GRBG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_GRBG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GRBG{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved GBRG #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_GBRG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GBRG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_GBRG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_GBRG{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved RGGB #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. 
- CU_EGL_COLOR_FORMAT_BAYER14_RGGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_RGGB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER14_RGGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_RGGB{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved BGGR #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER14_BGGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_BGGR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER14_BGGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_BGGR{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved GRBG #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER14_GRBG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GRBG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER14_GRBG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GRBG{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved GBRG #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER14_GBRG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GBRG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER14_GBRG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER14_GBRG{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved RGGB #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER20_RGGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_RGGB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER20_RGGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_RGGB{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved BGGR #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER20_BGGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_BGGR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER20_BGGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_BGGR{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved GRBG #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER20_GRBG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GRBG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER20_GRBG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GRBG{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved GBRG #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER20_GBRG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GBRG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER20_GBRG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER20_GBRG{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = Y #: width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YVU444_PLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YVU444_PLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU444_PLANAR{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_YVU422_PLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YVU422_PLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU422_PLANAR{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. 
- CU_EGL_COLOR_FORMAT_YVU420_PLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_PLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved RGGB ordering and mapped to opaque integer #: datatype. - CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved BGGR ordering and mapped to opaque integer #: datatype. - CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved GRBG ordering and mapped to opaque integer #: datatype. - CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved GBRG ordering and mapped to opaque integer #: datatype. - CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved BCCR #: ordering. - CU_EGL_COLOR_FORMAT_BAYER_BCCR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BCCR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_BCCR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_BCCR{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved RCCB #: ordering. - CU_EGL_COLOR_FORMAT_BAYER_RCCB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RCCB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_RCCB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_RCCB{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved CRBC #: ordering. - CU_EGL_COLOR_FORMAT_BAYER_CRBC = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CRBC{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_CRBC = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CRBC{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved CBRC #: ordering. - CU_EGL_COLOR_FORMAT_BAYER_CBRC = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CBRC{{endif}} + CU_EGL_COLOR_FORMAT_BAYER_CBRC = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER_CBRC{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved CCCC #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER10_CCCC = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_CCCC{{endif}} + CU_EGL_COLOR_FORMAT_BAYER10_CCCC = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER10_CCCC{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved BCCR #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. 
- CU_EGL_COLOR_FORMAT_BAYER12_BCCR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BCCR{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_BCCR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_BCCR{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved RCCB #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_RCCB = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RCCB{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_RCCB = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_RCCB{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved CRBC #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_CRBC = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CRBC{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_CRBC = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CRBC{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved CBRC #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_CBRC = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CBRC{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_CBRC = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CBRC{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved CCCC #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - CU_EGL_COLOR_FORMAT_BAYER12_CCCC = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CCCC{{endif}} + CU_EGL_COLOR_FORMAT_BAYER12_CCCC = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_BAYER12_CCCC{{endif}} {{if True}} #: Color format for single Y plane. - CU_EGL_COLOR_FORMAT_Y = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y{{endif}} + CU_EGL_COLOR_FORMAT_Y = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020{{endif}} {{if True}} #: Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V #: height= 1/2 Y height. - CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020{{endif}} {{if True}} #: Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V #: height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. 
- CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709{{endif}} {{if True}} #: Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V #: height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709{{endif}} + CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709{{endif}} {{if True}} #: Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V #: height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709{{endif}} + CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y #: width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y #: width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020{{endif}} {{if True}} #: Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y #: width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020{{endif}} {{if True}} #: Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y #: width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR{{endif}} {{if True}} #: Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y #: width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709{{endif}} {{if True}} #: Extended Range Color format for single Y plane. 
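The semi-planar comments above fix the plane geometry precisely (U/V at half the Y width and, for 4:2:0, half the Y height). As a quick sanity check, a hedged helper (names are illustrative, 8-bit samples assumed) computing the buffer sizes that layout implies:

    def yuv420_semiplanar_sizes(width, height):
        # Full-resolution luma plane plus one interleaved UV surface at
        # half width and half height, as described in the comments above.
        y_bytes = width * height
        uv_bytes = (width // 2) * (height // 2) * 2
        return y_bytes, uv_bytes

    y, uv = yuv420_semiplanar_sizes(1920, 1080)
    assert (y, uv) == (2073600, 1036800)   # 1.5 bytes per pixel overall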
- CU_EGL_COLOR_FORMAT_Y_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_ER{{endif}} {{if True}} #: Extended Range Color format for single Y plane. - CU_EGL_COLOR_FORMAT_Y_709_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_709_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y_709_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y_709_ER{{endif}} {{if True}} #: Extended Range Color format for single Y10 plane. - CU_EGL_COLOR_FORMAT_Y10_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y10_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_ER{{endif}} {{if True}} #: Extended Range Color format for single Y10 plane. - CU_EGL_COLOR_FORMAT_Y10_709_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_709_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y10_709_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10_709_ER{{endif}} {{if True}} #: Extended Range Color format for single Y12 plane. - CU_EGL_COLOR_FORMAT_Y12_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y12_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_ER{{endif}} {{if True}} #: Extended Range Color format for single Y12 plane. - CU_EGL_COLOR_FORMAT_Y12_709_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_709_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y12_709_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12_709_ER{{endif}} {{if True}} #: Y, U, V, A four channels in one surface, interleaved as AVUY. - CU_EGL_COLOR_FORMAT_YUVA = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA{{endif}} + CU_EGL_COLOR_FORMAT_YUVA = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUVA{{endif}} {{if True}} #: Y, U, V three channels in one surface, interleaved as VUY. Only #: pitch linear format supported. - CU_EGL_COLOR_FORMAT_YUV = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV{{endif}} + CU_EGL_COLOR_FORMAT_YUV = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YUV{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as YVYU in one channel. - CU_EGL_COLOR_FORMAT_YVYU = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU{{endif}} + CU_EGL_COLOR_FORMAT_YVYU = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_YVYU{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as VYUY in one channel. - CU_EGL_COLOR_FORMAT_VYUY = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY{{endif}} + CU_EGL_COLOR_FORMAT_VYUY = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_VYUY{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. 
- CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. - CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. 
- CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER{{endif}} + CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER{{endif}} {{if True}} - CU_EGL_COLOR_FORMAT_MAX = ccuda.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_MAX{{endif}} + CU_EGL_COLOR_FORMAT_MAX = cydriver.CUeglColorFormat_enum.CU_EGL_COLOR_FORMAT_MAX{{endif}} {{endif}} {{if True}} @@ -5279,17 +5279,17 @@ class CUGLDeviceList(IntEnum): {{if True}} #: The CUDA devices for all GPUs used by the current OpenGL context - CU_GL_DEVICE_LIST_ALL = ccuda.CUGLDeviceList_enum.CU_GL_DEVICE_LIST_ALL{{endif}} + CU_GL_DEVICE_LIST_ALL = cydriver.CUGLDeviceList_enum.CU_GL_DEVICE_LIST_ALL{{endif}} {{if True}} #: The CUDA devices for the GPUs used by the current OpenGL context in #: its currently rendering frame - CU_GL_DEVICE_LIST_CURRENT_FRAME = ccuda.CUGLDeviceList_enum.CU_GL_DEVICE_LIST_CURRENT_FRAME{{endif}} + CU_GL_DEVICE_LIST_CURRENT_FRAME = cydriver.CUGLDeviceList_enum.CU_GL_DEVICE_LIST_CURRENT_FRAME{{endif}} {{if True}} #: The CUDA devices for the GPUs to be used by the current OpenGL #: context in the next frame - CU_GL_DEVICE_LIST_NEXT_FRAME = ccuda.CUGLDeviceList_enum.CU_GL_DEVICE_LIST_NEXT_FRAME{{endif}} + CU_GL_DEVICE_LIST_NEXT_FRAME = cydriver.CUGLDeviceList_enum.CU_GL_DEVICE_LIST_NEXT_FRAME{{endif}} {{endif}} {{if True}} @@ -5298,11 +5298,11 @@ class CUGLmap_flags(IntEnum): Flags to map or unmap a resource """ {{if True}} - CU_GL_MAP_RESOURCE_FLAGS_NONE = ccuda.CUGLmap_flags_enum.CU_GL_MAP_RESOURCE_FLAGS_NONE{{endif}} + CU_GL_MAP_RESOURCE_FLAGS_NONE = cydriver.CUGLmap_flags_enum.CU_GL_MAP_RESOURCE_FLAGS_NONE{{endif}} {{if True}} - CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = ccuda.CUGLmap_flags_enum.CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY{{endif}} + CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = cydriver.CUGLmap_flags_enum.CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY{{endif}} {{if True}} - CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = ccuda.CUGLmap_flags_enum.CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD{{endif}} + CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = cydriver.CUGLmap_flags_enum.CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD{{endif}} {{endif}} {{if 'CUdeviceptr' in found_types}} @@ -5321,7 +5321,7 @@ cdef class CUdeviceptr: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -5351,7 +5351,7 @@ cdef class CUdevice: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -5381,7 +5381,7 @@ cdef class CUtexObject: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -5411,7 +5411,7 @@ cdef class CUsurfObject: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -5439,7 +5439,7 @@ cdef class CUgraphConditionalHandle: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -5462,32 +5462,32 @@ class CUkernelNodeAttrID(IntEnum): {{if 'CU_LAUNCH_ATTRIBUTE_IGNORE' in found_values}} #: Ignored entry, for convenient composition - CU_LAUNCH_ATTRIBUTE_IGNORE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_IGNORE{{endif}} + CU_LAUNCH_ATTRIBUTE_IGNORE = 
cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_IGNORE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.accessPolicyWindow`. - CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW{{endif}} + CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_COOPERATIVE' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.cooperative`. - CU_LAUNCH_ATTRIBUTE_COOPERATIVE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_COOPERATIVE{{endif}} + CU_LAUNCH_ATTRIBUTE_COOPERATIVE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_COOPERATIVE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY' in found_values}} #: Valid for streams. See #: :py:obj:`~.CUlaunchAttributeValue.syncPolicy`. - CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY{{endif}} + CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.clusterDim`. - CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION{{endif}} + CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.clusterSchedulingPolicyPreference`. - CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} + CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION' in found_values}} #: Valid for launches. Setting @@ -5499,7 +5499,7 @@ class CUkernelNodeAttrID(IntEnum): #: The dependent launches can choose to wait on the dependency using #: the programmatic sync (cudaGridDependencySynchronize() or equivalent #: PTX instructions). - CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION{{endif}} + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT' in found_values}} #: Valid for launches. Set @@ -5523,22 +5523,22 @@ class CUkernelNodeAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT{{endif}} + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PRIORITY' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.priority`. 
- CU_LAUNCH_ATTRIBUTE_PRIORITY = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PRIORITY{{endif}} + CU_LAUNCH_ATTRIBUTE_PRIORITY = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PRIORITY{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.memSyncDomainMap`. - CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP{{endif}} + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.memSyncDomain`. - CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN{{endif}} + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT' in found_values}} #: Valid for launches. Set @@ -5559,7 +5559,7 @@ class CUkernelNodeAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT{{endif}} + CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE' in found_values}} #: Valid for graph nodes, launches. This attribute is graphs-only, and @@ -5593,7 +5593,7 @@ class CUkernelNodeAttrID(IntEnum): #: graph, if host-side executable graph updates are made to the device- #: updatable nodes, the graph must be uploaded before it is launched #: again. - CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE{{endif}} + CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT' in found_values}} #: Valid for launches. On devices where the L1 cache and shared memory @@ -5605,7 +5605,7 @@ class CUkernelNodeAttrID(IntEnum): #: :py:obj:`~.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT`. This #: is only a hint, and the CUDA driver can choose a different #: configuration if required for the launch. - CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} + CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} {{endif}} {{if 'CUlaunchAttributeID_enum' in found_types}} @@ -5617,32 +5617,32 @@ class CUstreamAttrID(IntEnum): {{if 'CU_LAUNCH_ATTRIBUTE_IGNORE' in found_values}} #: Ignored entry, for convenient composition - CU_LAUNCH_ATTRIBUTE_IGNORE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_IGNORE{{endif}} + CU_LAUNCH_ATTRIBUTE_IGNORE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_IGNORE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.accessPolicyWindow`. 
- CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW{{endif}} + CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_COOPERATIVE' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.cooperative`. - CU_LAUNCH_ATTRIBUTE_COOPERATIVE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_COOPERATIVE{{endif}} + CU_LAUNCH_ATTRIBUTE_COOPERATIVE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_COOPERATIVE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY' in found_values}} #: Valid for streams. See #: :py:obj:`~.CUlaunchAttributeValue.syncPolicy`. - CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY{{endif}} + CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.clusterDim`. - CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION{{endif}} + CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.clusterSchedulingPolicyPreference`. - CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} + CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION' in found_values}} #: Valid for launches. Setting @@ -5654,7 +5654,7 @@ class CUstreamAttrID(IntEnum): #: The dependent launches can choose to wait on the dependency using #: the programmatic sync (cudaGridDependencySynchronize() or equivalent #: PTX instructions). - CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION{{endif}} + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT' in found_values}} #: Valid for launches. Set @@ -5678,22 +5678,22 @@ class CUstreamAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT{{endif}} + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PRIORITY' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.priority`. 
- CU_LAUNCH_ATTRIBUTE_PRIORITY = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PRIORITY{{endif}} + CU_LAUNCH_ATTRIBUTE_PRIORITY = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PRIORITY{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.memSyncDomainMap`. - CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP{{endif}} + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.CUlaunchAttributeValue.memSyncDomain`. - CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN{{endif}} + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT' in found_values}} #: Valid for launches. Set @@ -5714,7 +5714,7 @@ class CUstreamAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT{{endif}} + CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE' in found_values}} #: Valid for graph nodes, launches. This attribute is graphs-only, and @@ -5748,7 +5748,7 @@ class CUstreamAttrID(IntEnum): #: graph, if host-side executable graph updates are made to the device- #: updatable nodes, the graph must be uploaded before it is launched #: again. - CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE{{endif}} + CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE{{endif}} {{if 'CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT' in found_values}} #: Valid for launches. On devices where the L1 cache and shared memory @@ -5760,7 +5760,7 @@ class CUstreamAttrID(IntEnum): #: :py:obj:`~.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT`. This #: is only a hint, and the CUDA driver can choose a different #: configuration if required for the launch. 
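Unlike the unconditional {{if True}} guards in the EGL hunks, the launch-attribute members here sit behind {{if '...' in found_values}}, so a member is only generated when header parsing actually found the symbol; older toolkits yield a smaller enum rather than an import error. A rough pure-Python analogue of that conditional generation, using a hypothetical found_values subset:

    from enum import IntEnum

    # Hypothetical subset of what header parsing might discover; the real
    # mapping comes from cuda.h and varies with the toolkit version.
    found_values = {
        "CU_LAUNCH_ATTRIBUTE_IGNORE": 0,
        "CU_LAUNCH_ATTRIBUTE_PRIORITY": 8,
    }

    # Only discovered symbols become members.
    CUlaunchAttributeID = IntEnum("CUlaunchAttributeID", found_values)

    assert CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY == 8
    assert "CU_LAUNCH_ATTRIBUTE_COOPERATIVE" not in CUlaunchAttributeID.__members__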
- CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = ccuda.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} + CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = cydriver.CUlaunchAttributeID_enum.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT{{endif}} {{endif}} {{if 'CUmemGenericAllocationHandle' in found_types}} @@ -5777,7 +5777,7 @@ cdef class CUmemGenericAllocationHandle: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -5806,9 +5806,9 @@ cdef class CUcontext: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -5837,9 +5837,9 @@ cdef class CUmodule: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -5868,9 +5868,9 @@ cdef class CUfunction: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -5899,9 +5899,9 @@ cdef class CUlibrary: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -5930,9 +5930,9 @@ cdef class CUkernel: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -5961,9 +5961,9 @@ cdef class CUarray: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -5992,9 +5992,9 @@ cdef class CUmipmappedArray: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6023,9 +6023,9 @@ cdef class CUtexref: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6054,9 +6054,9 @@ cdef class CUsurfref: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6085,9 +6085,9 @@ cdef class CUevent: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = 
init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6116,9 +6116,9 @@ cdef class CUstream: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6147,9 +6147,9 @@ cdef class CUgraphicsResource: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6178,9 +6178,9 @@ cdef class CUexternalMemory: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6209,9 +6209,9 @@ cdef class CUexternalSemaphore: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6240,9 +6240,9 @@ cdef class CUgraph: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6271,9 +6271,9 @@ cdef class CUgraphNode: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6302,9 +6302,9 @@ cdef class CUgraphExec: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6333,9 +6333,9 @@ cdef class CUmemoryPool: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6364,9 +6364,9 @@ cdef class CUuserObject: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6395,9 +6395,9 @@ cdef class CUgraphDeviceNode: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6426,9 +6426,9 @@ cdef class CUasyncCallbackHandle: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def 
__repr__(self): @@ -6457,9 +6457,9 @@ cdef class CUgreenCtx: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6486,9 +6486,9 @@ cdef class CUlinkState: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): self._keepalive = [] def __repr__(self): @@ -6517,9 +6517,9 @@ cdef class CUdevResourceDesc: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6548,9 +6548,9 @@ cdef class CUeglStreamConnection: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6577,9 +6577,9 @@ cdef class EGLImageKHR: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6606,9 +6606,9 @@ cdef class EGLStreamKHR: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6635,9 +6635,9 @@ cdef class EGLSyncKHR: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6664,9 +6664,9 @@ cdef class CUasyncCallback: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6693,9 +6693,9 @@ cdef class CUhostFn: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6722,9 +6722,9 @@ cdef class CUstreamCallback: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6751,9 +6751,9 @@ cdef class CUoccupancyB2DSize: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -6785,7 +6785,7 @@ cdef class CUuuid_st: if _ptr == 0: self._ptr = 
&self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -6831,7 +6831,7 @@ cdef class CUmemFabricHandle_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -6879,7 +6879,7 @@ cdef class CUipcEventHandle_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -6935,7 +6935,7 @@ cdef class CUipcMemHandle_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -6996,7 +6996,7 @@ cdef class CUstreamMemOpWaitValueParams_st: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._address = CUdeviceptr(_ptr=&self._ptr[0].waitValue.address) @@ -7048,48 +7048,48 @@ cdef class CUstreamMemOpWaitValueParams_st: return self._address @address.setter def address(self, address): - cdef ccuda.CUdeviceptr caddress + cdef cydriver.CUdeviceptr cyaddress if address is None: - caddress = 0 + cyaddress = 0 elif isinstance(address, (CUdeviceptr)): paddress = int(address) - caddress = paddress + cyaddress = paddress else: paddress = int(CUdeviceptr(address)) - caddress = paddress - self._address._ptr[0] = caddress + cyaddress = paddress + self._address._ptr[0] = cyaddress @property def value(self): return self._value @value.setter def value(self, value): - cdef ccuda.cuuint32_t cvalue + cdef cydriver.cuuint32_t cyvalue if value is None: - cvalue = 0 + cyvalue = 0 elif isinstance(value, (cuuint32_t)): pvalue = int(value) - cvalue = pvalue + cyvalue = pvalue else: pvalue = int(cuuint32_t(value)) - cvalue = pvalue - self._value._ptr[0] = cvalue + cyvalue = pvalue + self._value._ptr[0] = cyvalue @property def value64(self): return self._value64 @value64.setter def value64(self, value64): - cdef ccuda.cuuint64_t cvalue64 + cdef cydriver.cuuint64_t cyvalue64 if value64 is None: - cvalue64 = 0 + cyvalue64 = 0 elif isinstance(value64, (cuuint64_t)): pvalue64 = int(value64) - cvalue64 = pvalue64 + cyvalue64 = pvalue64 else: pvalue64 = int(cuuint64_t(value64)) - cvalue64 = pvalue64 - self._value64._ptr[0] = cvalue64 + cyvalue64 = pvalue64 + self._value64._ptr[0] = cyvalue64 @property def flags(self): @@ -7102,16 +7102,16 @@ cdef class CUstreamMemOpWaitValueParams_st: return self._alias @alias.setter def alias(self, alias): - cdef ccuda.CUdeviceptr calias + cdef cydriver.CUdeviceptr cyalias if alias is None: - calias = 0 + cyalias = 0 elif isinstance(alias, (CUdeviceptr)): palias = int(alias) - calias = palias + cyalias = palias else: palias = int(CUdeviceptr(alias)) - calias = palias - self._alias._ptr[0] = calias + cyalias = palias + self._alias._ptr[0] = cyalias {{endif}} {{if 'union CUstreamBatchMemOpParams_union' in found_types}} @@ -7140,7 +7140,7 @@ cdef class CUstreamMemOpWriteValueParams_st: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._address = CUdeviceptr(_ptr=&self._ptr[0].writeValue.address) @@ -7192,48 +7192,48 @@ cdef class CUstreamMemOpWriteValueParams_st: return self._address @address.setter def address(self, address): - cdef ccuda.CUdeviceptr caddress + cdef cydriver.CUdeviceptr cyaddress if address is None: - caddress = 0 + cyaddress = 0 elif isinstance(address, (CUdeviceptr)): paddress = int(address) - 
caddress = paddress + cyaddress = paddress else: paddress = int(CUdeviceptr(address)) - caddress = paddress - self._address._ptr[0] = caddress + cyaddress = paddress + self._address._ptr[0] = cyaddress @property def value(self): return self._value @value.setter def value(self, value): - cdef ccuda.cuuint32_t cvalue + cdef cydriver.cuuint32_t cyvalue if value is None: - cvalue = 0 + cyvalue = 0 elif isinstance(value, (cuuint32_t)): pvalue = int(value) - cvalue = pvalue + cyvalue = pvalue else: pvalue = int(cuuint32_t(value)) - cvalue = pvalue - self._value._ptr[0] = cvalue + cyvalue = pvalue + self._value._ptr[0] = cyvalue @property def value64(self): return self._value64 @value64.setter def value64(self, value64): - cdef ccuda.cuuint64_t cvalue64 + cdef cydriver.cuuint64_t cyvalue64 if value64 is None: - cvalue64 = 0 + cyvalue64 = 0 elif isinstance(value64, (cuuint64_t)): pvalue64 = int(value64) - cvalue64 = pvalue64 + cyvalue64 = pvalue64 else: pvalue64 = int(cuuint64_t(value64)) - cvalue64 = pvalue64 - self._value64._ptr[0] = cvalue64 + cyvalue64 = pvalue64 + self._value64._ptr[0] = cyvalue64 @property def flags(self): @@ -7246,16 +7246,16 @@ cdef class CUstreamMemOpWriteValueParams_st: return self._alias @alias.setter def alias(self, alias): - cdef ccuda.CUdeviceptr calias + cdef cydriver.CUdeviceptr cyalias if alias is None: - calias = 0 + cyalias = 0 elif isinstance(alias, (CUdeviceptr)): palias = int(alias) - calias = palias + cyalias = palias else: palias = int(CUdeviceptr(alias)) - calias = palias - self._alias._ptr[0] = calias + cyalias = palias + self._alias._ptr[0] = cyalias {{endif}} {{if 'union CUstreamBatchMemOpParams_union' in found_types}} @@ -7276,7 +7276,7 @@ cdef class CUstreamMemOpFlushRemoteWritesParams_st: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -7329,7 +7329,7 @@ cdef class CUstreamMemOpMemoryBarrierParams_st: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -7395,7 +7395,7 @@ cdef class CUstreamBatchMemOpParams_union: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._waitValue = CUstreamMemOpWaitValueParams_st(_ptr=self._ptr) self._writeValue = CUstreamMemOpWriteValueParams_st(_ptr=self._ptr) @@ -7446,25 +7446,25 @@ cdef class CUstreamBatchMemOpParams_union: return self._waitValue @waitValue.setter def waitValue(self, waitValue not None : CUstreamMemOpWaitValueParams_st): - string.memcpy(&self._ptr[0].waitValue, waitValue.getPtr(), sizeof(self._ptr[0].waitValue)) + string.memcpy(&self._ptr[0].waitValue, waitValue.getPtr(), sizeof(self._ptr[0].waitValue)) @property def writeValue(self): return self._writeValue @writeValue.setter def writeValue(self, writeValue not None : CUstreamMemOpWriteValueParams_st): - string.memcpy(&self._ptr[0].writeValue, writeValue.getPtr(), sizeof(self._ptr[0].writeValue)) + string.memcpy(&self._ptr[0].writeValue, writeValue.getPtr(), sizeof(self._ptr[0].writeValue)) @property def flushRemoteWrites(self): return self._flushRemoteWrites @flushRemoteWrites.setter def flushRemoteWrites(self, flushRemoteWrites not None : CUstreamMemOpFlushRemoteWritesParams_st): - string.memcpy(&self._ptr[0].flushRemoteWrites, flushRemoteWrites.getPtr(), sizeof(self._ptr[0].flushRemoteWrites)) + string.memcpy(&self._ptr[0].flushRemoteWrites, flushRemoteWrites.getPtr(), sizeof(self._ptr[0].flushRemoteWrites)) @property def 
memoryBarrier(self): return self._memoryBarrier @memoryBarrier.setter def memoryBarrier(self, memoryBarrier not None : CUstreamMemOpMemoryBarrierParams_st): - string.memcpy(&self._ptr[0].memoryBarrier, memoryBarrier.getPtr(), sizeof(self._ptr[0].memoryBarrier)) + string.memcpy(&self._ptr[0].memoryBarrier, memoryBarrier.getPtr(), sizeof(self._ptr[0].memoryBarrier)) @property def pad(self): return [cuuint64_t(init_value=_pad) for _pad in self._ptr[0].pad] @@ -7498,7 +7498,7 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._ctx = CUcontext(_ptr=&self._ptr[0].ctx) def __dealloc__(self): @@ -7533,16 +7533,16 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st: return self._ctx @ctx.setter def ctx(self, ctx): - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - self._ctx._ptr[0] = cctx + cyctx = pctx + self._ctx._ptr[0] = cyctx @property def count(self): return self._ptr[0].count @@ -7551,7 +7551,7 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st: self._ptr[0].count = count @property def paramArray(self): - arrs = [self._ptr[0].paramArray + x*sizeof(ccuda.CUstreamBatchMemOpParams) for x in range(self._paramArray_length)] + arrs = [self._ptr[0].paramArray + x*sizeof(cydriver.CUstreamBatchMemOpParams) for x in range(self._paramArray_length)] return [CUstreamBatchMemOpParams(_ptr=arr) for arr in arrs] @paramArray.setter def paramArray(self, val): @@ -7562,13 +7562,13 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st: else: if self._paramArray_length != len(val): free(self._paramArray) - self._paramArray = calloc(len(val), sizeof(ccuda.CUstreamBatchMemOpParams)) + self._paramArray = calloc(len(val), sizeof(cydriver.CUstreamBatchMemOpParams)) if self._paramArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUstreamBatchMemOpParams))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUstreamBatchMemOpParams))) self._paramArray_length = len(val) self._ptr[0].paramArray = self._paramArray for idx in range(len(val)): - string.memcpy(&self._paramArray[idx], (val[idx])._ptr, sizeof(ccuda.CUstreamBatchMemOpParams)) + string.memcpy(&self._paramArray[idx], (val[idx])._ptr, sizeof(cydriver.CUstreamBatchMemOpParams)) @property def flags(self): @@ -7604,7 +7604,7 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._ctx = CUcontext(_ptr=&self._ptr[0].ctx) def __dealloc__(self): @@ -7639,16 +7639,16 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st: return self._ctx @ctx.setter def ctx(self, ctx): - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - self._ctx._ptr[0] = cctx + cyctx = pctx + self._ctx._ptr[0] = cyctx @property def count(self): return self._ptr[0].count @@ -7657,7 +7657,7 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st: self._ptr[0].count = count @property def paramArray(self): - arrs = [self._ptr[0].paramArray + x*sizeof(ccuda.CUstreamBatchMemOpParams) for x in 
range(self._paramArray_length)] + arrs = [self._ptr[0].paramArray + x*sizeof(cydriver.CUstreamBatchMemOpParams) for x in range(self._paramArray_length)] return [CUstreamBatchMemOpParams(_ptr=arr) for arr in arrs] @paramArray.setter def paramArray(self, val): @@ -7668,13 +7668,13 @@ cdef class CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st: else: if self._paramArray_length != len(val): free(self._paramArray) - self._paramArray = calloc(len(val), sizeof(ccuda.CUstreamBatchMemOpParams)) + self._paramArray = calloc(len(val), sizeof(cydriver.CUstreamBatchMemOpParams)) if self._paramArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUstreamBatchMemOpParams))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUstreamBatchMemOpParams))) self._paramArray_length = len(val) self._ptr[0].paramArray = self._paramArray for idx in range(len(val)): - string.memcpy(&self._paramArray[idx], (val[idx])._ptr, sizeof(ccuda.CUstreamBatchMemOpParams)) + string.memcpy(&self._paramArray[idx], (val[idx])._ptr, sizeof(cydriver.CUstreamBatchMemOpParams)) @property def flags(self): @@ -7699,7 +7699,7 @@ cdef class anon_struct0: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -7740,7 +7740,7 @@ cdef class anon_union2: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._overBudget = anon_struct0(_ptr=self._ptr) @@ -7763,7 +7763,7 @@ cdef class anon_union2: return self._overBudget @overBudget.setter def overBudget(self, overBudget not None : anon_struct0): - string.memcpy(&self._ptr[0].info.overBudget, overBudget.getPtr(), sizeof(self._ptr[0].info.overBudget)) + string.memcpy(&self._ptr[0].info.overBudget, overBudget.getPtr(), sizeof(self._ptr[0].info.overBudget)) {{endif}} {{if 'struct CUasyncNotificationInfo_st' in found_types}} @@ -7786,10 +7786,10 @@ cdef class CUasyncNotificationInfo_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUasyncNotificationInfo_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUasyncNotificationInfo_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._info = anon_union2(_ptr=self._ptr) def __dealloc__(self): @@ -7822,7 +7822,7 @@ cdef class CUasyncNotificationInfo_st: return self._info @info.setter def info(self, info not None : anon_union2): - string.memcpy(&self._ptr[0].info, info.getPtr(), sizeof(self._ptr[0].info)) + string.memcpy(&self._ptr[0].info, info.getPtr(), sizeof(self._ptr[0].info)) {{endif}} {{if 'struct CUdevprop_st' in found_types}} @@ -7863,7 +7863,7 @@ cdef class CUdevprop_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -8018,7 +8018,7 @@ cdef class CUaccessPolicyWindow_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -8056,8 +8056,8 @@ cdef class CUaccessPolicyWindow_st: return self._ptr[0].base_ptr @base_ptr.setter def base_ptr(self, base_ptr): - _cbase_ptr = utils.HelperInputVoidPtr(base_ptr) - self._ptr[0].base_ptr = _cbase_ptr.cptr + _cybase_ptr = utils.HelperInputVoidPtr(base_ptr) + self._ptr[0].base_ptr = _cybase_ptr.cptr @property def num_bytes(self): return self._ptr[0].num_bytes 
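The many visually identical self._ptr = _ptr hunks above are apparently trailing-whitespace cleanups of one generated __cinit__ idiom: a wrapper either points at its own storage (seeding it with init_value) or aliases a caller-supplied pointer, and the typed setters coerce None, another wrapper, or any int()-able value before writing through that pointer. A hedged ctypes sketch of the idiom (HandleWrapper is a made-up name; the real classes are compiled Cython):

    import ctypes

    class HandleWrapper:
        # Sketch of the generated __cinit__: own the backing storage unless
        # the caller passes a pointer to alias (caller retains ownership).
        def __init__(self, init_value=0, _ptr=None):
            if _ptr is None:
                self.__val = ctypes.c_uint64(init_value)
                self._ptr = ctypes.pointer(self.__val)
            else:
                self._ptr = _ptr
                if init_value:
                    self._ptr[0] = init_value

        def __int__(self):
            return self._ptr[0]

        def set(self, value):
            # Mirrors the generated setters: None becomes 0, anything else
            # (another wrapper or an int-like) is coerced and written in
            # place through the pointer.
            self._ptr[0] = 0 if value is None else int(value)

    backing = ctypes.c_uint64(7)
    alias = HandleWrapper(_ptr=ctypes.pointer(backing))
    alias.set(9)
    assert backing.value == 9   # the write lands in the caller's storage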
@@ -8122,7 +8122,7 @@ cdef class CUDA_KERNEL_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._func = CUfunction(_ptr=&self._ptr[0].func) def __dealloc__(self): @@ -8180,16 +8180,16 @@ cdef class CUDA_KERNEL_NODE_PARAMS_st: return self._func @func.setter def func(self, func): - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc - self._func._ptr[0] = cfunc + cyfunc = pfunc + self._func._ptr[0] = cyfunc @property def gridDimX(self): return self._ptr[0].gridDimX @@ -8237,8 +8237,8 @@ cdef class CUDA_KERNEL_NODE_PARAMS_st: return self._ptr[0].kernelParams @kernelParams.setter def kernelParams(self, kernelParams): - self._ckernelParams = utils.HelperKernelParams(kernelParams) - self._ptr[0].kernelParams = self._ckernelParams.ckernelParams + self._cykernelParams = utils.HelperKernelParams(kernelParams) + self._ptr[0].kernelParams = self._cykernelParams.ckernelParams @property def extra(self): return self._ptr[0].extra @@ -8291,7 +8291,7 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v2_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._func = CUfunction(_ptr=&self._ptr[0].func) self._kern = CUkernel(_ptr=&self._ptr[0].kern) @@ -8359,16 +8359,16 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v2_st: return self._func @func.setter def func(self, func): - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc - self._func._ptr[0] = cfunc + cyfunc = pfunc + self._func._ptr[0] = cyfunc @property def gridDimX(self): return self._ptr[0].gridDimX @@ -8416,8 +8416,8 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v2_st: return self._ptr[0].kernelParams @kernelParams.setter def kernelParams(self, kernelParams): - self._ckernelParams = utils.HelperKernelParams(kernelParams) - self._ptr[0].kernelParams = self._ckernelParams.ckernelParams + self._cykernelParams = utils.HelperKernelParams(kernelParams) + self._ptr[0].kernelParams = self._cykernelParams.ckernelParams @property def extra(self): return self._ptr[0].extra @@ -8429,31 +8429,31 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v2_st: return self._kern @kern.setter def kern(self, kern): - cdef ccuda.CUkernel ckern + cdef cydriver.CUkernel cykern if kern is None: - ckern = 0 + cykern = 0 elif isinstance(kern, (CUkernel,)): pkern = int(kern) - ckern = pkern + cykern = pkern else: pkern = int(CUkernel(kern)) - ckern = pkern - self._kern._ptr[0] = ckern + cykern = pkern + self._kern._ptr[0] = cykern @property def ctx(self): return self._ctx @ctx.setter def ctx(self, ctx): - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - self._ctx._ptr[0] = cctx + cyctx = pctx + self._ctx._ptr[0] = cyctx {{endif}} {{if 'struct CUDA_KERNEL_NODE_PARAMS_v3_st' in found_types}} @@ -8500,7 +8500,7 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v3_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._func = 
CUfunction(_ptr=&self._ptr[0].func) self._kern = CUkernel(_ptr=&self._ptr[0].kern) @@ -8568,16 +8568,16 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v3_st: return self._func @func.setter def func(self, func): - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc - self._func._ptr[0] = cfunc + cyfunc = pfunc + self._func._ptr[0] = cyfunc @property def gridDimX(self): return self._ptr[0].gridDimX @@ -8625,8 +8625,8 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v3_st: return self._ptr[0].kernelParams @kernelParams.setter def kernelParams(self, kernelParams): - self._ckernelParams = utils.HelperKernelParams(kernelParams) - self._ptr[0].kernelParams = self._ckernelParams.ckernelParams + self._cykernelParams = utils.HelperKernelParams(kernelParams) + self._ptr[0].kernelParams = self._cykernelParams.ckernelParams @property def extra(self): return self._ptr[0].extra @@ -8638,31 +8638,31 @@ cdef class CUDA_KERNEL_NODE_PARAMS_v3_st: return self._kern @kern.setter def kern(self, kern): - cdef ccuda.CUkernel ckern + cdef cydriver.CUkernel cykern if kern is None: - ckern = 0 + cykern = 0 elif isinstance(kern, (CUkernel,)): pkern = int(kern) - ckern = pkern + cykern = pkern else: pkern = int(CUkernel(kern)) - ckern = pkern - self._kern._ptr[0] = ckern + cykern = pkern + self._kern._ptr[0] = cykern @property def ctx(self): return self._ctx @ctx.setter def ctx(self, ctx): - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - self._ctx._ptr[0] = cctx + cyctx = pctx + self._ctx._ptr[0] = cyctx {{endif}} {{if 'struct CUDA_MEMSET_NODE_PARAMS_st' in found_types}} @@ -8695,7 +8695,7 @@ cdef class CUDA_MEMSET_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._dst = CUdeviceptr(_ptr=&self._ptr[0].dst) def __dealloc__(self): @@ -8737,16 +8737,16 @@ cdef class CUDA_MEMSET_NODE_PARAMS_st: return self._dst @dst.setter def dst(self, dst): - cdef ccuda.CUdeviceptr cdst + cdef cydriver.CUdeviceptr cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (CUdeviceptr)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(CUdeviceptr(dst)) - cdst = pdst - self._dst._ptr[0] = cdst + cydst = pdst + self._dst._ptr[0] = cydst @property def pitch(self): @@ -8812,7 +8812,7 @@ cdef class CUDA_MEMSET_NODE_PARAMS_v2_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._dst = CUdeviceptr(_ptr=&self._ptr[0].dst) self._ctx = CUcontext(_ptr=&self._ptr[0].ctx) @@ -8859,16 +8859,16 @@ cdef class CUDA_MEMSET_NODE_PARAMS_v2_st: return self._dst @dst.setter def dst(self, dst): - cdef ccuda.CUdeviceptr cdst + cdef cydriver.CUdeviceptr cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (CUdeviceptr)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(CUdeviceptr(dst)) - cdst = pdst - self._dst._ptr[0] = cdst + cydst = pdst + self._dst._ptr[0] = cydst @property def pitch(self): @@ -8905,16 +8905,16 @@ cdef class CUDA_MEMSET_NODE_PARAMS_v2_st: return self._ctx @ctx.setter def ctx(self, ctx): - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 
0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - self._ctx._ptr[0] = cctx + cyctx = pctx + self._ctx._ptr[0] = cyctx {{endif}} {{if 'struct CUDA_HOST_NODE_PARAMS_st' in found_types}} @@ -8939,7 +8939,7 @@ cdef class CUDA_HOST_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._fn = CUhostFn(_ptr=&self._ptr[0].fn) def __dealloc__(self): @@ -8965,23 +8965,23 @@ cdef class CUDA_HOST_NODE_PARAMS_st: return self._fn @fn.setter def fn(self, fn): - cdef ccuda.CUhostFn cfn + cdef cydriver.CUhostFn cyfn if fn is None: - cfn = 0 + cyfn = 0 elif isinstance(fn, (CUhostFn)): pfn = int(fn) - cfn = pfn + cyfn = pfn else: pfn = int(CUhostFn(fn)) - cfn = pfn - self._fn._ptr[0] = cfn + cyfn = pfn + self._fn._ptr[0] = cyfn @property def userData(self): return self._ptr[0].userData @userData.setter def userData(self, userData): - _cuserData = utils.HelperInputVoidPtr(userData) - self._ptr[0].userData = _cuserData.cptr + _cyuserData = utils.HelperInputVoidPtr(userData) + self._ptr[0].userData = _cyuserData.cptr {{endif}} {{if 'struct CUDA_HOST_NODE_PARAMS_v2_st' in found_types}} @@ -9006,7 +9006,7 @@ cdef class CUDA_HOST_NODE_PARAMS_v2_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._fn = CUhostFn(_ptr=&self._ptr[0].fn) def __dealloc__(self): @@ -9032,23 +9032,23 @@ cdef class CUDA_HOST_NODE_PARAMS_v2_st: return self._fn @fn.setter def fn(self, fn): - cdef ccuda.CUhostFn cfn + cdef cydriver.CUhostFn cyfn if fn is None: - cfn = 0 + cyfn = 0 elif isinstance(fn, (CUhostFn)): pfn = int(fn) - cfn = pfn + cyfn = pfn else: pfn = int(CUhostFn(fn)) - cfn = pfn - self._fn._ptr[0] = cfn + cyfn = pfn + self._fn._ptr[0] = cyfn @property def userData(self): return self._ptr[0].userData @userData.setter def userData(self, userData): - _cuserData = utils.HelperInputVoidPtr(userData) - self._ptr[0].userData = _cuserData.cptr + _cyuserData = utils.HelperInputVoidPtr(userData) + self._ptr[0].userData = _cyuserData.cptr {{endif}} {{if 'struct CUDA_CONDITIONAL_NODE_PARAMS' in found_types}} @@ -9090,7 +9090,7 @@ cdef class CUDA_CONDITIONAL_NODE_PARAMS: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._handle = CUgraphConditionalHandle(_ptr=&self._ptr[0].handle) self._ctx = CUcontext(_ptr=&self._ptr[0].ctx) @@ -9129,16 +9129,16 @@ cdef class CUDA_CONDITIONAL_NODE_PARAMS: return self._handle @handle.setter def handle(self, handle): - cdef ccuda.CUgraphConditionalHandle chandle + cdef cydriver.CUgraphConditionalHandle cyhandle if handle is None: - chandle = 0 + cyhandle = 0 elif isinstance(handle, (CUgraphConditionalHandle)): phandle = int(handle) - chandle = phandle + cyhandle = phandle else: phandle = int(CUgraphConditionalHandle(handle)) - chandle = phandle - self._handle._ptr[0] = chandle + cyhandle = phandle + self._handle._ptr[0] = cyhandle @property def type(self): @@ -9154,23 +9154,23 @@ cdef class CUDA_CONDITIONAL_NODE_PARAMS: self._ptr[0].size = size @property def phGraph_out(self): - arrs = [self._ptr[0].phGraph_out + x*sizeof(ccuda.CUgraph) for x in range(self.size)] + arrs = [self._ptr[0].phGraph_out + x*sizeof(cydriver.CUgraph) for x in range(self.size)] return [CUgraph(_ptr=arr) for arr in arrs] @property def ctx(self): return self._ctx @ctx.setter def ctx(self, ctx): - cdef ccuda.CUcontext 
cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - self._ctx._ptr[0] = cctx + cyctx = pctx + self._ctx._ptr[0] = cyctx {{endif}} {{if 'struct CUgraphEdgeData_st' in found_types}} @@ -9218,7 +9218,7 @@ cdef class CUgraphEdgeData_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -9302,7 +9302,7 @@ cdef class CUDA_GRAPH_INSTANTIATE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._flags = cuuint64_t(_ptr=&self._ptr[0].flags) self._hUploadStream = CUstream(_ptr=&self._ptr[0].hUploadStream) @@ -9338,47 +9338,47 @@ cdef class CUDA_GRAPH_INSTANTIATE_PARAMS_st: return self._flags @flags.setter def flags(self, flags): - cdef ccuda.cuuint64_t cflags + cdef cydriver.cuuint64_t cyflags if flags is None: - cflags = 0 + cyflags = 0 elif isinstance(flags, (cuuint64_t)): pflags = int(flags) - cflags = pflags + cyflags = pflags else: pflags = int(cuuint64_t(flags)) - cflags = pflags - self._flags._ptr[0] = cflags + cyflags = pflags + self._flags._ptr[0] = cyflags @property def hUploadStream(self): return self._hUploadStream @hUploadStream.setter def hUploadStream(self, hUploadStream): - cdef ccuda.CUstream chUploadStream + cdef cydriver.CUstream cyhUploadStream if hUploadStream is None: - chUploadStream = 0 + cyhUploadStream = 0 elif isinstance(hUploadStream, (CUstream,)): phUploadStream = int(hUploadStream) - chUploadStream = phUploadStream + cyhUploadStream = phUploadStream else: phUploadStream = int(CUstream(hUploadStream)) - chUploadStream = phUploadStream - self._hUploadStream._ptr[0] = chUploadStream + cyhUploadStream = phUploadStream + self._hUploadStream._ptr[0] = cyhUploadStream @property def hErrNode_out(self): return self._hErrNode_out @hErrNode_out.setter def hErrNode_out(self, hErrNode_out): - cdef ccuda.CUgraphNode chErrNode_out + cdef cydriver.CUgraphNode cyhErrNode_out if hErrNode_out is None: - chErrNode_out = 0 + cyhErrNode_out = 0 elif isinstance(hErrNode_out, (CUgraphNode,)): phErrNode_out = int(hErrNode_out) - chErrNode_out = phErrNode_out + cyhErrNode_out = phErrNode_out else: phErrNode_out = int(CUgraphNode(hErrNode_out)) - chErrNode_out = phErrNode_out - self._hErrNode_out._ptr[0] = chErrNode_out + cyhErrNode_out = phErrNode_out + self._hErrNode_out._ptr[0] = cyhErrNode_out @property def result_out(self): return CUgraphInstantiateResult(self._ptr[0].result_out) @@ -9415,7 +9415,7 @@ cdef class CUlaunchMemSyncDomainMap_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -9469,7 +9469,7 @@ cdef class anon_struct1: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -9534,7 +9534,7 @@ cdef class anon_struct2: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._event = CUevent(_ptr=&self._ptr[0].programmaticEvent.event) @@ -9565,16 +9565,16 @@ cdef class anon_struct2: return self._event @event.setter def event(self, event): - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent 
= pevent else: pevent = int(CUevent(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent @property def flags(self): return self._ptr[0].programmaticEvent.flags @@ -9606,7 +9606,7 @@ cdef class anon_struct3: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._event = CUevent(_ptr=&self._ptr[0].launchCompletionEvent.event) @@ -9633,16 +9633,16 @@ cdef class anon_struct3: return self._event @event.setter def event(self, event): - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent @property def flags(self): return self._ptr[0].launchCompletionEvent.flags @@ -9668,7 +9668,7 @@ cdef class anon_struct4: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._devNode = CUgraphDeviceNode(_ptr=&self._ptr[0].deviceUpdatableKernelNode.devNode) @@ -9701,16 +9701,16 @@ cdef class anon_struct4: return self._devNode @devNode.setter def devNode(self, devNode): - cdef ccuda.CUgraphDeviceNode cdevNode + cdef cydriver.CUgraphDeviceNode cydevNode if devNode is None: - cdevNode = 0 + cydevNode = 0 elif isinstance(devNode, (CUgraphDeviceNode,)): pdevNode = int(devNode) - cdevNode = pdevNode + cydevNode = pdevNode else: pdevNode = int(CUgraphDeviceNode(devNode)) - cdevNode = pdevNode - self._devNode._ptr[0] = cdevNode + cydevNode = pdevNode + self._devNode._ptr[0] = cydevNode {{endif}} {{if 'union CUlaunchAttributeValue_union' in found_types}} @@ -9789,7 +9789,7 @@ cdef class CUlaunchAttributeValue_union: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._accessPolicyWindow = CUaccessPolicyWindow(_ptr=&self._ptr[0].accessPolicyWindow) self._clusterDim = anon_struct1(_ptr=self._ptr) @@ -9885,7 +9885,7 @@ cdef class CUlaunchAttributeValue_union: return self._accessPolicyWindow @accessPolicyWindow.setter def accessPolicyWindow(self, accessPolicyWindow not None : CUaccessPolicyWindow): - string.memcpy(&self._ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._ptr[0].accessPolicyWindow)) + string.memcpy(&self._ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._ptr[0].accessPolicyWindow)) @property def cooperative(self): return self._ptr[0].cooperative @@ -9903,7 +9903,7 @@ cdef class CUlaunchAttributeValue_union: return self._clusterDim @clusterDim.setter def clusterDim(self, clusterDim not None : anon_struct1): - string.memcpy(&self._ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._ptr[0].clusterDim)) + string.memcpy(&self._ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._ptr[0].clusterDim)) @property def clusterSchedulingPolicyPreference(self): return CUclusterSchedulingPolicy(self._ptr[0].clusterSchedulingPolicyPreference) @@ -9921,13 +9921,13 @@ cdef class CUlaunchAttributeValue_union: return self._programmaticEvent @programmaticEvent.setter def programmaticEvent(self, programmaticEvent not None : anon_struct2): - string.memcpy(&self._ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._ptr[0].programmaticEvent)) + string.memcpy(&self._ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._ptr[0].programmaticEvent)) 
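
The rename in the hunks above is mechanical: every `ccuda.*` extern type becomes `cydriver.*` and each `c<name>` temporary becomes `cy<name>`, while the setter contract is unchanged -- a handle-valued field still accepts None, an int()-convertible value, or an existing wrapper object. A minimal sketch of that contract under the new `cuda.bindings.driver` layout (illustrative only, not part of the generated file; the legacy `cuda.cuda` import path keeps working through the compatibility shims added elsewhere in this patch):

    from cuda.bindings import driver

    val = driver.CUlaunchAttributeValue()
    # All three forms pass through the cy* coercion shown above:
    val.programmaticEvent.event = None                 # stored as a NULL CUevent
    val.programmaticEvent.event = 0                    # any int()-convertible value
    val.programmaticEvent.event = driver.CUevent(0)    # an existing wrapper
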
@property def launchCompletionEvent(self): return self._launchCompletionEvent @launchCompletionEvent.setter def launchCompletionEvent(self, launchCompletionEvent not None : anon_struct3): - string.memcpy(&self._ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._ptr[0].launchCompletionEvent)) + string.memcpy(&self._ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._ptr[0].launchCompletionEvent)) @property def priority(self): return self._ptr[0].priority @@ -9939,7 +9939,7 @@ cdef class CUlaunchAttributeValue_union: return self._memSyncDomainMap @memSyncDomainMap.setter def memSyncDomainMap(self, memSyncDomainMap not None : CUlaunchMemSyncDomainMap): - string.memcpy(&self._ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._ptr[0].memSyncDomainMap)) + string.memcpy(&self._ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._ptr[0].memSyncDomainMap)) @property def memSyncDomain(self): return CUlaunchMemSyncDomain(self._ptr[0].memSyncDomain) @@ -9951,7 +9951,7 @@ cdef class CUlaunchAttributeValue_union: return self._deviceUpdatableKernelNode @deviceUpdatableKernelNode.setter def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct4): - string.memcpy(&self._ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._ptr[0].deviceUpdatableKernelNode)) + string.memcpy(&self._ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._ptr[0].deviceUpdatableKernelNode)) @property def sharedMemCarveout(self): return self._ptr[0].sharedMemCarveout @@ -9982,7 +9982,7 @@ cdef class CUlaunchAttribute_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._value = CUlaunchAttributeValue(_ptr=&self._ptr[0].value) def __dealloc__(self): @@ -10014,7 +10014,7 @@ cdef class CUlaunchAttribute_st: return self._value @value.setter def value(self, value not None : CUlaunchAttributeValue): - string.memcpy(&self._ptr[0].value, value.getPtr(), sizeof(self._ptr[0].value)) + string.memcpy(&self._ptr[0].value, value.getPtr(), sizeof(self._ptr[0].value)) {{endif}} {{if 'struct CUlaunchConfig_st' in found_types}} @@ -10055,7 +10055,7 @@ cdef class CUlaunchConfig_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._hStream = CUstream(_ptr=&self._ptr[0].hStream) def __dealloc__(self): @@ -10156,19 +10156,19 @@ cdef class CUlaunchConfig_st: return self._hStream @hStream.setter def hStream(self, hStream): - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - self._hStream._ptr[0] = chStream + cyhStream = phStream + self._hStream._ptr[0] = cyhStream @property def attrs(self): - arrs = [self._ptr[0].attrs + x*sizeof(ccuda.CUlaunchAttribute) for x in range(self._attrs_length)] + arrs = [self._ptr[0].attrs + x*sizeof(cydriver.CUlaunchAttribute) for x in range(self._attrs_length)] return [CUlaunchAttribute(_ptr=arr) for arr in arrs] @attrs.setter def attrs(self, val): @@ -10179,13 +10179,13 @@ cdef class CUlaunchConfig_st: else: if self._attrs_length != len(val): free(self._attrs) - self._attrs = calloc(len(val), sizeof(ccuda.CUlaunchAttribute)) + self._attrs = calloc(len(val), 
sizeof(cydriver.CUlaunchAttribute)) if self._attrs is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUlaunchAttribute))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUlaunchAttribute))) self._attrs_length = len(val) self._ptr[0].attrs = self._attrs for idx in range(len(val)): - string.memcpy(&self._attrs[idx], (val[idx])._ptr, sizeof(ccuda.CUlaunchAttribute)) + string.memcpy(&self._attrs[idx], (val[idx])._ptr, sizeof(cydriver.CUlaunchAttribute)) @property def numAttrs(self): @@ -10215,7 +10215,7 @@ cdef class CUexecAffinitySmCount_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -10255,7 +10255,7 @@ cdef class anon_union3: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._smCount = CUexecAffinitySmCount(_ptr=&self._ptr[0].param.smCount) @@ -10278,7 +10278,7 @@ cdef class anon_union3: return self._smCount @smCount.setter def smCount(self, smCount not None : CUexecAffinitySmCount): - string.memcpy(&self._ptr[0].param.smCount, smCount.getPtr(), sizeof(self._ptr[0].param.smCount)) + string.memcpy(&self._ptr[0].param.smCount, smCount.getPtr(), sizeof(self._ptr[0].param.smCount)) {{endif}} {{if 'struct CUexecAffinityParam_st' in found_types}} @@ -10301,10 +10301,10 @@ cdef class CUexecAffinityParam_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUexecAffinityParam_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUexecAffinityParam_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._param = anon_union3(_ptr=self._ptr) def __dealloc__(self): @@ -10337,7 +10337,7 @@ cdef class CUexecAffinityParam_st: return self._param @param.setter def param(self, param not None : anon_union3): - string.memcpy(&self._ptr[0].param, param.getPtr(), sizeof(self._ptr[0].param)) + string.memcpy(&self._ptr[0].param, param.getPtr(), sizeof(self._ptr[0].param)) {{endif}} {{if 'struct CUctxCigParam_st' in found_types}} @@ -10362,7 +10362,7 @@ cdef class CUctxCigParam_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -10394,8 +10394,8 @@ cdef class CUctxCigParam_st: return self._ptr[0].sharedData @sharedData.setter def sharedData(self, sharedData): - _csharedData = utils.HelperInputVoidPtr(sharedData) - self._ptr[0].sharedData = _csharedData.cptr + _cysharedData = utils.HelperInputVoidPtr(sharedData) + self._ptr[0].sharedData = _cysharedData.cptr {{endif}} {{if 'struct CUctxCreateParams_st' in found_types}} @@ -10423,7 +10423,7 @@ cdef class CUctxCreateParams_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -10453,7 +10453,7 @@ cdef class CUctxCreateParams_st: return '' @property def execAffinityParams(self): - arrs = [self._ptr[0].execAffinityParams + x*sizeof(ccuda.CUexecAffinityParam) for x in range(self._execAffinityParams_length)] + arrs = [self._ptr[0].execAffinityParams + x*sizeof(cydriver.CUexecAffinityParam) for x in range(self._execAffinityParams_length)] return [CUexecAffinityParam(_ptr=arr) for arr in arrs] @execAffinityParams.setter def execAffinityParams(self, val): @@ -10464,13 
+10464,13 @@ cdef class CUctxCreateParams_st: else: if self._execAffinityParams_length != len(val): free(self._execAffinityParams) - self._execAffinityParams = calloc(len(val), sizeof(ccuda.CUexecAffinityParam)) + self._execAffinityParams = calloc(len(val), sizeof(cydriver.CUexecAffinityParam)) if self._execAffinityParams is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUexecAffinityParam))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUexecAffinityParam))) self._execAffinityParams_length = len(val) self._ptr[0].execAffinityParams = self._execAffinityParams for idx in range(len(val)): - string.memcpy(&self._execAffinityParams[idx], (val[idx])._ptr, sizeof(ccuda.CUexecAffinityParam)) + string.memcpy(&self._execAffinityParams[idx], (val[idx])._ptr, sizeof(cydriver.CUexecAffinityParam)) @property def numExecAffinityParams(self): @@ -10480,7 +10480,7 @@ cdef class CUctxCreateParams_st: self._ptr[0].numExecAffinityParams = numExecAffinityParams @property def cigParams(self): - arrs = [self._ptr[0].cigParams + x*sizeof(ccuda.CUctxCigParam) for x in range(self._cigParams_length)] + arrs = [self._ptr[0].cigParams + x*sizeof(cydriver.CUctxCigParam) for x in range(self._cigParams_length)] return [CUctxCigParam(_ptr=arr) for arr in arrs] @cigParams.setter def cigParams(self, val): @@ -10491,13 +10491,13 @@ cdef class CUctxCreateParams_st: else: if self._cigParams_length != len(val): free(self._cigParams) - self._cigParams = calloc(len(val), sizeof(ccuda.CUctxCigParam)) + self._cigParams = calloc(len(val), sizeof(cydriver.CUctxCigParam)) if self._cigParams is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUctxCigParam))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUctxCigParam))) self._cigParams_length = len(val) self._ptr[0].cigParams = self._cigParams for idx in range(len(val)): - string.memcpy(&self._cigParams[idx], (val[idx])._ptr, sizeof(ccuda.CUctxCigParam)) + string.memcpy(&self._cigParams[idx], (val[idx])._ptr, sizeof(cydriver.CUctxCigParam)) {{endif}} {{if 'struct CUlibraryHostUniversalFunctionAndDataTable_st' in found_types}} @@ -10525,7 +10525,7 @@ cdef class CUlibraryHostUniversalFunctionAndDataTable_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -10559,8 +10559,8 @@ cdef class CUlibraryHostUniversalFunctionAndDataTable_st: return self._ptr[0].functionTable @functionTable.setter def functionTable(self, functionTable): - _cfunctionTable = utils.HelperInputVoidPtr(functionTable) - self._ptr[0].functionTable = _cfunctionTable.cptr + _cyfunctionTable = utils.HelperInputVoidPtr(functionTable) + self._ptr[0].functionTable = _cyfunctionTable.cptr @property def functionWindowSize(self): return self._ptr[0].functionWindowSize @@ -10572,8 +10572,8 @@ cdef class CUlibraryHostUniversalFunctionAndDataTable_st: return self._ptr[0].dataTable @dataTable.setter def dataTable(self, dataTable): - _cdataTable = utils.HelperInputVoidPtr(dataTable) - self._ptr[0].dataTable = _cdataTable.cptr + _cydataTable = utils.HelperInputVoidPtr(dataTable) + self._ptr[0].dataTable = _cydataTable.cptr @property def dataWindowSize(self): return self._ptr[0].dataWindowSize @@ -10632,7 +10632,7 @@ cdef class CUDA_MEMCPY2D_st: if _ptr == 0: self._ptr = &self.__val 
else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._srcDevice = CUdeviceptr(_ptr=&self._ptr[0].srcDevice) self._srcArray = CUarray(_ptr=&self._ptr[0].srcArray) @@ -10735,39 +10735,39 @@ cdef class CUDA_MEMCPY2D_st: return self._ptr[0].srcHost @srcHost.setter def srcHost(self, srcHost): - _csrcHost = utils.HelperInputVoidPtr(srcHost) - self._ptr[0].srcHost = _csrcHost.cptr + _cysrcHost = utils.HelperInputVoidPtr(srcHost) + self._ptr[0].srcHost = _cysrcHost.cptr @property def srcDevice(self): return self._srcDevice @srcDevice.setter def srcDevice(self, srcDevice): - cdef ccuda.CUdeviceptr csrcDevice + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - self._srcDevice._ptr[0] = csrcDevice + cysrcDevice = psrcDevice + self._srcDevice._ptr[0] = cysrcDevice @property def srcArray(self): return self._srcArray @srcArray.setter def srcArray(self, srcArray): - cdef ccuda.CUarray csrcArray + cdef cydriver.CUarray cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (CUarray,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(CUarray(srcArray)) - csrcArray = psrcArray - self._srcArray._ptr[0] = csrcArray + cysrcArray = psrcArray + self._srcArray._ptr[0] = cysrcArray @property def srcPitch(self): return self._ptr[0].srcPitch @@ -10797,39 +10797,39 @@ cdef class CUDA_MEMCPY2D_st: return self._ptr[0].dstHost @dstHost.setter def dstHost(self, dstHost): - _cdstHost = utils.HelperInputVoidPtr(dstHost) - self._ptr[0].dstHost = _cdstHost.cptr + _cydstHost = utils.HelperInputVoidPtr(dstHost) + self._ptr[0].dstHost = _cydstHost.cptr @property def dstDevice(self): return self._dstDevice @dstDevice.setter def dstDevice(self, dstDevice): - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - self._dstDevice._ptr[0] = cdstDevice + cydstDevice = pdstDevice + self._dstDevice._ptr[0] = cydstDevice @property def dstArray(self): return self._dstArray @dstArray.setter def dstArray(self, dstArray): - cdef ccuda.CUarray cdstArray + cdef cydriver.CUarray cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (CUarray,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(CUarray(dstArray)) - cdstArray = pdstArray - self._dstArray._ptr[0] = cdstArray + cydstArray = pdstArray + self._dstArray._ptr[0] = cydstArray @property def dstPitch(self): return self._ptr[0].dstPitch @@ -10919,7 +10919,7 @@ cdef class CUDA_MEMCPY3D_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._srcDevice = CUdeviceptr(_ptr=&self._ptr[0].srcDevice) self._srcArray = CUarray(_ptr=&self._ptr[0].srcArray) @@ -11070,46 +11070,46 @@ cdef class CUDA_MEMCPY3D_st: return self._ptr[0].srcHost @srcHost.setter def srcHost(self, srcHost): - _csrcHost = utils.HelperInputVoidPtr(srcHost) - self._ptr[0].srcHost = _csrcHost.cptr + _cysrcHost = utils.HelperInputVoidPtr(srcHost) + 
self._ptr[0].srcHost = _cysrcHost.cptr @property def srcDevice(self): return self._srcDevice @srcDevice.setter def srcDevice(self, srcDevice): - cdef ccuda.CUdeviceptr csrcDevice + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - self._srcDevice._ptr[0] = csrcDevice + cysrcDevice = psrcDevice + self._srcDevice._ptr[0] = cysrcDevice @property def srcArray(self): return self._srcArray @srcArray.setter def srcArray(self, srcArray): - cdef ccuda.CUarray csrcArray + cdef cydriver.CUarray cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (CUarray,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(CUarray(srcArray)) - csrcArray = psrcArray - self._srcArray._ptr[0] = csrcArray + cysrcArray = psrcArray + self._srcArray._ptr[0] = cysrcArray @property def reserved0(self): return self._ptr[0].reserved0 @reserved0.setter def reserved0(self, reserved0): - _creserved0 = utils.HelperInputVoidPtr(reserved0) - self._ptr[0].reserved0 = _creserved0.cptr + _cyreserved0 = utils.HelperInputVoidPtr(reserved0) + self._ptr[0].reserved0 = _cyreserved0.cptr @property def srcPitch(self): return self._ptr[0].srcPitch @@ -11157,46 +11157,46 @@ cdef class CUDA_MEMCPY3D_st: return self._ptr[0].dstHost @dstHost.setter def dstHost(self, dstHost): - _cdstHost = utils.HelperInputVoidPtr(dstHost) - self._ptr[0].dstHost = _cdstHost.cptr + _cydstHost = utils.HelperInputVoidPtr(dstHost) + self._ptr[0].dstHost = _cydstHost.cptr @property def dstDevice(self): return self._dstDevice @dstDevice.setter def dstDevice(self, dstDevice): - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - self._dstDevice._ptr[0] = cdstDevice + cydstDevice = pdstDevice + self._dstDevice._ptr[0] = cydstDevice @property def dstArray(self): return self._dstArray @dstArray.setter def dstArray(self, dstArray): - cdef ccuda.CUarray cdstArray + cdef cydriver.CUarray cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (CUarray,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(CUarray(dstArray)) - cdstArray = pdstArray - self._dstArray._ptr[0] = cdstArray + cydstArray = pdstArray + self._dstArray._ptr[0] = cydstArray @property def reserved1(self): return self._ptr[0].reserved1 @reserved1.setter def reserved1(self, reserved1): - _creserved1 = utils.HelperInputVoidPtr(reserved1) - self._ptr[0].reserved1 = _creserved1.cptr + _cyreserved1 = utils.HelperInputVoidPtr(reserved1) + self._ptr[0].reserved1 = _cyreserved1.cptr @property def dstPitch(self): return self._ptr[0].dstPitch @@ -11299,7 +11299,7 @@ cdef class CUDA_MEMCPY3D_PEER_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._srcDevice = CUdeviceptr(_ptr=&self._ptr[0].srcDevice) self._srcArray = CUarray(_ptr=&self._ptr[0].srcArray) @@ -11452,54 +11452,54 @@ cdef class CUDA_MEMCPY3D_PEER_st: return self._ptr[0].srcHost @srcHost.setter def 
srcHost(self, srcHost): - _csrcHost = utils.HelperInputVoidPtr(srcHost) - self._ptr[0].srcHost = _csrcHost.cptr + _cysrcHost = utils.HelperInputVoidPtr(srcHost) + self._ptr[0].srcHost = _cysrcHost.cptr @property def srcDevice(self): return self._srcDevice @srcDevice.setter def srcDevice(self, srcDevice): - cdef ccuda.CUdeviceptr csrcDevice + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - self._srcDevice._ptr[0] = csrcDevice + cysrcDevice = psrcDevice + self._srcDevice._ptr[0] = cysrcDevice @property def srcArray(self): return self._srcArray @srcArray.setter def srcArray(self, srcArray): - cdef ccuda.CUarray csrcArray + cdef cydriver.CUarray cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (CUarray,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(CUarray(srcArray)) - csrcArray = psrcArray - self._srcArray._ptr[0] = csrcArray + cysrcArray = psrcArray + self._srcArray._ptr[0] = cysrcArray @property def srcContext(self): return self._srcContext @srcContext.setter def srcContext(self, srcContext): - cdef ccuda.CUcontext csrcContext + cdef cydriver.CUcontext cysrcContext if srcContext is None: - csrcContext = 0 + cysrcContext = 0 elif isinstance(srcContext, (CUcontext,)): psrcContext = int(srcContext) - csrcContext = psrcContext + cysrcContext = psrcContext else: psrcContext = int(CUcontext(srcContext)) - csrcContext = psrcContext - self._srcContext._ptr[0] = csrcContext + cysrcContext = psrcContext + self._srcContext._ptr[0] = cysrcContext @property def srcPitch(self): return self._ptr[0].srcPitch @@ -11547,54 +11547,54 @@ cdef class CUDA_MEMCPY3D_PEER_st: return self._ptr[0].dstHost @dstHost.setter def dstHost(self, dstHost): - _cdstHost = utils.HelperInputVoidPtr(dstHost) - self._ptr[0].dstHost = _cdstHost.cptr + _cydstHost = utils.HelperInputVoidPtr(dstHost) + self._ptr[0].dstHost = _cydstHost.cptr @property def dstDevice(self): return self._dstDevice @dstDevice.setter def dstDevice(self, dstDevice): - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - self._dstDevice._ptr[0] = cdstDevice + cydstDevice = pdstDevice + self._dstDevice._ptr[0] = cydstDevice @property def dstArray(self): return self._dstArray @dstArray.setter def dstArray(self, dstArray): - cdef ccuda.CUarray cdstArray + cdef cydriver.CUarray cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (CUarray,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(CUarray(dstArray)) - cdstArray = pdstArray - self._dstArray._ptr[0] = cdstArray + cydstArray = pdstArray + self._dstArray._ptr[0] = cydstArray @property def dstContext(self): return self._dstContext @dstContext.setter def dstContext(self, dstContext): - cdef ccuda.CUcontext cdstContext + cdef cydriver.CUcontext cydstContext if dstContext is None: - cdstContext = 0 + cydstContext = 0 elif isinstance(dstContext, (CUcontext,)): pdstContext = int(dstContext) - cdstContext = pdstContext + 
cydstContext = pdstContext else: pdstContext = int(CUcontext(dstContext)) - cdstContext = pdstContext - self._dstContext._ptr[0] = cdstContext + cydstContext = pdstContext + self._dstContext._ptr[0] = cydstContext @property def dstPitch(self): return self._ptr[0].dstPitch @@ -11653,7 +11653,7 @@ cdef class CUDA_MEMCPY_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._copyCtx = CUcontext(_ptr=&self._ptr[0].copyCtx) self._copyParams = CUDA_MEMCPY3D(_ptr=&self._ptr[0].copyParams) @@ -11700,22 +11700,22 @@ cdef class CUDA_MEMCPY_NODE_PARAMS_st: return self._copyCtx @copyCtx.setter def copyCtx(self, copyCtx): - cdef ccuda.CUcontext ccopyCtx + cdef cydriver.CUcontext cycopyCtx if copyCtx is None: - ccopyCtx = 0 + cycopyCtx = 0 elif isinstance(copyCtx, (CUcontext,)): pcopyCtx = int(copyCtx) - ccopyCtx = pcopyCtx + cycopyCtx = pcopyCtx else: pcopyCtx = int(CUcontext(copyCtx)) - ccopyCtx = pcopyCtx - self._copyCtx._ptr[0] = ccopyCtx + cycopyCtx = pcopyCtx + self._copyCtx._ptr[0] = cycopyCtx @property def copyParams(self): return self._copyParams @copyParams.setter def copyParams(self, copyParams not None : CUDA_MEMCPY3D): - string.memcpy(&self._ptr[0].copyParams, copyParams.getPtr(), sizeof(self._ptr[0].copyParams)) + string.memcpy(&self._ptr[0].copyParams, copyParams.getPtr(), sizeof(self._ptr[0].copyParams)) {{endif}} {{if 'struct CUDA_ARRAY_DESCRIPTOR_st' in found_types}} @@ -11744,7 +11744,7 @@ cdef class CUDA_ARRAY_DESCRIPTOR_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -11829,7 +11829,7 @@ cdef class CUDA_ARRAY3D_DESCRIPTOR_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -11923,7 +11923,7 @@ cdef class anon_struct5: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -11998,7 +11998,7 @@ cdef class CUDA_ARRAY_SPARSE_PROPERTIES_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._tileExtent = anon_struct5(_ptr=self._ptr) def __dealloc__(self): @@ -12036,7 +12036,7 @@ cdef class CUDA_ARRAY_SPARSE_PROPERTIES_st: return self._tileExtent @tileExtent.setter def tileExtent(self, tileExtent not None : anon_struct5): - string.memcpy(&self._ptr[0].tileExtent, tileExtent.getPtr(), sizeof(self._ptr[0].tileExtent)) + string.memcpy(&self._ptr[0].tileExtent, tileExtent.getPtr(), sizeof(self._ptr[0].tileExtent)) @property def miptailFirstLevel(self): return self._ptr[0].miptailFirstLevel @@ -12087,7 +12087,7 @@ cdef class CUDA_ARRAY_MEMORY_REQUIREMENTS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -12147,7 +12147,7 @@ cdef class anon_struct6: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._hArray = CUarray(_ptr=&self._ptr[0].res.array.hArray) @@ -12170,16 +12170,16 @@ cdef class anon_struct6: return self._hArray @hArray.setter def hArray(self, hArray): - cdef ccuda.CUarray chArray + cdef cydriver.CUarray cyhArray if hArray is None: - chArray = 0 + cyhArray = 0 elif isinstance(hArray, (CUarray,)): phArray = int(hArray) - chArray = phArray + cyhArray = phArray else: phArray = 
int(CUarray(hArray)) - chArray = phArray - self._hArray._ptr[0] = chArray + cyhArray = phArray + self._hArray._ptr[0] = cyhArray {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -12197,7 +12197,7 @@ cdef class anon_struct7: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._hMipmappedArray = CUmipmappedArray(_ptr=&self._ptr[0].res.mipmap.hMipmappedArray) @@ -12220,16 +12220,16 @@ cdef class anon_struct7: return self._hMipmappedArray @hMipmappedArray.setter def hMipmappedArray(self, hMipmappedArray): - cdef ccuda.CUmipmappedArray chMipmappedArray + cdef cydriver.CUmipmappedArray cyhMipmappedArray if hMipmappedArray is None: - chMipmappedArray = 0 + cyhMipmappedArray = 0 elif isinstance(hMipmappedArray, (CUmipmappedArray,)): phMipmappedArray = int(hMipmappedArray) - chMipmappedArray = phMipmappedArray + cyhMipmappedArray = phMipmappedArray else: phMipmappedArray = int(CUmipmappedArray(hMipmappedArray)) - chMipmappedArray = phMipmappedArray - self._hMipmappedArray._ptr[0] = chMipmappedArray + cyhMipmappedArray = phMipmappedArray + self._hMipmappedArray._ptr[0] = cyhMipmappedArray {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -12253,7 +12253,7 @@ cdef class anon_struct8: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._devPtr = CUdeviceptr(_ptr=&self._ptr[0].res.linear.devPtr) @@ -12288,16 +12288,16 @@ cdef class anon_struct8: return self._devPtr @devPtr.setter def devPtr(self, devPtr): - cdef ccuda.CUdeviceptr cdevPtr + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 elif isinstance(devPtr, (CUdeviceptr)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr - self._devPtr._ptr[0] = cdevPtr + cydevPtr = pdevPtr + self._devPtr._ptr[0] = cydevPtr @property def format(self): @@ -12344,7 +12344,7 @@ cdef class anon_struct9: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._devPtr = CUdeviceptr(_ptr=&self._ptr[0].res.pitch2D.devPtr) @@ -12387,16 +12387,16 @@ cdef class anon_struct9: return self._devPtr @devPtr.setter def devPtr(self, devPtr): - cdef ccuda.CUdeviceptr cdevPtr + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 elif isinstance(devPtr, (CUdeviceptr)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr - self._devPtr._ptr[0] = cdevPtr + cydevPtr = pdevPtr + self._devPtr._ptr[0] = cydevPtr @property def format(self): @@ -12445,7 +12445,7 @@ cdef class anon_struct10: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -12494,7 +12494,7 @@ cdef class anon_union4: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._array = anon_struct6(_ptr=self._ptr) @@ -12537,31 +12537,31 @@ cdef class anon_union4: return self._array @array.setter def array(self, array not None : anon_struct6): - string.memcpy(&self._ptr[0].res.array, array.getPtr(), sizeof(self._ptr[0].res.array)) + string.memcpy(&self._ptr[0].res.array, array.getPtr(), sizeof(self._ptr[0].res.array)) @property def mipmap(self): return self._mipmap @mipmap.setter def mipmap(self, mipmap not None : anon_struct7): - string.memcpy(&self._ptr[0].res.mipmap, 
mipmap.getPtr(), sizeof(self._ptr[0].res.mipmap)) + string.memcpy(&self._ptr[0].res.mipmap, mipmap.getPtr(), sizeof(self._ptr[0].res.mipmap)) @property def linear(self): return self._linear @linear.setter def linear(self, linear not None : anon_struct8): - string.memcpy(&self._ptr[0].res.linear, linear.getPtr(), sizeof(self._ptr[0].res.linear)) + string.memcpy(&self._ptr[0].res.linear, linear.getPtr(), sizeof(self._ptr[0].res.linear)) @property def pitch2D(self): return self._pitch2D @pitch2D.setter def pitch2D(self, pitch2D not None : anon_struct9): - string.memcpy(&self._ptr[0].res.pitch2D, pitch2D.getPtr(), sizeof(self._ptr[0].res.pitch2D)) + string.memcpy(&self._ptr[0].res.pitch2D, pitch2D.getPtr(), sizeof(self._ptr[0].res.pitch2D)) @property def reserved(self): return self._reserved @reserved.setter def reserved(self, reserved not None : anon_struct10): - string.memcpy(&self._ptr[0].res.reserved, reserved.getPtr(), sizeof(self._ptr[0].res.reserved)) + string.memcpy(&self._ptr[0].res.reserved, reserved.getPtr(), sizeof(self._ptr[0].res.reserved)) {{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} @@ -12586,10 +12586,10 @@ cdef class CUDA_RESOURCE_DESC_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUDA_RESOURCE_DESC_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUDA_RESOURCE_DESC_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._res = anon_union4(_ptr=self._ptr) def __dealloc__(self): @@ -12626,7 +12626,7 @@ cdef class CUDA_RESOURCE_DESC_st: return self._res @res.setter def res(self, res not None : anon_union4): - string.memcpy(&self._ptr[0].res, res.getPtr(), sizeof(self._ptr[0].res)) + string.memcpy(&self._ptr[0].res, res.getPtr(), sizeof(self._ptr[0].res)) @property def flags(self): return self._ptr[0].flags @@ -12673,7 +12673,7 @@ cdef class CUDA_TEXTURE_DESC_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -12824,7 +12824,7 @@ cdef class CUDA_RESOURCE_VIEW_DESC_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -12950,7 +12950,7 @@ cdef class CUtensorMap_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -12998,7 +12998,7 @@ cdef class CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -13071,7 +13071,7 @@ cdef class CUDA_LAUNCH_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._function = CUfunction(_ptr=&self._ptr[0].function) self._hStream = CUstream(_ptr=&self._ptr[0].hStream) @@ -13130,16 +13130,16 @@ cdef class CUDA_LAUNCH_PARAMS_st: return self._function @function.setter def function(self, function): - cdef ccuda.CUfunction cfunction + cdef cydriver.CUfunction cyfunction if function is None: - cfunction = 0 + cyfunction = 0 elif isinstance(function, (CUfunction,)): pfunction = int(function) - cfunction = pfunction + cyfunction = pfunction else: pfunction = int(CUfunction(function)) - cfunction = pfunction - self._function._ptr[0] = cfunction + cyfunction = pfunction + self._function._ptr[0] = cyfunction 
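
The same coercion covers `function` above and `hStream` a few hunks below; `kernelParams` instead routes through `utils.HelperKernelParams` (held in the renamed `_cykernelParams`), which packs Python arguments into the `void**` array the driver expects. A sketch of the accepted packing formats (assumes the new module layout; `func` stands in for a CUfunction obtained elsewhere, e.g. via cuModuleGetFunction):

    import ctypes
    from cuda.bindings import driver

    params = driver.CUDA_LAUNCH_PARAMS()
    params.function = func                       # None / int / CUfunction all coerce
    params.gridDimX = params.gridDimY = params.gridDimZ = 1
    params.blockDimX = params.blockDimY = params.blockDimZ = 1
    # A (values, ctypes-types) pair is packed per-argument by HelperKernelParams;
    # a raw int address may also be passed and is used as the void** directly.
    params.kernelParams = ((2.0, 42), (ctypes.c_float, ctypes.c_int))
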
@property def gridDimX(self): return self._ptr[0].gridDimX @@ -13187,23 +13187,23 @@ cdef class CUDA_LAUNCH_PARAMS_st: return self._hStream @hStream.setter def hStream(self, hStream): - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - self._hStream._ptr[0] = chStream + cyhStream = phStream + self._hStream._ptr[0] = cyhStream @property def kernelParams(self): return self._ptr[0].kernelParams @kernelParams.setter def kernelParams(self, kernelParams): - self._ckernelParams = utils.HelperKernelParams(kernelParams) - self._ptr[0].kernelParams = self._ckernelParams.ckernelParams + self._cykernelParams = utils.HelperKernelParams(kernelParams) + self._ptr[0].kernelParams = self._cykernelParams.ckernelParams {{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st' in found_types}} @@ -13223,7 +13223,7 @@ cdef class anon_struct11: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -13250,15 +13250,15 @@ cdef class anon_struct11: return self._ptr[0].handle.win32.handle @handle.setter def handle(self, handle): - _chandle = utils.HelperInputVoidPtr(handle) - self._ptr[0].handle.win32.handle = _chandle.cptr + _cyhandle = utils.HelperInputVoidPtr(handle) + self._ptr[0].handle.win32.handle = _cyhandle.cptr @property def name(self): return self._ptr[0].handle.win32.name @name.setter def name(self, name): - _cname = utils.HelperInputVoidPtr(name) - self._ptr[0].handle.win32.name = _cname.cptr + _cyname = utils.HelperInputVoidPtr(name) + self._ptr[0].handle.win32.name = _cyname.cptr {{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st' in found_types}} @@ -13280,7 +13280,7 @@ cdef class anon_union5: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._win32 = anon_struct11(_ptr=self._ptr) @@ -13317,14 +13317,14 @@ cdef class anon_union5: return self._win32 @win32.setter def win32(self, win32 not None : anon_struct11): - string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) + string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) @property def nvSciBufObject(self): return self._ptr[0].handle.nvSciBufObject @nvSciBufObject.setter def nvSciBufObject(self, nvSciBufObject): - _cnvSciBufObject = utils.HelperInputVoidPtr(nvSciBufObject) - self._ptr[0].handle.nvSciBufObject = _cnvSciBufObject.cptr + _cynvSciBufObject = utils.HelperInputVoidPtr(nvSciBufObject) + self._ptr[0].handle.nvSciBufObject = _cynvSciBufObject.cptr {{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st' in found_types}} @@ -13353,10 +13353,10 @@ cdef class CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._handle = anon_union5(_ptr=self._ptr) def __dealloc__(self): @@ -13401,7 +13401,7 @@ cdef class CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st: return self._handle @handle.setter def handle(self, handle not None : anon_union5): - string.memcpy(&self._ptr[0].handle, handle.getPtr(), 
sizeof(self._ptr[0].handle)) + string.memcpy(&self._ptr[0].handle, handle.getPtr(), sizeof(self._ptr[0].handle)) @property def size(self): return self._ptr[0].size @@ -13448,7 +13448,7 @@ cdef class CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -13530,7 +13530,7 @@ cdef class CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._arrayDesc = CUDA_ARRAY3D_DESCRIPTOR(_ptr=&self._ptr[0].arrayDesc) def __dealloc__(self): @@ -13570,7 +13570,7 @@ cdef class CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st: return self._arrayDesc @arrayDesc.setter def arrayDesc(self, arrayDesc not None : CUDA_ARRAY3D_DESCRIPTOR): - string.memcpy(&self._ptr[0].arrayDesc, arrayDesc.getPtr(), sizeof(self._ptr[0].arrayDesc)) + string.memcpy(&self._ptr[0].arrayDesc, arrayDesc.getPtr(), sizeof(self._ptr[0].arrayDesc)) @property def numLevels(self): return self._ptr[0].numLevels @@ -13602,7 +13602,7 @@ cdef class anon_struct12: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -13629,15 +13629,15 @@ cdef class anon_struct12: return self._ptr[0].handle.win32.handle @handle.setter def handle(self, handle): - _chandle = utils.HelperInputVoidPtr(handle) - self._ptr[0].handle.win32.handle = _chandle.cptr + _cyhandle = utils.HelperInputVoidPtr(handle) + self._ptr[0].handle.win32.handle = _cyhandle.cptr @property def name(self): return self._ptr[0].handle.win32.name @name.setter def name(self, name): - _cname = utils.HelperInputVoidPtr(name) - self._ptr[0].handle.win32.name = _cname.cptr + _cyname = utils.HelperInputVoidPtr(name) + self._ptr[0].handle.win32.name = _cyname.cptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st' in found_types}} @@ -13659,7 +13659,7 @@ cdef class anon_union6: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._win32 = anon_struct12(_ptr=self._ptr) @@ -13696,14 +13696,14 @@ cdef class anon_union6: return self._win32 @win32.setter def win32(self, win32 not None : anon_struct12): - string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) + string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) @property def nvSciSyncObj(self): return self._ptr[0].handle.nvSciSyncObj @nvSciSyncObj.setter def nvSciSyncObj(self, nvSciSyncObj): - _cnvSciSyncObj = utils.HelperInputVoidPtr(nvSciSyncObj) - self._ptr[0].handle.nvSciSyncObj = _cnvSciSyncObj.cptr + _cynvSciSyncObj = utils.HelperInputVoidPtr(nvSciSyncObj) + self._ptr[0].handle.nvSciSyncObj = _cynvSciSyncObj.cptr {{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st' in found_types}} @@ -13730,10 +13730,10 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._handle = anon_union6(_ptr=self._ptr) def __dealloc__(self): @@ -13774,7 +13774,7 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st: return self._handle @handle.setter def handle(self, 
handle not None : anon_union6): - string.memcpy(&self._ptr[0].handle, handle.getPtr(), sizeof(self._ptr[0].handle)) + string.memcpy(&self._ptr[0].handle, handle.getPtr(), sizeof(self._ptr[0].handle)) @property def flags(self): return self._ptr[0].flags @@ -13804,7 +13804,7 @@ cdef class anon_struct13: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -13847,7 +13847,7 @@ cdef class anon_union7: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -13874,8 +13874,8 @@ cdef class anon_union7: return self._ptr[0].params.nvSciSync.fence @fence.setter def fence(self, fence): - _cfence = utils.HelperInputVoidPtr(fence) - self._ptr[0].params.nvSciSync.fence = _cfence.cptr + _cyfence = utils.HelperInputVoidPtr(fence) + self._ptr[0].params.nvSciSync.fence = _cyfence.cptr @property def reserved(self): return self._ptr[0].params.nvSciSync.reserved @@ -13899,7 +13899,7 @@ cdef class anon_struct14: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -13946,7 +13946,7 @@ cdef class anon_struct15: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._fence = anon_struct13(_ptr=self._ptr) @@ -13983,19 +13983,19 @@ cdef class anon_struct15: return self._fence @fence.setter def fence(self, fence not None : anon_struct13): - string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) + string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) @property def nvSciSync(self): return self._nvSciSync @nvSciSync.setter def nvSciSync(self, nvSciSync not None : anon_union7): - string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) + string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) @property def keyedMutex(self): return self._keyedMutex @keyedMutex.setter def keyedMutex(self, keyedMutex not None : anon_struct14): - string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) + string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) @property def reserved(self): return self._ptr[0].params.reserved @@ -14035,7 +14035,7 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._params = anon_struct15(_ptr=self._ptr) def __dealloc__(self): @@ -14065,7 +14065,7 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st: return self._params @params.setter def params(self, params not None : anon_struct15): - string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) + string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) @property def flags(self): return self._ptr[0].flags @@ -14095,7 +14095,7 @@ cdef class anon_struct16: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -14138,7 +14138,7 @@ cdef class anon_union8: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -14165,8 +14165,8 @@ cdef class anon_union8: return self._ptr[0].params.nvSciSync.fence @fence.setter def fence(self, 
fence): - _cfence = utils.HelperInputVoidPtr(fence) - self._ptr[0].params.nvSciSync.fence = _cfence.cptr + _cyfence = utils.HelperInputVoidPtr(fence) + self._ptr[0].params.nvSciSync.fence = _cyfence.cptr @property def reserved(self): return self._ptr[0].params.nvSciSync.reserved @@ -14192,7 +14192,7 @@ cdef class anon_struct17: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -14249,7 +14249,7 @@ cdef class anon_struct18: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._fence = anon_struct16(_ptr=self._ptr) @@ -14286,19 +14286,19 @@ cdef class anon_struct18: return self._fence @fence.setter def fence(self, fence not None : anon_struct16): - string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) + string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) @property def nvSciSync(self): return self._nvSciSync @nvSciSync.setter def nvSciSync(self, nvSciSync not None : anon_union8): - string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) + string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) @property def keyedMutex(self): return self._keyedMutex @keyedMutex.setter def keyedMutex(self, keyedMutex not None : anon_struct17): - string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) + string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) @property def reserved(self): return self._ptr[0].params.reserved @@ -14338,7 +14338,7 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._params = anon_struct18(_ptr=self._ptr) def __dealloc__(self): @@ -14368,7 +14368,7 @@ cdef class CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st: return self._params @params.setter def params(self, params not None : anon_struct18): - string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) + string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) @property def flags(self): return self._ptr[0].flags @@ -14408,7 +14408,7 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -14438,7 +14438,7 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccuda.CUexternalSemaphore) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cydriver.CUexternalSemaphore) for x in range(self._extSemArray_length)] return [CUexternalSemaphore(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -14449,9 +14449,9 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccuda.CUexternalSemaphore)) + self._extSemArray = calloc(len(val), sizeof(cydriver.CUexternalSemaphore)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUexternalSemaphore))) + raise MemoryError('Failed to allocate length 
x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUexternalSemaphore))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -14459,7 +14459,7 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) for x in range(self._paramsArray_length)] return [CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS(_ptr=arr) for arr in arrs] @paramsArray.setter def paramsArray(self, val): @@ -14470,13 +14470,13 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) + self._paramsArray = calloc(len(val), sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS))) self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) @property def numExtSems(self): @@ -14511,7 +14511,7 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -14541,7 +14541,7 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccuda.CUexternalSemaphore) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cydriver.CUexternalSemaphore) for x in range(self._extSemArray_length)] return [CUexternalSemaphore(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -14552,9 +14552,9 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccuda.CUexternalSemaphore)) + self._extSemArray = calloc(len(val), sizeof(cydriver.CUexternalSemaphore)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUexternalSemaphore))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUexternalSemaphore))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -14562,7 +14562,7 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) for x in range(self._paramsArray_length)] return [CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS(_ptr=arr) for arr in arrs] @paramsArray.setter def 
paramsArray(self, val): @@ -14573,13 +14573,13 @@ cdef class CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) + self._paramsArray = calloc(len(val), sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS))) self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) @property def numExtSems(self): @@ -14614,7 +14614,7 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -14644,7 +14644,7 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_st: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccuda.CUexternalSemaphore) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cydriver.CUexternalSemaphore) for x in range(self._extSemArray_length)] return [CUexternalSemaphore(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -14655,9 +14655,9 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_st: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccuda.CUexternalSemaphore)) + self._extSemArray = calloc(len(val), sizeof(cydriver.CUexternalSemaphore)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUexternalSemaphore))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUexternalSemaphore))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -14665,7 +14665,7 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_st: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) for x in range(self._paramsArray_length)] return [CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS(_ptr=arr) for arr in arrs] @paramsArray.setter def paramsArray(self, val): @@ -14676,13 +14676,13 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_st: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) + self._paramsArray = calloc(len(val), sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS))) 
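# A minimal usage sketch of the array-setter pattern above: each setter
# calloc()s a C array of len(val) elements and memcpy()s one entry per
# item, so a plain Python list of wrapper objects can be assigned.
# Assumes the un-suffixed aliases CUDA_EXT_SEM_WAIT_NODE_PARAMS and
# CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS are exported by the new module as
# in prior releases; treat this as an illustration, not part of the patch.
from cuda.bindings import driver

node = driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS()
wait = driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS()
node.paramsArray = [wait]   # triggers the calloc + per-element memcpy shown above
node.numExtSems = 1         # the element count is tracked separately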
self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) @property def numExtSems(self): @@ -14717,7 +14717,7 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -14747,7 +14747,7 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccuda.CUexternalSemaphore) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cydriver.CUexternalSemaphore) for x in range(self._extSemArray_length)] return [CUexternalSemaphore(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -14758,9 +14758,9 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccuda.CUexternalSemaphore)) + self._extSemArray = calloc(len(val), sizeof(cydriver.CUexternalSemaphore)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUexternalSemaphore))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUexternalSemaphore))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -14768,7 +14768,7 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) for x in range(self._paramsArray_length)] return [CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS(_ptr=arr) for arr in arrs] @paramsArray.setter def paramsArray(self, val): @@ -14779,13 +14779,13 @@ cdef class CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) + self._paramsArray = calloc(len(val), sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS))) self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) @property def numExtSems(self): @@ -14812,7 +14812,7 @@ cdef class anon_union9: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._mipmap = CUmipmappedArray(_ptr=&self._ptr[0].resource.mipmap) @@ -14840,31 +14840,31 @@ cdef class anon_union9: return self._mipmap @mipmap.setter def 
mipmap(self, mipmap): - cdef ccuda.CUmipmappedArray cmipmap + cdef cydriver.CUmipmappedArray cymipmap if mipmap is None: - cmipmap = 0 + cymipmap = 0 elif isinstance(mipmap, (CUmipmappedArray,)): pmipmap = int(mipmap) - cmipmap = pmipmap + cymipmap = pmipmap else: pmipmap = int(CUmipmappedArray(mipmap)) - cmipmap = pmipmap - self._mipmap._ptr[0] = cmipmap + cymipmap = pmipmap + self._mipmap._ptr[0] = cymipmap @property def array(self): return self._array @array.setter def array(self, array): - cdef ccuda.CUarray carray + cdef cydriver.CUarray cyarray if array is None: - carray = 0 + cyarray = 0 elif isinstance(array, (CUarray,)): parray = int(array) - carray = parray + cyarray = parray else: parray = int(CUarray(array)) - carray = parray - self._array._ptr[0] = carray + cyarray = parray + self._array._ptr[0] = cyarray {{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} @@ -14896,7 +14896,7 @@ cdef class anon_struct19: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -15011,7 +15011,7 @@ cdef class anon_struct20: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -15074,7 +15074,7 @@ cdef class anon_union10: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._sparseLevel = anon_struct19(_ptr=self._ptr) @@ -15102,13 +15102,13 @@ cdef class anon_union10: return self._sparseLevel @sparseLevel.setter def sparseLevel(self, sparseLevel not None : anon_struct19): - string.memcpy(&self._ptr[0].subresource.sparseLevel, sparseLevel.getPtr(), sizeof(self._ptr[0].subresource.sparseLevel)) + string.memcpy(&self._ptr[0].subresource.sparseLevel, sparseLevel.getPtr(), sizeof(self._ptr[0].subresource.sparseLevel)) @property def miptail(self): return self._miptail @miptail.setter def miptail(self, miptail not None : anon_struct20): - string.memcpy(&self._ptr[0].subresource.miptail, miptail.getPtr(), sizeof(self._ptr[0].subresource.miptail)) + string.memcpy(&self._ptr[0].subresource.miptail, miptail.getPtr(), sizeof(self._ptr[0].subresource.miptail)) {{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} @@ -15126,7 +15126,7 @@ cdef class anon_union11: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._memHandle = CUmemGenericAllocationHandle(_ptr=&self._ptr[0].memHandle.memHandle) @@ -15149,16 +15149,16 @@ cdef class anon_union11: return self._memHandle @memHandle.setter def memHandle(self, memHandle): - cdef ccuda.CUmemGenericAllocationHandle cmemHandle + cdef cydriver.CUmemGenericAllocationHandle cymemHandle if memHandle is None: - cmemHandle = 0 + cymemHandle = 0 elif isinstance(memHandle, (CUmemGenericAllocationHandle)): pmemHandle = int(memHandle) - cmemHandle = pmemHandle + cymemHandle = pmemHandle else: pmemHandle = int(CUmemGenericAllocationHandle(memHandle)) - cmemHandle = pmemHandle - self._memHandle._ptr[0] = cmemHandle + cymemHandle = pmemHandle + self._memHandle._ptr[0] = cymemHandle {{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} @@ -15201,10 +15201,10 @@ cdef class CUarrayMapInfo_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUarrayMapInfo_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUarrayMapInfo_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._resource = 
anon_union9(_ptr=self._ptr) self._subresource = anon_union10(_ptr=self._ptr) @@ -15275,7 +15275,7 @@ cdef class CUarrayMapInfo_st: return self._resource @resource.setter def resource(self, resource not None : anon_union9): - string.memcpy(&self._ptr[0].resource, resource.getPtr(), sizeof(self._ptr[0].resource)) + string.memcpy(&self._ptr[0].resource, resource.getPtr(), sizeof(self._ptr[0].resource)) @property def subresourceType(self): return CUarraySparseSubresourceType(self._ptr[0].subresourceType) @@ -15287,7 +15287,7 @@ cdef class CUarrayMapInfo_st: return self._subresource @subresource.setter def subresource(self, subresource not None : anon_union10): - string.memcpy(&self._ptr[0].subresource, subresource.getPtr(), sizeof(self._ptr[0].subresource)) + string.memcpy(&self._ptr[0].subresource, subresource.getPtr(), sizeof(self._ptr[0].subresource)) @property def memOperationType(self): return CUmemOperationType(self._ptr[0].memOperationType) @@ -15305,7 +15305,7 @@ cdef class CUarrayMapInfo_st: return self._memHandle @memHandle.setter def memHandle(self, memHandle not None : anon_union11): - string.memcpy(&self._ptr[0].memHandle, memHandle.getPtr(), sizeof(self._ptr[0].memHandle)) + string.memcpy(&self._ptr[0].memHandle, memHandle.getPtr(), sizeof(self._ptr[0].memHandle)) @property def offset(self): return self._ptr[0].offset @@ -15354,7 +15354,7 @@ cdef class CUmemLocation_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -15410,7 +15410,7 @@ cdef class anon_struct21: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -15501,7 +15501,7 @@ cdef class CUmemAllocationProp_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._location = CUmemLocation(_ptr=&self._ptr[0].location) self._allocFlags = anon_struct21(_ptr=self._ptr) @@ -15552,20 +15552,20 @@ cdef class CUmemAllocationProp_st: return self._location @location.setter def location(self, location not None : CUmemLocation): - string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) + string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) @property def win32HandleMetaData(self): return self._ptr[0].win32HandleMetaData @win32HandleMetaData.setter def win32HandleMetaData(self, win32HandleMetaData): - _cwin32HandleMetaData = utils.HelperInputVoidPtr(win32HandleMetaData) - self._ptr[0].win32HandleMetaData = _cwin32HandleMetaData.cptr + _cywin32HandleMetaData = utils.HelperInputVoidPtr(win32HandleMetaData) + self._ptr[0].win32HandleMetaData = _cywin32HandleMetaData.cptr @property def allocFlags(self): return self._allocFlags @allocFlags.setter def allocFlags(self, allocFlags not None : anon_struct21): - string.memcpy(&self._ptr[0].allocFlags, allocFlags.getPtr(), sizeof(self._ptr[0].allocFlags)) + string.memcpy(&self._ptr[0].allocFlags, allocFlags.getPtr(), sizeof(self._ptr[0].allocFlags)) {{endif}} {{if 'struct CUmulticastObjectProp_st' in found_types}} @@ -15597,7 +15597,7 @@ cdef class CUmulticastObjectProp_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -15674,7 +15674,7 @@ cdef class CUmemAccessDesc_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 
0): self._location = CUmemLocation(_ptr=&self._ptr[0].location) def __dealloc__(self): @@ -15700,7 +15700,7 @@ cdef class CUmemAccessDesc_st: return self._location @location.setter def location(self, location not None : CUmemLocation): - string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) + string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) @property def flags(self): return CUmemAccess_flags(self._ptr[0].flags) @@ -15736,7 +15736,7 @@ cdef class CUgraphExecUpdateResultInfo_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._errorNode = CUgraphNode(_ptr=&self._ptr[0].errorNode) self._errorFromNode = CUgraphNode(_ptr=&self._ptr[0].errorFromNode) @@ -15773,31 +15773,31 @@ cdef class CUgraphExecUpdateResultInfo_st: return self._errorNode @errorNode.setter def errorNode(self, errorNode): - cdef ccuda.CUgraphNode cerrorNode + cdef cydriver.CUgraphNode cyerrorNode if errorNode is None: - cerrorNode = 0 + cyerrorNode = 0 elif isinstance(errorNode, (CUgraphNode,)): perrorNode = int(errorNode) - cerrorNode = perrorNode + cyerrorNode = perrorNode else: perrorNode = int(CUgraphNode(errorNode)) - cerrorNode = perrorNode - self._errorNode._ptr[0] = cerrorNode + cyerrorNode = perrorNode + self._errorNode._ptr[0] = cyerrorNode @property def errorFromNode(self): return self._errorFromNode @errorFromNode.setter def errorFromNode(self, errorFromNode): - cdef ccuda.CUgraphNode cerrorFromNode + cdef cydriver.CUgraphNode cyerrorFromNode if errorFromNode is None: - cerrorFromNode = 0 + cyerrorFromNode = 0 elif isinstance(errorFromNode, (CUgraphNode,)): perrorFromNode = int(errorFromNode) - cerrorFromNode = perrorFromNode + cyerrorFromNode = perrorFromNode else: perrorFromNode = int(CUgraphNode(errorFromNode)) - cerrorFromNode = perrorFromNode - self._errorFromNode._ptr[0] = cerrorFromNode + cyerrorFromNode = perrorFromNode + self._errorFromNode._ptr[0] = cyerrorFromNode {{endif}} {{if 'struct CUmemPoolProps_st' in found_types}} @@ -15838,7 +15838,7 @@ cdef class CUmemPoolProps_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._location = CUmemLocation(_ptr=&self._ptr[0].location) def __dealloc__(self): @@ -15896,14 +15896,14 @@ cdef class CUmemPoolProps_st: return self._location @location.setter def location(self, location not None : CUmemLocation): - string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) + string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) @property def win32SecurityAttributes(self): return self._ptr[0].win32SecurityAttributes @win32SecurityAttributes.setter def win32SecurityAttributes(self, win32SecurityAttributes): - _cwin32SecurityAttributes = utils.HelperInputVoidPtr(win32SecurityAttributes) - self._ptr[0].win32SecurityAttributes = _cwin32SecurityAttributes.cptr + _cywin32SecurityAttributes = utils.HelperInputVoidPtr(win32SecurityAttributes) + self._ptr[0].win32SecurityAttributes = _cywin32SecurityAttributes.cptr @property def maxSize(self): return self._ptr[0].maxSize @@ -15947,7 +15947,7 @@ cdef class CUmemPoolPtrExportData_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -16007,7 +16007,7 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v1_st: if _ptr == 0: self._ptr = &self.__val else: - 
self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._poolProps = CUmemPoolProps(_ptr=&self._ptr[0].poolProps) self._dptr = CUdeviceptr(_ptr=&self._ptr[0].dptr) @@ -16047,10 +16047,10 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v1_st: return self._poolProps @poolProps.setter def poolProps(self, poolProps not None : CUmemPoolProps): - string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) + string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) @property def accessDescs(self): - arrs = [self._ptr[0].accessDescs + x*sizeof(ccuda.CUmemAccessDesc) for x in range(self._accessDescs_length)] + arrs = [self._ptr[0].accessDescs + x*sizeof(cydriver.CUmemAccessDesc) for x in range(self._accessDescs_length)] return [CUmemAccessDesc(_ptr=arr) for arr in arrs] @accessDescs.setter def accessDescs(self, val): @@ -16061,13 +16061,13 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v1_st: else: if self._accessDescs_length != len(val): free(self._accessDescs) - self._accessDescs = calloc(len(val), sizeof(ccuda.CUmemAccessDesc)) + self._accessDescs = calloc(len(val), sizeof(cydriver.CUmemAccessDesc)) if self._accessDescs is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUmemAccessDesc))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUmemAccessDesc))) self._accessDescs_length = len(val) self._ptr[0].accessDescs = self._accessDescs for idx in range(len(val)): - string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(ccuda.CUmemAccessDesc)) + string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(cydriver.CUmemAccessDesc)) @property def accessDescCount(self): @@ -16086,16 +16086,16 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v1_st: return self._dptr @dptr.setter def dptr(self, dptr): - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - self._dptr._ptr[0] = cdptr + cydptr = pdptr + self._dptr._ptr[0] = cydptr {{endif}} {{if 'struct CUDA_MEM_ALLOC_NODE_PARAMS_v2_st' in found_types}} @@ -16131,7 +16131,7 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v2_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._poolProps = CUmemPoolProps(_ptr=&self._ptr[0].poolProps) self._dptr = CUdeviceptr(_ptr=&self._ptr[0].dptr) @@ -16171,10 +16171,10 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v2_st: return self._poolProps @poolProps.setter def poolProps(self, poolProps not None : CUmemPoolProps): - string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) + string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) @property def accessDescs(self): - arrs = [self._ptr[0].accessDescs + x*sizeof(ccuda.CUmemAccessDesc) for x in range(self._accessDescs_length)] + arrs = [self._ptr[0].accessDescs + x*sizeof(cydriver.CUmemAccessDesc) for x in range(self._accessDescs_length)] return [CUmemAccessDesc(_ptr=arr) for arr in arrs] @accessDescs.setter def accessDescs(self, val): @@ -16185,13 +16185,13 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v2_st: else: if self._accessDescs_length != len(val): free(self._accessDescs) - self._accessDescs = calloc(len(val), sizeof(ccuda.CUmemAccessDesc)) + 
self._accessDescs = calloc(len(val), sizeof(cydriver.CUmemAccessDesc)) if self._accessDescs is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccuda.CUmemAccessDesc))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cydriver.CUmemAccessDesc))) self._accessDescs_length = len(val) self._ptr[0].accessDescs = self._accessDescs for idx in range(len(val)): - string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(ccuda.CUmemAccessDesc)) + string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(cydriver.CUmemAccessDesc)) @property def accessDescCount(self): @@ -16210,16 +16210,16 @@ cdef class CUDA_MEM_ALLOC_NODE_PARAMS_v2_st: return self._dptr @dptr.setter def dptr(self, dptr): - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - self._dptr._ptr[0] = cdptr + cydptr = pdptr + self._dptr._ptr[0] = cydptr {{endif}} {{if 'struct CUDA_MEM_FREE_NODE_PARAMS_st' in found_types}} @@ -16243,7 +16243,7 @@ cdef class CUDA_MEM_FREE_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._dptr = CUdeviceptr(_ptr=&self._ptr[0].dptr) def __dealloc__(self): @@ -16265,16 +16265,16 @@ cdef class CUDA_MEM_FREE_NODE_PARAMS_st: return self._dptr @dptr.setter def dptr(self, dptr): - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - self._dptr._ptr[0] = cdptr + cydptr = pdptr + self._dptr._ptr[0] = cydptr {{endif}} {{if 'struct CUDA_CHILD_GRAPH_NODE_PARAMS_st' in found_types}} @@ -16299,7 +16299,7 @@ cdef class CUDA_CHILD_GRAPH_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._graph = CUgraph(_ptr=&self._ptr[0].graph) def __dealloc__(self): @@ -16321,16 +16321,16 @@ cdef class CUDA_CHILD_GRAPH_NODE_PARAMS_st: return self._graph @graph.setter def graph(self, graph): - cdef ccuda.CUgraph cgraph + cdef cydriver.CUgraph cygraph if graph is None: - cgraph = 0 + cygraph = 0 elif isinstance(graph, (CUgraph,)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(CUgraph(graph)) - cgraph = pgraph - self._graph._ptr[0] = cgraph + cygraph = pgraph + self._graph._ptr[0] = cygraph {{endif}} {{if 'struct CUDA_EVENT_RECORD_NODE_PARAMS_st' in found_types}} @@ -16353,7 +16353,7 @@ cdef class CUDA_EVENT_RECORD_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._event = CUevent(_ptr=&self._ptr[0].event) def __dealloc__(self): @@ -16375,16 +16375,16 @@ cdef class CUDA_EVENT_RECORD_NODE_PARAMS_st: return self._event @event.setter def event(self, event): - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent {{endif}} {{if 'struct CUDA_EVENT_WAIT_NODE_PARAMS_st' in 
found_types}} @@ -16407,7 +16407,7 @@ cdef class CUDA_EVENT_WAIT_NODE_PARAMS_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._event = CUevent(_ptr=&self._ptr[0].event) def __dealloc__(self): @@ -16429,16 +16429,16 @@ cdef class CUDA_EVENT_WAIT_NODE_PARAMS_st: return self._event @event.setter def event(self, event): - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent {{endif}} {{if 'struct CUgraphNodeParams_st' in found_types}} @@ -16491,10 +16491,10 @@ cdef class CUgraphNodeParams_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUgraphNodeParams_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUgraphNodeParams_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._kernel = CUDA_KERNEL_NODE_PARAMS_v3(_ptr=&self._ptr[0].kernel) self._memcpy = CUDA_MEMCPY_NODE_PARAMS(_ptr=&self._ptr[0].memcpy) @@ -16611,79 +16611,79 @@ cdef class CUgraphNodeParams_st: return self._kernel @kernel.setter def kernel(self, kernel not None : CUDA_KERNEL_NODE_PARAMS_v3): - string.memcpy(&self._ptr[0].kernel, kernel.getPtr(), sizeof(self._ptr[0].kernel)) + string.memcpy(&self._ptr[0].kernel, kernel.getPtr(), sizeof(self._ptr[0].kernel)) @property def memcpy(self): return self._memcpy @memcpy.setter def memcpy(self, memcpy not None : CUDA_MEMCPY_NODE_PARAMS): - string.memcpy(&self._ptr[0].memcpy, memcpy.getPtr(), sizeof(self._ptr[0].memcpy)) + string.memcpy(&self._ptr[0].memcpy, memcpy.getPtr(), sizeof(self._ptr[0].memcpy)) @property def memset(self): return self._memset @memset.setter def memset(self, memset not None : CUDA_MEMSET_NODE_PARAMS_v2): - string.memcpy(&self._ptr[0].memset, memset.getPtr(), sizeof(self._ptr[0].memset)) + string.memcpy(&self._ptr[0].memset, memset.getPtr(), sizeof(self._ptr[0].memset)) @property def host(self): return self._host @host.setter def host(self, host not None : CUDA_HOST_NODE_PARAMS_v2): - string.memcpy(&self._ptr[0].host, host.getPtr(), sizeof(self._ptr[0].host)) + string.memcpy(&self._ptr[0].host, host.getPtr(), sizeof(self._ptr[0].host)) @property def graph(self): return self._graph @graph.setter def graph(self, graph not None : CUDA_CHILD_GRAPH_NODE_PARAMS): - string.memcpy(&self._ptr[0].graph, graph.getPtr(), sizeof(self._ptr[0].graph)) + string.memcpy(&self._ptr[0].graph, graph.getPtr(), sizeof(self._ptr[0].graph)) @property def eventWait(self): return self._eventWait @eventWait.setter def eventWait(self, eventWait not None : CUDA_EVENT_WAIT_NODE_PARAMS): - string.memcpy(&self._ptr[0].eventWait, eventWait.getPtr(), sizeof(self._ptr[0].eventWait)) + string.memcpy(&self._ptr[0].eventWait, eventWait.getPtr(), sizeof(self._ptr[0].eventWait)) @property def eventRecord(self): return self._eventRecord @eventRecord.setter def eventRecord(self, eventRecord not None : CUDA_EVENT_RECORD_NODE_PARAMS): - string.memcpy(&self._ptr[0].eventRecord, eventRecord.getPtr(), sizeof(self._ptr[0].eventRecord)) + string.memcpy(&self._ptr[0].eventRecord, eventRecord.getPtr(), sizeof(self._ptr[0].eventRecord)) @property def extSemSignal(self): return self._extSemSignal @extSemSignal.setter def extSemSignal(self, extSemSignal 
not None : CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2): - string.memcpy(&self._ptr[0].extSemSignal, extSemSignal.getPtr(), sizeof(self._ptr[0].extSemSignal)) + string.memcpy(&self._ptr[0].extSemSignal, extSemSignal.getPtr(), sizeof(self._ptr[0].extSemSignal)) @property def extSemWait(self): return self._extSemWait @extSemWait.setter def extSemWait(self, extSemWait not None : CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2): - string.memcpy(&self._ptr[0].extSemWait, extSemWait.getPtr(), sizeof(self._ptr[0].extSemWait)) + string.memcpy(&self._ptr[0].extSemWait, extSemWait.getPtr(), sizeof(self._ptr[0].extSemWait)) @property def alloc(self): return self._alloc @alloc.setter def alloc(self, alloc not None : CUDA_MEM_ALLOC_NODE_PARAMS_v2): - string.memcpy(&self._ptr[0].alloc, alloc.getPtr(), sizeof(self._ptr[0].alloc)) + string.memcpy(&self._ptr[0].alloc, alloc.getPtr(), sizeof(self._ptr[0].alloc)) @property def free(self): return self._free @free.setter def free(self, free not None : CUDA_MEM_FREE_NODE_PARAMS): - string.memcpy(&self._ptr[0].free, free.getPtr(), sizeof(self._ptr[0].free)) + string.memcpy(&self._ptr[0].free, free.getPtr(), sizeof(self._ptr[0].free)) @property def memOp(self): return self._memOp @memOp.setter def memOp(self, memOp not None : CUDA_BATCH_MEM_OP_NODE_PARAMS_v2): - string.memcpy(&self._ptr[0].memOp, memOp.getPtr(), sizeof(self._ptr[0].memOp)) + string.memcpy(&self._ptr[0].memOp, memOp.getPtr(), sizeof(self._ptr[0].memOp)) @property def conditional(self): return self._conditional @conditional.setter def conditional(self, conditional not None : CUDA_CONDITIONAL_NODE_PARAMS): - string.memcpy(&self._ptr[0].conditional, conditional.getPtr(), sizeof(self._ptr[0].conditional)) + string.memcpy(&self._ptr[0].conditional, conditional.getPtr(), sizeof(self._ptr[0].conditional)) @property def reserved2(self): return self._ptr[0].reserved2 @@ -16711,7 +16711,7 @@ cdef class CUdevSmResource_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -16758,10 +16758,10 @@ cdef class CUdevResource_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUdevResource_st)) + self._val_ptr = calloc(1, sizeof(cydriver.CUdevResource_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._sm = CUdevSmResource(_ptr=&self._ptr[0].sm) def __dealloc__(self): @@ -16811,7 +16811,7 @@ cdef class CUdevResource_st: return self._sm @sm.setter def sm(self, sm not None : CUdevSmResource): - string.memcpy(&self._ptr[0].sm, sm.getPtr(), sizeof(self._ptr[0].sm)) + string.memcpy(&self._ptr[0].sm, sm.getPtr(), sizeof(self._ptr[0].sm)) @property def _oversize(self): return PyBytes_FromStringAndSize(self._ptr[0]._oversize, 48) @@ -16840,7 +16840,7 @@ cdef class anon_union14: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -16871,7 +16871,7 @@ cdef class anon_union14: raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) pArray = [int(_pArray) for _pArray in pArray] for _idx, _pArray in enumerate(pArray): - self._ptr[0].frame.pArray[_idx] = _pArray + self._ptr[0].frame.pArray[_idx] = _pArray @property def pPitch(self): @@ -16923,10 +16923,10 @@ cdef class CUeglFrame_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccuda.CUeglFrame_st)) + self._val_ptr = calloc(1, 
sizeof(cydriver.CUeglFrame_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._frame = anon_union14(_ptr=self._ptr) def __dealloc__(self): @@ -16985,7 +16985,7 @@ cdef class CUeglFrame_st: return self._frame @frame.setter def frame(self, frame not None : anon_union14): - string.memcpy(&self._ptr[0].frame, frame.getPtr(), sizeof(self._ptr[0].frame)) + string.memcpy(&self._ptr[0].frame, frame.getPtr(), sizeof(self._ptr[0].frame)) @property def width(self): return self._ptr[0].width @@ -17056,7 +17056,7 @@ cdef class cuuint32_t: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17084,7 +17084,7 @@ cdef class cuuint64_t: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17114,7 +17114,7 @@ cdef class CUdeviceptr_v2: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17144,7 +17144,7 @@ cdef class CUdevice_v1: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17174,7 +17174,7 @@ cdef class CUtexObject_v1: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17204,7 +17204,7 @@ cdef class CUsurfObject_v1: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17232,7 +17232,7 @@ cdef class CUmemGenericAllocationHandle_v1: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17260,7 +17260,7 @@ cdef class GLenum: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17288,7 +17288,7 @@ cdef class GLuint: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17316,7 +17316,7 @@ cdef class EGLint: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17344,7 +17344,7 @@ cdef class VdpDevice: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17372,7 +17372,7 @@ cdef class VdpGetProcAddress: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17400,7 +17400,7 @@ cdef class VdpVideoSurface: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17428,7 +17428,7 @@ cdef class VdpOutputSurface: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -17468,9 +17468,9 @@ def cuGetErrorString(error not None : CUresult): -------- :py:obj:`~.CUresult`, :py:obj:`~.cudaGetErrorString` """ - cdef ccuda.CUresult cerror = error.value + cdef cydriver.CUresult cyerror = error.value cdef const char* pStr = NULL - 
err = ccuda.cuGetErrorString(cerror, &pStr) + err = cydriver.cuGetErrorString(cyerror, &pStr) return (CUresult(err), pStr) {{endif}} @@ -17501,9 +17501,9 @@ def cuGetErrorName(error not None : CUresult): -------- :py:obj:`~.CUresult`, :py:obj:`~.cudaGetErrorName` """ - cdef ccuda.CUresult cerror = error.value + cdef cydriver.CUresult cyerror = error.value cdef const char* pStr = NULL - err = ccuda.cuGetErrorName(cerror, &pStr) + err = cydriver.cuGetErrorName(cyerror, &pStr) return (CUresult(err), pStr) {{endif}} @@ -17523,7 +17523,7 @@ def cuInit(unsigned int Flags): CUresult :py:obj:`~.CUDA_SUCCESS`, :py:obj:`~.CUDA_ERROR_INVALID_VALUE`, :py:obj:`~.CUDA_ERROR_INVALID_DEVICE`, :py:obj:`~.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH`, :py:obj:`~.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE` """ - err = ccuda.cuInit(Flags) + err = cydriver.cuInit(Flags) return (CUresult(err),) {{endif}} @@ -17552,7 +17552,7 @@ def cuDriverGetVersion(): :py:obj:`~.cudaDriverGetVersion`, :py:obj:`~.cudaRuntimeGetVersion` """ cdef int driverVersion = 0 - err = ccuda.cuDriverGetVersion(&driverVersion) + err = cydriver.cuDriverGetVersion(&driverVersion) return (CUresult(err), driverVersion) {{endif}} @@ -17582,7 +17582,7 @@ def cuDeviceGet(int ordinal): :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGetLuid`, :py:obj:`~.cuDeviceTotalMem`, :py:obj:`~.cuDeviceGetExecAffinitySupport` """ cdef CUdevice device = CUdevice() - err = ccuda.cuDeviceGet(device._ptr, ordinal) + err = cydriver.cuDeviceGet(device._ptr, ordinal) return (CUresult(err), device) {{endif}} @@ -17608,7 +17608,7 @@ def cuDeviceGetCount(): :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGetLuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem`, :py:obj:`~.cuDeviceGetExecAffinitySupport`, :py:obj:`~.cudaGetDeviceCount` """ cdef int count = 0 - err = ccuda.cuDeviceGetCount(&count) + err = cydriver.cuDeviceGetCount(&count) return (CUresult(err), count) {{endif}} @@ -17640,18 +17640,18 @@ def cuDeviceGetName(int length, dev): -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGetLuid`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem`, :py:obj:`~.cuDeviceGetExecAffinitySupport`, :py:obj:`~.cudaGetDeviceProperties` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev pyname = b" " * length cdef char* name = pyname - err = ccuda.cuDeviceGetName(name, length, cdev) + err = cydriver.cuDeviceGetName(name, length, cydev) return (CUresult(err), pyname) {{endif}} @@ -17684,17 +17684,17 @@ def cuDeviceGetUuid(dev): -------- :py:obj:`~.cuDeviceGetUuid_v2` :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetLuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem`, :py:obj:`~.cuDeviceGetExecAffinitySupport`, :py:obj:`~.cudaGetDeviceProperties` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUuuid uuid = CUuuid() - err = ccuda.cuDeviceGetUuid(uuid._ptr, cdev) + err = 
cydriver.cuDeviceGetUuid(uuid._ptr, cydev) return (CUresult(err), uuid) {{endif}} @@ -17724,17 +17724,17 @@ def cuDeviceGetUuid_v2(dev): -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetLuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem`, :py:obj:`~.cudaGetDeviceProperties` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUuuid uuid = CUuuid() - err = ccuda.cuDeviceGetUuid_v2(uuid._ptr, cdev) + err = cydriver.cuDeviceGetUuid_v2(uuid._ptr, cydev) return (CUresult(err), uuid) {{endif}} @@ -17765,18 +17765,18 @@ def cuDeviceGetLuid(dev): -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem`, :py:obj:`~.cuDeviceGetExecAffinitySupport`, :py:obj:`~.cudaGetDeviceProperties` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef char luid[8] cdef unsigned int deviceNodeMask = 0 - err = ccuda.cuDeviceGetLuid(luid, &deviceNodeMask, cdev) + err = cydriver.cuDeviceGetLuid(luid, &deviceNodeMask, cydev) return (CUresult(err), luid, deviceNodeMask) {{endif}} @@ -17805,17 +17805,17 @@ def cuDeviceTotalMem(dev): -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceGetExecAffinitySupport`, :py:obj:`~.cudaMemGetInfo` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef size_t numbytes = 0 - err = ccuda.cuDeviceTotalMem(&numbytes, cdev) + err = cydriver.cuDeviceTotalMem(&numbytes, cydev) return (CUresult(err), numbytes) {{endif}} @@ -17850,18 +17850,18 @@ def cuDeviceGetTexture1DLinearMaxWidth(pformat not None : CUarray_format, unsign -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cudaMemGetInfo`, :py:obj:`~.cuDeviceTotalMem` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef size_t maxWidthInElements = 0 - cdef ccuda.CUarray_format cpformat = pformat.value - err = ccuda.cuDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cpformat, numChannels, cdev) + cdef cydriver.CUarray_format cypformat = pformat.value + err = cydriver.cuDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cypformat, numChannels, cydev) return (CUresult(err), maxWidthInElements) {{endif}} @@ -18279,18 +18279,18 @@ def cuDeviceGetAttribute(attrib not None : CUdevice_attribute, dev): -------- :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem`, :py:obj:`~.cuDeviceGetExecAffinitySupport`, :py:obj:`~.cudaDeviceGetAttribute`, :py:obj:`~.cudaGetDeviceProperties` """ 
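# The coercion prologue repeated in every binding above means the `dev`
# argument may be None, a CUdevice wrapper, or anything int()-convertible;
# outputs always ride in the returned tuple alongside the CUresult. A
# minimal sketch (per-call error checking elided), assuming one CUDA device:
from cuda.bindings import driver

err, = driver.cuInit(0)
err, dev = driver.cuDeviceGet(0)            # dev is a CUdevice wrapper
err, total = driver.cuDeviceTotalMem(dev)   # wrapper accepted directly
err, total = driver.cuDeviceTotalMem(0)     # a bare ordinal works too
err, name = driver.cuDeviceGetName(128, dev)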
- cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef int pi = 0 - cdef ccuda.CUdevice_attribute cattrib = attrib.value - err = ccuda.cuDeviceGetAttribute(&pi, cattrib, cdev) + cdef cydriver.CUdevice_attribute cyattrib = attrib.value + err = cydriver.cuDeviceGetAttribute(&pi, cyattrib, cydev) return (CUresult(err), pi) {{endif}} @@ -18377,18 +18377,18 @@ def cuDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, dev, int flags): -------- :py:obj:`~.cuImportExternalSemaphore`, :py:obj:`~.cuDestroyExternalSemaphore`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - cnvSciSyncAttrList = utils.HelperInputVoidPtr(nvSciSyncAttrList) - cdef void* cnvSciSyncAttrList_ptr = cnvSciSyncAttrList.cptr - err = ccuda.cuDeviceGetNvSciSyncAttributes(cnvSciSyncAttrList_ptr, cdev, flags) + cydev = pdev + cynvSciSyncAttrList = utils.HelperInputVoidPtr(nvSciSyncAttrList) + cdef void* cynvSciSyncAttrList_ptr = cynvSciSyncAttrList.cptr + err = cydriver.cuDeviceGetNvSciSyncAttributes(cynvSciSyncAttrList_ptr, cydev, flags) return (CUresult(err),) {{endif}} @@ -18423,25 +18423,25 @@ def cuDeviceSetMemPool(dev, pool): ----- Use :py:obj:`~.cuMemAllocFromPoolAsync` to specify asynchronous allocations from a device different than the one the stream runs on. """ - cdef ccuda.CUmemoryPool cpool + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool - cdef ccuda.CUdevice cdev + cypool = ppool + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - err = ccuda.cuDeviceSetMemPool(cdev, cpool) + cydev = pdev + err = cydriver.cuDeviceSetMemPool(cydev, cypool) return (CUresult(err),) {{endif}} @@ -18473,17 +18473,17 @@ def cuDeviceGetMemPool(dev): -------- :py:obj:`~.cuDeviceGetDefaultMemPool`, :py:obj:`~.cuMemPoolCreate`, :py:obj:`~.cuDeviceSetMemPool` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUmemoryPool pool = CUmemoryPool() - err = ccuda.cuDeviceGetMemPool(pool._ptr, cdev) + err = cydriver.cuDeviceGetMemPool(pool._ptr, cydev) return (CUresult(err), pool) {{endif}} @@ -18512,17 +18512,17 @@ def cuDeviceGetDefaultMemPool(dev): -------- :py:obj:`~.cuMemAllocAsync`, :py:obj:`~.cuMemPoolTrimTo`, :py:obj:`~.cuMemPoolGetAttribute`, :py:obj:`~.cuMemPoolSetAttribute`, :py:obj:`~.cuMemPoolSetAccess`, :py:obj:`~.cuDeviceGetMemPool`, :py:obj:`~.cuMemPoolCreate` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUmemoryPool pool_out = CUmemoryPool() - err = 
ccuda.cuDeviceGetDefaultMemPool(pool_out._ptr, cdev) + err = cydriver.cuDeviceGetDefaultMemPool(pool_out._ptr, cydev) return (CUresult(err), pool_out) {{endif}} @@ -18557,18 +18557,18 @@ def cuDeviceGetExecAffinitySupport(typename not None : CUexecAffinityType, dev): -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef int pi = 0 - cdef ccuda.CUexecAffinityType ctypename = typename.value - err = ccuda.cuDeviceGetExecAffinitySupport(&pi, ctypename, cdev) + cdef cydriver.CUexecAffinityType cytypename = typename.value + err = cydriver.cuDeviceGetExecAffinitySupport(&pi, cytypename, cydev) return (CUresult(err), pi) {{endif}} @@ -18606,9 +18606,9 @@ def cuFlushGPUDirectRDMAWrites(target not None : CUflushGPUDirectRDMAWritesTarge CUresult :py:obj:`~.CUDA_SUCCESS`, :py:obj:`~.CUDA_ERROR_DEINITIALIZED`, :py:obj:`~.CUDA_ERROR_NOT_INITIALIZED`, :py:obj:`~.CUDA_ERROR_INVALID_CONTEXT`, :py:obj:`~.CUDA_ERROR_INVALID_VALUE`, """ - cdef ccuda.CUflushGPUDirectRDMAWritesTarget ctarget = target.value - cdef ccuda.CUflushGPUDirectRDMAWritesScope cscope = scope.value - err = ccuda.cuFlushGPUDirectRDMAWrites(ctarget, cscope) + cdef cydriver.CUflushGPUDirectRDMAWritesTarget cytarget = target.value + cdef cydriver.CUflushGPUDirectRDMAWritesScope cyscope = scope.value + err = cydriver.cuFlushGPUDirectRDMAWrites(cytarget, cyscope) return (CUresult(err),) {{endif}} @@ -18676,17 +18676,17 @@ def cuDeviceGetProperties(dev): -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUdevprop prop = CUdevprop() - err = ccuda.cuDeviceGetProperties(prop._ptr, cdev) + err = cydriver.cuDeviceGetProperties(prop._ptr, cydev) return (CUresult(err), prop) {{endif}} @@ -18722,18 +18722,18 @@ def cuDeviceComputeCapability(dev): -------- :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetCount`, :py:obj:`~.cuDeviceGetName`, :py:obj:`~.cuDeviceGetUuid`, :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceTotalMem` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef int major = 0 cdef int minor = 0 - err = ccuda.cuDeviceComputeCapability(&major, &minor, cdev) + err = cydriver.cuDeviceComputeCapability(&major, &minor, cydev) return (CUresult(err), major, minor) {{endif}} @@ -18779,17 +18779,17 @@ def cuDevicePrimaryCtxRetain(dev): -------- :py:obj:`~.cuDevicePrimaryCtxRelease`, :py:obj:`~.cuDevicePrimaryCtxSetFlags`, :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, 
:py:obj:`~.cuCtxSynchronize` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUcontext pctx = CUcontext() - err = ccuda.cuDevicePrimaryCtxRetain(pctx._ptr, cdev) + err = cydriver.cuDevicePrimaryCtxRetain(pctx._ptr, cydev) return (CUresult(err), pctx) {{endif}} @@ -18826,16 +18826,16 @@ def cuDevicePrimaryCtxRelease(dev): -------- :py:obj:`~.cuDevicePrimaryCtxRetain`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - err = ccuda.cuDevicePrimaryCtxRelease(cdev) + cydev = pdev + err = cydriver.cuDevicePrimaryCtxRelease(cydev) return (CUresult(err),) {{endif}} @@ -18940,16 +18940,16 @@ def cuDevicePrimaryCtxSetFlags(dev, unsigned int flags): -------- :py:obj:`~.cuDevicePrimaryCtxRetain`, :py:obj:`~.cuDevicePrimaryCtxGetState`, :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxSetFlags`, :py:obj:`~.cudaSetDeviceFlags` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - err = ccuda.cuDevicePrimaryCtxSetFlags(cdev, flags) + cydev = pdev + err = cydriver.cuDevicePrimaryCtxSetFlags(cydev, flags) return (CUresult(err),) {{endif}} @@ -18981,18 +18981,18 @@ def cuDevicePrimaryCtxGetState(dev): -------- :py:obj:`~.cuDevicePrimaryCtxSetFlags`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxSetFlags`, :py:obj:`~.cudaGetDeviceFlags` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef unsigned int flags = 0 cdef int active = 0 - err = ccuda.cuDevicePrimaryCtxGetState(cdev, &flags, &active) + err = cydriver.cuDevicePrimaryCtxGetState(cydev, &flags, &active) return (CUresult(err), flags, active) {{endif}} @@ -19028,16 +19028,16 @@ def cuDevicePrimaryCtxReset(dev): -------- :py:obj:`~.cuDevicePrimaryCtxRetain`, :py:obj:`~.cuDevicePrimaryCtxRelease`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cudaDeviceReset` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - err = ccuda.cuDevicePrimaryCtxReset(cdev) + cydev = pdev + err = cydriver.cuDevicePrimaryCtxReset(cydev) return (CUresult(err),) {{endif}} @@ -19170,17 +19170,17 @@ def cuCtxCreate(unsigned int flags, dev): ----- In most cases it is recommended to use 
:py:obj:`~.cuDevicePrimaryCtxRetain`. """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUcontext pctx = CUcontext() - err = ccuda.cuCtxCreate(pctx._ptr, flags, cdev) + err = cydriver.cuCtxCreate(pctx._ptr, flags, cydev) return (CUresult(err), pctx) {{endif}} @@ -19323,29 +19323,29 @@ def cuCtxCreate_v3(paramsArray : Optional[Tuple[CUexecAffinityParam] | List[CUex -------- :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cuCoredumpSetAttributeGlobal`, :py:obj:`~.cuCoredumpSetAttribute`, :py:obj:`~.CUexecAffinityParam` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev paramsArray = [] if paramsArray is None else paramsArray if not all(isinstance(_x, (CUexecAffinityParam,)) for _x in paramsArray): - raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[ccuda.CUexecAffinityParam,] or List[ccuda.CUexecAffinityParam,]") + raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[cydriver.CUexecAffinityParam,] or List[cydriver.CUexecAffinityParam,]") cdef CUcontext pctx = CUcontext() - cdef ccuda.CUexecAffinityParam* cparamsArray = NULL + cdef cydriver.CUexecAffinityParam* cyparamsArray = NULL if len(paramsArray) > 0: - cparamsArray = calloc(len(paramsArray), sizeof(ccuda.CUexecAffinityParam)) - if cparamsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(ccuda.CUexecAffinityParam))) + cyparamsArray = calloc(len(paramsArray), sizeof(cydriver.CUexecAffinityParam)) + if cyparamsArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cydriver.CUexecAffinityParam))) for idx in range(len(paramsArray)): - string.memcpy(&cparamsArray[idx], (paramsArray[idx])._ptr, sizeof(ccuda.CUexecAffinityParam)) - err = ccuda.cuCtxCreate_v3(pctx._ptr, (paramsArray[0])._ptr if len(paramsArray) == 1 else cparamsArray, numParams, flags, cdev) - if cparamsArray is not NULL: - free(cparamsArray) + string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._ptr, sizeof(cydriver.CUexecAffinityParam)) + err = cydriver.cuCtxCreate_v3(pctx._ptr, (paramsArray[0])._ptr if len(paramsArray) == 1 else cyparamsArray, numParams, flags, cydev) + if cyparamsArray is not NULL: + free(cyparamsArray) return (CUresult(err), pctx) {{endif}} @@ -19513,18 +19513,18 @@ def cuCtxCreate_v4(ctxCreateParams : Optional[CUctxCreateParams], unsigned int f -------- :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCoredumpSetAttributeGlobal`, :py:obj:`~.cuCoredumpSetAttribute`, :py:obj:`~.cuCtxSynchronize` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice 
cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef CUcontext pctx = CUcontext() - cdef ccuda.CUctxCreateParams* cctxCreateParams_ptr = ctxCreateParams._ptr if ctxCreateParams != None else NULL - err = ccuda.cuCtxCreate_v4(pctx._ptr, cctxCreateParams_ptr, flags, cdev) + cdef cydriver.CUctxCreateParams* cyctxCreateParams_ptr = ctxCreateParams._ptr if ctxCreateParams != None else NULL + err = cydriver.cuCtxCreate_v4(pctx._ptr, cyctxCreateParams_ptr, flags, cydev) return (CUresult(err), pctx) {{endif}} @@ -19578,16 +19578,16 @@ def cuCtxDestroy(ctx): ----- :py:obj:`~.cuCtxDestroy()` will not destroy memory allocations by :py:obj:`~.cuMemCreate()`, :py:obj:`~.cuMemAllocAsync()` and :py:obj:`~.cuMemAllocFromPoolAsync()`. These memory allocations are not associated with any CUDA context and need to be destroyed explicitly. """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - err = ccuda.cuCtxDestroy(cctx) + cyctx = pctx + err = cydriver.cuCtxDestroy(cyctx) return (CUresult(err),) {{endif}} @@ -19619,16 +19619,16 @@ def cuCtxPushCurrent(ctx): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - err = ccuda.cuCtxPushCurrent(cctx) + cyctx = pctx + err = cydriver.cuCtxPushCurrent(cyctx) return (CUresult(err),) {{endif}} @@ -19658,7 +19658,7 @@ def cuCtxPopCurrent(): :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize` """ cdef CUcontext pctx = CUcontext() - err = ccuda.cuCtxPopCurrent(pctx._ptr) + err = cydriver.cuCtxPopCurrent(pctx._ptr) return (CUresult(err), pctx) {{endif}} @@ -19692,16 +19692,16 @@ def cuCtxSetCurrent(ctx): -------- :py:obj:`~.cuCtxGetCurrent`, :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cudaSetDevice` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - err = ccuda.cuCtxSetCurrent(cctx) + cyctx = pctx + err = cydriver.cuCtxSetCurrent(cyctx) return (CUresult(err),) {{endif}} @@ -19727,7 +19727,7 @@ def cuCtxGetCurrent(): :py:obj:`~.cuCtxSetCurrent`, :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cudaGetDevice` """ cdef CUcontext pctx = CUcontext() - err = ccuda.cuCtxGetCurrent(pctx._ptr) + err = cydriver.cuCtxGetCurrent(pctx._ptr) return (CUresult(err), pctx) {{endif}} @@ -19751,7 +19751,7 @@ def cuCtxGetDevice(): :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, 
:py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cudaGetDevice` """ cdef CUdevice device = CUdevice() - err = ccuda.cuCtxGetDevice(device._ptr) + err = cydriver.cuCtxGetDevice(device._ptr) return (CUresult(err), device) {{endif}} @@ -19776,7 +19776,7 @@ def cuCtxGetFlags(): :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetCurrent`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxGetSharedMemConfig`, :py:obj:`~.cuCtxGetStreamPriorityRange`, :py:obj:`~.cuCtxSetFlags`, :py:obj:`~.cudaGetDeviceFlags` """ cdef unsigned int flags = 0 - err = ccuda.cuCtxGetFlags(&flags) + err = cydriver.cuCtxGetFlags(&flags) return (CUresult(err), flags) {{endif}} @@ -19803,7 +19803,7 @@ def cuCtxSetFlags(unsigned int flags): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetCurrent`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxGetSharedMemConfig`, :py:obj:`~.cuCtxGetStreamPriorityRange`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cudaGetDeviceFlags`, :py:obj:`~.cuDevicePrimaryCtxSetFlags`, """ - err = ccuda.cuCtxSetFlags(flags) + err = cydriver.cuCtxSetFlags(flags) return (CUresult(err),) {{endif}} @@ -19834,17 +19834,17 @@ def cuCtxGetId(ctx): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPushCurrent` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx + cyctx = pctx cdef unsigned long long ctxId = 0 - err = ccuda.cuCtxGetId(cctx, &ctxId) + err = cydriver.cuCtxGetId(cyctx, &ctxId) return (CUresult(err), ctxId) {{endif}} @@ -19871,7 +19871,7 @@ def cuCtxSynchronize(): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cudaDeviceSynchronize` """ - err = ccuda.cuCtxSynchronize() + err = cydriver.cuCtxSynchronize() return (CUresult(err),) {{endif}} @@ -19976,8 +19976,8 @@ def cuCtxSetLimit(limit not None : CUlimit, size_t value): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cudaDeviceSetLimit` """ - cdef ccuda.CUlimit climit = limit.value - err = ccuda.cuCtxSetLimit(climit, value) + cdef cydriver.CUlimit cylimit = limit.value + err = cydriver.cuCtxSetLimit(cylimit, value) return (CUresult(err),) {{endif}} @@ -20032,8 +20032,8 @@ def cuCtxGetLimit(limit not None : CUlimit): :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, 
:py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cudaDeviceGetLimit` """ cdef size_t pvalue = 0 - cdef ccuda.CUlimit climit = limit.value - err = ccuda.cuCtxGetLimit(&pvalue, climit) + cdef cydriver.CUlimit cylimit = limit.value + err = cydriver.cuCtxGetLimit(&pvalue, cylimit) return (CUresult(err), pvalue) {{endif}} @@ -20077,8 +20077,8 @@ def cuCtxGetCacheConfig(): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig` """ - cdef ccuda.CUfunc_cache pconfig - err = ccuda.cuCtxGetCacheConfig(&pconfig) + cdef cydriver.CUfunc_cache pconfig + err = cydriver.cuCtxGetCacheConfig(&pconfig) return (CUresult(err), CUfunc_cache(pconfig)) {{endif}} @@ -20134,8 +20134,8 @@ def cuCtxSetCacheConfig(config not None : CUfunc_cache): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cuKernelSetCacheConfig` """ - cdef ccuda.CUfunc_cache cconfig = config.value - err = ccuda.cuCtxSetCacheConfig(cconfig) + cdef cydriver.CUfunc_cache cyconfig = config.value + err = cydriver.cuCtxSetCacheConfig(cyconfig) return (CUresult(err),) {{endif}} @@ -20171,17 +20171,17 @@ def cuCtxGetApiVersion(ctx): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx + cyctx = pctx cdef unsigned int version = 0 - err = ccuda.cuCtxGetApiVersion(cctx, &version) + err = cydriver.cuCtxGetApiVersion(cyctx, &version) return (CUresult(err), version) {{endif}} @@ -20224,7 +20224,7 @@ def cuCtxGetStreamPriorityRange(): """ cdef int leastPriority = 0 cdef int greatestPriority = 0 - err = ccuda.cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority) + err = cydriver.cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority) return (CUresult(err), leastPriority, greatestPriority) {{endif}} @@ -20246,7 +20246,7 @@ def cuCtxResetPersistingL2Cache(): -------- :py:obj:`~.CUaccessPolicyWindow` """ - err = ccuda.cuCtxResetPersistingL2Cache() + err = cydriver.cuCtxResetPersistingL2Cache() return (CUresult(err),) {{endif}} @@ -20279,8 +20279,8 @@ def cuCtxGetExecAffinity(typename not None : CUexecAffinityType): :py:obj:`~.CUexecAffinityParam` """ cdef CUexecAffinityParam pExecAffinity = CUexecAffinityParam() - cdef ccuda.CUexecAffinityType ctypename = typename.value - err = ccuda.cuCtxGetExecAffinity(pExecAffinity._ptr, ctypename) + cdef cydriver.CUexecAffinityType 
cytypename = typename.value + err = cydriver.cuCtxGetExecAffinity(pExecAffinity._ptr, cytypename) return (CUresult(err), pExecAffinity) {{endif}} @@ -20323,25 +20323,25 @@ def cuCtxRecordEvent(hCtx, hEvent): ----- The API will return :py:obj:`~.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED` if the specified context `hCtx` has a stream in the capture mode. In such a case, the call will invalidate all the conflicting captures. """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - cdef ccuda.CUcontext chCtx + cyhEvent = phEvent + cdef cydriver.CUcontext cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUcontext,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUcontext(hCtx)) - chCtx = phCtx - err = ccuda.cuCtxRecordEvent(chCtx, chEvent) + cyhCtx = phCtx + err = cydriver.cuCtxRecordEvent(cyhCtx, cyhEvent) return (CUresult(err),) {{endif}} @@ -20383,25 +20383,25 @@ def cuCtxWaitEvent(hCtx, hEvent): The API will return :py:obj:`~.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED` and invalidate the capture if the specified event `hEvent` is part of an ongoing capture sequence or if the specified context `hCtx` has a stream in the capture mode. """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - cdef ccuda.CUcontext chCtx + cyhEvent = phEvent + cdef cydriver.CUcontext cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUcontext,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUcontext(hCtx)) - chCtx = phCtx - err = ccuda.cuCtxWaitEvent(chCtx, chEvent) + cyhCtx = phCtx + err = cydriver.cuCtxWaitEvent(cyhCtx, cyhEvent) return (CUresult(err),) {{endif}} @@ -20439,7 +20439,7 @@ def cuCtxAttach(unsigned int flags): :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxDetach`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize` """ cdef CUcontext pctx = CUcontext() - err = ccuda.cuCtxAttach(pctx._ptr, flags) + err = cydriver.cuCtxAttach(pctx._ptr, flags) return (CUresult(err), pctx) {{endif}} @@ -20472,16 +20472,16 @@ def cuCtxDetach(ctx): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - err = ccuda.cuCtxDetach(cctx) + cyctx = pctx + err = cydriver.cuCtxDetach(cyctx) return (CUresult(err),) {{endif}} @@ -20520,8 +20520,8 @@ def cuCtxGetSharedMemConfig(): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, 
:py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cuCtxGetSharedMemConfig`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cudaDeviceGetSharedMemConfig` """ - cdef ccuda.CUsharedconfig pConfig - err = ccuda.cuCtxGetSharedMemConfig(&pConfig) + cdef cydriver.CUsharedconfig pConfig + err = cydriver.cuCtxGetSharedMemConfig(&pConfig) return (CUresult(err), CUsharedconfig(pConfig)) {{endif}} @@ -20574,8 +20574,8 @@ def cuCtxSetSharedMemConfig(config not None : CUsharedconfig): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxDestroy`, :py:obj:`~.cuCtxGetApiVersion`, :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxGetDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuCtxGetLimit`, :py:obj:`~.cuCtxPopCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuCtxSetLimit`, :py:obj:`~.cuCtxSynchronize`, :py:obj:`~.cuCtxGetSharedMemConfig`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cudaDeviceSetSharedMemConfig` """ - cdef ccuda.CUsharedconfig cconfig = config.value - err = ccuda.cuCtxSetSharedMemConfig(cconfig) + cdef cydriver.CUsharedconfig cyconfig = config.value + err = cydriver.cuCtxSetSharedMemConfig(cyconfig) return (CUresult(err),) {{endif}} @@ -20611,7 +20611,7 @@ def cuModuleLoad(char* fname): :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetGlobal`, :py:obj:`~.cuModuleGetTexRef`, :py:obj:`~.cuModuleLoadData`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleLoadFatBinary`, :py:obj:`~.cuModuleUnload` """ cdef CUmodule module = CUmodule() - err = ccuda.cuModuleLoad(module._ptr, fname) + err = cydriver.cuModuleLoad(module._ptr, fname) return (CUresult(err), module) {{endif}} @@ -20643,9 +20643,9 @@ def cuModuleLoadData(image): :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetGlobal`, :py:obj:`~.cuModuleGetTexRef`, :py:obj:`~.cuModuleLoad`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleLoadFatBinary`, :py:obj:`~.cuModuleUnload` """ cdef CUmodule module = CUmodule() - cimage = utils.HelperInputVoidPtr(image) - cdef void* cimage_ptr = cimage.cptr - err = ccuda.cuModuleLoadData(module._ptr, cimage_ptr) + cyimage = utils.HelperInputVoidPtr(image) + cdef void* cyimage_ptr = cyimage.cptr + err = cydriver.cuModuleLoadData(module._ptr, cyimage_ptr) return (CUresult(err), module) {{endif}} @@ -20685,16 +20685,16 @@ def cuModuleLoadDataEx(image, unsigned int numOptions, options : Optional[Tuple[ optionValues = [] if optionValues is None else optionValues options = [] if options is None else options if not all(isinstance(_x, (CUjit_option)) for _x in options): - raise TypeError("Argument 'options' is not instance of type (expected Tuple[ccuda.CUjit_option] or List[ccuda.CUjit_option]") + raise TypeError("Argument 'options' is not instance of type (expected Tuple[cydriver.CUjit_option] or List[cydriver.CUjit_option]") cdef CUmodule module = CUmodule() - cimage = utils.HelperInputVoidPtr(image) - cdef void* cimage_ptr = cimage.cptr + cyimage = utils.HelperInputVoidPtr(image) + cdef void* cyimage_ptr = cyimage.cptr if numOptions > len(options): raise RuntimeError("List is too small: " + str(len(options)) + " < " + str(numOptions)) if numOptions > len(optionValues): raise RuntimeError("List is too small: " + str(len(optionValues)) + " < " + str(numOptions)) - cdef vector[ccuda.CUjit_option] coptions = [pyoptions.value for pyoptions in (options)] + cdef 
vector[cydriver.CUjit_option] cyoptions = [pyoptions.value for pyoptions in (options)] pylist = [utils.HelperCUjit_option(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(options, optionValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperoptionValues = utils.InputVoidPtrPtrHelper(pylist) - err = ccuda.cuModuleLoadDataEx(module._ptr, cimage_ptr, numOptions, coptions.data(), voidStarHelperoptionValues.cptr) + err = cydriver.cuModuleLoadDataEx(module._ptr, cyimage_ptr, numOptions, cyoptions.data(), voidStarHelperoptionValues.cptr) return (CUresult(err), module) {{endif}} @@ -20732,9 +20732,9 @@ def cuModuleLoadFatBinary(fatCubin): :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetGlobal`, :py:obj:`~.cuModuleGetTexRef`, :py:obj:`~.cuModuleLoad`, :py:obj:`~.cuModuleLoadData`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleUnload` """ cdef CUmodule module = CUmodule() - cfatCubin = utils.HelperInputVoidPtr(fatCubin) - cdef void* cfatCubin_ptr = cfatCubin.cptr - err = ccuda.cuModuleLoadFatBinary(module._ptr, cfatCubin_ptr) + cyfatCubin = utils.HelperInputVoidPtr(fatCubin) + cdef void* cyfatCubin_ptr = cyfatCubin.cptr + err = cydriver.cuModuleLoadFatBinary(module._ptr, cyfatCubin_ptr) return (CUresult(err), module) {{endif}} @@ -20763,16 +20763,16 @@ def cuModuleUnload(hmod): -------- :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetGlobal`, :py:obj:`~.cuModuleGetTexRef`, :py:obj:`~.cuModuleLoad`, :py:obj:`~.cuModuleLoadData`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleLoadFatBinary` """ - cdef ccuda.CUmodule chmod + cdef cydriver.CUmodule cyhmod if hmod is None: - chmod = 0 + cyhmod = 0 elif isinstance(hmod, (CUmodule,)): phmod = int(hmod) - chmod = phmod + cyhmod = phmod else: phmod = int(CUmodule(hmod)) - chmod = phmod - err = ccuda.cuModuleUnload(chmod) + cyhmod = phmod + err = cydriver.cuModuleUnload(cyhmod) return (CUresult(err),) {{endif}} @@ -20796,8 +20796,8 @@ def cuModuleGetLoadingMode(): -------- :py:obj:`~.cuModuleLoad`, """ - cdef ccuda.CUmoduleLoadingMode mode - err = ccuda.cuModuleGetLoadingMode(&mode) + cdef cydriver.CUmoduleLoadingMode mode + err = cydriver.cuModuleGetLoadingMode(&mode) return (CUresult(err), CUmoduleLoadingMode(mode)) {{endif}} @@ -20830,17 +20830,17 @@ def cuModuleGetFunction(hmod, char* name): -------- :py:obj:`~.cuModuleGetGlobal`, :py:obj:`~.cuModuleGetTexRef`, :py:obj:`~.cuModuleLoad`, :py:obj:`~.cuModuleLoadData`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleLoadFatBinary`, :py:obj:`~.cuModuleUnload` """ - cdef ccuda.CUmodule chmod + cdef cydriver.CUmodule cyhmod if hmod is None: - chmod = 0 + cyhmod = 0 elif isinstance(hmod, (CUmodule,)): phmod = int(hmod) - chmod = phmod + cyhmod = phmod else: phmod = int(CUmodule(hmod)) - chmod = phmod + cyhmod = phmod cdef CUfunction hfunc = CUfunction() - err = ccuda.cuModuleGetFunction(hfunc._ptr, chmod, name) + err = cydriver.cuModuleGetFunction(hfunc._ptr, cyhmod, name) return (CUresult(err), hfunc) {{endif}} @@ -20864,17 +20864,17 @@ def cuModuleGetFunctionCount(mod): count : unsigned int Number of functions found within the module """ - cdef ccuda.CUmodule cmod + cdef cydriver.CUmodule cymod if mod is None: - cmod = 0 + cymod = 0 elif isinstance(mod, (CUmodule,)): pmod = int(mod) - cmod = pmod + cymod = pmod else: pmod = int(CUmodule(mod)) - cmod = pmod + cymod = pmod cdef unsigned int count = 0 - err = ccuda.cuModuleGetFunctionCount(&count, cmod) + err = cydriver.cuModuleGetFunctionCount(&count, cymod) return (CUresult(err), count) {{endif}} @@ -20912,26 +20912,26 
@@ def cuModuleEnumerateFunctions(unsigned int numFunctions, mod): -------- :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetFunctionCount`, :py:obj:`~.cuFuncIsLoaded`, :py:obj:`~.cuFuncLoad` """ - cdef ccuda.CUmodule cmod + cdef cydriver.CUmodule cymod if mod is None: - cmod = 0 + cymod = 0 elif isinstance(mod, (CUmodule,)): pmod = int(mod) - cmod = pmod + cymod = pmod else: pmod = int(CUmodule(mod)) - cmod = pmod - cdef ccuda.CUfunction* cfunctions = NULL + cymod = pmod + cdef cydriver.CUfunction* cyfunctions = NULL pyfunctions = [] if numFunctions != 0: - cfunctions = calloc(numFunctions, sizeof(ccuda.CUfunction)) - if cfunctions is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(numFunctions) + 'x' + str(sizeof(ccuda.CUfunction))) - err = ccuda.cuModuleEnumerateFunctions(cfunctions, numFunctions, cmod) + cyfunctions = calloc(numFunctions, sizeof(cydriver.CUfunction)) + if cyfunctions is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(numFunctions) + 'x' + str(sizeof(cydriver.CUfunction))) + err = cydriver.cuModuleEnumerateFunctions(cyfunctions, numFunctions, cymod) if CUresult(err) == CUresult(0): - pyfunctions = [CUfunction(init_value=cfunctions[idx]) for idx in range(numFunctions)] - if cfunctions is not NULL: - free(cfunctions) + pyfunctions = [CUfunction(init_value=cyfunctions[idx]) for idx in range(numFunctions)] + if cyfunctions is not NULL: + free(cyfunctions) return (CUresult(err), pyfunctions) {{endif}} @@ -20967,18 +20967,18 @@ def cuModuleGetGlobal(hmod, char* name): -------- :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetTexRef`, :py:obj:`~.cuModuleLoad`, :py:obj:`~.cuModuleLoadData`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleLoadFatBinary`, :py:obj:`~.cuModuleUnload`, :py:obj:`~.cudaGetSymbolAddress`, :py:obj:`~.cudaGetSymbolSize` """ - cdef ccuda.CUmodule chmod + cdef cydriver.CUmodule cyhmod if hmod is None: - chmod = 0 + cyhmod = 0 elif isinstance(hmod, (CUmodule,)): phmod = int(hmod) - chmod = phmod + cyhmod = phmod else: phmod = int(CUmodule(hmod)) - chmod = phmod + cyhmod = phmod cdef CUdeviceptr dptr = CUdeviceptr() cdef size_t numbytes = 0 - err = ccuda.cuModuleGetGlobal(dptr._ptr, &numbytes, chmod, name) + err = cydriver.cuModuleGetGlobal(dptr._ptr, &numbytes, cyhmod, name) return (CUresult(err), dptr, numbytes) {{endif}} @@ -21038,14 +21038,14 @@ def cuLinkCreate(unsigned int numOptions, options : Optional[Tuple[CUjit_option] optionValues = [] if optionValues is None else optionValues options = [] if options is None else options if not all(isinstance(_x, (CUjit_option)) for _x in options): - raise TypeError("Argument 'options' is not instance of type (expected Tuple[ccuda.CUjit_option] or List[ccuda.CUjit_option]") + raise TypeError("Argument 'options' is not instance of type (expected Tuple[cydriver.CUjit_option] or List[cydriver.CUjit_option]") if numOptions > len(options): raise RuntimeError("List is too small: " + str(len(options)) + " < " + str(numOptions)) if numOptions > len(optionValues): raise RuntimeError("List is too small: " + str(len(optionValues)) + " < " + str(numOptions)) - cdef vector[ccuda.CUjit_option] coptions = [pyoptions.value for pyoptions in (options)] + cdef vector[cydriver.CUjit_option] cyoptions = [pyoptions.value for pyoptions in (options)] pylist = [utils.HelperCUjit_option(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(options, optionValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperoptionValues = utils.InputVoidPtrPtrHelper(pylist) 
cdef CUlinkState stateOut = CUlinkState() - err = ccuda.cuLinkCreate(numOptions, coptions.data(), voidStarHelperoptionValues.cptr, stateOut._ptr) + err = cydriver.cuLinkCreate(numOptions, cyoptions.data(), voidStarHelperoptionValues.cptr, stateOut._ptr) stateOut._keepalive.append(voidStarHelperoptionValues) for option in pylist: stateOut._keepalive.append(option) @@ -21103,25 +21103,25 @@ def cuLinkAddData(state, typename not None : CUjitInputType, data, size_t size, optionValues = [] if optionValues is None else optionValues options = [] if options is None else options if not all(isinstance(_x, (CUjit_option)) for _x in options): - raise TypeError("Argument 'options' is not instance of type (expected Tuple[ccuda.CUjit_option] or List[ccuda.CUjit_option]") - cdef ccuda.CUlinkState cstate + raise TypeError("Argument 'options' is not instance of type (expected Tuple[cydriver.CUjit_option] or List[cydriver.CUjit_option]") + cdef cydriver.CUlinkState cystate if state is None: - cstate = 0 + cystate = 0 elif isinstance(state, (CUlinkState,)): pstate = int(state) - cstate = pstate + cystate = pstate else: pstate = int(CUlinkState(state)) - cstate = pstate - cdef ccuda.CUjitInputType ctypename = typename.value - cdata = utils.HelperInputVoidPtr(data) - cdef void* cdata_ptr = cdata.cptr + cystate = pstate + cdef cydriver.CUjitInputType cytypename = typename.value + cydata = utils.HelperInputVoidPtr(data) + cdef void* cydata_ptr = cydata.cptr if numOptions > len(options): raise RuntimeError("List is too small: " + str(len(options)) + " < " + str(numOptions)) if numOptions > len(optionValues): raise RuntimeError("List is too small: " + str(len(optionValues)) + " < " + str(numOptions)) - cdef vector[ccuda.CUjit_option] coptions = [pyoptions.value for pyoptions in (options)] + cdef vector[cydriver.CUjit_option] cyoptions = [pyoptions.value for pyoptions in (options)] pylist = [utils.HelperCUjit_option(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(options, optionValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperoptionValues = utils.InputVoidPtrPtrHelper(pylist) - err = ccuda.cuLinkAddData(cstate, ctypename, cdata_ptr, size, name, numOptions, coptions.data(), voidStarHelperoptionValues.cptr) + err = cydriver.cuLinkAddData(cystate, cytypename, cydata_ptr, size, name, numOptions, cyoptions.data(), voidStarHelperoptionValues.cptr) return (CUresult(err),) {{endif}} @@ -21174,23 +21174,23 @@ def cuLinkAddFile(state, typename not None : CUjitInputType, char* path, unsigne optionValues = [] if optionValues is None else optionValues options = [] if options is None else options if not all(isinstance(_x, (CUjit_option)) for _x in options): - raise TypeError("Argument 'options' is not instance of type (expected Tuple[ccuda.CUjit_option] or List[ccuda.CUjit_option]") - cdef ccuda.CUlinkState cstate + raise TypeError("Argument 'options' is not instance of type (expected Tuple[cydriver.CUjit_option] or List[cydriver.CUjit_option]") + cdef cydriver.CUlinkState cystate if state is None: - cstate = 0 + cystate = 0 elif isinstance(state, (CUlinkState,)): pstate = int(state) - cstate = pstate + cystate = pstate else: pstate = int(CUlinkState(state)) - cstate = pstate - cdef ccuda.CUjitInputType ctypename = typename.value + cystate = pstate + cdef cydriver.CUjitInputType cytypename = typename.value if numOptions > len(options): raise RuntimeError("List is too small: " + str(len(options)) + " < " + str(numOptions)) if numOptions > len(optionValues): raise RuntimeError("List is too small: " + 
str(len(optionValues)) + " < " + str(numOptions)) - cdef vector[ccuda.CUjit_option] coptions = [pyoptions.value for pyoptions in (options)] + cdef vector[cydriver.CUjit_option] cyoptions = [pyoptions.value for pyoptions in (options)] pylist = [utils.HelperCUjit_option(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(options, optionValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperoptionValues = utils.InputVoidPtrPtrHelper(pylist) - err = ccuda.cuLinkAddFile(cstate, ctypename, path, numOptions, coptions.data(), voidStarHelperoptionValues.cptr) + err = cydriver.cuLinkAddFile(cystate, cytypename, path, numOptions, cyoptions.data(), voidStarHelperoptionValues.cptr) return (CUresult(err),) {{endif}} @@ -21224,18 +21224,18 @@ def cuLinkComplete(state): -------- :py:obj:`~.cuLinkCreate`, :py:obj:`~.cuLinkAddData`, :py:obj:`~.cuLinkAddFile`, :py:obj:`~.cuLinkDestroy`, :py:obj:`~.cuModuleLoadData` """ - cdef ccuda.CUlinkState cstate + cdef cydriver.CUlinkState cystate if state is None: - cstate = 0 + cystate = 0 elif isinstance(state, (CUlinkState,)): pstate = int(state) - cstate = pstate + cystate = pstate else: pstate = int(CUlinkState(state)) - cstate = pstate + cystate = pstate cdef void_ptr cubinOut = 0 cdef size_t sizeOut = 0 - err = ccuda.cuLinkComplete(cstate, &cubinOut, &sizeOut) + err = cydriver.cuLinkComplete(cystate, &cubinOut, &sizeOut) return (CUresult(err), cubinOut, sizeOut) {{endif}} @@ -21259,16 +21259,16 @@ def cuLinkDestroy(state): -------- :py:obj:`~.cuLinkCreate` """ - cdef ccuda.CUlinkState cstate + cdef cydriver.CUlinkState cystate if state is None: - cstate = 0 + cystate = 0 elif isinstance(state, (CUlinkState,)): pstate = int(state) - cstate = pstate + cystate = pstate else: pstate = int(CUlinkState(state)) - cstate = pstate - err = ccuda.cuLinkDestroy(cstate) + cystate = pstate + err = cydriver.cuLinkDestroy(cystate) return (CUresult(err),) {{endif}} @@ -21305,17 +21305,17 @@ def cuModuleGetTexRef(hmod, char* name): -------- :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetGlobal`, :py:obj:`~.cuModuleGetSurfRef`, :py:obj:`~.cuModuleLoad`, :py:obj:`~.cuModuleLoadData`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleLoadFatBinary`, :py:obj:`~.cuModuleUnload` """ - cdef ccuda.CUmodule chmod + cdef cydriver.CUmodule cyhmod if hmod is None: - chmod = 0 + cyhmod = 0 elif isinstance(hmod, (CUmodule,)): phmod = int(hmod) - chmod = phmod + cyhmod = phmod else: phmod = int(CUmodule(hmod)) - chmod = phmod + cyhmod = phmod cdef CUtexref pTexRef = CUtexref() - err = ccuda.cuModuleGetTexRef(pTexRef._ptr, chmod, name) + err = cydriver.cuModuleGetTexRef(pTexRef._ptr, cyhmod, name) return (CUresult(err), pTexRef) {{endif}} @@ -21350,17 +21350,17 @@ def cuModuleGetSurfRef(hmod, char* name): -------- :py:obj:`~.cuModuleGetFunction`, :py:obj:`~.cuModuleGetGlobal`, :py:obj:`~.cuModuleGetTexRef`, :py:obj:`~.cuModuleLoad`, :py:obj:`~.cuModuleLoadData`, :py:obj:`~.cuModuleLoadDataEx`, :py:obj:`~.cuModuleLoadFatBinary`, :py:obj:`~.cuModuleUnload` """ - cdef ccuda.CUmodule chmod + cdef cydriver.CUmodule cyhmod if hmod is None: - chmod = 0 + cyhmod = 0 elif isinstance(hmod, (CUmodule,)): phmod = int(hmod) - chmod = phmod + cyhmod = phmod else: phmod = int(CUmodule(hmod)) - chmod = phmod + cyhmod = phmod cdef CUsurfref pSurfRef = CUsurfref() - err = ccuda.cuModuleGetSurfRef(pSurfRef._ptr, chmod, name) + err = cydriver.cuModuleGetSurfRef(pSurfRef._ptr, cyhmod, name) return (CUresult(err), pSurfRef) {{endif}} @@ -21435,25 +21435,25 @@ def cuLibraryLoadData(code, 
jitOptions : Optional[Tuple[CUjit_option] | List[CUj libraryOptionValues = [] if libraryOptionValues is None else libraryOptionValues libraryOptions = [] if libraryOptions is None else libraryOptions if not all(isinstance(_x, (CUlibraryOption)) for _x in libraryOptions): - raise TypeError("Argument 'libraryOptions' is not instance of type (expected Tuple[ccuda.CUlibraryOption] or List[ccuda.CUlibraryOption]") + raise TypeError("Argument 'libraryOptions' is not instance of type (expected Tuple[cydriver.CUlibraryOption] or List[cydriver.CUlibraryOption]") jitOptionsValues = [] if jitOptionsValues is None else jitOptionsValues jitOptions = [] if jitOptions is None else jitOptions if not all(isinstance(_x, (CUjit_option)) for _x in jitOptions): - raise TypeError("Argument 'jitOptions' is not instance of type (expected Tuple[ccuda.CUjit_option] or List[ccuda.CUjit_option]") + raise TypeError("Argument 'jitOptions' is not instance of type (expected Tuple[cydriver.CUjit_option] or List[cydriver.CUjit_option]") cdef CUlibrary library = CUlibrary() - ccode = utils.HelperInputVoidPtr(code) - cdef void* ccode_ptr = ccode.cptr - cdef vector[ccuda.CUjit_option] cjitOptions = [pyjitOptions.value for pyjitOptions in (jitOptions)] + cycode = utils.HelperInputVoidPtr(code) + cdef void* cycode_ptr = cycode.cptr + cdef vector[cydriver.CUjit_option] cyjitOptions = [pyjitOptions.value for pyjitOptions in (jitOptions)] pylist = [utils.HelperCUjit_option(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(jitOptions, jitOptionsValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperjitOptionsValues = utils.InputVoidPtrPtrHelper(pylist) if numJitOptions > len(jitOptions): raise RuntimeError("List is too small: " + str(len(jitOptions)) + " < " + str(numJitOptions)) if numJitOptions > len(jitOptionsValues): raise RuntimeError("List is too small: " + str(len(jitOptionsValues)) + " < " + str(numJitOptions)) - cdef vector[ccuda.CUlibraryOption] clibraryOptions = [pylibraryOptions.value for pylibraryOptions in (libraryOptions)] + cdef vector[cydriver.CUlibraryOption] cylibraryOptions = [pylibraryOptions.value for pylibraryOptions in (libraryOptions)] pylist = [utils.HelperCUlibraryOption(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(libraryOptions, libraryOptionValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperlibraryOptionValues = utils.InputVoidPtrPtrHelper(pylist) if numLibraryOptions > len(libraryOptions): raise RuntimeError("List is too small: " + str(len(libraryOptions)) + " < " + str(numLibraryOptions)) if numLibraryOptions > len(libraryOptionValues): raise RuntimeError("List is too small: " + str(len(libraryOptionValues)) + " < " + str(numLibraryOptions)) - err = ccuda.cuLibraryLoadData(library._ptr, ccode_ptr, cjitOptions.data(), voidStarHelperjitOptionsValues.cptr, numJitOptions, clibraryOptions.data(), voidStarHelperlibraryOptionValues.cptr, numLibraryOptions) + err = cydriver.cuLibraryLoadData(library._ptr, cycode_ptr, cyjitOptions.data(), voidStarHelperjitOptionsValues.cptr, numJitOptions, cylibraryOptions.data(), voidStarHelperlibraryOptionValues.cptr, numLibraryOptions) return (CUresult(err), library) {{endif}} @@ -21529,23 +21529,23 @@ def cuLibraryLoadFromFile(char* fileName, jitOptions : Optional[Tuple[CUjit_opti libraryOptionValues = [] if libraryOptionValues is None else libraryOptionValues libraryOptions = [] if libraryOptions is None else libraryOptions if not all(isinstance(_x, (CUlibraryOption)) for _x in libraryOptions): - raise TypeError("Argument 
'libraryOptions' is not instance of type (expected Tuple[ccuda.CUlibraryOption] or List[ccuda.CUlibraryOption]") + raise TypeError("Argument 'libraryOptions' is not instance of type (expected Tuple[cydriver.CUlibraryOption] or List[cydriver.CUlibraryOption]") jitOptionsValues = [] if jitOptionsValues is None else jitOptionsValues jitOptions = [] if jitOptions is None else jitOptions if not all(isinstance(_x, (CUjit_option)) for _x in jitOptions): - raise TypeError("Argument 'jitOptions' is not instance of type (expected Tuple[ccuda.CUjit_option] or List[ccuda.CUjit_option]") + raise TypeError("Argument 'jitOptions' is not instance of type (expected Tuple[cydriver.CUjit_option] or List[cydriver.CUjit_option]") cdef CUlibrary library = CUlibrary() - cdef vector[ccuda.CUjit_option] cjitOptions = [pyjitOptions.value for pyjitOptions in (jitOptions)] + cdef vector[cydriver.CUjit_option] cyjitOptions = [pyjitOptions.value for pyjitOptions in (jitOptions)] pylist = [utils.HelperCUjit_option(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(jitOptions, jitOptionsValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperjitOptionsValues = utils.InputVoidPtrPtrHelper(pylist) if numJitOptions > len(jitOptions): raise RuntimeError("List is too small: " + str(len(jitOptions)) + " < " + str(numJitOptions)) if numJitOptions > len(jitOptionsValues): raise RuntimeError("List is too small: " + str(len(jitOptionsValues)) + " < " + str(numJitOptions)) - cdef vector[ccuda.CUlibraryOption] clibraryOptions = [pylibraryOptions.value for pylibraryOptions in (libraryOptions)] + cdef vector[cydriver.CUlibraryOption] cylibraryOptions = [pylibraryOptions.value for pylibraryOptions in (libraryOptions)] pylist = [utils.HelperCUlibraryOption(pyoptions, pyoptionValues) for pyoptions, pyoptionValues in zip(libraryOptions, libraryOptionValues)] cdef utils.InputVoidPtrPtrHelper voidStarHelperlibraryOptionValues = utils.InputVoidPtrPtrHelper(pylist) if numLibraryOptions > len(libraryOptions): raise RuntimeError("List is too small: " + str(len(libraryOptions)) + " < " + str(numLibraryOptions)) if numLibraryOptions > len(libraryOptionValues): raise RuntimeError("List is too small: " + str(len(libraryOptionValues)) + " < " + str(numLibraryOptions)) - err = ccuda.cuLibraryLoadFromFile(library._ptr, fileName, cjitOptions.data(), voidStarHelperjitOptionsValues.cptr, numJitOptions, clibraryOptions.data(), voidStarHelperlibraryOptionValues.cptr, numLibraryOptions) + err = cydriver.cuLibraryLoadFromFile(library._ptr, fileName, cyjitOptions.data(), voidStarHelperjitOptionsValues.cptr, numJitOptions, cylibraryOptions.data(), voidStarHelperlibraryOptionValues.cptr, numLibraryOptions) return (CUresult(err), library) {{endif}} @@ -21571,16 +21571,16 @@ def cuLibraryUnload(library): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuModuleUnload` """ - cdef ccuda.CUlibrary clibrary + cdef cydriver.CUlibrary cylibrary if library is None: - clibrary = 0 + cylibrary = 0 elif isinstance(library, (CUlibrary,)): plibrary = int(library) - clibrary = plibrary + cylibrary = plibrary else: plibrary = int(CUlibrary(library)) - clibrary = plibrary - err = ccuda.cuLibraryUnload(clibrary) + cylibrary = plibrary + err = cydriver.cuLibraryUnload(cylibrary) return (CUresult(err),) {{endif}} @@ -21612,17 +21612,17 @@ def cuLibraryGetKernel(library, char* name): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuLibraryUnload`, :py:obj:`~.cuKernelGetFunction`, 
:py:obj:`~.cuLibraryGetModule`, :py:obj:`~.cuModuleGetFunction` """ - cdef ccuda.CUlibrary clibrary + cdef cydriver.CUlibrary cylibrary if library is None: - clibrary = 0 + cylibrary = 0 elif isinstance(library, (CUlibrary,)): plibrary = int(library) - clibrary = plibrary + cylibrary = plibrary else: plibrary = int(CUlibrary(library)) - clibrary = plibrary + cylibrary = plibrary cdef CUkernel pKernel = CUkernel() - err = ccuda.cuLibraryGetKernel(pKernel._ptr, clibrary, name) + err = cydriver.cuLibraryGetKernel(pKernel._ptr, cylibrary, name) return (CUresult(err), pKernel) {{endif}} @@ -21646,17 +21646,17 @@ def cuLibraryGetKernelCount(lib): count : unsigned int Number of kernels found within the library """ - cdef ccuda.CUlibrary clib + cdef cydriver.CUlibrary cylib if lib is None: - clib = 0 + cylib = 0 elif isinstance(lib, (CUlibrary,)): plib = int(lib) - clib = plib + cylib = plib else: plib = int(CUlibrary(lib)) - clib = plib + cylib = plib cdef unsigned int count = 0 - err = ccuda.cuLibraryGetKernelCount(&count, clib) + err = cydriver.cuLibraryGetKernelCount(&count, cylib) return (CUresult(err), count) {{endif}} @@ -21688,26 +21688,26 @@ def cuLibraryEnumerateKernels(unsigned int numKernels, lib): -------- :py:obj:`~.cuLibraryGetKernelCount` """ - cdef ccuda.CUlibrary clib + cdef cydriver.CUlibrary cylib if lib is None: - clib = 0 + cylib = 0 elif isinstance(lib, (CUlibrary,)): plib = int(lib) - clib = plib + cylib = plib else: plib = int(CUlibrary(lib)) - clib = plib - cdef ccuda.CUkernel* ckernels = NULL + cylib = plib + cdef cydriver.CUkernel* cykernels = NULL pykernels = [] if numKernels != 0: - ckernels = calloc(numKernels, sizeof(ccuda.CUkernel)) - if ckernels is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(numKernels) + 'x' + str(sizeof(ccuda.CUkernel))) - err = ccuda.cuLibraryEnumerateKernels(ckernels, numKernels, clib) + cykernels = calloc(numKernels, sizeof(cydriver.CUkernel)) + if cykernels is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(numKernels) + 'x' + str(sizeof(cydriver.CUkernel))) + err = cydriver.cuLibraryEnumerateKernels(cykernels, numKernels, cylib) if CUresult(err) == CUresult(0): - pykernels = [CUkernel(init_value=ckernels[idx]) for idx in range(numKernels)] - if ckernels is not NULL: - free(ckernels) + pykernels = [CUkernel(init_value=cykernels[idx]) for idx in range(numKernels)] + if cykernels is not NULL: + free(cykernels) return (CUresult(err), pykernels) {{endif}} @@ -21737,17 +21737,17 @@ def cuLibraryGetModule(library): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuLibraryUnload`, :py:obj:`~.cuModuleGetFunction` """ - cdef ccuda.CUlibrary clibrary + cdef cydriver.CUlibrary cylibrary if library is None: - clibrary = 0 + cylibrary = 0 elif isinstance(library, (CUlibrary,)): plibrary = int(library) - clibrary = plibrary + cylibrary = plibrary else: plibrary = int(CUlibrary(library)) - clibrary = plibrary + cylibrary = plibrary cdef CUmodule pMod = CUmodule() - err = ccuda.cuLibraryGetModule(pMod._ptr, clibrary) + err = cydriver.cuLibraryGetModule(pMod._ptr, cylibrary) return (CUresult(err), pMod) {{endif}} @@ -21777,17 +21777,17 @@ def cuKernelGetFunction(kernel): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuLibraryUnload`, :py:obj:`~.cuLibraryGetKernel`, :py:obj:`~.cuLibraryGetModule`, :py:obj:`~.cuModuleGetFunction` """ - cdef ccuda.CUkernel ckernel + cdef cydriver.CUkernel cykernel if kernel is None: - ckernel 
= 0 + cykernel = 0 elif isinstance(kernel, (CUkernel,)): pkernel = int(kernel) - ckernel = pkernel + cykernel = pkernel else: pkernel = int(CUkernel(kernel)) - ckernel = pkernel + cykernel = pkernel cdef CUfunction pFunc = CUfunction() - err = ccuda.cuKernelGetFunction(pFunc._ptr, ckernel) + err = cydriver.cuKernelGetFunction(pFunc._ptr, cykernel) return (CUresult(err), pFunc) {{endif}} @@ -21816,17 +21816,17 @@ def cuKernelGetLibrary(kernel): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuLibraryUnload`, :py:obj:`~.cuLibraryGetKernel` """ - cdef ccuda.CUkernel ckernel + cdef cydriver.CUkernel cykernel if kernel is None: - ckernel = 0 + cykernel = 0 elif isinstance(kernel, (CUkernel,)): pkernel = int(kernel) - ckernel = pkernel + cykernel = pkernel else: pkernel = int(CUkernel(kernel)) - ckernel = pkernel + cykernel = pkernel cdef CUlibrary pLib = CUlibrary() - err = ccuda.cuKernelGetLibrary(pLib._ptr, ckernel) + err = cydriver.cuKernelGetLibrary(pLib._ptr, cykernel) return (CUresult(err), pLib) {{endif}} @@ -21862,18 +21862,18 @@ def cuLibraryGetGlobal(library, char* name): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuLibraryUnload`, :py:obj:`~.cuLibraryGetModule`, :py:obj:`~.cuModuleGetGlobal` """ - cdef ccuda.CUlibrary clibrary + cdef cydriver.CUlibrary cylibrary if library is None: - clibrary = 0 + cylibrary = 0 elif isinstance(library, (CUlibrary,)): plibrary = int(library) - clibrary = plibrary + cylibrary = plibrary else: plibrary = int(CUlibrary(library)) - clibrary = plibrary + cylibrary = plibrary cdef CUdeviceptr dptr = CUdeviceptr() cdef size_t numbytes = 0 - err = ccuda.cuLibraryGetGlobal(dptr._ptr, &numbytes, clibrary, name) + err = cydriver.cuLibraryGetGlobal(dptr._ptr, &numbytes, cylibrary, name) return (CUresult(err), dptr, numbytes) {{endif}} @@ -21911,18 +21911,18 @@ def cuLibraryGetManaged(library, char* name): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuLibraryUnload` """ - cdef ccuda.CUlibrary clibrary + cdef cydriver.CUlibrary cylibrary if library is None: - clibrary = 0 + cylibrary = 0 elif isinstance(library, (CUlibrary,)): plibrary = int(library) - clibrary = plibrary + cylibrary = plibrary else: plibrary = int(CUlibrary(library)) - clibrary = plibrary + cylibrary = plibrary cdef CUdeviceptr dptr = CUdeviceptr() cdef size_t numbytes = 0 - err = ccuda.cuLibraryGetManaged(dptr._ptr, &numbytes, clibrary, name) + err = cydriver.cuLibraryGetManaged(dptr._ptr, &numbytes, cylibrary, name) return (CUresult(err), dptr, numbytes) {{endif}} @@ -21957,17 +21957,17 @@ def cuLibraryGetUnifiedFunction(library, char* symbol): -------- :py:obj:`~.cuLibraryLoadData`, :py:obj:`~.cuLibraryLoadFromFile`, :py:obj:`~.cuLibraryUnload` """ - cdef ccuda.CUlibrary clibrary + cdef cydriver.CUlibrary cylibrary if library is None: - clibrary = 0 + cylibrary = 0 elif isinstance(library, (CUlibrary,)): plibrary = int(library) - clibrary = plibrary + cylibrary = plibrary else: plibrary = int(CUlibrary(library)) - clibrary = plibrary + cylibrary = plibrary cdef void_ptr fptr = 0 - err = ccuda.cuLibraryGetUnifiedFunction(&fptr, clibrary, symbol) + err = cydriver.cuLibraryGetUnifiedFunction(&fptr, cylibrary, symbol) return (CUresult(err), fptr) {{endif}} @@ -22081,27 +22081,27 @@ def cuKernelGetAttribute(attrib not None : CUfunction_attribute, kernel, dev): ----- If another thread is trying to set the same attribute on the same device using 
:py:obj:`~.cuKernelSetAttribute()` simultaneously, the attribute query will give the old or new value depending on the interleavings chosen by the OS scheduler and memory consistency. """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - cdef ccuda.CUkernel ckernel + cydev = pdev + cdef cydriver.CUkernel cykernel if kernel is None: - ckernel = 0 + cykernel = 0 elif isinstance(kernel, (CUkernel,)): pkernel = int(kernel) - ckernel = pkernel + cykernel = pkernel else: pkernel = int(CUkernel(kernel)) - ckernel = pkernel + cykernel = pkernel cdef int pi = 0 - cdef ccuda.CUfunction_attribute cattrib = attrib.value - err = ccuda.cuKernelGetAttribute(&pi, cattrib, ckernel, cdev) + cdef cydriver.CUfunction_attribute cyattrib = attrib.value + err = cydriver.cuKernelGetAttribute(&pi, cyattrib, cykernel, cydev) return (CUresult(err), pi) {{endif}} @@ -22198,26 +22198,26 @@ def cuKernelSetAttribute(attrib not None : CUfunction_attribute, int val, kernel ----- The API has stricter locking requirements in comparison to its legacy counterpart :py:obj:`~.cuFuncSetAttribute()` due to device-wide semantics. If multiple threads are trying to set the same attribute on the same device simultaneously, the attribute setting will depend on the interleavings chosen by the OS scheduler and memory consistency. """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - cdef ccuda.CUkernel ckernel + cydev = pdev + cdef cydriver.CUkernel cykernel if kernel is None: - ckernel = 0 + cykernel = 0 elif isinstance(kernel, (CUkernel,)): pkernel = int(kernel) - ckernel = pkernel + cykernel = pkernel else: pkernel = int(CUkernel(kernel)) - ckernel = pkernel - cdef ccuda.CUfunction_attribute cattrib = attrib.value - err = ccuda.cuKernelSetAttribute(cattrib, val, ckernel, cdev) + cykernel = pkernel + cdef cydriver.CUfunction_attribute cyattrib = attrib.value + err = cydriver.cuKernelSetAttribute(cyattrib, val, cykernel, cydev) return (CUresult(err),) {{endif}} @@ -22283,26 +22283,26 @@ def cuKernelSetCacheConfig(kernel, config not None : CUfunc_cache, dev): ----- The API has stricter locking requirements in comparison to its legacy counterpart :py:obj:`~.cuFuncSetCacheConfig()` due to device-wide semantics. If multiple threads are trying to set a config on the same device simultaneously, the cache config setting will depend on the interleavings chosen by the OS scheduler and memory consistency. 
""" - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - cdef ccuda.CUkernel ckernel + cydev = pdev + cdef cydriver.CUkernel cykernel if kernel is None: - ckernel = 0 + cykernel = 0 elif isinstance(kernel, (CUkernel,)): pkernel = int(kernel) - ckernel = pkernel + cykernel = pkernel else: pkernel = int(CUkernel(kernel)) - ckernel = pkernel - cdef ccuda.CUfunc_cache cconfig = config.value - err = ccuda.cuKernelSetCacheConfig(ckernel, cconfig, cdev) + cykernel = pkernel + cdef cydriver.CUfunc_cache cyconfig = config.value + err = cydriver.cuKernelSetCacheConfig(cykernel, cyconfig, cydev) return (CUresult(err),) {{endif}} @@ -22332,17 +22332,17 @@ def cuKernelGetName(hfunc): name : bytes The returned name of the function """ - cdef ccuda.CUkernel chfunc + cdef cydriver.CUkernel cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUkernel,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUkernel(hfunc)) - chfunc = phfunc + cyhfunc = phfunc cdef const char* name = NULL - err = ccuda.cuKernelGetName(&name, chfunc) + err = cydriver.cuKernelGetName(&name, cyhfunc) return (CUresult(err), name) {{endif}} @@ -22384,18 +22384,18 @@ def cuKernelGetParamInfo(kernel, size_t paramIndex): -------- :py:obj:`~.cuFuncGetParamInfo` """ - cdef ccuda.CUkernel ckernel + cdef cydriver.CUkernel cykernel if kernel is None: - ckernel = 0 + cykernel = 0 elif isinstance(kernel, (CUkernel,)): pkernel = int(kernel) - ckernel = pkernel + cykernel = pkernel else: pkernel = int(CUkernel(kernel)) - ckernel = pkernel + cykernel = pkernel cdef size_t paramOffset = 0 cdef size_t paramSize = 0 - err = ccuda.cuKernelGetParamInfo(ckernel, paramIndex, ¶mOffset, ¶mSize) + err = cydriver.cuKernelGetParamInfo(cykernel, paramIndex, ¶mOffset, ¶mSize) return (CUresult(err), paramOffset, paramSize) {{endif}} @@ -22437,7 +22437,7 @@ def cuMemGetInfo(): """ cdef size_t free = 0 cdef size_t total = 0 - err = ccuda.cuMemGetInfo(&free, &total) + err = cydriver.cuMemGetInfo(&free, &total) return (CUresult(err), free, total) {{endif}} @@ -22470,7 +22470,7 @@ def cuMemAlloc(size_t bytesize): :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMalloc` """ cdef CUdeviceptr dptr = CUdeviceptr() - err = ccuda.cuMemAlloc(dptr._ptr, bytesize) + err = cydriver.cuMemAlloc(dptr._ptr, bytesize) return (CUresult(err), dptr) {{endif}} @@ -22536,7 
+22536,7 @@ def cuMemAllocPitch(size_t WidthInBytes, size_t Height, unsigned int ElementSize """ cdef CUdeviceptr dptr = CUdeviceptr() cdef size_t pPitch = 0 - err = ccuda.cuMemAllocPitch(dptr._ptr, &pPitch, WidthInBytes, Height, ElementSizeBytes) + err = cydriver.cuMemAllocPitch(dptr._ptr, &pPitch, WidthInBytes, Height, ElementSizeBytes) return (CUresult(err), dptr, pPitch) {{endif}} @@ -22575,16 +22575,16 @@ def cuMemFree(dptr): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemAllocManaged`, :py:obj:`~.cuMemAllocAsync`, :py:obj:`~.cuMemAllocFromPoolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemFreeAsync`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaFree` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - err = ccuda.cuMemFree(cdptr) + cydptr = pdptr + err = cydriver.cuMemFree(cydptr) return (CUresult(err),) {{endif}} @@ -22617,18 +22617,18 @@ def cuMemGetAddressRange(dptr): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr + cydptr = pdptr cdef CUdeviceptr pbase = CUdeviceptr() cdef size_t psize = 0 - err = ccuda.cuMemGetAddressRange(pbase._ptr, &psize, cdptr) + err = cydriver.cuMemGetAddressRange(pbase._ptr, 
&psize, cydptr) return (CUresult(err), pbase, psize) {{endif}} @@ -22682,7 +22682,7 @@ def cuMemAllocHost(size_t bytesize): :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMallocHost` """ cdef void_ptr pp = 0 - err = ccuda.cuMemAllocHost(&pp, bytesize) + err = cydriver.cuMemAllocHost(&pp, bytesize) return (CUresult(err), pp) {{endif}} @@ -22709,9 +22709,9 @@ def cuMemFreeHost(p): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaFreeHost` """ - cp = utils.HelperInputVoidPtr(p) - cdef void* cp_ptr = cp.cptr - err = ccuda.cuMemFreeHost(cp_ptr) + cyp = utils.HelperInputVoidPtr(p) + cdef void* cyp_ptr = cyp.cptr + err = cydriver.cuMemFreeHost(cyp_ptr) return (CUresult(err),) {{endif}} @@ -22801,7 +22801,7 @@ def cuMemHostAlloc(size_t bytesize, unsigned int Flags): :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, 
:py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaHostAlloc` """ cdef void_ptr pp = 0 - err = ccuda.cuMemHostAlloc(&pp, bytesize, Flags) + err = cydriver.cuMemHostAlloc(&pp, bytesize, Flags) return (CUresult(err), pp) {{endif}} @@ -22859,9 +22859,9 @@ def cuMemHostGetDevicePointer(p, unsigned int Flags): :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaHostGetDevicePointer` """ cdef CUdeviceptr pdptr = CUdeviceptr() - cp = utils.HelperInputVoidPtr(p) - cdef void* cp_ptr = cp.cptr - err = ccuda.cuMemHostGetDevicePointer(pdptr._ptr, cp_ptr, Flags) + cyp = utils.HelperInputVoidPtr(p) + cdef void* cyp_ptr = cyp.cptr + err = cydriver.cuMemHostGetDevicePointer(pdptr._ptr, cyp_ptr, Flags) return (CUresult(err), pdptr) {{endif}} @@ -22895,9 +22895,9 @@ def cuMemHostGetFlags(p): :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cudaHostGetFlags` """ cdef unsigned int pFlags = 0 - cp = utils.HelperInputVoidPtr(p) - cdef void* cp_ptr = cp.cptr - err = ccuda.cuMemHostGetFlags(&pFlags, cp_ptr) + cyp = utils.HelperInputVoidPtr(p) + cdef void* cyp_ptr = cyp.cptr + err = cydriver.cuMemHostGetFlags(&pFlags, cyp_ptr) return (CUresult(err), pFlags) {{endif}} @@ -23031,7 +23031,7 @@ def cuMemAllocManaged(size_t bytesize, unsigned int flags): :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, 
:py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuStreamAttachMemAsync`, :py:obj:`~.cudaMallocManaged` """ cdef CUdeviceptr dptr = CUdeviceptr() - err = ccuda.cuMemAllocManaged(dptr._ptr, bytesize, flags) + err = cydriver.cuMemAllocManaged(dptr._ptr, bytesize, flags) return (CUresult(err), dptr) {{endif}} @@ -23080,28 +23080,28 @@ def cuDeviceRegisterAsyncNotification(device, callbackFunc, userData): -------- :py:obj:`~.cuDeviceUnregisterAsyncNotification` """ - cdef ccuda.CUasyncCallback ccallbackFunc + cdef cydriver.CUasyncCallback cycallbackFunc if callbackFunc is None: - ccallbackFunc = 0 + cycallbackFunc = 0 elif isinstance(callbackFunc, (CUasyncCallback,)): pcallbackFunc = int(callbackFunc) - ccallbackFunc = pcallbackFunc + cycallbackFunc = pcallbackFunc else: pcallbackFunc = int(CUasyncCallback(callbackFunc)) - ccallbackFunc = pcallbackFunc - cdef ccuda.CUdevice cdevice + cycallbackFunc = pcallbackFunc + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - cuserData = utils.HelperInputVoidPtr(userData) - cdef void* cuserData_ptr = cuserData.cptr + cydevice = pdevice + cyuserData = utils.HelperInputVoidPtr(userData) + cdef void* cyuserData_ptr = cyuserData.cptr cdef CUasyncCallbackHandle callback = CUasyncCallbackHandle() - err = ccuda.cuDeviceRegisterAsyncNotification(cdevice, ccallbackFunc, cuserData_ptr, callback._ptr) + err = cydriver.cuDeviceRegisterAsyncNotification(cydevice, cycallbackFunc, cyuserData_ptr, callback._ptr) return (CUresult(err), callback) {{endif}} @@ -23131,25 +23131,25 @@ def cuDeviceUnregisterAsyncNotification(device, callback): -------- :py:obj:`~.cuDeviceRegisterAsyncNotification` """ - cdef ccuda.CUasyncCallbackHandle ccallback + cdef cydriver.CUasyncCallbackHandle cycallback if callback is None: - ccallback = 0 + cycallback = 0 elif isinstance(callback, (CUasyncCallbackHandle,)): pcallback = int(callback) - ccallback = pcallback + cycallback = pcallback else: pcallback = int(CUasyncCallbackHandle(callback)) - ccallback = pcallback - cdef ccuda.CUdevice cdevice + cycallback = pcallback + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - err = ccuda.cuDeviceUnregisterAsyncNotification(cdevice, ccallback) + cydevice = pdevice + err = cydriver.cuDeviceUnregisterAsyncNotification(cydevice, cycallback) return (CUresult(err),) {{endif}} @@ -23181,7 +23181,7 @@ def cuDeviceGetByPCIBusId(char* pciBusId): :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetPCIBusId`, :py:obj:`~.cudaDeviceGetByPCIBusId` """ cdef CUdevice dev = CUdevice() - err = ccuda.cuDeviceGetByPCIBusId(dev._ptr, pciBusId) + err = cydriver.cuDeviceGetByPCIBusId(dev._ptr, pciBusId) return (CUresult(err), dev) {{endif}} @@ -23217,18 +23217,18 @@ def cuDeviceGetPCIBusId(int length, dev): -------- :py:obj:`~.cuDeviceGet`, :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetByPCIBusId`, :py:obj:`~.cudaDeviceGetPCIBusId` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev pypciBusId = b" " * length cdef char* 
pciBusId = pypciBusId - err = ccuda.cuDeviceGetPCIBusId(pciBusId, length, cdev) + err = cydriver.cuDeviceGetPCIBusId(pciBusId, length, cydev) return (CUresult(err), pypciBusId) {{endif}} @@ -23277,17 +23277,17 @@ def cuIpcGetEventHandle(event): -------- :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuIpcOpenEventHandle`, :py:obj:`~.cuIpcGetMemHandle`, :py:obj:`~.cuIpcOpenMemHandle`, :py:obj:`~.cuIpcCloseMemHandle`, :py:obj:`~.cudaIpcGetEventHandle` """ - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent + cyevent = pevent cdef CUipcEventHandle pHandle = CUipcEventHandle() - err = ccuda.cuIpcGetEventHandle(pHandle._ptr, cevent) + err = cydriver.cuIpcGetEventHandle(pHandle._ptr, cyevent) return (CUresult(err), pHandle) {{endif}} @@ -23331,7 +23331,7 @@ def cuIpcOpenEventHandle(handle not None : CUipcEventHandle): :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuIpcGetEventHandle`, :py:obj:`~.cuIpcGetMemHandle`, :py:obj:`~.cuIpcOpenMemHandle`, :py:obj:`~.cuIpcCloseMemHandle`, :py:obj:`~.cudaIpcOpenEventHandle` """ cdef CUevent phEvent = CUevent() - err = ccuda.cuIpcOpenEventHandle(phEvent._ptr, handle._ptr[0]) + err = cydriver.cuIpcOpenEventHandle(phEvent._ptr, handle._ptr[0]) return (CUresult(err), phEvent) {{endif}} @@ -23375,17 +23375,17 @@ def cuIpcGetMemHandle(dptr): -------- :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuIpcGetEventHandle`, :py:obj:`~.cuIpcOpenEventHandle`, :py:obj:`~.cuIpcOpenMemHandle`, :py:obj:`~.cuIpcCloseMemHandle`, :py:obj:`~.cudaIpcGetMemHandle` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr + cydptr = pdptr cdef CUipcMemHandle pHandle = CUipcMemHandle() - err = ccuda.cuIpcGetMemHandle(pHandle._ptr, cdptr) + err = cydriver.cuIpcGetMemHandle(pHandle._ptr, cydptr) return (CUresult(err), pHandle) {{endif}} @@ -23451,7 +23451,7 @@ def cuIpcOpenMemHandle(handle not None : CUipcMemHandle, unsigned int Flags): No guarantees are made about the address returned in `*pdptr`. In particular, multiple processes may not receive the same address for the same `handle`. 
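The IPC wrappers renamed above keep the two-process flow intact; only the cimport target changes. A sketch of exporting and importing a device allocation, with the inter-process transport and error checks elided (``nbytes`` is a placeholder, and the handle's raw bytes are assumed to be exposed via its ``reserved`` field as in the existing bindings):

    # exporting process
    err, dptr = driver.cuMemAlloc(nbytes)
    err, handle = driver.cuIpcGetMemHandle(dptr)
    # ... ship handle.reserved to the peer process out of band ...

    # importing process
    err, peer_ptr = driver.cuIpcOpenMemHandle(
        handle, driver.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS)
    # ... use peer_ptr ...
    err, = driver.cuIpcCloseMemHandle(peer_ptr)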
""" cdef CUdeviceptr pdptr = CUdeviceptr() - err = ccuda.cuIpcOpenMemHandle(pdptr._ptr, handle._ptr[0], Flags) + err = cydriver.cuIpcOpenMemHandle(pdptr._ptr, handle._ptr[0], Flags) return (CUresult(err), pdptr) {{endif}} @@ -23491,16 +23491,16 @@ def cuIpcCloseMemHandle(dptr): -------- :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuIpcGetEventHandle`, :py:obj:`~.cuIpcOpenEventHandle`, :py:obj:`~.cuIpcGetMemHandle`, :py:obj:`~.cuIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - err = ccuda.cuIpcCloseMemHandle(cdptr) + cydptr = pdptr + err = cydriver.cuIpcCloseMemHandle(cydptr) return (CUresult(err),) {{endif}} @@ -23602,9 +23602,9 @@ def cuMemHostRegister(p, size_t bytesize, unsigned int Flags): -------- :py:obj:`~.cuMemHostUnregister`, :py:obj:`~.cuMemHostGetFlags`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cudaHostRegister` """ - cp = utils.HelperInputVoidPtr(p) - cdef void* cp_ptr = cp.cptr - err = ccuda.cuMemHostRegister(cp_ptr, bytesize, Flags) + cyp = utils.HelperInputVoidPtr(p) + cdef void* cyp_ptr = cyp.cptr + err = cydriver.cuMemHostRegister(cyp_ptr, bytesize, Flags) return (CUresult(err),) {{endif}} @@ -23634,9 +23634,9 @@ def cuMemHostUnregister(p): -------- :py:obj:`~.cuMemHostRegister`, :py:obj:`~.cudaHostUnregister` """ - cp = utils.HelperInputVoidPtr(p) - cdef void* cp_ptr = cp.cptr - err = ccuda.cuMemHostUnregister(cp_ptr) + cyp = utils.HelperInputVoidPtr(p) + cdef void* cyp_ptr = cyp.cptr + err = cydriver.cuMemHostUnregister(cyp_ptr) return (CUresult(err),) {{endif}} @@ -23671,25 +23671,25 @@ def cuMemcpy(dst, src, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol` """ - cdef ccuda.CUdeviceptr csrc + cdef cydriver.CUdeviceptr cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (CUdeviceptr,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(CUdeviceptr(src)) - csrc = psrc - cdef ccuda.CUdeviceptr cdst + cysrc = psrc + cdef cydriver.CUdeviceptr cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (CUdeviceptr,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(CUdeviceptr(dst)) - cdst = pdst - err = ccuda.cuMemcpy(cdst, csrc, ByteCount) + cydst = pdst + err = cydriver.cuMemcpy(cydst, 
cysrc, ByteCount) return (CUresult(err),) {{endif}} @@ -23727,43 +23727,43 @@ def cuMemcpyPeer(dstDevice, dstContext, srcDevice, srcContext, size_t ByteCount) -------- :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpy3DPeer`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyPeerAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cudaMemcpyPeer` """ - cdef ccuda.CUcontext csrcContext + cdef cydriver.CUcontext cysrcContext if srcContext is None: - csrcContext = 0 + cysrcContext = 0 elif isinstance(srcContext, (CUcontext,)): psrcContext = int(srcContext) - csrcContext = psrcContext + cysrcContext = psrcContext else: psrcContext = int(CUcontext(srcContext)) - csrcContext = psrcContext - cdef ccuda.CUdeviceptr csrcDevice + cysrcContext = psrcContext + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - cdef ccuda.CUcontext cdstContext + cysrcDevice = psrcDevice + cdef cydriver.CUcontext cydstContext if dstContext is None: - cdstContext = 0 + cydstContext = 0 elif isinstance(dstContext, (CUcontext,)): pdstContext = int(dstContext) - cdstContext = pdstContext + cydstContext = pdstContext else: pdstContext = int(CUcontext(dstContext)) - cdstContext = pdstContext - cdef ccuda.CUdeviceptr cdstDevice + cydstContext = pdstContext + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemcpyPeer(cdstDevice, cdstContext, csrcDevice, csrcContext, ByteCount) + cydstDevice = pdstDevice + err = cydriver.cuMemcpyPeer(cydstDevice, cydstContext, cysrcDevice, cysrcContext, ByteCount) return (CUresult(err),) {{endif}} @@ -23795,18 +23795,18 @@ def cuMemcpyHtoD(dstDevice, srcHost, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyToSymbol` """ - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - csrcHost = 
utils.HelperInputVoidPtr(srcHost) - cdef void* csrcHost_ptr = csrcHost.cptr - err = ccuda.cuMemcpyHtoD(cdstDevice, csrcHost_ptr, ByteCount) + cydstDevice = pdstDevice + cysrcHost = utils.HelperInputVoidPtr(srcHost) + cdef void* cysrcHost_ptr = cysrcHost.cptr + err = cydriver.cuMemcpyHtoD(cydstDevice, cysrcHost_ptr, ByteCount) return (CUresult(err),) {{endif}} @@ -23838,18 +23838,18 @@ def cuMemcpyDtoH(dstHost, srcDevice, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyFromSymbol` """ - cdef ccuda.CUdeviceptr csrcDevice + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - cdstHost = utils.HelperInputVoidPtr(dstHost) - cdef void* cdstHost_ptr = cdstHost.cptr - err = ccuda.cuMemcpyDtoH(cdstHost_ptr, csrcDevice, ByteCount) + cysrcDevice = psrcDevice + cydstHost = utils.HelperInputVoidPtr(dstHost) + cdef void* cydstHost_ptr = cydstHost.cptr + err = cydriver.cuMemcpyDtoH(cydstHost_ptr, cysrcDevice, ByteCount) return (CUresult(err),) {{endif}} @@ -23881,25 +23881,25 @@ def cuMemcpyDtoD(dstDevice, srcDevice, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol` """ - cdef ccuda.CUdeviceptr csrcDevice + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is 
None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - cdef ccuda.CUdeviceptr cdstDevice + cysrcDevice = psrcDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemcpyDtoD(cdstDevice, csrcDevice, ByteCount) + cydstDevice = pdstDevice + err = cydriver.cuMemcpyDtoD(cydstDevice, cysrcDevice, ByteCount) return (CUresult(err),) {{endif}} @@ -23934,25 +23934,25 @@ def cuMemcpyDtoA(dstArray, size_t dstOffset, srcDevice, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpyToArray` """ - cdef ccuda.CUdeviceptr csrcDevice + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - cdef ccuda.CUarray cdstArray + cysrcDevice = psrcDevice + cdef cydriver.CUarray cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (CUarray,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(CUarray(dstArray)) - cdstArray = pdstArray - err = ccuda.cuMemcpyDtoA(cdstArray, dstOffset, csrcDevice, ByteCount) + cydstArray = pdstArray + err = cydriver.cuMemcpyDtoA(cydstArray, dstOffset, cysrcDevice, ByteCount) return (CUresult(err),) {{endif}} @@ -23989,25 +23989,25 @@ def cuMemcpyAtoD(dstDevice, srcArray, size_t srcOffset, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, 
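``cuMemcpyHtoD`` and ``cuMemcpyDtoH`` (above) are the workhorse transfers, and the rename does not touch their accepted argument types. A round-trip sketch, passing host buffers by raw address (a NumPy array's ``ctypes.data``), which the ``HelperInputVoidPtr`` path accepts:

    import numpy as np

    src = np.arange(256, dtype=np.float32)
    dst = np.empty_like(src)
    err, dptr = driver.cuMemAlloc(src.nbytes)
    err, = driver.cuMemcpyHtoD(dptr, src.ctypes.data, src.nbytes)
    err, = driver.cuMemcpyDtoH(dst.ctypes.data, dptr, src.nbytes)
    err, = driver.cuMemFree(dptr)
    assert (src == dst).all()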
:py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpyFromArray` """ - cdef ccuda.CUarray csrcArray + cdef cydriver.CUarray cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (CUarray,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(CUarray(srcArray)) - csrcArray = psrcArray - cdef ccuda.CUdeviceptr cdstDevice + cysrcArray = psrcArray + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemcpyAtoD(cdstDevice, csrcArray, srcOffset, ByteCount) + cydstDevice = pdstDevice + err = cydriver.cuMemcpyAtoD(cydstDevice, cysrcArray, srcOffset, ByteCount) return (CUresult(err),) {{endif}} @@ -24042,18 +24042,18 @@ def cuMemcpyHtoA(dstArray, size_t dstOffset, srcHost, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpyToArray` """ - cdef ccuda.CUarray cdstArray + cdef cydriver.CUarray cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (CUarray,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(CUarray(dstArray)) - cdstArray = pdstArray - csrcHost = utils.HelperInputVoidPtr(srcHost) - cdef void* csrcHost_ptr = csrcHost.cptr - err = ccuda.cuMemcpyHtoA(cdstArray, dstOffset, csrcHost_ptr, ByteCount) + cydstArray = pdstArray + cysrcHost = utils.HelperInputVoidPtr(srcHost) + cdef void* cysrcHost_ptr = cysrcHost.cptr + err = cydriver.cuMemcpyHtoA(cydstArray, dstOffset, cysrcHost_ptr, ByteCount) return (CUresult(err),) {{endif}} @@ -24088,18 +24088,18 @@ def cuMemcpyAtoH(dstHost, srcArray, size_t srcOffset, size_t ByteCount): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, 
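Every handle parameter in these hunks goes through the same three-way branch, now with ``cy``-prefixed temporaries: ``None`` coerces to 0 (NULL), an instance of the wrapper class is unwrapped via ``int()``, and anything else is first converted through the wrapper's constructor. So both of the following spellings are accepted for a ``CUdeviceptr`` argument (a sketch; ``dptr`` is a placeholder from a prior ``cuMemAlloc``):

    driver.cuMemcpyDtoD(dptr, dptr, 0)            # wrapper instances
    driver.cuMemcpyDtoD(int(dptr), int(dptr), 0)  # raw integer addresses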
:py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpyFromArray` """ - cdef ccuda.CUarray csrcArray + cdef cydriver.CUarray cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (CUarray,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(CUarray(srcArray)) - csrcArray = psrcArray - cdstHost = utils.HelperInputVoidPtr(dstHost) - cdef void* cdstHost_ptr = cdstHost.cptr - err = ccuda.cuMemcpyAtoH(cdstHost_ptr, csrcArray, srcOffset, ByteCount) + cysrcArray = psrcArray + cydstHost = utils.HelperInputVoidPtr(dstHost) + cdef void* cydstHost_ptr = cydstHost.cptr + err = cydriver.cuMemcpyAtoH(cydstHost_ptr, cysrcArray, srcOffset, ByteCount) return (CUresult(err),) {{endif}} @@ -24139,25 +24139,25 @@ def cuMemcpyAtoA(dstArray, size_t dstOffset, srcArray, size_t srcOffset, size_t -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpyArrayToArray` """ - cdef ccuda.CUarray csrcArray + cdef cydriver.CUarray cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (CUarray,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(CUarray(srcArray)) - csrcArray = psrcArray - cdef ccuda.CUarray cdstArray + cysrcArray = psrcArray + cdef cydriver.CUarray cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (CUarray,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(CUarray(dstArray)) - cdstArray = pdstArray - err = ccuda.cuMemcpyAtoA(cdstArray, dstOffset, csrcArray, srcOffset, ByteCount) + cydstArray = pdstArray + err = cydriver.cuMemcpyAtoA(cydstArray, dstOffset, cysrcArray, 
srcOffset, ByteCount) return (CUresult(err),) {{endif}} @@ -24282,8 +24282,8 @@ def cuMemcpy2D(pCopy : Optional[CUDA_MEMCPY2D]): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray` """ - cdef ccuda.CUDA_MEMCPY2D* cpCopy_ptr = pCopy._ptr if pCopy != None else NULL - err = ccuda.cuMemcpy2D(cpCopy_ptr) + cdef cydriver.CUDA_MEMCPY2D* cypCopy_ptr = pCopy._ptr if pCopy != None else NULL + err = cydriver.cuMemcpy2D(cypCopy_ptr) return (CUresult(err),) {{endif}} @@ -24408,8 +24408,8 @@ def cuMemcpy2DUnaligned(pCopy : Optional[CUDA_MEMCPY2D]): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray` """ - cdef ccuda.CUDA_MEMCPY2D* cpCopy_ptr = pCopy._ptr if pCopy != None else NULL - err = ccuda.cuMemcpy2DUnaligned(cpCopy_ptr) + cdef cydriver.CUDA_MEMCPY2D* cypCopy_ptr = pCopy._ptr if pCopy != None else NULL + err = cydriver.cuMemcpy2DUnaligned(cypCopy_ptr) return (CUresult(err),) {{endif}} @@ -24537,8 +24537,8 @@ def cuMemcpy3D(pCopy : Optional[CUDA_MEMCPY3D]): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, 
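``cuMemcpy2D``, ``cuMemcpy2DUnaligned``, and ``cuMemcpy3D`` (above and below) take an Optional descriptor struct; ``pCopy`` may be ``None``, in which case the renamed ``cypCopy_ptr`` is NULL. A host-to-device 2D copy sketch, where ``host_buf``, ``width_bytes``, ``height``, ``dptr``, and ``pitch`` are placeholders:

    cpy = driver.CUDA_MEMCPY2D()
    cpy.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_HOST
    cpy.srcHost = host_buf.ctypes.data
    cpy.srcPitch = width_bytes
    cpy.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
    cpy.dstDevice = dptr
    cpy.dstPitch = pitch
    cpy.WidthInBytes = width_bytes
    cpy.Height = height
    err, = driver.cuMemcpy2D(cpy)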
:py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemcpy3D` """ - cdef ccuda.CUDA_MEMCPY3D* cpCopy_ptr = pCopy._ptr if pCopy != None else NULL - err = ccuda.cuMemcpy3D(cpCopy_ptr) + cdef cydriver.CUDA_MEMCPY3D* cypCopy_ptr = pCopy._ptr if pCopy != None else NULL + err = cydriver.cuMemcpy3D(cypCopy_ptr) return (CUresult(err),) {{endif}} @@ -24566,8 +24566,8 @@ def cuMemcpy3DPeer(pCopy : Optional[CUDA_MEMCPY3D_PEER]): -------- :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyPeer`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyPeerAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cudaMemcpy3DPeer` """ - cdef ccuda.CUDA_MEMCPY3D_PEER* cpCopy_ptr = pCopy._ptr if pCopy != None else NULL - err = ccuda.cuMemcpy3DPeer(cpCopy_ptr) + cdef cydriver.CUDA_MEMCPY3D_PEER* cypCopy_ptr = pCopy._ptr if pCopy != None else NULL + err = cydriver.cuMemcpy3DPeer(cypCopy_ptr) return (CUresult(err),) {{endif}} @@ -24604,34 +24604,34 @@ def cuMemcpyAsync(dst, src, size_t ByteCount, hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr csrc + cyhStream = phStream + cdef cydriver.CUdeviceptr cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (CUdeviceptr,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(CUdeviceptr(src)) - csrc = psrc - cdef ccuda.CUdeviceptr cdst + cysrc = psrc + cdef cydriver.CUdeviceptr cydst if 
dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (CUdeviceptr,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(CUdeviceptr(dst)) - cdst = pdst - err = ccuda.cuMemcpyAsync(cdst, csrc, ByteCount, chStream) + cydst = pdst + err = cydriver.cuMemcpyAsync(cydst, cysrc, ByteCount, cyhStream) return (CUresult(err),) {{endif}} @@ -24671,52 +24671,52 @@ def cuMemcpyPeerAsync(dstDevice, dstContext, srcDevice, srcContext, size_t ByteC -------- :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyPeer`, :py:obj:`~.cuMemcpy3DPeer`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cudaMemcpyPeerAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUcontext csrcContext + cyhStream = phStream + cdef cydriver.CUcontext cysrcContext if srcContext is None: - csrcContext = 0 + cysrcContext = 0 elif isinstance(srcContext, (CUcontext,)): psrcContext = int(srcContext) - csrcContext = psrcContext + cysrcContext = psrcContext else: psrcContext = int(CUcontext(srcContext)) - csrcContext = psrcContext - cdef ccuda.CUdeviceptr csrcDevice + cysrcContext = psrcContext + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - cdef ccuda.CUcontext cdstContext + cysrcDevice = psrcDevice + cdef cydriver.CUcontext cydstContext if dstContext is None: - cdstContext = 0 + cydstContext = 0 elif isinstance(dstContext, (CUcontext,)): pdstContext = int(dstContext) - cdstContext = pdstContext + cydstContext = pdstContext else: pdstContext = int(CUcontext(dstContext)) - cdstContext = pdstContext - cdef ccuda.CUdeviceptr cdstDevice + cydstContext = pdstContext + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemcpyPeerAsync(cdstDevice, cdstContext, csrcDevice, csrcContext, ByteCount, chStream) + cydstDevice = pdstDevice + err = cydriver.cuMemcpyPeerAsync(cydstDevice, cydstContext, cysrcDevice, cysrcContext, ByteCount, cyhStream) return (CUresult(err),) {{endif}} @@ -24750,27 +24750,27 @@ def cuMemcpyHtoDAsync(dstDevice, srcHost, size_t ByteCount, hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, 
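The asynchronous variants add only an ``hStream`` parameter, coerced exactly like every other handle above. A sketch of an async copy ordered on an explicit stream (``dst_ptr``, ``src_ptr``, ``nbytes`` are placeholders):

    err, stream = driver.cuStreamCreate(0)
    err, = driver.cuMemcpyAsync(dst_ptr, src_ptr, nbytes, stream)
    err, = driver.cuStreamSynchronize(stream)
    err, = driver.cuStreamDestroy(stream)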
:py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdstDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - csrcHost = utils.HelperInputVoidPtr(srcHost) - cdef void* csrcHost_ptr = csrcHost.cptr - err = ccuda.cuMemcpyHtoDAsync(cdstDevice, csrcHost_ptr, ByteCount, chStream) + cydstDevice = pdstDevice + cysrcHost = utils.HelperInputVoidPtr(srcHost) + cdef void* cysrcHost_ptr = cysrcHost.cptr + err = cydriver.cuMemcpyHtoDAsync(cydstDevice, cysrcHost_ptr, ByteCount, cyhStream) return (CUresult(err),) {{endif}} @@ -24804,27 +24804,27 @@ def cuMemcpyDtoHAsync(dstHost, srcDevice, size_t ByteCount, hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr csrcDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = 
int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - cdstHost = utils.HelperInputVoidPtr(dstHost) - cdef void* cdstHost_ptr = cdstHost.cptr - err = ccuda.cuMemcpyDtoHAsync(cdstHost_ptr, csrcDevice, ByteCount, chStream) + cysrcDevice = psrcDevice + cydstHost = utils.HelperInputVoidPtr(dstHost) + cdef void* cydstHost_ptr = cydstHost.cptr + err = cydriver.cuMemcpyDtoHAsync(cydstHost_ptr, cysrcDevice, ByteCount, cyhStream) return (CUresult(err),) {{endif}} @@ -24858,34 +24858,34 @@ def cuMemcpyDtoDAsync(dstDevice, srcDevice, size_t ByteCount, hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr csrcDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdeviceptr,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdeviceptr(srcDevice)) - csrcDevice = psrcDevice - cdef ccuda.CUdeviceptr cdstDevice + cysrcDevice = psrcDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemcpyDtoDAsync(cdstDevice, csrcDevice, ByteCount, chStream) + cydstDevice = pdstDevice + err = cydriver.cuMemcpyDtoDAsync(cydstDevice, cysrcDevice, ByteCount, cyhStream) return (CUresult(err),) {{endif}} @@ -24922,27 +24922,27 @@ def cuMemcpyHtoAAsync(dstArray, size_t dstOffset, srcHost, size_t ByteCount, hSt -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, 
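Note that the ``hStream`` branch maps ``None`` to 0, so omitting a real stream enqueues the work on the legacy default stream, e.g.:

    err, = driver.cuMemcpyDtoHAsync(dst.ctypes.data, dptr, nbytes, None)  # stream handle 0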
:py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpyToArrayAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUarray cdstArray + cyhStream = phStream + cdef cydriver.CUarray cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (CUarray,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(CUarray(dstArray)) - cdstArray = pdstArray - csrcHost = utils.HelperInputVoidPtr(srcHost) - cdef void* csrcHost_ptr = csrcHost.cptr - err = ccuda.cuMemcpyHtoAAsync(cdstArray, dstOffset, csrcHost_ptr, ByteCount, chStream) + cydstArray = pdstArray + cysrcHost = utils.HelperInputVoidPtr(srcHost) + cdef void* cysrcHost_ptr = cysrcHost.cptr + err = cydriver.cuMemcpyHtoAAsync(cydstArray, dstOffset, cysrcHost_ptr, ByteCount, cyhStream) return (CUresult(err),) {{endif}} @@ -24979,27 +24979,27 @@ def cuMemcpyAtoHAsync(dstHost, srcArray, size_t srcOffset, size_t ByteCount, hSt -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpyFromArrayAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + 
cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUarray csrcArray + cyhStream = phStream + cdef cydriver.CUarray cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (CUarray,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(CUarray(srcArray)) - csrcArray = psrcArray - cdstHost = utils.HelperInputVoidPtr(dstHost) - cdef void* cdstHost_ptr = cdstHost.cptr - err = ccuda.cuMemcpyAtoHAsync(cdstHost_ptr, csrcArray, srcOffset, ByteCount, chStream) + cysrcArray = psrcArray + cydstHost = utils.HelperInputVoidPtr(dstHost) + cdef void* cydstHost_ptr = cydstHost.cptr + err = cydriver.cuMemcpyAtoHAsync(cydstHost_ptr, cysrcArray, srcOffset, ByteCount, cyhStream) return (CUresult(err),) {{endif}} @@ -25133,17 +25133,17 @@ def cuMemcpy2DAsync(pCopy : Optional[CUDA_MEMCPY2D], hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUDA_MEMCPY2D* cpCopy_ptr = pCopy._ptr if pCopy != None else NULL - err = ccuda.cuMemcpy2DAsync(cpCopy_ptr, chStream) + cyhStream = phStream + cdef cydriver.CUDA_MEMCPY2D* cypCopy_ptr = pCopy._ptr if pCopy != None else NULL + err = cydriver.cuMemcpy2DAsync(cypCopy_ptr, cyhStream) return (CUresult(err),) {{endif}} @@ -25273,17 +25273,17 @@ def cuMemcpy3DAsync(pCopy : Optional[CUDA_MEMCPY3D], hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, 
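``cuMemcpy2DAsync``, ``cuMemcpy3DAsync``, and ``cuMemcpy3DPeerAsync`` reuse the same descriptor structs as their synchronous counterparts; only the stream argument is new, so the earlier ``CUDA_MEMCPY2D`` sketch carries over:

    err, = driver.cuMemcpy2DAsync(cpy, stream)  # same cpy descriptor as above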
:py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemcpy3DAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUDA_MEMCPY3D* cpCopy_ptr = pCopy._ptr if pCopy != None else NULL - err = ccuda.cuMemcpy3DAsync(cpCopy_ptr, chStream) + cyhStream = phStream + cdef cydriver.CUDA_MEMCPY3D* cypCopy_ptr = pCopy._ptr if pCopy != None else NULL + err = cydriver.cuMemcpy3DAsync(cypCopy_ptr, cyhStream) return (CUresult(err),) {{endif}} @@ -25313,17 +25313,17 @@ def cuMemcpy3DPeerAsync(pCopy : Optional[CUDA_MEMCPY3D_PEER], hStream): -------- :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyPeer`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyPeerAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUDA_MEMCPY3D_PEER* cpCopy_ptr = pCopy._ptr if pCopy != None else NULL - err = ccuda.cuMemcpy3DPeerAsync(cpCopy_ptr, chStream) + cyhStream = phStream + cdef cydriver.CUDA_MEMCPY3D_PEER* cypCopy_ptr = pCopy._ptr if pCopy != None else NULL + err = cydriver.cuMemcpy3DPeerAsync(cypCopy_ptr, cyhStream) return (CUresult(err),) {{endif}} @@ -25353,16 +25353,16 @@ def cuMemsetD8(dstDevice, unsigned char uc, size_t N): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, 
:py:obj:`~.cudaMemset` """ - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD8(cdstDevice, uc, N) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD8(cydstDevice, uc, N) return (CUresult(err),) {{endif}} @@ -25393,16 +25393,16 @@ def cuMemsetD16(dstDevice, unsigned short us, size_t N): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset` """ - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD16(cdstDevice, us, N) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD16(cydstDevice, us, N) return (CUresult(err),) {{endif}} @@ -25433,16 +25433,16 @@ def cuMemsetD32(dstDevice, unsigned int ui, size_t N): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, 
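The memset family fills device memory with a repeated 8-, 16-, or 32-bit pattern. A sketch (``dptr``, ``nbytes``, ``count`` are placeholders; 0x3F800000 is the bit pattern of float32 1.0):

    err, = driver.cuMemsetD8(dptr, 0xFF, nbytes)         # every byte
    err, = driver.cuMemsetD32(dptr, 0x3F800000, count)   # every 32-bit word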
:py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset` """ - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD32(cdstDevice, ui, N) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD32(cydstDevice, ui, N) return (CUresult(err),) {{endif}} @@ -25480,16 +25480,16 @@ def cuMemsetD2D8(dstDevice, size_t dstPitch, unsigned char uc, size_t Width, siz -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset2D` """ - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD2D8(cdstDevice, dstPitch, uc, Width, Height) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD2D8(cydstDevice, dstPitch, uc, Width, Height) return (CUresult(err),) {{endif}} @@ -25528,16 +25528,16 @@ def cuMemsetD2D16(dstDevice, size_t dstPitch, unsigned short us, size_t Width, s -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, 
:py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset2D` """ - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD2D16(cdstDevice, dstPitch, us, Width, Height) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD2D16(cydstDevice, dstPitch, us, Width, Height) return (CUresult(err),) {{endif}} @@ -25576,16 +25576,16 @@ def cuMemsetD2D32(dstDevice, size_t dstPitch, unsigned int ui, size_t Width, siz -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset2D` """ - cdef ccuda.CUdeviceptr cdstDevice + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD2D32(cdstDevice, dstPitch, ui, Width, Height) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD2D32(cydstDevice, dstPitch, ui, Width, Height) return (CUresult(err),) {{endif}} @@ -25617,25 +25617,25 @@ def cuMemsetD8Async(dstDevice, unsigned char uc, size_t N, hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, 
:py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemsetAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdstDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD8Async(cdstDevice, uc, N, chStream) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD8Async(cydstDevice, uc, N, cyhStream) return (CUresult(err),) {{endif}} @@ -25668,25 +25668,25 @@ def cuMemsetD16Async(dstDevice, unsigned short us, size_t N, hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemsetAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdstDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD16Async(cdstDevice, us, N, chStream) + cydstDevice = pdstDevice + err = 
cydriver.cuMemsetD16Async(cydstDevice, us, N, cyhStream) return (CUresult(err),) {{endif}} @@ -25719,25 +25719,25 @@ def cuMemsetD32Async(dstDevice, unsigned int ui, size_t N, hStream): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMemsetAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdstDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD32Async(cdstDevice, ui, N, chStream) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD32Async(cydstDevice, ui, N, cyhStream) return (CUresult(err),) {{endif}} @@ -25777,25 +25777,25 @@ def cuMemsetD2D8Async(dstDevice, size_t dstPitch, unsigned char uc, size_t Width -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, 
:py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset2DAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdstDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD2D8Async(cdstDevice, dstPitch, uc, Width, Height, chStream) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD2D8Async(cydstDevice, dstPitch, uc, Width, Height, cyhStream) return (CUresult(err),) {{endif}} @@ -25836,25 +25836,25 @@ def cuMemsetD2D16Async(dstDevice, size_t dstPitch, unsigned short us, size_t Wid -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD2D32Async`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset2DAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdstDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD2D16Async(cdstDevice, dstPitch, us, Width, Height, chStream) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD2D16Async(cydstDevice, dstPitch, us, Width, Height, cyhStream) return (CUresult(err),) {{endif}} @@ -25895,25 +25895,25 @@ def cuMemsetD2D32Async(dstDevice, size_t dstPitch, unsigned int ui, size_t Width -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, 
:py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cuMemsetD32Async`, :py:obj:`~.cudaMemset2DAsync` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdstDevice + cyhStream = phStream + cdef cydriver.CUdeviceptr cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdeviceptr,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdeviceptr(dstDevice)) - cdstDevice = pdstDevice - err = ccuda.cuMemsetD2D32Async(cdstDevice, dstPitch, ui, Width, Height, chStream) + cydstDevice = pdstDevice + err = cydriver.cuMemsetD2D32Async(cydstDevice, dstPitch, ui, Width, Height, cyhStream) return (CUresult(err),) {{endif}} @@ -25980,8 +25980,8 @@ def cuArrayCreate(pAllocateArray : Optional[CUDA_ARRAY_DESCRIPTOR]): :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMallocArray` """ cdef CUarray pHandle = CUarray() - cdef ccuda.CUDA_ARRAY_DESCRIPTOR* cpAllocateArray_ptr = pAllocateArray._ptr if pAllocateArray != None else NULL - err = ccuda.cuArrayCreate(pHandle._ptr, cpAllocateArray_ptr) + cdef cydriver.CUDA_ARRAY_DESCRIPTOR* cypAllocateArray_ptr = pAllocateArray._ptr if pAllocateArray != None else NULL + err = cydriver.cuArrayCreate(pHandle._ptr, cypAllocateArray_ptr) return (CUresult(err), pHandle) {{endif}} 
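The rewritten wrappers all follow one shape: coerce the Python argument to its cydriver C type, call through, and return a tuple led by the CUresult. From the caller's side that looks as below; a minimal sketch against the renamed cuda.bindings.driver module, assuming cuInit has run and a context is current (sizes and formats are placeholders):

    from cuda.bindings import driver

    # Describe a 64x64 single-channel float CUDA array.
    desc = driver.CUDA_ARRAY_DESCRIPTOR()
    desc.Width = 64
    desc.Height = 64
    desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1

    # Every wrapper returns a tuple whose first element is the CUresult.
    err, arr = driver.cuArrayCreate(desc)
    assert err == driver.CUresult.CUDA_SUCCESS

    # The descriptor can be read back from the handle.
    err, got = driver.cuArrayGetDescriptor(arr)
    assert err == driver.CUresult.CUDA_SUCCESS and got.Width == 64

    err, = driver.cuArrayDestroy(arr)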
@@ -26012,17 +26012,17 @@ def cuArrayGetDescriptor(hArray): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaArrayGetInfo` """ - cdef ccuda.CUarray chArray + cdef cydriver.CUarray cyhArray if hArray is None: - chArray = 0 + cyhArray = 0 elif isinstance(hArray, (CUarray,)): phArray = int(hArray) - chArray = phArray + cyhArray = phArray else: phArray = int(CUarray(hArray)) - chArray = phArray + cyhArray = phArray cdef CUDA_ARRAY_DESCRIPTOR pArrayDescriptor = CUDA_ARRAY_DESCRIPTOR() - err = ccuda.cuArrayGetDescriptor(pArrayDescriptor._ptr, chArray) + err = cydriver.cuArrayGetDescriptor(pArrayDescriptor._ptr, cyhArray) return (CUresult(err), pArrayDescriptor) {{endif}} @@ -26066,17 +26066,17 @@ def cuArrayGetSparseProperties(array): -------- :py:obj:`~.cuMipmappedArrayGetSparseProperties`, :py:obj:`~.cuMemMapArrayAsync` """ - cdef ccuda.CUarray carray + cdef cydriver.CUarray cyarray if array is None: - carray = 0 + cyarray = 0 elif isinstance(array, (CUarray,)): parray = int(array) - carray = parray + cyarray = parray else: parray = int(CUarray(array)) - carray = parray + cyarray = parray cdef CUDA_ARRAY_SPARSE_PROPERTIES sparseProperties = CUDA_ARRAY_SPARSE_PROPERTIES() - err = ccuda.cuArrayGetSparseProperties(sparseProperties._ptr, carray) + err = cydriver.cuArrayGetSparseProperties(sparseProperties._ptr, cyarray) return (CUresult(err), sparseProperties) {{endif}} @@ -26122,17 +26122,17 @@ def cuMipmappedArrayGetSparseProperties(mipmap): -------- :py:obj:`~.cuArrayGetSparseProperties`, :py:obj:`~.cuMemMapArrayAsync` """ - cdef ccuda.CUmipmappedArray cmipmap + cdef cydriver.CUmipmappedArray cymipmap if mipmap is None: - cmipmap = 0 + cymipmap = 0 elif isinstance(mipmap, (CUmipmappedArray,)): pmipmap = int(mipmap) - cmipmap = pmipmap + cymipmap = pmipmap else: pmipmap = int(CUmipmappedArray(mipmap)) - cmipmap = pmipmap + cymipmap = pmipmap cdef CUDA_ARRAY_SPARSE_PROPERTIES sparseProperties = CUDA_ARRAY_SPARSE_PROPERTIES() - err = ccuda.cuMipmappedArrayGetSparseProperties(sparseProperties._ptr, cmipmap) + err = cydriver.cuMipmappedArrayGetSparseProperties(sparseProperties._ptr, cymipmap) return (CUresult(err), sparseProperties) {{endif}} @@ -26170,26 +26170,26 @@ def cuArrayGetMemoryRequirements(array, device): -------- :py:obj:`~.cuMipmappedArrayGetMemoryRequirements`, :py:obj:`~.cuMemMapArrayAsync` """ - cdef ccuda.CUdevice cdevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = 
pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - cdef ccuda.CUarray carray + cydevice = pdevice + cdef cydriver.CUarray cyarray if array is None: - carray = 0 + cyarray = 0 elif isinstance(array, (CUarray,)): parray = int(array) - carray = parray + cyarray = parray else: parray = int(CUarray(array)) - carray = parray + cyarray = parray cdef CUDA_ARRAY_MEMORY_REQUIREMENTS memoryRequirements = CUDA_ARRAY_MEMORY_REQUIREMENTS() - err = ccuda.cuArrayGetMemoryRequirements(memoryRequirements._ptr, carray, cdevice) + err = cydriver.cuArrayGetMemoryRequirements(memoryRequirements._ptr, cyarray, cydevice) return (CUresult(err), memoryRequirements) {{endif}} @@ -26228,26 +26228,26 @@ def cuMipmappedArrayGetMemoryRequirements(mipmap, device): -------- :py:obj:`~.cuArrayGetMemoryRequirements`, :py:obj:`~.cuMemMapArrayAsync` """ - cdef ccuda.CUdevice cdevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - cdef ccuda.CUmipmappedArray cmipmap + cydevice = pdevice + cdef cydriver.CUmipmappedArray cymipmap if mipmap is None: - cmipmap = 0 + cymipmap = 0 elif isinstance(mipmap, (CUmipmappedArray,)): pmipmap = int(mipmap) - cmipmap = pmipmap + cymipmap = pmipmap else: pmipmap = int(CUmipmappedArray(mipmap)) - cmipmap = pmipmap + cymipmap = pmipmap cdef CUDA_ARRAY_MEMORY_REQUIREMENTS memoryRequirements = CUDA_ARRAY_MEMORY_REQUIREMENTS() - err = ccuda.cuMipmappedArrayGetMemoryRequirements(memoryRequirements._ptr, cmipmap, cdevice) + err = cydriver.cuMipmappedArrayGetMemoryRequirements(memoryRequirements._ptr, cymipmap, cydevice) return (CUresult(err), memoryRequirements) {{endif}} @@ -26291,17 +26291,17 @@ def cuArrayGetPlane(hArray, unsigned int planeIdx): -------- :py:obj:`~.cuArrayCreate`, :py:obj:`~.cudaArrayGetPlane` """ - cdef ccuda.CUarray chArray + cdef cydriver.CUarray cyhArray if hArray is None: - chArray = 0 + cyhArray = 0 elif isinstance(hArray, (CUarray,)): phArray = int(hArray) - chArray = phArray + cyhArray = phArray else: phArray = int(CUarray(hArray)) - chArray = phArray + cyhArray = phArray cdef CUarray pPlaneArray = CUarray() - err = ccuda.cuArrayGetPlane(pPlaneArray._ptr, chArray, planeIdx) + err = cydriver.cuArrayGetPlane(pPlaneArray._ptr, cyhArray, planeIdx) return (CUresult(err), pPlaneArray) {{endif}} @@ -26327,16 +26327,16 @@ def cuArrayDestroy(hArray): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, 
:py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaFreeArray` """ - cdef ccuda.CUarray chArray + cdef cydriver.CUarray cyhArray if hArray is None: - chArray = 0 + cyhArray = 0 elif isinstance(hArray, (CUarray,)): phArray = int(hArray) - chArray = phArray + cyhArray = phArray else: phArray = int(CUarray(hArray)) - chArray = phArray - err = ccuda.cuArrayDestroy(chArray) + cyhArray = phArray + err = cydriver.cuArrayDestroy(cyhArray) return (CUresult(err),) {{endif}} @@ -26467,8 +26467,8 @@ def cuArray3DCreate(pAllocateArray : Optional[CUDA_ARRAY3D_DESCRIPTOR]): :py:obj:`~.cuArray3DGetDescriptor`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaMalloc3DArray` """ cdef CUarray pHandle = CUarray() - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR* cpAllocateArray_ptr = pAllocateArray._ptr if pAllocateArray != None else NULL - err = ccuda.cuArray3DCreate(pHandle._ptr, cpAllocateArray_ptr) + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR* cypAllocateArray_ptr = pAllocateArray._ptr if pAllocateArray != None else NULL + err = cydriver.cuArray3DCreate(pHandle._ptr, cypAllocateArray_ptr) return (CUresult(err), pHandle) {{endif}} @@ -26503,17 +26503,17 @@ def cuArray3DGetDescriptor(hArray): -------- :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArrayDestroy`, :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemAllocPitch`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DAsync`, :py:obj:`~.cuMemcpy2DUnaligned`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuMemcpy3DAsync`, :py:obj:`~.cuMemcpyAtoA`, :py:obj:`~.cuMemcpyAtoD`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpyDtoA`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpyDtoDAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemGetAddressRange`, :py:obj:`~.cuMemGetInfo`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`, :py:obj:`~.cudaArrayGetInfo` """ - cdef ccuda.CUarray chArray + cdef cydriver.CUarray cyhArray if hArray is None: - chArray = 0 + cyhArray = 0 elif isinstance(hArray, (CUarray,)): phArray = int(hArray) - chArray = phArray + cyhArray = phArray else: phArray = int(CUarray(hArray)) - chArray = phArray + cyhArray = phArray cdef CUDA_ARRAY3D_DESCRIPTOR pArrayDescriptor = 
CUDA_ARRAY3D_DESCRIPTOR() - err = ccuda.cuArray3DGetDescriptor(pArrayDescriptor._ptr, chArray) + err = cydriver.cuArray3DGetDescriptor(pArrayDescriptor._ptr, cyhArray) return (CUresult(err), pArrayDescriptor) {{endif}} @@ -26632,8 +26632,8 @@ def cuMipmappedArrayCreate(pMipmappedArrayDesc : Optional[CUDA_ARRAY3D_DESCRIPTO :py:obj:`~.cuMipmappedArrayDestroy`, :py:obj:`~.cuMipmappedArrayGetLevel`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cudaMallocMipmappedArray` """ cdef CUmipmappedArray pHandle = CUmipmappedArray() - cdef ccuda.CUDA_ARRAY3D_DESCRIPTOR* cpMipmappedArrayDesc_ptr = pMipmappedArrayDesc._ptr if pMipmappedArrayDesc != None else NULL - err = ccuda.cuMipmappedArrayCreate(pHandle._ptr, cpMipmappedArrayDesc_ptr, numMipmapLevels) + cdef cydriver.CUDA_ARRAY3D_DESCRIPTOR* cypMipmappedArrayDesc_ptr = pMipmappedArrayDesc._ptr if pMipmappedArrayDesc != None else NULL + err = cydriver.cuMipmappedArrayCreate(pHandle._ptr, cypMipmappedArrayDesc_ptr, numMipmapLevels) return (CUresult(err), pHandle) {{endif}} @@ -26667,17 +26667,17 @@ def cuMipmappedArrayGetLevel(hMipmappedArray, unsigned int level): -------- :py:obj:`~.cuMipmappedArrayCreate`, :py:obj:`~.cuMipmappedArrayDestroy`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cudaGetMipmappedArrayLevel` """ - cdef ccuda.CUmipmappedArray chMipmappedArray + cdef cydriver.CUmipmappedArray cyhMipmappedArray if hMipmappedArray is None: - chMipmappedArray = 0 + cyhMipmappedArray = 0 elif isinstance(hMipmappedArray, (CUmipmappedArray,)): phMipmappedArray = int(hMipmappedArray) - chMipmappedArray = phMipmappedArray + cyhMipmappedArray = phMipmappedArray else: phMipmappedArray = int(CUmipmappedArray(hMipmappedArray)) - chMipmappedArray = phMipmappedArray + cyhMipmappedArray = phMipmappedArray cdef CUarray pLevelArray = CUarray() - err = ccuda.cuMipmappedArrayGetLevel(pLevelArray._ptr, chMipmappedArray, level) + err = cydriver.cuMipmappedArrayGetLevel(pLevelArray._ptr, cyhMipmappedArray, level) return (CUresult(err), pLevelArray) {{endif}} @@ -26703,16 +26703,16 @@ def cuMipmappedArrayDestroy(hMipmappedArray): -------- :py:obj:`~.cuMipmappedArrayCreate`, :py:obj:`~.cuMipmappedArrayGetLevel`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cudaFreeMipmappedArray` """ - cdef ccuda.CUmipmappedArray chMipmappedArray + cdef cydriver.CUmipmappedArray cyhMipmappedArray if hMipmappedArray is None: - chMipmappedArray = 0 + cyhMipmappedArray = 0 elif isinstance(hMipmappedArray, (CUmipmappedArray,)): phMipmappedArray = int(hMipmappedArray) - chMipmappedArray = phMipmappedArray + cyhMipmappedArray = phMipmappedArray else: phMipmappedArray = int(CUmipmappedArray(hMipmappedArray)) - chMipmappedArray = phMipmappedArray - err = ccuda.cuMipmappedArrayDestroy(chMipmappedArray) + cyhMipmappedArray = phMipmappedArray + err = cydriver.cuMipmappedArrayDestroy(cyhMipmappedArray) return (CUresult(err),) {{endif}} @@ -26767,19 +26767,19 @@ def cuMemGetHandleForAddressRange(dptr, size_t size, handleType not None : CUmem handle : Any Pointer to the location where the returned handle will be stored. 
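For illustration, a minimal sketch that exports a device allocation as a dma_buf file descriptor; this path is Linux-only and needs driver support (otherwise CUDA_ERROR_NOT_SUPPORTED comes back), and the 64 KiB size here is a placeholder chosen to satisfy the host-page alignment requirements:

    from cuda.bindings import driver

    nbytes = 1 << 16  # placeholder; address and size must be host-page aligned
    err, dptr = driver.cuMemAlloc(nbytes)

    # Request a dma_buf fd for the range.
    err, fd = driver.cuMemGetHandleForAddressRange(
        dptr, nbytes,
        driver.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, 0)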
""" - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr + cydptr = pdptr cdef int handle = 0 - cdef void* chandle_ptr = &handle - cdef ccuda.CUmemRangeHandleType chandleType = handleType.value - err = ccuda.cuMemGetHandleForAddressRange(chandle_ptr, cdptr, size, chandleType, flags) + cdef void* cyhandle_ptr = &handle + cdef cydriver.CUmemRangeHandleType cyhandleType = handleType.value + err = cydriver.cuMemGetHandleForAddressRange(cyhandle_ptr, cydptr, size, cyhandleType, flags) return (CUresult(err), handle) {{endif}} @@ -26817,17 +26817,17 @@ def cuMemAddressReserve(size_t size, size_t alignment, addr, unsigned long long -------- :py:obj:`~.cuMemAddressFree` """ - cdef ccuda.CUdeviceptr caddr + cdef cydriver.CUdeviceptr cyaddr if addr is None: - caddr = 0 + cyaddr = 0 elif isinstance(addr, (CUdeviceptr,)): paddr = int(addr) - caddr = paddr + cyaddr = paddr else: paddr = int(CUdeviceptr(addr)) - caddr = paddr + cyaddr = paddr cdef CUdeviceptr ptr = CUdeviceptr() - err = ccuda.cuMemAddressReserve(ptr._ptr, size, alignment, caddr, flags) + err = cydriver.cuMemAddressReserve(ptr._ptr, size, alignment, cyaddr, flags) return (CUresult(err), ptr) {{endif}} @@ -26857,16 +26857,16 @@ def cuMemAddressFree(ptr, size_t size): -------- :py:obj:`~.cuMemAddressReserve` """ - cdef ccuda.CUdeviceptr cptr + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr - err = ccuda.cuMemAddressFree(cptr, size) + cyptr = pptr + err = cydriver.cuMemAddressFree(cyptr, size) return (CUresult(err),) {{endif}} @@ -26944,8 +26944,8 @@ def cuMemCreate(size_t size, prop : Optional[CUmemAllocationProp], unsigned long :py:obj:`~.cuMemRelease`, :py:obj:`~.cuMemExportToShareableHandle`, :py:obj:`~.cuMemImportFromShareableHandle` """ cdef CUmemGenericAllocationHandle handle = CUmemGenericAllocationHandle() - cdef ccuda.CUmemAllocationProp* cprop_ptr = prop._ptr if prop != None else NULL - err = ccuda.cuMemCreate(handle._ptr, size, cprop_ptr, flags) + cdef cydriver.CUmemAllocationProp* cyprop_ptr = prop._ptr if prop != None else NULL + err = cydriver.cuMemCreate(handle._ptr, size, cyprop_ptr, flags) return (CUresult(err), handle) {{endif}} @@ -26980,16 +26980,16 @@ def cuMemRelease(handle): -------- :py:obj:`~.cuMemCreate` """ - cdef ccuda.CUmemGenericAllocationHandle chandle + cdef cydriver.CUmemGenericAllocationHandle cyhandle if handle is None: - chandle = 0 + cyhandle = 0 elif isinstance(handle, (CUmemGenericAllocationHandle,)): phandle = int(handle) - chandle = phandle + cyhandle = phandle else: phandle = int(CUmemGenericAllocationHandle(handle)) - chandle = phandle - err = ccuda.cuMemRelease(chandle) + cyhandle = phandle + err = cydriver.cuMemRelease(cyhandle) return (CUresult(err),) {{endif}} @@ -27049,25 +27049,25 @@ def cuMemMap(ptr, size_t size, size_t offset, handle, unsigned long long flags): -------- :py:obj:`~.cuMemUnmap`, :py:obj:`~.cuMemSetAccess`, :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemAddressReserve`, :py:obj:`~.cuMemImportFromShareableHandle` """ - cdef ccuda.CUmemGenericAllocationHandle chandle + cdef cydriver.CUmemGenericAllocationHandle cyhandle if handle is None: - chandle = 0 + cyhandle = 0 elif isinstance(handle, (CUmemGenericAllocationHandle,)): phandle = 
int(handle) - chandle = phandle + cyhandle = phandle else: phandle = int(CUmemGenericAllocationHandle(handle)) - chandle = phandle - cdef ccuda.CUdeviceptr cptr + cyhandle = phandle + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr - err = ccuda.cuMemMap(cptr, size, offset, chandle, flags) + cyptr = pptr + err = cydriver.cuMemMap(cyptr, size, offset, cyhandle, flags) return (CUresult(err),) {{endif}} @@ -27217,29 +27217,29 @@ def cuMemMapArrayAsync(mapInfoList : Optional[Tuple[CUarrayMapInfo] | List[CUarr -------- :py:obj:`~.cuMipmappedArrayCreate`, :py:obj:`~.cuArrayCreate`, :py:obj:`~.cuArray3DCreate`, :py:obj:`~.cuMemCreate`, :py:obj:`~.cuArrayGetSparseProperties`, :py:obj:`~.cuMipmappedArrayGetSparseProperties` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream mapInfoList = [] if mapInfoList is None else mapInfoList if not all(isinstance(_x, (CUarrayMapInfo,)) for _x in mapInfoList): - raise TypeError("Argument 'mapInfoList' is not instance of type (expected Tuple[ccuda.CUarrayMapInfo,] or List[ccuda.CUarrayMapInfo,]") - cdef ccuda.CUarrayMapInfo* cmapInfoList = NULL + raise TypeError("Argument 'mapInfoList' is not instance of type (expected Tuple[cydriver.CUarrayMapInfo,] or List[cydriver.CUarrayMapInfo,]") + cdef cydriver.CUarrayMapInfo* cymapInfoList = NULL if len(mapInfoList) > 0: - cmapInfoList = calloc(len(mapInfoList), sizeof(ccuda.CUarrayMapInfo)) - if cmapInfoList is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(mapInfoList)) + 'x' + str(sizeof(ccuda.CUarrayMapInfo))) + cymapInfoList = calloc(len(mapInfoList), sizeof(cydriver.CUarrayMapInfo)) + if cymapInfoList is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(mapInfoList)) + 'x' + str(sizeof(cydriver.CUarrayMapInfo))) for idx in range(len(mapInfoList)): - string.memcpy(&cmapInfoList[idx], (mapInfoList[idx])._ptr, sizeof(ccuda.CUarrayMapInfo)) + string.memcpy(&cymapInfoList[idx], (mapInfoList[idx])._ptr, sizeof(cydriver.CUarrayMapInfo)) if count > len(mapInfoList): raise RuntimeError("List is too small: " + str(len(mapInfoList)) + " < " + str(count)) - err = ccuda.cuMemMapArrayAsync((mapInfoList[0])._ptr if len(mapInfoList) == 1 else cmapInfoList, count, chStream) - if cmapInfoList is not NULL: - free(cmapInfoList) + err = cydriver.cuMemMapArrayAsync((mapInfoList[0])._ptr if len(mapInfoList) == 1 else cymapInfoList, count, cyhStream) + if cymapInfoList is not NULL: + free(cymapInfoList) return (CUresult(err),) {{endif}} @@ -27278,16 +27278,16 @@ def cuMemUnmap(ptr, size_t size): -------- :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemAddressReserve` """ - cdef ccuda.CUdeviceptr cptr + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr - err = ccuda.cuMemUnmap(cptr, size) + cyptr = pptr + err = cydriver.cuMemUnmap(cyptr, size) return (CUresult(err),) {{endif}} @@ -27338,27 +27338,27 @@ def cuMemSetAccess(ptr, size_t size, desc : Optional[Tuple[CUmemAccessDesc] | Li """ desc = [] if desc is None else desc if 
not all(isinstance(_x, (CUmemAccessDesc,)) for _x in desc): - raise TypeError("Argument 'desc' is not instance of type (expected Tuple[ccuda.CUmemAccessDesc,] or List[ccuda.CUmemAccessDesc,]") - cdef ccuda.CUdeviceptr cptr + raise TypeError("Argument 'desc' is not instance of type (expected Tuple[cydriver.CUmemAccessDesc,] or List[cydriver.CUmemAccessDesc,]") + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr - cdef ccuda.CUmemAccessDesc* cdesc = NULL + cyptr = pptr + cdef cydriver.CUmemAccessDesc* cydesc = NULL if len(desc) > 0: - cdesc = calloc(len(desc), sizeof(ccuda.CUmemAccessDesc)) - if cdesc is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(desc)) + 'x' + str(sizeof(ccuda.CUmemAccessDesc))) + cydesc = calloc(len(desc), sizeof(cydriver.CUmemAccessDesc)) + if cydesc is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(desc)) + 'x' + str(sizeof(cydriver.CUmemAccessDesc))) for idx in range(len(desc)): - string.memcpy(&cdesc[idx], (desc[idx])._ptr, sizeof(ccuda.CUmemAccessDesc)) + string.memcpy(&cydesc[idx], (desc[idx])._ptr, sizeof(cydriver.CUmemAccessDesc)) if count > len(desc): raise RuntimeError("List is too small: " + str(len(desc)) + " < " + str(count)) - err = ccuda.cuMemSetAccess(cptr, size, (desc[0])._ptr if len(desc) == 1 else cdesc, count) - if cdesc is not NULL: - free(cdesc) + err = cydriver.cuMemSetAccess(cyptr, size, (desc[0])._ptr if len(desc) == 1 else cydesc, count) + if cydesc is not NULL: + free(cydesc) return (CUresult(err),) {{endif}} @@ -27386,18 +27386,18 @@ def cuMemGetAccess(location : Optional[CUmemLocation], ptr): -------- :py:obj:`~.cuMemSetAccess` """ - cdef ccuda.CUdeviceptr cptr + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr + cyptr = pptr cdef unsigned long long flags = 0 - cdef ccuda.CUmemLocation* clocation_ptr = location._ptr if location != None else NULL - err = ccuda.cuMemGetAccess(&flags, clocation_ptr, cptr) + cdef cydriver.CUmemLocation* cylocation_ptr = location._ptr if location != None else NULL + err = cydriver.cuMemGetAccess(&flags, cylocation_ptr, cyptr) return (CUresult(err), flags) {{endif}} @@ -27443,20 +27443,20 @@ def cuMemExportToShareableHandle(handle, handleType not None : CUmemAllocationHa -------- :py:obj:`~.cuMemImportFromShareableHandle` """ - cdef ccuda.CUmemGenericAllocationHandle chandle + cdef cydriver.CUmemGenericAllocationHandle cyhandle if handle is None: - chandle = 0 + cyhandle = 0 elif isinstance(handle, (CUmemGenericAllocationHandle,)): phandle = int(handle) - chandle = phandle + cyhandle = phandle else: phandle = int(CUmemGenericAllocationHandle(handle)) - chandle = phandle - cdef utils.HelperCUmemAllocationHandleType cshareableHandle = utils.HelperCUmemAllocationHandleType(handleType) - cdef void* cshareableHandle_ptr = cshareableHandle.cptr - cdef ccuda.CUmemAllocationHandleType chandleType = handleType.value - err = ccuda.cuMemExportToShareableHandle(cshareableHandle_ptr, chandle, chandleType, flags) - return (CUresult(err), cshareableHandle.pyObj()) + cyhandle = phandle + cdef utils.HelperCUmemAllocationHandleType cyshareableHandle = utils.HelperCUmemAllocationHandleType(handleType) + cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr + 
cdef cydriver.CUmemAllocationHandleType cyhandleType = handleType.value + err = cydriver.cuMemExportToShareableHandle(cyshareableHandle_ptr, cyhandle, cyhandleType, flags) + return (CUresult(err), cyshareableHandle.pyObj()) {{endif}} {{if 'cuMemImportFromShareableHandle' in found_functions}} @@ -27499,10 +27499,10 @@ def cuMemImportFromShareableHandle(osHandle, shHandleType not None : CUmemAlloca Importing shareable handles exported from some graphics APIs (Vulkan, OpenGL, etc.) created on devices under an SLI group may not be supported, and thus this API will return CUDA_ERROR_NOT_SUPPORTED. There is no guarantee that the contents of `handle` will be the same CUDA memory handle for the same given OS shareable handle, or the same underlying allocation. """ cdef CUmemGenericAllocationHandle handle = CUmemGenericAllocationHandle() - cosHandle = utils.HelperInputVoidPtr(osHandle) - cdef void* cosHandle_ptr = cosHandle.cptr - cdef ccuda.CUmemAllocationHandleType cshHandleType = shHandleType.value - err = ccuda.cuMemImportFromShareableHandle(handle._ptr, cosHandle_ptr, cshHandleType) + cyosHandle = utils.HelperInputVoidPtr(osHandle) + cdef void* cyosHandle_ptr = cyosHandle.cptr + cdef cydriver.CUmemAllocationHandleType cyshHandleType = shHandleType.value + err = cydriver.cuMemImportFromShareableHandle(handle._ptr, cyosHandle_ptr, cyshHandleType) return (CUresult(err), handle) {{endif}} @@ -27536,9 +27536,9 @@ def cuMemGetAllocationGranularity(prop : Optional[CUmemAllocationProp], option n :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemMap` """ cdef size_t granularity = 0 - cdef ccuda.CUmemAllocationProp* cprop_ptr = prop._ptr if prop != None else NULL - cdef ccuda.CUmemAllocationGranularity_flags coption = option.value - err = ccuda.cuMemGetAllocationGranularity(&granularity, cprop_ptr, coption) + cdef cydriver.CUmemAllocationProp* cyprop_ptr = prop._ptr if prop != None else NULL + cdef cydriver.CUmemAllocationGranularity_flags cyoption = option.value + err = cydriver.cuMemGetAllocationGranularity(&granularity, cyprop_ptr, cyoption) return (CUresult(err), granularity) {{endif}} @@ -27565,17 +27565,17 @@ def cuMemGetAllocationPropertiesFromHandle(handle): -------- :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemImportFromShareableHandle` """ - cdef ccuda.CUmemGenericAllocationHandle chandle + cdef cydriver.CUmemGenericAllocationHandle cyhandle if handle is None: - chandle = 0 + cyhandle = 0 elif isinstance(handle, (CUmemGenericAllocationHandle,)): phandle = int(handle) - chandle = phandle + cyhandle = phandle else: phandle = int(CUmemGenericAllocationHandle(handle)) - chandle = phandle + cyhandle = phandle cdef CUmemAllocationProp prop = CUmemAllocationProp() - err = ccuda.cuMemGetAllocationPropertiesFromHandle(prop._ptr, chandle) + err = cydriver.cuMemGetAllocationPropertiesFromHandle(prop._ptr, cyhandle) return (CUresult(err), prop) {{endif}} @@ -27611,9 +27611,9 @@ def cuMemRetainAllocationHandle(addr): The address `addr` can be any address in a range previously mapped by :py:obj:`~.cuMemMap`, and not necessarily the start address.
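To make the surrounding family of wrappers concrete: a minimal sketch of the whole virtual-memory flow (cuMemCreate, cuMemAddressReserve, cuMemMap, cuMemSetAccess, then handle retrieval), assuming device 0 with a current context; the ordinal and the mid-range probe address are illustrative only:

    from cuda.bindings import driver

    prop = driver.CUmemAllocationProp()
    prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0  # illustrative: device 0

    # Physical allocations must be a multiple of the minimum granularity.
    err, gran = driver.cuMemGetAllocationGranularity(
        prop, driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
    size = gran

    err, handle = driver.cuMemCreate(size, prop, 0)       # physical memory
    err, ptr = driver.cuMemAddressReserve(size, 0, 0, 0)  # virtual range
    err, = driver.cuMemMap(ptr, size, 0, handle, 0)       # back the range

    access = driver.CUmemAccessDesc()
    access.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    access.location.id = 0
    access.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
    err, = driver.cuMemSetAccess(ptr, size, [access], 1)

    # Any address inside the mapped range recovers the owning handle; the
    # retained handle must be released in addition to the original one.
    err, retained = driver.cuMemRetainAllocationHandle(int(ptr) + size // 2)
    err, = driver.cuMemRelease(retained)

    err, = driver.cuMemUnmap(ptr, size)
    err, = driver.cuMemAddressFree(ptr, size)
    err, = driver.cuMemRelease(handle)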
""" cdef CUmemGenericAllocationHandle handle = CUmemGenericAllocationHandle() - caddr = utils.HelperInputVoidPtr(addr) - cdef void* caddr_ptr = caddr.cptr - err = ccuda.cuMemRetainAllocationHandle(handle._ptr, caddr_ptr) + cyaddr = utils.HelperInputVoidPtr(addr) + cdef void* cyaddr_ptr = cyaddr.cptr + err = cydriver.cuMemRetainAllocationHandle(handle._ptr, cyaddr_ptr) return (CUresult(err), handle) {{endif}} @@ -27644,25 +27644,25 @@ def cuMemFreeAsync(dptr, hStream): ----- During stream capture, this function results in the creation of a free node and must therefore be passed the address of a graph allocation. """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdptr + cyhStream = phStream + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - err = ccuda.cuMemFreeAsync(cdptr, chStream) + cydptr = pdptr + err = cydriver.cuMemFreeAsync(cydptr, cyhStream) return (CUresult(err),) {{endif}} @@ -27704,17 +27704,17 @@ def cuMemAllocAsync(size_t bytesize, hStream): During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters. """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef CUdeviceptr dptr = CUdeviceptr() - err = ccuda.cuMemAllocAsync(dptr._ptr, bytesize, chStream) + err = cydriver.cuMemAllocAsync(dptr._ptr, bytesize, cyhStream) return (CUresult(err), dptr) {{endif}} @@ -27755,16 +27755,16 @@ def cuMemPoolTrimTo(pool, size_t minBytesToKeep): : Allocations that have been asynchronously freed but whose completion has not been observed on the host (eg. by a synchronize) can count as outstanding. 
""" - cdef ccuda.CUmemoryPool cpool + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool - err = ccuda.cuMemPoolTrimTo(cpool, minBytesToKeep) + cypool = ppool + err = cydriver.cuMemPoolTrimTo(cypool, minBytesToKeep) return (CUresult(err),) {{endif}} @@ -27827,19 +27827,19 @@ def cuMemPoolSetAttribute(pool, attr not None : CUmemPool_attribute, value): -------- :py:obj:`~.cuMemAllocAsync`, :py:obj:`~.cuMemFreeAsync`, :py:obj:`~.cuDeviceGetDefaultMemPool`, :py:obj:`~.cuDeviceGetMemPool`, :py:obj:`~.cuMemPoolCreate` """ - cdef ccuda.CUmemoryPool cpool + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool - cdef ccuda.CUmemPool_attribute cattr = attr.value - cdef utils.HelperCUmemPool_attribute cvalue = utils.HelperCUmemPool_attribute(attr, value, is_getter=False) - cdef void* cvalue_ptr = cvalue.cptr - err = ccuda.cuMemPoolSetAttribute(cpool, cattr, cvalue_ptr) + cypool = ppool + cdef cydriver.CUmemPool_attribute cyattr = attr.value + cdef utils.HelperCUmemPool_attribute cyvalue = utils.HelperCUmemPool_attribute(attr, value, is_getter=False) + cdef void* cyvalue_ptr = cyvalue.cptr + err = cydriver.cuMemPoolSetAttribute(cypool, cyattr, cyvalue_ptr) return (CUresult(err),) {{endif}} @@ -27909,20 +27909,20 @@ def cuMemPoolGetAttribute(pool, attr not None : CUmemPool_attribute): -------- :py:obj:`~.cuMemAllocAsync`, :py:obj:`~.cuMemFreeAsync`, :py:obj:`~.cuDeviceGetDefaultMemPool`, :py:obj:`~.cuDeviceGetMemPool`, :py:obj:`~.cuMemPoolCreate` """ - cdef ccuda.CUmemoryPool cpool + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool - cdef ccuda.CUmemPool_attribute cattr = attr.value - cdef utils.HelperCUmemPool_attribute cvalue = utils.HelperCUmemPool_attribute(attr, 0, is_getter=True) - cdef void* cvalue_ptr = cvalue.cptr - err = ccuda.cuMemPoolGetAttribute(cpool, cattr, cvalue_ptr) - return (CUresult(err), cvalue.pyObj()) + cypool = ppool + cdef cydriver.CUmemPool_attribute cyattr = attr.value + cdef utils.HelperCUmemPool_attribute cyvalue = utils.HelperCUmemPool_attribute(attr, 0, is_getter=True) + cdef void* cyvalue_ptr = cyvalue.cptr + err = cydriver.cuMemPoolGetAttribute(cypool, cyattr, cyvalue_ptr) + return (CUresult(err), cyvalue.pyObj()) {{endif}} {{if 'cuMemPoolSetAccess' in found_functions}} @@ -27952,27 +27952,27 @@ def cuMemPoolSetAccess(pool, map : Optional[Tuple[CUmemAccessDesc] | List[CUmemA """ map = [] if map is None else map if not all(isinstance(_x, (CUmemAccessDesc,)) for _x in map): - raise TypeError("Argument 'map' is not instance of type (expected Tuple[ccuda.CUmemAccessDesc,] or List[ccuda.CUmemAccessDesc,]") - cdef ccuda.CUmemoryPool cpool + raise TypeError("Argument 'map' is not instance of type (expected Tuple[cydriver.CUmemAccessDesc,] or List[cydriver.CUmemAccessDesc,]") + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool - cdef ccuda.CUmemAccessDesc* cmap = NULL + cypool = ppool + cdef 
cydriver.CUmemAccessDesc* cymap = NULL if len(map) > 0: - cmap = calloc(len(map), sizeof(ccuda.CUmemAccessDesc)) - if cmap is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(map)) + 'x' + str(sizeof(ccuda.CUmemAccessDesc))) + cymap = calloc(len(map), sizeof(cydriver.CUmemAccessDesc)) + if cymap is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(map)) + 'x' + str(sizeof(cydriver.CUmemAccessDesc))) for idx in range(len(map)): - string.memcpy(&cmap[idx], (map[idx])._ptr, sizeof(ccuda.CUmemAccessDesc)) + string.memcpy(&cymap[idx], (map[idx])._ptr, sizeof(cydriver.CUmemAccessDesc)) if count > len(map): raise RuntimeError("List is too small: " + str(len(map)) + " < " + str(count)) - err = ccuda.cuMemPoolSetAccess(cpool, (map[0])._ptr if len(map) == 1 else cmap, count) - if cmap is not NULL: - free(cmap) + err = cydriver.cuMemPoolSetAccess(cypool, (map[0])._ptr if len(map) == 1 else cymap, count) + if cymap is not NULL: + free(cymap) return (CUresult(err),) {{endif}} @@ -28003,18 +28003,18 @@ def cuMemPoolGetAccess(memPool, location : Optional[CUmemLocation]): -------- :py:obj:`~.cuMemAllocAsync`, :py:obj:`~.cuMemFreeAsync`, :py:obj:`~.cuDeviceGetDefaultMemPool`, :py:obj:`~.cuDeviceGetMemPool`, :py:obj:`~.cuMemPoolCreate` """ - cdef ccuda.CUmemoryPool cmemPool + cdef cydriver.CUmemoryPool cymemPool if memPool is None: - cmemPool = 0 + cymemPool = 0 elif isinstance(memPool, (CUmemoryPool,)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(CUmemoryPool(memPool)) - cmemPool = pmemPool - cdef ccuda.CUmemAccess_flags flags - cdef ccuda.CUmemLocation* clocation_ptr = location._ptr if location != None else NULL - err = ccuda.cuMemPoolGetAccess(&flags, cmemPool, clocation_ptr) + cymemPool = pmemPool + cdef cydriver.CUmemAccess_flags flags + cdef cydriver.CUmemLocation* cylocation_ptr = location._ptr if location != None else NULL + err = cydriver.cuMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) return (CUresult(err), CUmemAccess_flags(flags)) {{endif}} @@ -28080,8 +28080,8 @@ def cuMemPoolCreate(poolProps : Optional[CUmemPoolProps]): Specifying CU_MEM_HANDLE_TYPE_NONE creates a memory pool that will not support IPC. """ cdef CUmemoryPool pool = CUmemoryPool() - cdef ccuda.CUmemPoolProps* cpoolProps_ptr = poolProps._ptr if poolProps != None else NULL - err = ccuda.cuMemPoolCreate(pool._ptr, cpoolProps_ptr) + cdef cydriver.CUmemPoolProps* cypoolProps_ptr = poolProps._ptr if poolProps != None else NULL + err = cydriver.cuMemPoolCreate(pool._ptr, cypoolProps_ptr) return (CUresult(err), pool) {{endif}} @@ -28118,16 +28118,16 @@ def cuMemPoolDestroy(pool): ----- A device's default memory pool cannot be destroyed. """ - cdef ccuda.CUmemoryPool cpool + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool - err = ccuda.cuMemPoolDestroy(cpool) + cypool = ppool + err = cydriver.cuMemPoolDestroy(cypool) return (CUresult(err),) {{endif}} @@ -28166,26 +28166,26 @@ def cuMemAllocFromPoolAsync(size_t bytesize, pool, hStream): ----- During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters. 
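For illustration, a minimal sketch that allocates from an explicitly created pool rather than the device default; the ordinal, size and flags are placeholders and a current context on device 0 is assumed:

    from cuda.bindings import driver

    props = driver.CUmemPoolProps()
    props.allocType = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    props.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    props.location.id = 0  # placeholder: device 0

    err, pool = driver.cuMemPoolCreate(props)
    err, stream = driver.cuStreamCreate(0)

    # Stream-ordered allocation owned by `pool` rather than the default pool.
    err, dptr = driver.cuMemAllocFromPoolAsync(1 << 20, pool, stream)
    err, = driver.cuMemFreeAsync(dptr, stream)
    err, = driver.cuStreamSynchronize(stream)

    err, = driver.cuStreamDestroy(stream)
    err, = driver.cuMemPoolDestroy(pool)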
""" - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUmemoryPool cpool + cyhStream = phStream + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool + cypool = ppool cdef CUdeviceptr dptr = CUdeviceptr() - err = ccuda.cuMemAllocFromPoolAsync(dptr._ptr, bytesize, cpool, chStream) + err = cydriver.cuMemAllocFromPoolAsync(dptr._ptr, bytesize, cypool, cyhStream) return (CUresult(err), dptr) {{endif}} @@ -28228,20 +28228,20 @@ def cuMemPoolExportToShareableHandle(pool, handleType not None : CUmemAllocation ----- : To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than CU_MEM_HANDLE_TYPE_NONE. """ - cdef ccuda.CUmemoryPool cpool + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool - cdef utils.HelperCUmemAllocationHandleType chandle_out = utils.HelperCUmemAllocationHandleType(handleType) - cdef void* chandle_out_ptr = chandle_out.cptr - cdef ccuda.CUmemAllocationHandleType chandleType = handleType.value - err = ccuda.cuMemPoolExportToShareableHandle(chandle_out_ptr, cpool, chandleType, flags) - return (CUresult(err), chandle_out.pyObj()) + cypool = ppool + cdef utils.HelperCUmemAllocationHandleType cyhandle_out = utils.HelperCUmemAllocationHandleType(handleType) + cdef void* cyhandle_out_ptr = cyhandle_out.cptr + cdef cydriver.CUmemAllocationHandleType cyhandleType = handleType.value + err = cydriver.cuMemPoolExportToShareableHandle(cyhandle_out_ptr, cypool, cyhandleType, flags) + return (CUresult(err), cyhandle_out.pyObj()) {{endif}} {{if 'cuMemPoolImportFromShareableHandle' in found_functions}} @@ -28283,10 +28283,10 @@ def cuMemPoolImportFromShareableHandle(handle, handleType not None : CUmemAlloca Imported memory pools do not support creating new allocations. As such imported memory pools may not be used in cuDeviceSetMemPool or :py:obj:`~.cuMemAllocFromPoolAsync` calls. 
""" cdef CUmemoryPool pool_out = CUmemoryPool() - chandle = utils.HelperInputVoidPtr(handle) - cdef void* chandle_ptr = chandle.cptr - cdef ccuda.CUmemAllocationHandleType chandleType = handleType.value - err = ccuda.cuMemPoolImportFromShareableHandle(pool_out._ptr, chandle_ptr, chandleType, flags) + cyhandle = utils.HelperInputVoidPtr(handle) + cdef void* cyhandle_ptr = cyhandle.cptr + cdef cydriver.CUmemAllocationHandleType cyhandleType = handleType.value + err = cydriver.cuMemPoolImportFromShareableHandle(pool_out._ptr, cyhandle_ptr, cyhandleType, flags) return (CUresult(err), pool_out) {{endif}} @@ -28317,17 +28317,17 @@ def cuMemPoolExportPointer(ptr): -------- :py:obj:`~.cuMemPoolExportToShareableHandle`, :py:obj:`~.cuMemPoolImportFromShareableHandle`, :py:obj:`~.cuMemPoolImportPointer` """ - cdef ccuda.CUdeviceptr cptr + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr + cyptr = pptr cdef CUmemPoolPtrExportData shareData_out = CUmemPoolPtrExportData() - err = ccuda.cuMemPoolExportPointer(shareData_out._ptr, cptr) + err = cydriver.cuMemPoolExportPointer(shareData_out._ptr, cyptr) return (CUresult(err), shareData_out) {{endif}} @@ -28367,18 +28367,18 @@ def cuMemPoolImportPointer(pool, shareData : Optional[CUmemPoolPtrExportData]): ----- The cuMemFreeAsync api may be used in the exporting process before the cuMemFreeAsync operation completes in its stream as long as the cuMemFreeAsync in the exporting process specifies a stream with a stream dependency on the importing process's cuMemFreeAsync. """ - cdef ccuda.CUmemoryPool cpool + cdef cydriver.CUmemoryPool cypool if pool is None: - cpool = 0 + cypool = 0 elif isinstance(pool, (CUmemoryPool,)): ppool = int(pool) - cpool = ppool + cypool = ppool else: ppool = int(CUmemoryPool(pool)) - cpool = ppool + cypool = ppool cdef CUdeviceptr ptr_out = CUdeviceptr() - cdef ccuda.CUmemPoolPtrExportData* cshareData_ptr = shareData._ptr if shareData != None else NULL - err = ccuda.cuMemPoolImportPointer(ptr_out._ptr, cpool, cshareData_ptr) + cdef cydriver.CUmemPoolPtrExportData* cyshareData_ptr = shareData._ptr if shareData != None else NULL + err = cydriver.cuMemPoolImportPointer(ptr_out._ptr, cypool, cyshareData_ptr) return (CUresult(err), ptr_out) {{endif}} @@ -28435,8 +28435,8 @@ def cuMulticastCreate(prop : Optional[CUmulticastObjectProp]): :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemRelease`, :py:obj:`~.cuMemExportToShareableHandle`, :py:obj:`~.cuMemImportFromShareableHandle` """ cdef CUmemGenericAllocationHandle mcHandle = CUmemGenericAllocationHandle() - cdef ccuda.CUmulticastObjectProp* cprop_ptr = prop._ptr if prop != None else NULL - err = ccuda.cuMulticastCreate(mcHandle._ptr, cprop_ptr) + cdef cydriver.CUmulticastObjectProp* cyprop_ptr = prop._ptr if prop != None else NULL + err = cydriver.cuMulticastCreate(mcHandle._ptr, cyprop_ptr) return (CUresult(err), mcHandle) {{endif}} @@ -28475,25 +28475,25 @@ def cuMulticastAddDevice(mcHandle, dev): -------- :py:obj:`~.cuMulticastCreate`, :py:obj:`~.cuMulticastBindMem`, :py:obj:`~.cuMulticastBindAddr` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - cdef ccuda.CUmemGenericAllocationHandle cmcHandle + cydev = pdev + cdef cydriver.CUmemGenericAllocationHandle cymcHandle 
if mcHandle is None: - cmcHandle = 0 + cymcHandle = 0 elif isinstance(mcHandle, (CUmemGenericAllocationHandle,)): pmcHandle = int(mcHandle) - cmcHandle = pmcHandle + cymcHandle = pmcHandle else: pmcHandle = int(CUmemGenericAllocationHandle(mcHandle)) - cmcHandle = pmcHandle - err = ccuda.cuMulticastAddDevice(cmcHandle, cdev) + cymcHandle = pmcHandle + err = cydriver.cuMulticastAddDevice(cymcHandle, cydev) return (CUresult(err),) {{endif}} @@ -28551,25 +28551,25 @@ def cuMulticastBindMem(mcHandle, size_t mcOffset, memHandle, size_t memOffset, s -------- :py:obj:`~.cuMulticastCreate`, :py:obj:`~.cuMulticastAddDevice`, :py:obj:`~.cuMemCreate` """ - cdef ccuda.CUmemGenericAllocationHandle cmemHandle + cdef cydriver.CUmemGenericAllocationHandle cymemHandle if memHandle is None: - cmemHandle = 0 + cymemHandle = 0 elif isinstance(memHandle, (CUmemGenericAllocationHandle,)): pmemHandle = int(memHandle) - cmemHandle = pmemHandle + cymemHandle = pmemHandle else: pmemHandle = int(CUmemGenericAllocationHandle(memHandle)) - cmemHandle = pmemHandle - cdef ccuda.CUmemGenericAllocationHandle cmcHandle + cymemHandle = pmemHandle + cdef cydriver.CUmemGenericAllocationHandle cymcHandle if mcHandle is None: - cmcHandle = 0 + cymcHandle = 0 elif isinstance(mcHandle, (CUmemGenericAllocationHandle,)): pmcHandle = int(mcHandle) - cmcHandle = pmcHandle + cymcHandle = pmcHandle else: pmcHandle = int(CUmemGenericAllocationHandle(mcHandle)) - cmcHandle = pmcHandle - err = ccuda.cuMulticastBindMem(cmcHandle, mcOffset, cmemHandle, memOffset, size, flags) + cymcHandle = pmcHandle + err = cydriver.cuMulticastBindMem(cymcHandle, mcOffset, cymemHandle, memOffset, size, flags) return (CUresult(err),) {{endif}} @@ -28623,25 +28623,25 @@ def cuMulticastBindAddr(mcHandle, size_t mcOffset, memptr, size_t size, unsigned -------- :py:obj:`~.cuMulticastCreate`, :py:obj:`~.cuMulticastAddDevice`, :py:obj:`~.cuMemCreate` """ - cdef ccuda.CUdeviceptr cmemptr + cdef cydriver.CUdeviceptr cymemptr if memptr is None: - cmemptr = 0 + cymemptr = 0 elif isinstance(memptr, (CUdeviceptr,)): pmemptr = int(memptr) - cmemptr = pmemptr + cymemptr = pmemptr else: pmemptr = int(CUdeviceptr(memptr)) - cmemptr = pmemptr - cdef ccuda.CUmemGenericAllocationHandle cmcHandle + cymemptr = pmemptr + cdef cydriver.CUmemGenericAllocationHandle cymcHandle if mcHandle is None: - cmcHandle = 0 + cymcHandle = 0 elif isinstance(mcHandle, (CUmemGenericAllocationHandle,)): pmcHandle = int(mcHandle) - cmcHandle = pmcHandle + cymcHandle = pmcHandle else: pmcHandle = int(CUmemGenericAllocationHandle(mcHandle)) - cmcHandle = pmcHandle - err = ccuda.cuMulticastBindAddr(cmcHandle, mcOffset, cmemptr, size, flags) + cymcHandle = pmcHandle + err = cydriver.cuMulticastBindAddr(cymcHandle, mcOffset, cymemptr, size, flags) return (CUresult(err),) {{endif}} @@ -28683,25 +28683,25 @@ def cuMulticastUnbind(mcHandle, dev, size_t mcOffset, size_t size): ----- Warning: The `mcOffset` and the `size` must match the corresponding values specified during the bind call. Any other values may result in undefined behavior. 
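A hedged sketch of the bind/unbind pairing that warning refers to, assuming a multicast-capable driver and a physical allocation mem_handle of nbytes bytes obtained from cuMemCreate (the second device and granularity rounding are omitted for brevity):

    prop = driver.CUmulticastObjectProp()
    prop.numDevices = 2
    prop.size = nbytes
    prop.handleTypes = driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR
    err, granularity = driver.cuMulticastGetGranularity(
        prop, driver.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_RECOMMENDED)
    check(err)  # nbytes must be a multiple of this granularity
    err, mcHandle = driver.cuMulticastCreate(prop); check(err)
    check(driver.cuMulticastAddDevice(mcHandle, dev)[0])
    check(driver.cuMulticastBindMem(mcHandle, 0, mem_handle, 0, nbytes, 0)[0])
    # ... use the multicast object ...
    # mcOffset (0) and size (nbytes) must match the bind call exactly:
    check(driver.cuMulticastUnbind(mcHandle, dev, 0, nbytes)[0])
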
""" - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - cdef ccuda.CUmemGenericAllocationHandle cmcHandle + cydev = pdev + cdef cydriver.CUmemGenericAllocationHandle cymcHandle if mcHandle is None: - cmcHandle = 0 + cymcHandle = 0 elif isinstance(mcHandle, (CUmemGenericAllocationHandle,)): pmcHandle = int(mcHandle) - cmcHandle = pmcHandle + cymcHandle = pmcHandle else: pmcHandle = int(CUmemGenericAllocationHandle(mcHandle)) - cmcHandle = pmcHandle - err = ccuda.cuMulticastUnbind(cmcHandle, cdev, mcOffset, size) + cymcHandle = pmcHandle + err = cydriver.cuMulticastUnbind(cymcHandle, cydev, mcOffset, size) return (CUresult(err),) {{endif}} @@ -28735,9 +28735,9 @@ def cuMulticastGetGranularity(prop : Optional[CUmulticastObjectProp], option not :py:obj:`~.cuMulticastCreate`, :py:obj:`~.cuMulticastBindMem`, :py:obj:`~.cuMulticastBindAddr`, :py:obj:`~.cuMulticastUnbind` """ cdef size_t granularity = 0 - cdef ccuda.CUmulticastObjectProp* cprop_ptr = prop._ptr if prop != None else NULL - cdef ccuda.CUmulticastGranularity_flags coption = option.value - err = ccuda.cuMulticastGetGranularity(&granularity, cprop_ptr, coption) + cdef cydriver.CUmulticastObjectProp* cyprop_ptr = prop._ptr if prop != None else NULL + cdef cydriver.CUmulticastGranularity_flags cyoption = option.value + err = cydriver.cuMulticastGetGranularity(&granularity, cyprop_ptr, cyoption) return (CUresult(err), granularity) {{endif}} @@ -28930,20 +28930,20 @@ def cuPointerGetAttribute(attribute not None : CUpointer_attribute, ptr): -------- :py:obj:`~.cuPointerSetAttribute`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostRegister`, :py:obj:`~.cuMemHostUnregister`, :py:obj:`~.cudaPointerGetAttributes` """ - cdef ccuda.CUdeviceptr cptr + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr - cdef utils.HelperCUpointer_attribute cdata = utils.HelperCUpointer_attribute(attribute, 0, is_getter=True) - cdef void* cdata_ptr = cdata.cptr - cdef ccuda.CUpointer_attribute cattribute = attribute.value - err = ccuda.cuPointerGetAttribute(cdata_ptr, cattribute, cptr) - return (CUresult(err), cdata.pyObj()) + cyptr = pptr + cdef utils.HelperCUpointer_attribute cydata = utils.HelperCUpointer_attribute(attribute, 0, is_getter=True) + cdef void* cydata_ptr = cydata.cptr + cdef cydriver.CUpointer_attribute cyattribute = attribute.value + err = cydriver.cuPointerGetAttribute(cydata_ptr, cyattribute, cyptr) + return (CUresult(err), cydata.pyObj()) {{endif}} {{if 'cuMemPrefetchAsync' in found_functions}} @@ -29034,34 +29034,34 @@ def cuMemPrefetchAsync(devPtr, size_t count, dstDevice, hStream): -------- :py:obj:`~.cuMemcpy`, :py:obj:`~.cuMemcpyPeer`, :py:obj:`~.cuMemcpyAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cuMemAdvise`, :py:obj:`~.cuMemPrefetchAsync` :py:obj:`~.cudaMemPrefetchAsync_v2` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdevice cdstDevice + cyhStream = 
phStream + cdef cydriver.CUdevice cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdevice,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdevice(dstDevice)) - cdstDevice = pdstDevice - cdef ccuda.CUdeviceptr cdevPtr + cydstDevice = pdstDevice + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 elif isinstance(devPtr, (CUdeviceptr,)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr - err = ccuda.cuMemPrefetchAsync(cdevPtr, count, cdstDevice, chStream) + cydevPtr = pdevPtr + err = cydriver.cuMemPrefetchAsync(cydevPtr, count, cydstDevice, cyhStream) return (CUresult(err),) {{endif}} @@ -29166,25 +29166,25 @@ def cuMemPrefetchAsync_v2(devPtr, size_t count, location not None : CUmemLocatio -------- :py:obj:`~.cuMemcpy`, :py:obj:`~.cuMemcpyPeer`, :py:obj:`~.cuMemcpyAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cuMemAdvise`, :py:obj:`~.cuMemPrefetchAsync` :py:obj:`~.cudaMemPrefetchAsync_v2` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUdeviceptr cdevPtr + cyhStream = phStream + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 elif isinstance(devPtr, (CUdeviceptr,)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr - err = ccuda.cuMemPrefetchAsync_v2(cdevPtr, count, location._ptr[0], flags, chStream) + cydevPtr = pdevPtr + err = cydriver.cuMemPrefetchAsync_v2(cydevPtr, count, location._ptr[0], flags, cyhStream) return (CUresult(err),) {{endif}} @@ -29351,26 +29351,26 @@ def cuMemAdvise(devPtr, size_t count, advice not None : CUmem_advise, device): -------- :py:obj:`~.cuMemcpy`, :py:obj:`~.cuMemcpyPeer`, :py:obj:`~.cuMemcpyAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cuMemPrefetchAsync`, :py:obj:`~.cuMemAdvise_v2` :py:obj:`~.cudaMemAdvise` """ - cdef ccuda.CUdevice cdevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - cdef ccuda.CUdeviceptr cdevPtr + cydevice = pdevice + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 elif isinstance(devPtr, (CUdeviceptr,)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr - cdef ccuda.CUmem_advise cadvice = advice.value - err = ccuda.cuMemAdvise(cdevPtr, count, cadvice, cdevice) + cydevPtr = pdevPtr + cdef cydriver.CUmem_advise cyadvice = advice.value + err = cydriver.cuMemAdvise(cydevPtr, count, cyadvice, cydevice) return (CUresult(err),) {{endif}} @@ -29566,17 +29566,17 @@ def cuMemAdvise_v2(devPtr, size_t count, advice not None : CUmem_advise, locatio -------- :py:obj:`~.cuMemcpy`, :py:obj:`~.cuMemcpyPeer`, :py:obj:`~.cuMemcpyAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`, :py:obj:`~.cuMemPrefetchAsync`, :py:obj:`~.cuMemAdvise` :py:obj:`~.cudaMemAdvise` """ - cdef ccuda.CUdeviceptr cdevPtr + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 
elif isinstance(devPtr, (CUdeviceptr,)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr - cdef ccuda.CUmem_advise cadvice = advice.value - err = ccuda.cuMemAdvise_v2(cdevPtr, count, cadvice, location._ptr[0]) + cydevPtr = pdevPtr + cdef cydriver.CUmem_advise cyadvice = advice.value + err = cydriver.cuMemAdvise_v2(cydevPtr, count, cyadvice, location._ptr[0]) return (CUresult(err),) {{endif}} @@ -29720,20 +29720,20 @@ def cuMemRangeGetAttribute(size_t dataSize, attribute not None : CUmem_range_att -------- :py:obj:`~.cuMemRangeGetAttributes`, :py:obj:`~.cuMemPrefetchAsync`, :py:obj:`~.cuMemAdvise`, :py:obj:`~.cudaMemRangeGetAttribute` """ - cdef ccuda.CUdeviceptr cdevPtr + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 elif isinstance(devPtr, (CUdeviceptr,)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr - cdef utils.HelperCUmem_range_attribute cdata = utils.HelperCUmem_range_attribute(attribute, dataSize) - cdef void* cdata_ptr = cdata.cptr - cdef ccuda.CUmem_range_attribute cattribute = attribute.value - err = ccuda.cuMemRangeGetAttribute(cdata_ptr, dataSize, cattribute, cdevPtr, count) - return (CUresult(err), cdata.pyObj()) + cydevPtr = pdevPtr + cdef utils.HelperCUmem_range_attribute cydata = utils.HelperCUmem_range_attribute(attribute, dataSize) + cdef void* cydata_ptr = cydata.cptr + cdef cydriver.CUmem_range_attribute cyattribute = attribute.value + err = cydriver.cuMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr, count) + return (CUresult(err), cydata.pyObj()) {{endif}} {{if 'cuMemRangeGetAttributes' in found_functions}} @@ -29796,28 +29796,28 @@ def cuMemRangeGetAttributes(dataSizes : Tuple[int] | List[int], attributes : Opt -------- :py:obj:`~.cuMemRangeGetAttribute`, :py:obj:`~.cuMemAdvise`, :py:obj:`~.cuMemPrefetchAsync`, :py:obj:`~.cudaMemRangeGetAttributes` """ - cdef ccuda.CUdeviceptr cdevPtr + cdef cydriver.CUdeviceptr cydevPtr if devPtr is None: - cdevPtr = 0 + cydevPtr = 0 elif isinstance(devPtr, (CUdeviceptr,)): pdevPtr = int(devPtr) - cdevPtr = pdevPtr + cydevPtr = pdevPtr else: pdevPtr = int(CUdeviceptr(devPtr)) - cdevPtr = pdevPtr + cydevPtr = pdevPtr attributes = [] if attributes is None else attributes if not all(isinstance(_x, (CUmem_range_attribute)) for _x in attributes): - raise TypeError("Argument 'attributes' is not instance of type (expected Tuple[ccuda.CUmem_range_attribute] or List[ccuda.CUmem_range_attribute]") + raise TypeError("Argument 'attributes' is not instance of type (expected Tuple[cydriver.CUmem_range_attribute] or List[cydriver.CUmem_range_attribute]") if not all(isinstance(_x, (int)) for _x in dataSizes): raise TypeError("Argument 'dataSizes' is not instance of type (expected Tuple[int] or List[int]") pylist = [utils.HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] cdef utils.InputVoidPtrPtrHelper voidStarHelperdata = utils.InputVoidPtrPtrHelper(pylist) - cdef void** cvoidStarHelper_ptr = voidStarHelperdata.cptr - cdef vector[size_t] cdataSizes = dataSizes - cdef vector[ccuda.CUmem_range_attribute] cattributes = [pyattributes.value for pyattributes in (attributes)] + cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr + cdef vector[size_t] cydataSizes = dataSizes + cdef vector[cydriver.CUmem_range_attribute] cyattributes = [pyattributes.value for pyattributes in 
(attributes)] if numAttributes > len(dataSizes): raise RuntimeError("List is too small: " + str(len(dataSizes)) + " < " + str(numAttributes)) if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) - err = ccuda.cuMemRangeGetAttributes(cvoidStarHelper_ptr, cdataSizes.data(), cattributes.data(), numAttributes, cdevPtr, count) + err = cydriver.cuMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr, count) return (CUresult(err), [obj.pyObj() for obj in pylist]) {{endif}} @@ -29861,19 +29861,19 @@ def cuPointerSetAttribute(value, attribute not None : CUpointer_attribute, ptr): -------- :py:obj:`~.cuPointerGetAttribute`, :py:obj:`~.cuPointerGetAttributes`, :py:obj:`~.cuMemAlloc`, :py:obj:`~.cuMemFree`, :py:obj:`~.cuMemAllocHost`, :py:obj:`~.cuMemFreeHost`, :py:obj:`~.cuMemHostAlloc`, :py:obj:`~.cuMemHostRegister`, :py:obj:`~.cuMemHostUnregister` """ - cdef ccuda.CUdeviceptr cptr + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr - cdef utils.HelperCUpointer_attribute cvalue = utils.HelperCUpointer_attribute(attribute, value, is_getter=False) - cdef void* cvalue_ptr = cvalue.cptr - cdef ccuda.CUpointer_attribute cattribute = attribute.value - err = ccuda.cuPointerSetAttribute(cvalue_ptr, cattribute, cptr) + cyptr = pptr + cdef utils.HelperCUpointer_attribute cyvalue = utils.HelperCUpointer_attribute(attribute, value, is_getter=False) + cdef void* cyvalue_ptr = cyvalue.cptr + cdef cydriver.CUpointer_attribute cyattribute = attribute.value + err = cydriver.cuPointerSetAttribute(cyvalue_ptr, cyattribute, cyptr) return (CUresult(err),) {{endif}} @@ -29946,24 +29946,24 @@ def cuPointerGetAttributes(unsigned int numAttributes, attributes : Optional[Tup -------- :py:obj:`~.cuPointerGetAttribute`, :py:obj:`~.cuPointerSetAttribute`, :py:obj:`~.cudaPointerGetAttributes` """ - cdef ccuda.CUdeviceptr cptr + cdef cydriver.CUdeviceptr cyptr if ptr is None: - cptr = 0 + cyptr = 0 elif isinstance(ptr, (CUdeviceptr,)): pptr = int(ptr) - cptr = pptr + cyptr = pptr else: pptr = int(CUdeviceptr(ptr)) - cptr = pptr + cyptr = pptr attributes = [] if attributes is None else attributes if not all(isinstance(_x, (CUpointer_attribute)) for _x in attributes): - raise TypeError("Argument 'attributes' is not instance of type (expected Tuple[ccuda.CUpointer_attribute] or List[ccuda.CUpointer_attribute]") + raise TypeError("Argument 'attributes' is not instance of type (expected Tuple[cydriver.CUpointer_attribute] or List[cydriver.CUpointer_attribute]") if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) - cdef vector[ccuda.CUpointer_attribute] cattributes = [pyattributes.value for pyattributes in (attributes)] + cdef vector[cydriver.CUpointer_attribute] cyattributes = [pyattributes.value for pyattributes in (attributes)] pylist = [utils.HelperCUpointer_attribute(pyattributes, 0, is_getter=True) for pyattributes in attributes] cdef utils.InputVoidPtrPtrHelper voidStarHelperdata = utils.InputVoidPtrPtrHelper(pylist) - cdef void** cvoidStarHelper_ptr = voidStarHelperdata.cptr - err = ccuda.cuPointerGetAttributes(numAttributes, cattributes.data(), cvoidStarHelper_ptr, cptr) + cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr + err = 
cydriver.cuPointerGetAttributes(numAttributes, cyattributes.data(), cyvoidStarHelper_ptr, cyptr) return (CUresult(err), [obj.pyObj() for obj in pylist]) {{endif}} @@ -30002,7 +30002,7 @@ def cuStreamCreate(unsigned int Flags): :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamCreateWithPriority`, :py:obj:`~.cuGreenCtxStreamCreate`, :py:obj:`~.cuStreamGetPriority`, :py:obj:`~.cuStreamGetFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags` """ cdef CUstream phStream = CUstream() - err = ccuda.cuStreamCreate(phStream._ptr, Flags) + err = cydriver.cuStreamCreate(phStream._ptr, Flags) return (CUresult(err), phStream) {{endif}} @@ -30054,7 +30054,7 @@ def cuStreamCreateWithPriority(unsigned int flags, int priority): In the current implementation, only compute kernels launched in priority streams are affected by the stream's priority. Stream priorities have no effect on host-to-device and device-to-host memory operations. """ cdef CUstream phStream = CUstream() - err = ccuda.cuStreamCreateWithPriority(phStream._ptr, flags, priority) + err = cydriver.cuStreamCreateWithPriority(phStream._ptr, flags, priority) return (CUresult(err), phStream) {{endif}} @@ -30090,17 +30090,17 @@ def cuStreamGetPriority(hStream): -------- :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamCreateWithPriority`, :py:obj:`~.cuGreenCtxStreamCreate`, :py:obj:`~.cuCtxGetStreamPriorityRange`, :py:obj:`~.cuStreamGetFlags`, :py:obj:`~.cudaStreamGetPriority` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef int priority = 0 - err = ccuda.cuStreamGetPriority(chStream, &priority) + err = cydriver.cuStreamGetPriority(cyhStream, &priority) return (CUresult(err), priority) {{endif}} @@ -30133,17 +30133,17 @@ def cuStreamGetFlags(hStream): -------- :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuGreenCtxStreamCreate`, :py:obj:`~.cuStreamGetPriority`, :py:obj:`~.cudaStreamGetFlags` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef unsigned int flags = 0 - err = ccuda.cuStreamGetFlags(chStream, &flags) + err = cydriver.cuStreamGetFlags(cyhStream, &flags) return (CUresult(err), flags) {{endif}} @@ -30188,17 +30188,17 @@ def cuStreamGetId(hStream): -------- :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamGetPriority`, :py:obj:`~.cudaStreamGetId` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef unsigned long long streamId = 0 - err = ccuda.cuStreamGetId(chStream, &streamId) + err = cydriver.cuStreamGetId(cyhStream, &streamId) return (CUresult(err), streamId) {{endif}} @@ -30252,17 +30252,17 @@ def cuStreamGetCtx(hStream): -------- 
:py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamCreateWithPriority`, :py:obj:`~.cuStreamGetPriority`, :py:obj:`~.cuStreamGetFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cuStreamGetCtx_v2`, :py:obj:`~.cudaStreamCreateWithFlags` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef CUcontext pctx = CUcontext() - err = ccuda.cuStreamGetCtx(chStream, pctx._ptr) + err = cydriver.cuStreamGetCtx(cyhStream, pctx._ptr) return (CUresult(err), pctx) {{endif}} @@ -30327,18 +30327,18 @@ def cuStreamGetCtx_v2(hStream): -------- :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamCreate` :py:obj:`~.cuStreamCreateWithPriority`, :py:obj:`~.cuGreenCtxStreamCreate`, :py:obj:`~.cuStreamGetPriority`, :py:obj:`~.cuStreamGetFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef CUcontext pCtx = CUcontext() cdef CUgreenCtx pGreenCtx = CUgreenCtx() - err = ccuda.cuStreamGetCtx_v2(chStream, pCtx._ptr, pGreenCtx._ptr) + err = cydriver.cuStreamGetCtx_v2(cyhStream, pCtx._ptr, pGreenCtx._ptr) return (CUresult(err), pCtx, pGreenCtx) {{endif}} @@ -30380,25 +30380,25 @@ def cuStreamWaitEvent(hStream, hEvent, unsigned int Flags): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuEventRecord`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cudaStreamWaitEvent` """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - cdef ccuda.CUstream chStream + cyhEvent = phEvent + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - err = ccuda.cuStreamWaitEvent(chStream, chEvent, Flags) + cyhStream = phStream + err = cydriver.cuStreamWaitEvent(cyhStream, cyhEvent, Flags) return (CUresult(err),) {{endif}} @@ -30473,27 +30473,27 @@ def cuStreamAddCallback(hStream, callback, userData, unsigned int flags): ----- This function is slated for eventual deprecation and removal. If you do not require the callback to execute in case of a device error, consider using :py:obj:`~.cuLaunchHostFunc`. Additionally, this function is not supported with :py:obj:`~.cuStreamBeginCapture` and :py:obj:`~.cuStreamEndCapture`, unlike :py:obj:`~.cuLaunchHostFunc`. 
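Given that recommendation, a hedged sketch of the cuLaunchHostFunc alternative via ctypes interop (stream and check are the assumed names from the earlier sketches; the callback address goes through the usual handle coercion, and host_fn must stay referenced until the stream has drained):

    import ctypes

    @ctypes.CFUNCTYPE(None, ctypes.c_void_p)
    def host_fn(user_data):
        # Runs on a CUDA-internal thread once prior stream work completes.
        print("stream work completed")

    fn_ptr = ctypes.cast(host_fn, ctypes.c_void_p).value
    check(driver.cuLaunchHostFunc(stream, fn_ptr, None)[0])
    check(driver.cuStreamSynchronize(stream)[0])
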
""" - cdef ccuda.CUstreamCallback ccallback + cdef cydriver.CUstreamCallback cycallback if callback is None: - ccallback = 0 + cycallback = 0 elif isinstance(callback, (CUstreamCallback,)): pcallback = int(callback) - ccallback = pcallback + cycallback = pcallback else: pcallback = int(CUstreamCallback(callback)) - ccallback = pcallback - cdef ccuda.CUstream chStream + cycallback = pcallback + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cuserData = utils.HelperInputVoidPtr(userData) - cdef void* cuserData_ptr = cuserData.cptr - err = ccuda.cuStreamAddCallback(chStream, ccallback, cuserData_ptr, flags) + cyhStream = phStream + cyuserData = utils.HelperInputVoidPtr(userData) + cdef void* cyuserData_ptr = cyuserData.cptr + err = cydriver.cuStreamAddCallback(cyhStream, cycallback, cyuserData_ptr, flags) return (CUresult(err),) {{endif}} @@ -30539,17 +30539,17 @@ def cuStreamBeginCapture(hStream, mode not None : CUstreamCaptureMode): ----- Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects. """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUstreamCaptureMode cmode = mode.value - err = ccuda.cuStreamBeginCapture(chStream, cmode) + cyhStream = phStream + cdef cydriver.CUstreamCaptureMode cymode = mode.value + err = cydriver.cuStreamBeginCapture(cyhStream, cymode) return (CUresult(err),) {{endif}} @@ -30609,51 +30609,51 @@ def cuStreamBeginCaptureToGraph(hStream, hGraph, dependencies : Optional[Tuple[C """ dependencyData = [] if dependencyData is None else dependencyData if not all(isinstance(_x, (CUgraphEdgeData,)) for _x in dependencyData): - raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[ccuda.CUgraphEdgeData,] or List[ccuda.CUgraphEdgeData,]") + raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[cydriver.CUgraphEdgeData,] or List[cydriver.CUgraphEdgeData,]") dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUstream chStream + cyhGraph = phGraph + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUgraphNode* cdependencies = NULL + cyhStream = phStream + cdef 
cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] - cdef ccuda.CUgraphEdgeData* cdependencyData = NULL + cydependencies[idx] = (dependencies[idx])._ptr[0] + cdef cydriver.CUgraphEdgeData* cydependencyData = NULL if len(dependencyData) > 0: - cdependencyData = calloc(len(dependencyData), sizeof(ccuda.CUgraphEdgeData)) - if cdependencyData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) + cydependencyData = calloc(len(dependencyData), sizeof(cydriver.CUgraphEdgeData)) + if cydependencyData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) for idx in range(len(dependencyData)): - string.memcpy(&cdependencyData[idx], (dependencyData[idx])._ptr, sizeof(ccuda.CUgraphEdgeData)) + string.memcpy(&cydependencyData[idx], (dependencyData[idx])._ptr, sizeof(cydriver.CUgraphEdgeData)) if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) if numDependencies > len(dependencyData): raise RuntimeError("List is too small: " + str(len(dependencyData)) + " < " + str(numDependencies)) - cdef ccuda.CUstreamCaptureMode cmode = mode.value - err = ccuda.cuStreamBeginCaptureToGraph(chStream, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cdependencyData, numDependencies, cmode) - if cdependencies is not NULL: - free(cdependencies) - if cdependencyData is not NULL: - free(cdependencyData) + cdef cydriver.CUstreamCaptureMode cymode = mode.value + err = cydriver.cuStreamBeginCaptureToGraph(cyhStream, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cydependencyData, numDependencies, cymode) + if cydependencies is not NULL: + free(cydependencies) + if cydependencyData is not NULL: + free(cydependencyData) return (CUresult(err),) {{endif}} @@ -30723,9 +30723,9 @@ def cuThreadExchangeStreamCaptureMode(mode not None : CUstreamCaptureMode): -------- :py:obj:`~.cuStreamBeginCapture` """ - cdef ccuda.CUstreamCaptureMode cmode = mode.value - err = ccuda.cuThreadExchangeStreamCaptureMode(&cmode) - return (CUresult(err), CUstreamCaptureMode(cmode)) + cdef cydriver.CUstreamCaptureMode cymode = mode.value + err = cydriver.cuThreadExchangeStreamCaptureMode(&cymode) + return (CUresult(err), CUstreamCaptureMode(cymode)) {{endif}} {{if 'cuStreamEndCapture' in found_functions}} @@ -30760,17 +30760,17 @@ def cuStreamEndCapture(hStream): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamBeginCapture`, :py:obj:`~.cuStreamIsCapturing`, :py:obj:`~.cuGraphDestroy` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): 
phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef CUgraph phGraph = CUgraph() - err = ccuda.cuStreamEndCapture(chStream, phGraph._ptr) + err = cydriver.cuStreamEndCapture(cyhStream, phGraph._ptr) return (CUresult(err), phGraph) {{endif}} @@ -30821,17 +30821,17 @@ def cuStreamIsCapturing(hStream): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamBeginCapture`, :py:obj:`~.cuStreamEndCapture` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUstreamCaptureStatus captureStatus - err = ccuda.cuStreamIsCapturing(chStream, &captureStatus) + cyhStream = phStream + cdef cydriver.CUstreamCaptureStatus captureStatus + err = cydriver.cuStreamIsCapturing(cyhStream, &captureStatus) return (CUresult(err), CUstreamCaptureStatus(captureStatus)) {{endif}} @@ -30896,24 +30896,24 @@ def cuStreamGetCaptureInfo(hStream): -------- :py:obj:`~.cuStreamGetCaptureInfo_v3` :py:obj:`~.cuStreamBeginCapture`, :py:obj:`~.cuStreamIsCapturing`, :py:obj:`~.cuStreamUpdateCaptureDependencies` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUstreamCaptureStatus captureStatus_out + cyhStream = phStream + cdef cydriver.CUstreamCaptureStatus captureStatus_out cdef cuuint64_t id_out = cuuint64_t() cdef CUgraph graph_out = CUgraph() - cdef const ccuda.CUgraphNode* cdependencies_out = NULL + cdef const cydriver.CUgraphNode* cydependencies_out = NULL pydependencies_out = [] cdef size_t numDependencies_out = 0 - err = ccuda.cuStreamGetCaptureInfo(chStream, &captureStatus_out, id_out._ptr, graph_out._ptr, &cdependencies_out, &numDependencies_out) + err = cydriver.cuStreamGetCaptureInfo(cyhStream, &captureStatus_out, id_out._ptr, graph_out._ptr, &cydependencies_out, &numDependencies_out) if CUresult(err) == CUresult(0): - pydependencies_out = [CUgraphNode(init_value=cdependencies_out[idx]) for idx in range(numDependencies_out)] + pydependencies_out = [CUgraphNode(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] return (CUresult(err), CUstreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) {{endif}} @@ -30990,28 +30990,28 @@ def cuStreamGetCaptureInfo_v3(hStream): -------- :py:obj:`~.cuStreamGetCaptureInfo` :py:obj:`~.cuStreamBeginCapture`, :py:obj:`~.cuStreamIsCapturing`, :py:obj:`~.cuStreamUpdateCaptureDependencies` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUstreamCaptureStatus captureStatus_out + cyhStream = phStream + cdef cydriver.CUstreamCaptureStatus captureStatus_out cdef cuuint64_t id_out = cuuint64_t() cdef CUgraph graph_out = CUgraph() - cdef const ccuda.CUgraphNode* cdependencies_out = NULL + cdef const cydriver.CUgraphNode* cydependencies_out = NULL pydependencies_out = [] - cdef 
const ccuda.CUgraphEdgeData* cedgeData_out = NULL + cdef const cydriver.CUgraphEdgeData* cyedgeData_out = NULL pyedgeData_out = [] cdef size_t numDependencies_out = 0 - err = ccuda.cuStreamGetCaptureInfo_v3(chStream, &captureStatus_out, id_out._ptr, graph_out._ptr, &cdependencies_out, &cedgeData_out, &numDependencies_out) + err = cydriver.cuStreamGetCaptureInfo_v3(cyhStream, &captureStatus_out, id_out._ptr, graph_out._ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) if CUresult(err) == CUresult(0): - pydependencies_out = [CUgraphNode(init_value=cdependencies_out[idx]) for idx in range(numDependencies_out)] + pydependencies_out = [CUgraphNode(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] if CUresult(err) == CUresult(0): - pyedgeData_out = [CUgraphEdgeData(_ptr=&cedgeData_out[idx]) for idx in range(numDependencies_out)] + pyedgeData_out = [CUgraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] return (CUresult(err), CUstreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) {{endif}} @@ -31064,28 +31064,28 @@ def cuStreamUpdateCaptureDependencies(hStream, dependencies : Optional[Tuple[CUg """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUstream chStream + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUgraphNode* cdependencies = NULL + cyhStream = phStream + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - err = ccuda.cuStreamUpdateCaptureDependencies(chStream, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, flags) - if cdependencies is not NULL: - free(cdependencies) + err = cydriver.cuStreamUpdateCaptureDependencies(cyhStream, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, flags) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err),) {{endif}} @@ -31136,39 +31136,39 @@ def cuStreamUpdateCaptureDependencies_v2(hStream, dependencies : Optional[Tuple[ """ dependencyData = [] if dependencyData is None else dependencyData if not all(isinstance(_x, (CUgraphEdgeData,)) for _x in dependencyData): - raise TypeError("Argument 
'dependencyData' is not instance of type (expected Tuple[ccuda.CUgraphEdgeData,] or List[ccuda.CUgraphEdgeData,]") + raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[cydriver.CUgraphEdgeData,] or List[cydriver.CUgraphEdgeData,]") dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUstream chStream + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUgraphNode* cdependencies = NULL + cyhStream = phStream + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] - cdef ccuda.CUgraphEdgeData* cdependencyData = NULL + cydependencies[idx] = (dependencies[idx])._ptr[0] + cdef cydriver.CUgraphEdgeData* cydependencyData = NULL if len(dependencyData) > 0: - cdependencyData = calloc(len(dependencyData), sizeof(ccuda.CUgraphEdgeData)) - if cdependencyData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) + cydependencyData = calloc(len(dependencyData), sizeof(cydriver.CUgraphEdgeData)) + if cydependencyData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) for idx in range(len(dependencyData)): - string.memcpy(&cdependencyData[idx], (dependencyData[idx])._ptr, sizeof(ccuda.CUgraphEdgeData)) - err = ccuda.cuStreamUpdateCaptureDependencies_v2(chStream, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cdependencyData, numDependencies, flags) - if cdependencies is not NULL: - free(cdependencies) - if cdependencyData is not NULL: - free(cdependencyData) + string.memcpy(&cydependencyData[idx], (dependencyData[idx])._ptr, sizeof(cydriver.CUgraphEdgeData)) + err = cydriver.cuStreamUpdateCaptureDependencies_v2(cyhStream, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cydependencyData, numDependencies, flags) + if cydependencies is not NULL: + free(cydependencies) + if cydependencyData is not NULL: + free(cydependencyData) return (CUresult(err),) {{endif}} @@ -31264,25 +31264,25 @@ def cuStreamAttachMemAsync(hStream, dptr, size_t length, unsigned int flags): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamWaitEvent`, 
:py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuMemAllocManaged`, :py:obj:`~.cudaStreamAttachMemAsync` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - cdef ccuda.CUstream chStream + cydptr = pdptr + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - err = ccuda.cuStreamAttachMemAsync(chStream, cdptr, length, flags) + cyhStream = phStream + err = cydriver.cuStreamAttachMemAsync(cyhStream, cydptr, length, flags) return (CUresult(err),) {{endif}} @@ -31314,16 +31314,16 @@ def cuStreamQuery(hStream): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cudaStreamQuery` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - err = ccuda.cuStreamQuery(chStream) + cyhStream = phStream + err = cydriver.cuStreamQuery(cyhStream) return (CUresult(err),) {{endif}} @@ -31354,16 +31354,16 @@ def cuStreamSynchronize(hStream): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cudaStreamSynchronize` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - err = ccuda.cuStreamSynchronize(chStream) + cyhStream = phStream + err = cydriver.cuStreamSynchronize(cyhStream) return (CUresult(err),) {{endif}} @@ -31395,16 +31395,16 @@ def cuStreamDestroy(hStream): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cudaStreamDestroy` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - err = ccuda.cuStreamDestroy(chStream) + cyhStream = phStream + err = cydriver.cuStreamDestroy(cyhStream) return (CUresult(err),) {{endif}} @@ -31433,25 +31433,25 @@ def cuStreamCopyAttributes(dst, src): -------- :py:obj:`~.CUaccessPolicyWindow` """ - cdef ccuda.CUstream csrc + cdef cydriver.CUstream cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (CUstream,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(CUstream(src)) - csrc = psrc - cdef ccuda.CUstream cdst + cysrc = psrc + cdef cydriver.CUstream cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (CUstream,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(CUstream(dst)) - cdst = pdst - err = ccuda.cuStreamCopyAttributes(cdst, csrc) + cydst = pdst + err = 
cydriver.cuStreamCopyAttributes(cydst, cysrc) return (CUresult(err),) {{endif}} @@ -31482,18 +31482,18 @@ def cuStreamGetAttribute(hStream, attr not None : CUstreamAttrID): -------- :py:obj:`~.CUaccessPolicyWindow` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUstreamAttrID cattr = attr.value + cyhStream = phStream + cdef cydriver.CUstreamAttrID cyattr = attr.value cdef CUstreamAttrValue value_out = CUstreamAttrValue() - err = ccuda.cuStreamGetAttribute(chStream, cattr, value_out._ptr) + err = cydriver.cuStreamGetAttribute(cyhStream, cyattr, value_out._ptr) return (CUresult(err), value_out) {{endif}} @@ -31525,18 +31525,18 @@ def cuStreamSetAttribute(hStream, attr not None : CUstreamAttrID, value : Option -------- :py:obj:`~.CUaccessPolicyWindow` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUstreamAttrID cattr = attr.value - cdef ccuda.CUstreamAttrValue* cvalue_ptr = value._ptr if value != None else NULL - err = ccuda.cuStreamSetAttribute(chStream, cattr, cvalue_ptr) + cyhStream = phStream + cdef cydriver.CUstreamAttrID cyattr = attr.value + cdef cydriver.CUstreamAttrValue* cyvalue_ptr = value._ptr if value != None else NULL + err = cydriver.cuStreamSetAttribute(cyhStream, cyattr, cyvalue_ptr) return (CUresult(err),) {{endif}} @@ -31584,7 +31584,7 @@ def cuEventCreate(unsigned int Flags): :py:obj:`~.cuEventRecord`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cuEventElapsedTime`, :py:obj:`~.cudaEventCreate`, :py:obj:`~.cudaEventCreateWithFlags` """ cdef CUevent phEvent = CUevent() - err = ccuda.cuEventCreate(phEvent._ptr, Flags) + err = cydriver.cuEventCreate(phEvent._ptr, Flags) return (CUresult(err), phEvent) {{endif}} @@ -31627,25 +31627,25 @@ def cuEventRecord(hEvent, hStream): -------- :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cuEventElapsedTime`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cuEventRecordWithFlags` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUevent chEvent + cyhStream = phStream + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - err = ccuda.cuEventRecord(chEvent, chStream) + cyhEvent = phEvent + err = cydriver.cuEventRecord(cyhEvent, cyhStream) return (CUresult(err),) {{endif}} @@ -31698,25 +31698,25 @@ def cuEventRecordWithFlags(hEvent, hStream, unsigned int flags): -------- :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cuEventElapsedTime`, 
:py:obj:`~.cuEventRecord`, :py:obj:`~.cudaEventRecord` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUevent chEvent + cyhStream = phStream + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - err = ccuda.cuEventRecordWithFlags(chEvent, chStream, flags) + cyhEvent = phEvent + err = cydriver.cuEventRecordWithFlags(cyhEvent, cyhStream, flags) return (CUresult(err),) {{endif}} @@ -31752,16 +31752,16 @@ def cuEventQuery(hEvent): -------- :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventRecord`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cuEventElapsedTime`, :py:obj:`~.cudaEventQuery` """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - err = ccuda.cuEventQuery(chEvent) + cyhEvent = phEvent + err = cydriver.cuEventQuery(cyhEvent) return (CUresult(err),) {{endif}} @@ -31796,16 +31796,16 @@ def cuEventSynchronize(hEvent): -------- :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventRecord`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cuEventElapsedTime`, :py:obj:`~.cudaEventSynchronize` """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - err = ccuda.cuEventSynchronize(chEvent) + cyhEvent = phEvent + err = cydriver.cuEventSynchronize(cyhEvent) return (CUresult(err),) {{endif}} @@ -31837,16 +31837,16 @@ def cuEventDestroy(hEvent): -------- :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventRecord`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuEventElapsedTime`, :py:obj:`~.cudaEventDestroy` """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - err = ccuda.cuEventDestroy(chEvent) + cyhEvent = phEvent + err = cydriver.cuEventDestroy(cyhEvent) return (CUresult(err),) {{endif}} @@ -31895,26 +31895,26 @@ def cuEventElapsedTime(hStart, hEnd): -------- :py:obj:`~.cuEventCreate`, :py:obj:`~.cuEventRecord`, :py:obj:`~.cuEventQuery`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuEventDestroy`, :py:obj:`~.cudaEventElapsedTime` """ - cdef ccuda.CUevent chEnd + cdef cydriver.CUevent cyhEnd if hEnd is None: - chEnd = 0 + cyhEnd = 0 elif isinstance(hEnd, (CUevent,)): phEnd = int(hEnd) - chEnd = phEnd + cyhEnd = phEnd else: phEnd = int(CUevent(hEnd)) - chEnd = phEnd - cdef ccuda.CUevent chStart + cyhEnd = phEnd + cdef cydriver.CUevent cyhStart if hStart is None: - chStart = 0 + cyhStart = 0 elif isinstance(hStart, (CUevent,)): phStart = int(hStart) - chStart = phStart + cyhStart = phStart else: phStart = int(CUevent(hStart)) - chStart = phStart + cyhStart = phStart cdef float 
pMilliseconds = 0 - err = ccuda.cuEventElapsedTime(&pMilliseconds, chStart, chEnd) + err = cydriver.cuEventElapsedTime(&pMilliseconds, cyhStart, cyhEnd) return (CUresult(err), pMilliseconds) {{endif}} @@ -32066,8 +32066,8 @@ def cuImportExternalMemory(memHandleDesc : Optional[CUDA_EXTERNAL_MEMORY_HANDLE_ and Cache Control" chapter from Vulkan specification. """ cdef CUexternalMemory extMem_out = CUexternalMemory() - cdef ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC* cmemHandleDesc_ptr = memHandleDesc._ptr if memHandleDesc != None else NULL - err = ccuda.cuImportExternalMemory(extMem_out._ptr, cmemHandleDesc_ptr) + cdef cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC* cymemHandleDesc_ptr = memHandleDesc._ptr if memHandleDesc != None else NULL + err = cydriver.cuImportExternalMemory(extMem_out._ptr, cymemHandleDesc_ptr) return (CUresult(err), extMem_out) {{endif}} @@ -32124,18 +32124,18 @@ def cuExternalMemoryGetMappedBuffer(extMem, bufferDesc : Optional[CUDA_EXTERNAL_ -------- :py:obj:`~.cuImportExternalMemory`, :py:obj:`~.cuDestroyExternalMemory`, :py:obj:`~.cuExternalMemoryGetMappedMipmappedArray` """ - cdef ccuda.CUexternalMemory cextMem + cdef cydriver.CUexternalMemory cyextMem if extMem is None: - cextMem = 0 + cyextMem = 0 elif isinstance(extMem, (CUexternalMemory,)): pextMem = int(extMem) - cextMem = pextMem + cyextMem = pextMem else: pextMem = int(CUexternalMemory(extMem)) - cextMem = pextMem + cyextMem = pextMem cdef CUdeviceptr devPtr = CUdeviceptr() - cdef ccuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC* cbufferDesc_ptr = bufferDesc._ptr if bufferDesc != None else NULL - err = ccuda.cuExternalMemoryGetMappedBuffer(devPtr._ptr, cextMem, cbufferDesc_ptr) + cdef cydriver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC* cybufferDesc_ptr = bufferDesc._ptr if bufferDesc != None else NULL + err = cydriver.cuExternalMemoryGetMappedBuffer(devPtr._ptr, cyextMem, cybufferDesc_ptr) return (CUresult(err), devPtr) {{endif}} @@ -32195,18 +32195,18 @@ def cuExternalMemoryGetMappedMipmappedArray(extMem, mipmapDesc : Optional[CUDA_E -------- :py:obj:`~.cuImportExternalMemory`, :py:obj:`~.cuDestroyExternalMemory`, :py:obj:`~.cuExternalMemoryGetMappedBuffer` """ - cdef ccuda.CUexternalMemory cextMem + cdef cydriver.CUexternalMemory cyextMem if extMem is None: - cextMem = 0 + cyextMem = 0 elif isinstance(extMem, (CUexternalMemory,)): pextMem = int(extMem) - cextMem = pextMem + cyextMem = pextMem else: pextMem = int(CUexternalMemory(extMem)) - cextMem = pextMem + cyextMem = pextMem cdef CUmipmappedArray mipmap = CUmipmappedArray() - cdef ccuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC* cmipmapDesc_ptr = mipmapDesc._ptr if mipmapDesc != None else NULL - err = ccuda.cuExternalMemoryGetMappedMipmappedArray(mipmap._ptr, cextMem, cmipmapDesc_ptr) + cdef cydriver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC* cymipmapDesc_ptr = mipmapDesc._ptr if mipmapDesc != None else NULL + err = cydriver.cuExternalMemoryGetMappedMipmappedArray(mipmap._ptr, cyextMem, cymipmapDesc_ptr) return (CUresult(err), mipmap) {{endif}} @@ -32235,16 +32235,16 @@ def cuDestroyExternalMemory(extMem): -------- :py:obj:`~.cuImportExternalMemory`, :py:obj:`~.cuExternalMemoryGetMappedBuffer`, :py:obj:`~.cuExternalMemoryGetMappedMipmappedArray` """ - cdef ccuda.CUexternalMemory cextMem + cdef cydriver.CUexternalMemory cyextMem if extMem is None: - cextMem = 0 + cyextMem = 0 elif isinstance(extMem, (CUexternalMemory,)): pextMem = int(extMem) - cextMem = pextMem + cyextMem = pextMem else: pextMem = int(CUexternalMemory(extMem)) - cextMem = pextMem - err = 
ccuda.cuDestroyExternalMemory(cextMem) + cyextMem = pextMem + err = cydriver.cuDestroyExternalMemory(cyextMem) return (CUresult(err),) {{endif}} @@ -32391,8 +32391,8 @@ def cuImportExternalSemaphore(semHandleDesc : Optional[CUDA_EXTERNAL_SEMAPHORE_H :py:obj:`~.cuDestroyExternalSemaphore`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ cdef CUexternalSemaphore extSem_out = CUexternalSemaphore() - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC* csemHandleDesc_ptr = semHandleDesc._ptr if semHandleDesc != None else NULL - err = ccuda.cuImportExternalSemaphore(extSem_out._ptr, csemHandleDesc_ptr) + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC* cysemHandleDesc_ptr = semHandleDesc._ptr if semHandleDesc != None else NULL + err = cydriver.cuImportExternalSemaphore(extSem_out._ptr, cysemHandleDesc_ptr) return (CUresult(err), extSem_out) {{endif}} @@ -32491,43 +32491,43 @@ def cuSignalExternalSemaphoresAsync(extSemArray : Optional[Tuple[CUexternalSemap -------- :py:obj:`~.cuImportExternalSemaphore`, :py:obj:`~.cuDestroyExternalSemaphore`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ - cdef ccuda.CUstream cstream + cdef cydriver.CUstream cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (CUstream,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(CUstream(stream)) - cstream = pstream + cystream = pstream paramsArray = [] if paramsArray is None else paramsArray if not all(isinstance(_x, (CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,)) for _x in paramsArray): - raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,] or List[ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,]") + raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,] or List[cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS,]") extSemArray = [] if extSemArray is None else extSemArray if not all(isinstance(_x, (CUexternalSemaphore,)) for _x in extSemArray): - raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[ccuda.CUexternalSemaphore,] or List[ccuda.CUexternalSemaphore,]") - cdef ccuda.CUexternalSemaphore* cextSemArray = NULL + raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[cydriver.CUexternalSemaphore,] or List[cydriver.CUexternalSemaphore,]") + cdef cydriver.CUexternalSemaphore* cyextSemArray = NULL if len(extSemArray) > 0: - cextSemArray = calloc(len(extSemArray), sizeof(ccuda.CUexternalSemaphore)) - if cextSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(ccuda.CUexternalSemaphore))) + cyextSemArray = calloc(len(extSemArray), sizeof(cydriver.CUexternalSemaphore)) + if cyextSemArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cydriver.CUexternalSemaphore))) else: for idx in range(len(extSemArray)): - cextSemArray[idx] = (extSemArray[idx])._ptr[0] - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* cparamsArray = NULL + cyextSemArray[idx] = (extSemArray[idx])._ptr[0] + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* cyparamsArray = NULL if len(paramsArray) > 0: - cparamsArray = calloc(len(paramsArray), sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) - if cparamsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + 
str(sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS))) + cyparamsArray = calloc(len(paramsArray), sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) + if cyparamsArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS))) for idx in range(len(paramsArray)): - string.memcpy(&cparamsArray[idx], (paramsArray[idx])._ptr, sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) + string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._ptr, sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS)) if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) - err = ccuda.cuSignalExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cparamsArray, numExtSems, cstream) - if cextSemArray is not NULL: - free(cextSemArray) - if cparamsArray is not NULL: - free(cparamsArray) + err = cydriver.cuSignalExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cyextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cyparamsArray, numExtSems, cystream) + if cyextSemArray is not NULL: + free(cyextSemArray) + if cyparamsArray is not NULL: + free(cyparamsArray) return (CUresult(err),) {{endif}} @@ -32617,43 +32617,43 @@ def cuWaitExternalSemaphoresAsync(extSemArray : Optional[Tuple[CUexternalSemapho -------- :py:obj:`~.cuImportExternalSemaphore`, :py:obj:`~.cuDestroyExternalSemaphore`, :py:obj:`~.cuSignalExternalSemaphoresAsync` """ - cdef ccuda.CUstream cstream + cdef cydriver.CUstream cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (CUstream,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(CUstream(stream)) - cstream = pstream + cystream = pstream paramsArray = [] if paramsArray is None else paramsArray if not all(isinstance(_x, (CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,)) for _x in paramsArray): - raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,] or List[ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,]") + raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,] or List[cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS,]") extSemArray = [] if extSemArray is None else extSemArray if not all(isinstance(_x, (CUexternalSemaphore,)) for _x in extSemArray): - raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[ccuda.CUexternalSemaphore,] or List[ccuda.CUexternalSemaphore,]") - cdef ccuda.CUexternalSemaphore* cextSemArray = NULL + raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[cydriver.CUexternalSemaphore,] or List[cydriver.CUexternalSemaphore,]") + cdef cydriver.CUexternalSemaphore* cyextSemArray = NULL if len(extSemArray) > 0: - cextSemArray = calloc(len(extSemArray), sizeof(ccuda.CUexternalSemaphore)) - if cextSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(ccuda.CUexternalSemaphore))) + cyextSemArray = calloc(len(extSemArray), sizeof(cydriver.CUexternalSemaphore)) + if cyextSemArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + 
str(len(extSemArray)) + 'x' + str(sizeof(cydriver.CUexternalSemaphore))) else: for idx in range(len(extSemArray)): - cextSemArray[idx] = (extSemArray[idx])._ptr[0] - cdef ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* cparamsArray = NULL + cyextSemArray[idx] = (extSemArray[idx])._ptr[0] + cdef cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* cyparamsArray = NULL if len(paramsArray) > 0: - cparamsArray = calloc(len(paramsArray), sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) - if cparamsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS))) + cyparamsArray = calloc(len(paramsArray), sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) + if cyparamsArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS))) for idx in range(len(paramsArray)): - string.memcpy(&cparamsArray[idx], (paramsArray[idx])._ptr, sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) + string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._ptr, sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS)) if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) - err = ccuda.cuWaitExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cparamsArray, numExtSems, cstream) - if cextSemArray is not NULL: - free(cextSemArray) - if cparamsArray is not NULL: - free(cparamsArray) + err = cydriver.cuWaitExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cyextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cyparamsArray, numExtSems, cystream) + if cyextSemArray is not NULL: + free(cyextSemArray) + if cyparamsArray is not NULL: + free(cyparamsArray) return (CUresult(err),) {{endif}} @@ -32681,16 +32681,16 @@ def cuDestroyExternalSemaphore(extSem): -------- :py:obj:`~.cuImportExternalSemaphore`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ - cdef ccuda.CUexternalSemaphore cextSem + cdef cydriver.CUexternalSemaphore cyextSem if extSem is None: - cextSem = 0 + cyextSem = 0 elif isinstance(extSem, (CUexternalSemaphore,)): pextSem = int(extSem) - cextSem = pextSem + cyextSem = pextSem else: pextSem = int(CUexternalSemaphore(extSem)) - cextSem = pextSem - err = ccuda.cuDestroyExternalSemaphore(cextSem) + cyextSem = pextSem + err = cydriver.cuDestroyExternalSemaphore(cyextSem) return (CUresult(err),) {{endif}} @@ -32739,34 +32739,34 @@ def cuStreamWaitValue32(stream, addr, value, unsigned int flags): ----- Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. 
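The note above closes the cuStreamWaitValue32 docstring; the hunks that follow apply the same mechanical ccuda→cydriver rename to the whole wait/write-value family without touching any signatures — cuStreamWaitValue32(stream, addr, value, flags) and cuStreamWriteValue32(stream, addr, value, flags) stay as-is. As a usage illustration, here is a minimal sketch against the new public layout this patch introduces (cuda.bindings.driver; the old `from cuda import cuda` path survives as a compatibility alias). It assumes an initialized context on a device reporting CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS:

    from cuda.bindings import driver

    # Assumes cuInit(0) has run and a context is current.
    err, flag = driver.cuMemAlloc(4)             # one 32-bit flag word
    err, = driver.cuMemsetD32(flag, 0, 1)
    err, waiter = driver.cuStreamCreate(0)
    err, writer = driver.cuStreamCreate(0)

    # Block `waiter` until the word at `flag` reaches 1. Per the warning
    # above, mirror this ordering with CUDA-visible dependencies (events)
    # so the scheduler sees it too.
    err, = driver.cuStreamWaitValue32(
        waiter, flag, 1,
        driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ)

    # Release the wait from a second stream; a peer device or a host
    # thread could write the flag instead.
    err, = driver.cuStreamWriteValue32(
        writer, flag, 1,
        driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT)
    err, = driver.cuStreamSynchronize(waiter)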
""" - cdef ccuda.cuuint32_t cvalue + cdef cydriver.cuuint32_t cyvalue if value is None: - cvalue = 0 + cyvalue = 0 elif isinstance(value, (cuuint32_t,)): pvalue = int(value) - cvalue = pvalue + cyvalue = pvalue else: pvalue = int(cuuint32_t(value)) - cvalue = pvalue - cdef ccuda.CUdeviceptr caddr + cyvalue = pvalue + cdef cydriver.CUdeviceptr cyaddr if addr is None: - caddr = 0 + cyaddr = 0 elif isinstance(addr, (CUdeviceptr,)): paddr = int(addr) - caddr = paddr + cyaddr = paddr else: paddr = int(CUdeviceptr(addr)) - caddr = paddr - cdef ccuda.CUstream cstream + cyaddr = paddr + cdef cydriver.CUstream cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (CUstream,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(CUstream(stream)) - cstream = pstream - err = ccuda.cuStreamWaitValue32(cstream, caddr, cvalue, flags) + cystream = pstream + err = cydriver.cuStreamWaitValue32(cystream, cyaddr, cyvalue, flags) return (CUresult(err),) {{endif}} @@ -32813,34 +32813,34 @@ def cuStreamWaitValue64(stream, addr, value, unsigned int flags): ----- Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. """ - cdef ccuda.cuuint64_t cvalue + cdef cydriver.cuuint64_t cyvalue if value is None: - cvalue = 0 + cyvalue = 0 elif isinstance(value, (cuuint64_t,)): pvalue = int(value) - cvalue = pvalue + cyvalue = pvalue else: pvalue = int(cuuint64_t(value)) - cvalue = pvalue - cdef ccuda.CUdeviceptr caddr + cyvalue = pvalue + cdef cydriver.CUdeviceptr cyaddr if addr is None: - caddr = 0 + cyaddr = 0 elif isinstance(addr, (CUdeviceptr,)): paddr = int(addr) - caddr = paddr + cyaddr = paddr else: paddr = int(CUdeviceptr(addr)) - caddr = paddr - cdef ccuda.CUstream cstream + cyaddr = paddr + cdef cydriver.CUstream cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (CUstream,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(CUstream(stream)) - cstream = pstream - err = ccuda.cuStreamWaitValue64(cstream, caddr, cvalue, flags) + cystream = pstream + err = cydriver.cuStreamWaitValue64(cystream, cyaddr, cyvalue, flags) return (CUresult(err),) {{endif}} @@ -32877,34 +32877,34 @@ def cuStreamWriteValue32(stream, addr, value, unsigned int flags): -------- :py:obj:`~.cuStreamWriteValue64`, :py:obj:`~.cuStreamWaitValue32`, :py:obj:`~.cuStreamWaitValue64`, :py:obj:`~.cuStreamBatchMemOp`, :py:obj:`~.cuMemHostRegister`, :py:obj:`~.cuEventRecord` """ - cdef ccuda.cuuint32_t cvalue + cdef cydriver.cuuint32_t cyvalue if value is None: - cvalue = 0 + cyvalue = 0 elif isinstance(value, (cuuint32_t,)): pvalue = int(value) - cvalue = pvalue + cyvalue = pvalue else: pvalue = int(cuuint32_t(value)) - cvalue = pvalue - cdef ccuda.CUdeviceptr caddr + cyvalue = pvalue + cdef cydriver.CUdeviceptr cyaddr if addr is None: - caddr = 0 + cyaddr = 0 elif isinstance(addr, (CUdeviceptr,)): paddr = int(addr) - caddr = paddr + cyaddr = paddr else: paddr = int(CUdeviceptr(addr)) - caddr = paddr - cdef ccuda.CUstream cstream + cyaddr = paddr + cdef cydriver.CUstream cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (CUstream,)): pstream = int(stream) - cstream = pstream + cystream = pstream 
else: pstream = int(CUstream(stream)) - cstream = pstream - err = ccuda.cuStreamWriteValue32(cstream, caddr, cvalue, flags) + cystream = pstream + err = cydriver.cuStreamWriteValue32(cystream, cyaddr, cyvalue, flags) return (CUresult(err),) {{endif}} @@ -32943,34 +32943,34 @@ def cuStreamWriteValue64(stream, addr, value, unsigned int flags): -------- :py:obj:`~.cuStreamWriteValue32`, :py:obj:`~.cuStreamWaitValue32`, :py:obj:`~.cuStreamWaitValue64`, :py:obj:`~.cuStreamBatchMemOp`, :py:obj:`~.cuMemHostRegister`, :py:obj:`~.cuEventRecord` """ - cdef ccuda.cuuint64_t cvalue + cdef cydriver.cuuint64_t cyvalue if value is None: - cvalue = 0 + cyvalue = 0 elif isinstance(value, (cuuint64_t,)): pvalue = int(value) - cvalue = pvalue + cyvalue = pvalue else: pvalue = int(cuuint64_t(value)) - cvalue = pvalue - cdef ccuda.CUdeviceptr caddr + cyvalue = pvalue + cdef cydriver.CUdeviceptr cyaddr if addr is None: - caddr = 0 + cyaddr = 0 elif isinstance(addr, (CUdeviceptr,)): paddr = int(addr) - caddr = paddr + cyaddr = paddr else: paddr = int(CUdeviceptr(addr)) - caddr = paddr - cdef ccuda.CUstream cstream + cyaddr = paddr + cdef cydriver.CUstream cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (CUstream,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(CUstream(stream)) - cstream = pstream - err = ccuda.cuStreamWriteValue64(cstream, caddr, cvalue, flags) + cystream = pstream + err = cydriver.cuStreamWriteValue64(cystream, cyaddr, cyvalue, flags) return (CUresult(err),) {{endif}} @@ -33021,27 +33021,27 @@ def cuStreamBatchMemOp(stream, unsigned int count, paramArray : Optional[Tuple[C """ paramArray = [] if paramArray is None else paramArray if not all(isinstance(_x, (CUstreamBatchMemOpParams,)) for _x in paramArray): - raise TypeError("Argument 'paramArray' is not instance of type (expected Tuple[ccuda.CUstreamBatchMemOpParams,] or List[ccuda.CUstreamBatchMemOpParams,]") - cdef ccuda.CUstream cstream + raise TypeError("Argument 'paramArray' is not instance of type (expected Tuple[cydriver.CUstreamBatchMemOpParams,] or List[cydriver.CUstreamBatchMemOpParams,]") + cdef cydriver.CUstream cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (CUstream,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(CUstream(stream)) - cstream = pstream + cystream = pstream if count > len(paramArray): raise RuntimeError("List is too small: " + str(len(paramArray)) + " < " + str(count)) - cdef ccuda.CUstreamBatchMemOpParams* cparamArray = NULL + cdef cydriver.CUstreamBatchMemOpParams* cyparamArray = NULL if len(paramArray) > 0: - cparamArray = calloc(len(paramArray), sizeof(ccuda.CUstreamBatchMemOpParams)) - if cparamArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramArray)) + 'x' + str(sizeof(ccuda.CUstreamBatchMemOpParams))) + cyparamArray = calloc(len(paramArray), sizeof(cydriver.CUstreamBatchMemOpParams)) + if cyparamArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramArray)) + 'x' + str(sizeof(cydriver.CUstreamBatchMemOpParams))) for idx in range(len(paramArray)): - string.memcpy(&cparamArray[idx], (paramArray[idx])._ptr, sizeof(ccuda.CUstreamBatchMemOpParams)) - err = ccuda.cuStreamBatchMemOp(cstream, count, (paramArray[0])._ptr if len(paramArray) == 1 else cparamArray, flags) - if cparamArray is not NULL: - free(cparamArray) + string.memcpy(&cyparamArray[idx], (paramArray[idx])._ptr, 
sizeof(cydriver.CUstreamBatchMemOpParams)) + err = cydriver.cuStreamBatchMemOp(cystream, count, (paramArray[0])._ptr if len(paramArray) == 1 else cyparamArray, flags) + if cyparamArray is not NULL: + free(cyparamArray) return (CUresult(err),) {{endif}} @@ -33164,18 +33164,18 @@ def cuFuncGetAttribute(attrib not None : CUfunction_attribute, hfunc): -------- :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cudaFuncGetAttributes`, :py:obj:`~.cudaFuncSetAttribute`, :py:obj:`~.cuFuncIsLoaded`, :py:obj:`~.cuFuncLoad`, :py:obj:`~.cuKernelGetAttribute` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc + cyhfunc = phfunc cdef int pi = 0 - cdef ccuda.CUfunction_attribute cattrib = attrib.value - err = ccuda.cuFuncGetAttribute(&pi, cattrib, chfunc) + cdef cydriver.CUfunction_attribute cyattrib = attrib.value + err = cydriver.cuFuncGetAttribute(&pi, cyattrib, cyhfunc) return (CUresult(err), pi) {{endif}} @@ -33259,17 +33259,17 @@ def cuFuncSetAttribute(hfunc, attrib not None : CUfunction_attribute, int value) -------- :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cudaFuncGetAttributes`, :py:obj:`~.cudaFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - cdef ccuda.CUfunction_attribute cattrib = attrib.value - err = ccuda.cuFuncSetAttribute(chfunc, cattrib, value) + cyhfunc = phfunc + cdef cydriver.CUfunction_attribute cyattrib = attrib.value + err = cydriver.cuFuncSetAttribute(cyhfunc, cyattrib, value) return (CUresult(err),) {{endif}} @@ -33325,17 +33325,17 @@ def cuFuncSetCacheConfig(hfunc, config not None : CUfunc_cache): -------- :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuKernelSetCacheConfig` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - cdef ccuda.CUfunc_cache cconfig = config.value - err = ccuda.cuFuncSetCacheConfig(chfunc, cconfig) + cyhfunc = phfunc + cdef cydriver.CUfunc_cache cyconfig = config.value + err = cydriver.cuFuncSetCacheConfig(cyhfunc, cyconfig) return (CUresult(err),) {{endif}} @@ -33367,17 +33367,17 @@ def cuFuncGetModule(hfunc): hmod : :py:obj:`~.CUmodule` Returned module handle """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc + cyhfunc = phfunc cdef CUmodule hmod = CUmodule() - err = ccuda.cuFuncGetModule(hmod._ptr, chfunc) + err = cydriver.cuFuncGetModule(hmod._ptr, cyhfunc) return (CUresult(err), hmod) {{endif}} @@ -33407,17 +33407,17 @@ def 
cuFuncGetName(hfunc): name : bytes The returned name of the function """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc + cyhfunc = phfunc cdef const char* name = NULL - err = ccuda.cuFuncGetName(&name, chfunc) + err = cydriver.cuFuncGetName(&name, cyhfunc) return (CUresult(err), name) {{endif}} @@ -33459,18 +33459,18 @@ def cuFuncGetParamInfo(func, size_t paramIndex): -------- :py:obj:`~.cuKernelGetParamInfo` """ - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef size_t paramOffset = 0 cdef size_t paramSize = 0 - err = ccuda.cuFuncGetParamInfo(cfunc, paramIndex, ¶mOffset, ¶mSize) + err = cydriver.cuFuncGetParamInfo(cyfunc, paramIndex, ¶mOffset, ¶mSize) return (CUresult(err), paramOffset, paramSize) {{endif}} @@ -33498,17 +33498,17 @@ def cuFuncIsLoaded(function): -------- :py:obj:`~.cuFuncLoad`, :py:obj:`~.cuModuleEnumerateFunctions` """ - cdef ccuda.CUfunction cfunction + cdef cydriver.CUfunction cyfunction if function is None: - cfunction = 0 + cyfunction = 0 elif isinstance(function, (CUfunction,)): pfunction = int(function) - cfunction = pfunction + cyfunction = pfunction else: pfunction = int(CUfunction(function)) - cfunction = pfunction - cdef ccuda.CUfunctionLoadingState state - err = ccuda.cuFuncIsLoaded(&state, cfunction) + cyfunction = pfunction + cdef cydriver.CUfunctionLoadingState state + err = cydriver.cuFuncIsLoaded(&state, cyfunction) return (CUresult(err), CUfunctionLoadingState(state)) {{endif}} @@ -33535,16 +33535,16 @@ def cuFuncLoad(function): -------- :py:obj:`~.cuModuleEnumerateFunctions`, :py:obj:`~.cuFuncIsLoaded` """ - cdef ccuda.CUfunction cfunction + cdef cydriver.CUfunction cyfunction if function is None: - cfunction = 0 + cyfunction = 0 elif isinstance(function, (CUfunction,)): pfunction = int(function) - cfunction = pfunction + cyfunction = pfunction else: pfunction = int(CUfunction(function)) - cfunction = pfunction - err = ccuda.cuFuncLoad(cfunction) + cyfunction = pfunction + err = cydriver.cuFuncLoad(cyfunction) return (CUresult(err),) {{endif}} @@ -33658,26 +33658,26 @@ def cuLaunchKernel(f, unsigned int gridDimX, unsigned int gridDimY, unsigned int -------- :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cuLibraryGetKernel`, :py:obj:`~.cuKernelSetCacheConfig`, :py:obj:`~.cuKernelGetAttribute`, :py:obj:`~.cuKernelSetAttribute` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUfunction cf + cyhStream = phStream + cdef cydriver.CUfunction cyf if f is None: - cf = 0 + cyf = 0 elif isinstance(f, (CUfunction,)): pf = int(f) - cf = pf + cyf = pf else: pf = int(CUfunction(f)) - cf = pf - ckernelParams = utils.HelperKernelParams(kernelParams) - err = ccuda.cuLaunchKernel(cf, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, chStream, 
ckernelParams.ckernelParams, extra) + cyf = pf + cykernelParams = utils.HelperKernelParams(kernelParams) + err = cydriver.cuLaunchKernel(cyf, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, cyhStream, cykernelParams.ckernelParams, extra) return (CUresult(err),) {{endif}} @@ -33874,18 +33874,18 @@ def cuLaunchKernelEx(config : Optional[CUlaunchConfig], f, kernelParams, void_pt -------- :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaLaunchKernelEx`, :py:obj:`~.cuLibraryGetKernel`, :py:obj:`~.cuKernelSetCacheConfig`, :py:obj:`~.cuKernelGetAttribute`, :py:obj:`~.cuKernelSetAttribute` """ - cdef ccuda.CUfunction cf + cdef cydriver.CUfunction cyf if f is None: - cf = 0 + cyf = 0 elif isinstance(f, (CUfunction,)): pf = int(f) - cf = pf + cyf = pf else: pf = int(CUfunction(f)) - cf = pf - cdef ccuda.CUlaunchConfig* cconfig_ptr = config._ptr if config != None else NULL - ckernelParams = utils.HelperKernelParams(kernelParams) - err = ccuda.cuLaunchKernelEx(cconfig_ptr, cf, ckernelParams.ckernelParams, extra) + cyf = pf + cdef cydriver.CUlaunchConfig* cyconfig_ptr = config._ptr if config != None else NULL + cykernelParams = utils.HelperKernelParams(kernelParams) + err = cydriver.cuLaunchKernelEx(cyconfig_ptr, cyf, cykernelParams.ckernelParams, extra) return (CUresult(err),) {{endif}} @@ -33979,26 +33979,26 @@ def cuLaunchCooperativeKernel(f, unsigned int gridDimX, unsigned int gridDimY, u -------- :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuLaunchCooperativeKernelMultiDevice`, :py:obj:`~.cudaLaunchCooperativeKernel`, :py:obj:`~.cuLibraryGetKernel`, :py:obj:`~.cuKernelSetCacheConfig`, :py:obj:`~.cuKernelGetAttribute`, :py:obj:`~.cuKernelSetAttribute` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUfunction cf + cyhStream = phStream + cdef cydriver.CUfunction cyf if f is None: - cf = 0 + cyf = 0 elif isinstance(f, (CUfunction,)): pf = int(f) - cf = pf + cyf = pf else: pf = int(CUfunction(f)) - cf = pf - ckernelParams = utils.HelperKernelParams(kernelParams) - err = ccuda.cuLaunchCooperativeKernel(cf, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, chStream, ckernelParams.ckernelParams) + cyf = pf + cykernelParams = utils.HelperKernelParams(kernelParams) + err = cydriver.cuLaunchCooperativeKernel(cyf, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, cyhStream, cykernelParams.ckernelParams) return (CUresult(err),) {{endif}} @@ -34158,18 +34158,18 @@ def cuLaunchCooperativeKernelMultiDevice(launchParamsList : Optional[Tuple[CUDA_ """ launchParamsList = [] if launchParamsList is None else launchParamsList if not all(isinstance(_x, (CUDA_LAUNCH_PARAMS,)) for _x in launchParamsList): - raise TypeError("Argument 'launchParamsList' is not instance of type (expected Tuple[ccuda.CUDA_LAUNCH_PARAMS,] or List[ccuda.CUDA_LAUNCH_PARAMS,]") - cdef ccuda.CUDA_LAUNCH_PARAMS* claunchParamsList = NULL + raise TypeError("Argument 'launchParamsList' is not instance of type (expected Tuple[cydriver.CUDA_LAUNCH_PARAMS,] or 
List[cydriver.CUDA_LAUNCH_PARAMS,]") + cdef cydriver.CUDA_LAUNCH_PARAMS* cylaunchParamsList = NULL if len(launchParamsList) > 0: - claunchParamsList = calloc(len(launchParamsList), sizeof(ccuda.CUDA_LAUNCH_PARAMS)) - if claunchParamsList is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(launchParamsList)) + 'x' + str(sizeof(ccuda.CUDA_LAUNCH_PARAMS))) + cylaunchParamsList = calloc(len(launchParamsList), sizeof(cydriver.CUDA_LAUNCH_PARAMS)) + if cylaunchParamsList is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(launchParamsList)) + 'x' + str(sizeof(cydriver.CUDA_LAUNCH_PARAMS))) for idx in range(len(launchParamsList)): - string.memcpy(&claunchParamsList[idx], (launchParamsList[idx])._ptr, sizeof(ccuda.CUDA_LAUNCH_PARAMS)) + string.memcpy(&cylaunchParamsList[idx], (launchParamsList[idx])._ptr, sizeof(cydriver.CUDA_LAUNCH_PARAMS)) if numDevices > len(launchParamsList): raise RuntimeError("List is too small: " + str(len(launchParamsList)) + " < " + str(numDevices)) - err = ccuda.cuLaunchCooperativeKernelMultiDevice((launchParamsList[0])._ptr if len(launchParamsList) == 1 else claunchParamsList, numDevices, flags) - if claunchParamsList is not NULL: - free(claunchParamsList) + err = cydriver.cuLaunchCooperativeKernelMultiDevice((launchParamsList[0])._ptr if len(launchParamsList) == 1 else cylaunchParamsList, numDevices, flags) + if cylaunchParamsList is not NULL: + free(cylaunchParamsList) return (CUresult(err),) {{endif}} @@ -34236,27 +34236,27 @@ def cuLaunchHostFunc(hStream, fn, userData): -------- :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuMemAllocManaged`, :py:obj:`~.cuStreamAttachMemAsync`, :py:obj:`~.cuStreamAddCallback` """ - cdef ccuda.CUhostFn cfn + cdef cydriver.CUhostFn cyfn if fn is None: - cfn = 0 + cyfn = 0 elif isinstance(fn, (CUhostFn,)): pfn = int(fn) - cfn = pfn + cyfn = pfn else: pfn = int(CUhostFn(fn)) - cfn = pfn - cdef ccuda.CUstream chStream + cyfn = pfn + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cuserData = utils.HelperInputVoidPtr(userData) - cdef void* cuserData_ptr = cuserData.cptr - err = ccuda.cuLaunchHostFunc(chStream, cfn, cuserData_ptr) + cyhStream = phStream + cyuserData = utils.HelperInputVoidPtr(userData) + cdef void* cyuserData_ptr = cyuserData.cptr + err = cydriver.cuLaunchHostFunc(cyhStream, cyfn, cyuserData_ptr) return (CUresult(err),) {{endif}} @@ -34291,16 +34291,16 @@ def cuFuncSetBlockShape(hfunc, int x, int y, int z): -------- :py:obj:`~.cuFuncSetSharedSize`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetSize`, :py:obj:`~.cuParamSeti`, :py:obj:`~.cuParamSetf`, :py:obj:`~.cuParamSetv`, :py:obj:`~.cuLaunch`, :py:obj:`~.cuLaunchGrid`, :py:obj:`~.cuLaunchGridAsync`, :py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - err = ccuda.cuFuncSetBlockShape(chfunc, x, y, z) + cyhfunc = phfunc + err = cydriver.cuFuncSetBlockShape(cyhfunc, x, y, z) return (CUresult(err),) {{endif}} @@ 
-34332,16 +34332,16 @@ def cuFuncSetSharedSize(hfunc, unsigned int numbytes): -------- :py:obj:`~.cuFuncSetBlockShape`, :py:obj:`~.cuFuncSetCacheConfig`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetSize`, :py:obj:`~.cuParamSeti`, :py:obj:`~.cuParamSetf`, :py:obj:`~.cuParamSetv`, :py:obj:`~.cuLaunch`, :py:obj:`~.cuLaunchGrid`, :py:obj:`~.cuLaunchGridAsync`, :py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - err = ccuda.cuFuncSetSharedSize(chfunc, numbytes) + cyhfunc = phfunc + err = cydriver.cuFuncSetSharedSize(cyhfunc, numbytes) return (CUresult(err),) {{endif}} @@ -34372,16 +34372,16 @@ def cuParamSetSize(hfunc, unsigned int numbytes): -------- :py:obj:`~.cuFuncSetBlockShape`, :py:obj:`~.cuFuncSetSharedSize`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetf`, :py:obj:`~.cuParamSeti`, :py:obj:`~.cuParamSetv`, :py:obj:`~.cuLaunch`, :py:obj:`~.cuLaunchGrid`, :py:obj:`~.cuLaunchGridAsync`, :py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - err = ccuda.cuParamSetSize(chfunc, numbytes) + cyhfunc = phfunc + err = cydriver.cuParamSetSize(cyhfunc, numbytes) return (CUresult(err),) {{endif}} @@ -34415,16 +34415,16 @@ def cuParamSeti(hfunc, int offset, unsigned int value): -------- :py:obj:`~.cuFuncSetBlockShape`, :py:obj:`~.cuFuncSetSharedSize`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetSize`, :py:obj:`~.cuParamSetf`, :py:obj:`~.cuParamSetv`, :py:obj:`~.cuLaunch`, :py:obj:`~.cuLaunchGrid`, :py:obj:`~.cuLaunchGridAsync`, :py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - err = ccuda.cuParamSeti(chfunc, offset, value) + cyhfunc = phfunc + err = cydriver.cuParamSeti(cyhfunc, offset, value) return (CUresult(err),) {{endif}} @@ -34458,16 +34458,16 @@ def cuParamSetf(hfunc, int offset, float value): -------- :py:obj:`~.cuFuncSetBlockShape`, :py:obj:`~.cuFuncSetSharedSize`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetSize`, :py:obj:`~.cuParamSeti`, :py:obj:`~.cuParamSetv`, :py:obj:`~.cuLaunch`, :py:obj:`~.cuLaunchGrid`, :py:obj:`~.cuLaunchGridAsync`, :py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - err = ccuda.cuParamSetf(chfunc, offset, value) + cyhfunc = phfunc + err = cydriver.cuParamSetf(cyhfunc, offset, value) return (CUresult(err),) {{endif}} @@ -34503,18 +34503,18 @@ def cuParamSetv(hfunc, int offset, ptr, unsigned int numbytes): -------- :py:obj:`~.cuFuncSetBlockShape`, :py:obj:`~.cuFuncSetSharedSize`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetSize`, :py:obj:`~.cuParamSetf`, :py:obj:`~.cuParamSeti`, :py:obj:`~.cuLaunch`, :py:obj:`~.cuLaunchGrid`, :py:obj:`~.cuLaunchGridAsync`, 
:py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - cptr = utils.HelperInputVoidPtr(ptr) - cdef void* cptr_ptr = cptr.cptr - err = ccuda.cuParamSetv(chfunc, offset, cptr_ptr, numbytes) + cyhfunc = phfunc + cyptr = utils.HelperInputVoidPtr(ptr) + cdef void* cyptr_ptr = cyptr.cptr + err = cydriver.cuParamSetv(cyhfunc, offset, cyptr_ptr, numbytes) return (CUresult(err),) {{endif}} @@ -34556,16 +34556,16 @@ def cuLaunch(f): -------- :py:obj:`~.cuFuncSetBlockShape`, :py:obj:`~.cuFuncSetSharedSize`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetSize`, :py:obj:`~.cuParamSetf`, :py:obj:`~.cuParamSeti`, :py:obj:`~.cuParamSetv`, :py:obj:`~.cuLaunchGrid`, :py:obj:`~.cuLaunchGridAsync`, :py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction cf + cdef cydriver.CUfunction cyf if f is None: - cf = 0 + cyf = 0 elif isinstance(f, (CUfunction,)): pf = int(f) - cf = pf + cyf = pf else: pf = int(CUfunction(f)) - cf = pf - err = ccuda.cuLaunch(cf) + cyf = pf + err = cydriver.cuLaunch(cyf) return (CUresult(err),) {{endif}} @@ -34611,16 +34611,16 @@ def cuLaunchGrid(f, int grid_width, int grid_height): -------- :py:obj:`~.cuFuncSetBlockShape`, :py:obj:`~.cuFuncSetSharedSize`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuParamSetSize`, :py:obj:`~.cuParamSetf`, :py:obj:`~.cuParamSeti`, :py:obj:`~.cuParamSetv`, :py:obj:`~.cuLaunch`, :py:obj:`~.cuLaunchGridAsync`, :py:obj:`~.cuLaunchKernel` """ - cdef ccuda.CUfunction cf + cdef cydriver.CUfunction cyf if f is None: - cf = 0 + cyf = 0 elif isinstance(f, (CUfunction,)): pf = int(f) - cf = pf + cyf = pf else: pf = int(CUfunction(f)) - cf = pf - err = ccuda.cuLaunchGrid(cf, grid_width, grid_height) + cyf = pf + err = cydriver.cuLaunchGrid(cyf, grid_width, grid_height) return (CUresult(err),) {{endif}} @@ -34674,25 +34674,25 @@ def cuLaunchGridAsync(f, int grid_width, int grid_height, hStream): ----- In certain cases where cubins are created with no ABI (i.e., using `ptxas` `None` `no`), this function may serialize kernel launches. The CUDA driver retains asynchronous behavior by growing the per-thread stack as needed per launch and not shrinking it afterwards. 
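That note ends the legacy cuLaunchGridAsync documentation; the surrounding hunks rename the entire launch path, from cuLaunchKernel and cuLaunchKernelEx down to the deprecated cuFuncSetBlockShape/cuParamSet*/cuLaunch* family. A sketch of the modern entry point under the new layout follows — `kernel` (a CUfunction from cuModuleGetFunction), `d_out` (a CUdeviceptr), and `stream` are placeholders, and the (values, types) tuple is the argument-packing form handled by the bindings' HelperKernelParams seen in these hunks:

    import ctypes
    from cuda.bindings import driver

    # Assumed kernel signature: (float* out, size_t n).
    n = 1 << 20
    blocks = (n + 255) // 256
    arg_values = (d_out, n)
    arg_types = (None, ctypes.c_size_t)  # None: pack the binding type (CUdeviceptr) as-is
    err, = driver.cuLaunchKernel(
        kernel,
        blocks, 1, 1,             # grid dimensions
        256, 1, 1,                # block dimensions
        0,                        # sharedMemBytes
        stream,
        (arg_values, arg_types),  # kernelParams, packed by HelperKernelParams
        0)                        # extra

`extra` stays 0 here: kernelParams and extra are mutually exclusive ways of passing arguments, and the kernelParams tuple is the simpler path from Python.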
""" - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUfunction cf + cyhStream = phStream + cdef cydriver.CUfunction cyf if f is None: - cf = 0 + cyf = 0 elif isinstance(f, (CUfunction,)): pf = int(f) - cf = pf + cyf = pf else: pf = int(CUfunction(f)) - cf = pf - err = ccuda.cuLaunchGridAsync(cf, grid_width, grid_height, chStream) + cyf = pf + err = cydriver.cuLaunchGridAsync(cyf, grid_width, grid_height, cyhStream) return (CUresult(err),) {{endif}} @@ -34724,25 +34724,25 @@ def cuParamSetTexRef(hfunc, int texunit, hTexRef): CUresult :py:obj:`~.CUDA_SUCCESS`, :py:obj:`~.CUDA_ERROR_DEINITIALIZED`, :py:obj:`~.CUDA_ERROR_NOT_INITIALIZED`, :py:obj:`~.CUDA_ERROR_INVALID_CONTEXT`, :py:obj:`~.CUDA_ERROR_INVALID_VALUE` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUfunction chfunc + cyhTexRef = phTexRef + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - err = ccuda.cuParamSetTexRef(chfunc, texunit, chTexRef) + cyhfunc = phfunc + err = cydriver.cuParamSetTexRef(cyhfunc, texunit, cyhTexRef) return (CUresult(err),) {{endif}} @@ -34805,17 +34805,17 @@ def cuFuncSetSharedMemConfig(hfunc, config not None : CUsharedconfig): -------- :py:obj:`~.cuCtxGetCacheConfig`, :py:obj:`~.cuCtxSetCacheConfig`, :py:obj:`~.cuCtxGetSharedMemConfig`, :py:obj:`~.cuCtxSetSharedMemConfig`, :py:obj:`~.cuFuncGetAttribute`, :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cudaFuncSetSharedMemConfig` """ - cdef ccuda.CUfunction chfunc + cdef cydriver.CUfunction cyhfunc if hfunc is None: - chfunc = 0 + cyhfunc = 0 elif isinstance(hfunc, (CUfunction,)): phfunc = int(hfunc) - chfunc = phfunc + cyhfunc = phfunc else: phfunc = int(CUfunction(hfunc)) - chfunc = phfunc - cdef ccuda.CUsharedconfig cconfig = config.value - err = ccuda.cuFuncSetSharedMemConfig(chfunc, cconfig) + cyhfunc = phfunc + cdef cydriver.CUsharedconfig cyconfig = config.value + err = cydriver.cuFuncSetSharedMemConfig(cyhfunc, cyconfig) return (CUresult(err),) {{endif}} @@ -34844,7 +34844,7 @@ def cuGraphCreate(unsigned int flags): :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphAddMemsetNode`, :py:obj:`~.cuGraphInstantiate`, :py:obj:`~.cuGraphDestroy`, :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphGetEdges`, :py:obj:`~.cuGraphClone` """ cdef CUgraph phGraph = CUgraph() - err = ccuda.cuGraphCreate(phGraph._ptr, flags) + err = cydriver.cuGraphCreate(phGraph._ptr, flags) return (CUresult(err), phGraph) {{endif}} @@ -34941,30 +34941,30 @@ def cuGraphAddKernelNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | Li """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected 
Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_KERNEL_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddKernelNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_KERNEL_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddKernelNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cynodeParams_ptr) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35001,17 +35001,17 @@ def cuGraphKernelNodeGetParams(hNode): -------- :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphKernelNodeSetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUDA_KERNEL_NODE_PARAMS nodeParams = CUDA_KERNEL_NODE_PARAMS() - err = ccuda.cuGraphKernelNodeGetParams(chNode, nodeParams._ptr) + err = cydriver.cuGraphKernelNodeGetParams(cyhNode, nodeParams._ptr) return (CUresult(err), nodeParams) {{endif}} @@ -35039,17 +35039,17 @@ def cuGraphKernelNodeSetParams(hNode, nodeParams : Optional[CUDA_KERNEL_NODE_PAR -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphKernelNodeGetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUDA_KERNEL_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphKernelNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef 
cydriver.CUDA_KERNEL_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphKernelNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -35103,41 +35103,41 @@ def cuGraphAddMemcpyNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | Li -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuGraphMemcpyNodeGetParams`, :py:obj:`~.cuGraphMemcpyNodeSetParams`, :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphDestroyNode`, :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphAddMemsetNode` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx + cyctx = pctx dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_MEMCPY3D* ccopyParams_ptr = copyParams._ptr if copyParams != None else NULL - err = ccuda.cuGraphAddMemcpyNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, ccopyParams_ptr, cctx) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_MEMCPY3D* cycopyParams_ptr = copyParams._ptr if copyParams != None else NULL + err = cydriver.cuGraphAddMemcpyNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cycopyParams_ptr, cyctx) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35165,17 +35165,17 @@ def cuGraphMemcpyNodeGetParams(hNode): -------- :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphMemcpyNodeSetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, 
(CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUDA_MEMCPY3D nodeParams = CUDA_MEMCPY3D() - err = ccuda.cuGraphMemcpyNodeGetParams(chNode, nodeParams._ptr) + err = cydriver.cuGraphMemcpyNodeGetParams(cyhNode, nodeParams._ptr) return (CUresult(err), nodeParams) {{endif}} @@ -35203,17 +35203,17 @@ def cuGraphMemcpyNodeSetParams(hNode, nodeParams : Optional[CUDA_MEMCPY3D]): -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuMemcpy3D`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphMemcpyNodeGetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUDA_MEMCPY3D* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphMemcpyNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cydriver.CUDA_MEMCPY3D* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphMemcpyNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -35257,41 +35257,41 @@ def cuGraphAddMemsetNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | Li -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuGraphMemsetNodeGetParams`, :py:obj:`~.cuGraphMemsetNodeSetParams`, :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphDestroyNode`, :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphAddMemcpyNode` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx + cyctx = pctx dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + 
str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_MEMSET_NODE_PARAMS* cmemsetParams_ptr = memsetParams._ptr if memsetParams != None else NULL - err = ccuda.cuGraphAddMemsetNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cmemsetParams_ptr, cctx) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_MEMSET_NODE_PARAMS* cymemsetParams_ptr = memsetParams._ptr if memsetParams != None else NULL + err = cydriver.cuGraphAddMemsetNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cymemsetParams_ptr, cyctx) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35319,17 +35319,17 @@ def cuGraphMemsetNodeGetParams(hNode): -------- :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuGraphAddMemsetNode`, :py:obj:`~.cuGraphMemsetNodeSetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUDA_MEMSET_NODE_PARAMS nodeParams = CUDA_MEMSET_NODE_PARAMS() - err = ccuda.cuGraphMemsetNodeGetParams(chNode, nodeParams._ptr) + err = cydriver.cuGraphMemsetNodeGetParams(cyhNode, nodeParams._ptr) return (CUresult(err), nodeParams) {{endif}} @@ -35357,17 +35357,17 @@ def cuGraphMemsetNodeSetParams(hNode, nodeParams : Optional[CUDA_MEMSET_NODE_PAR -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuMemsetD2D32`, :py:obj:`~.cuGraphAddMemsetNode`, :py:obj:`~.cuGraphMemsetNodeGetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUDA_MEMSET_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphMemsetNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cydriver.CUDA_MEMSET_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphMemsetNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -35411,30 +35411,30 @@ def cuGraphAddHostNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | List """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' 
+ str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_HOST_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddHostNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_HOST_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddHostNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cynodeParams_ptr) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35462,17 +35462,17 @@ def cuGraphHostNodeGetParams(hNode): -------- :py:obj:`~.cuLaunchHostFunc`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphHostNodeSetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUDA_HOST_NODE_PARAMS nodeParams = CUDA_HOST_NODE_PARAMS() - err = ccuda.cuGraphHostNodeGetParams(chNode, nodeParams._ptr) + err = cydriver.cuGraphHostNodeGetParams(cyhNode, nodeParams._ptr) return (CUresult(err), nodeParams) {{endif}} @@ -35500,17 +35500,17 @@ def cuGraphHostNodeSetParams(hNode, nodeParams : Optional[CUDA_HOST_NODE_PARAMS] -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuLaunchHostFunc`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphHostNodeGetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUDA_HOST_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphHostNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cydriver.CUDA_HOST_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphHostNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -35555,40 +35555,40 @@ def cuGraphAddChildGraphNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuGraphChildGraphNodeGetGraph`, :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphDestroyNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphAddMemsetNode`, :py:obj:`~.cuGraphClone` """ - cdef ccuda.CUgraph cchildGraph + cdef cydriver.CUgraph cychildGraph if childGraph is None: - cchildGraph = 0 + cychildGraph = 0 elif isinstance(childGraph, (CUgraph,)): pchildGraph = int(childGraph) - cchildGraph = 
pchildGraph + cychildGraph = pchildGraph else: pchildGraph = int(CUgraph(childGraph)) - cchildGraph = pchildGraph + cychildGraph = pchildGraph dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - err = ccuda.cuGraphAddChildGraphNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cchildGraph) - if cdependencies is not NULL: - free(cdependencies) + err = cydriver.cuGraphAddChildGraphNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cychildGraph) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35621,17 +35621,17 @@ def cuGraphChildGraphNodeGetGraph(hNode): -------- :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphNodeFindInClone` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUgraph phGraph = CUgraph() - err = ccuda.cuGraphChildGraphNodeGetGraph(chNode, phGraph._ptr) + err = cydriver.cuGraphChildGraphNodeGetGraph(cyhNode, phGraph._ptr) return (CUresult(err), phGraph) {{endif}} @@ -35676,29 +35676,29 @@ def cuGraphAddEmptyNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | Lis """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph 
= int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - err = ccuda.cuGraphAddEmptyNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies) - if cdependencies is not NULL: - free(cdependencies) + err = cydriver.cuGraphAddEmptyNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35740,40 +35740,40 @@ def cuGraphAddEventRecordNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuGraphAddEventWaitNode`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphDestroyNode`, :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphAddMemsetNode` """ - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent + cyevent = pevent dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + 
str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - err = ccuda.cuGraphAddEventRecordNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cevent) - if cdependencies is not NULL: - free(cdependencies) + err = cydriver.cuGraphAddEventRecordNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cyevent) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35801,17 +35801,17 @@ def cuGraphEventRecordNodeGetEvent(hNode): -------- :py:obj:`~.cuGraphAddEventRecordNode`, :py:obj:`~.cuGraphEventRecordNodeSetEvent`, :py:obj:`~.cuGraphEventWaitNodeGetEvent`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUevent event_out = CUevent() - err = ccuda.cuGraphEventRecordNodeGetEvent(chNode, event_out._ptr) + err = cydriver.cuGraphEventRecordNodeGetEvent(cyhNode, event_out._ptr) return (CUresult(err), event_out) {{endif}} @@ -35839,25 +35839,25 @@ def cuGraphEventRecordNodeSetEvent(hNode, event): -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuGraphAddEventRecordNode`, :py:obj:`~.cuGraphEventRecordNodeGetEvent`, :py:obj:`~.cuGraphEventWaitNodeSetEvent`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent` """ - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent - cdef ccuda.CUgraphNode chNode + cyevent = pevent + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - err = ccuda.cuGraphEventRecordNodeSetEvent(chNode, cevent) + cyhNode = phNode + err = cydriver.cuGraphEventRecordNodeSetEvent(cyhNode, cyevent) return (CUresult(err),) {{endif}} @@ -35901,40 +35901,40 @@ def cuGraphAddEventWaitNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuGraphAddEventRecordNode`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphDestroyNode`, :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphAddMemsetNode` """ - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent + cyevent = pevent dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance 
of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - err = ccuda.cuGraphAddEventWaitNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cevent) - if cdependencies is not NULL: - free(cdependencies) + err = cydriver.cuGraphAddEventWaitNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cyevent) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -35962,17 +35962,17 @@ def cuGraphEventWaitNodeGetEvent(hNode): -------- :py:obj:`~.cuGraphAddEventWaitNode`, :py:obj:`~.cuGraphEventWaitNodeSetEvent`, :py:obj:`~.cuGraphEventRecordNodeGetEvent`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUevent event_out = CUevent() - err = ccuda.cuGraphEventWaitNodeGetEvent(chNode, event_out._ptr) + err = cydriver.cuGraphEventWaitNodeGetEvent(cyhNode, event_out._ptr) return (CUresult(err), event_out) {{endif}} @@ -36000,25 +36000,25 @@ def cuGraphEventWaitNodeSetEvent(hNode, event): -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuGraphAddEventWaitNode`, :py:obj:`~.cuGraphEventWaitNodeGetEvent`, :py:obj:`~.cuGraphEventRecordNodeSetEvent`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent` """ - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent - cdef ccuda.CUgraphNode chNode + cyevent = pevent + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - err = 
ccuda.cuGraphEventWaitNodeSetEvent(chNode, cevent) + cyhNode = phNode + err = cydriver.cuGraphEventWaitNodeSetEvent(cyhNode, cyevent) return (CUresult(err),) {{endif}} @@ -36063,30 +36063,30 @@ def cuGraphAddExternalSemaphoresSignalNode(hGraph, dependencies : Optional[Tuple """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddExternalSemaphoresSignalNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddExternalSemaphoresSignalNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cynodeParams_ptr) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -36120,17 +36120,17 @@ def cuGraphExternalSemaphoresSignalNodeGetParams(hNode): -------- :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cuGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cuGraphExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUDA_EXT_SEM_SIGNAL_NODE_PARAMS params_out = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS() - err = ccuda.cuGraphExternalSemaphoresSignalNodeGetParams(chNode, params_out._ptr) + err = cydriver.cuGraphExternalSemaphoresSignalNodeGetParams(cyhNode, params_out._ptr) 
return (CUresult(err), params_out) {{endif}} @@ -36159,17 +36159,17 @@ def cuGraphExternalSemaphoresSignalNodeSetParams(hNode, nodeParams : Optional[CU -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cuGraphExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphExternalSemaphoresSignalNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExternalSemaphoresSignalNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -36214,30 +36214,30 @@ def cuGraphAddExternalSemaphoresWaitNode(hGraph, dependencies : Optional[Tuple[C """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddExternalSemaphoresWaitNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddExternalSemaphoresWaitNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cynodeParams_ptr) + if cydependencies is not 
NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -36271,17 +36271,17 @@ def cuGraphExternalSemaphoresWaitNodeGetParams(hNode): -------- :py:obj:`~.cuLaunchKernel`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuGraphExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUDA_EXT_SEM_WAIT_NODE_PARAMS params_out = CUDA_EXT_SEM_WAIT_NODE_PARAMS() - err = ccuda.cuGraphExternalSemaphoresWaitNodeGetParams(chNode, params_out._ptr) + err = cydriver.cuGraphExternalSemaphoresWaitNodeGetParams(cyhNode, params_out._ptr) return (CUresult(err), params_out) {{endif}} @@ -36310,17 +36310,17 @@ def cuGraphExternalSemaphoresWaitNodeSetParams(hNode, nodeParams : Optional[CUDA -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuGraphExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphExternalSemaphoresWaitNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExternalSemaphoresWaitNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -36368,30 +36368,30 @@ def cuGraphAddBatchMemOpNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + 
str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddBatchMemOpNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddBatchMemOpNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cynodeParams_ptr) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -36424,17 +36424,17 @@ def cuGraphBatchMemOpNodeGetParams(hNode): -------- :py:obj:`~.cuStreamBatchMemOp`, :py:obj:`~.cuGraphAddBatchMemOpNode`, :py:obj:`~.cuGraphBatchMemOpNodeSetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUDA_BATCH_MEM_OP_NODE_PARAMS nodeParams_out = CUDA_BATCH_MEM_OP_NODE_PARAMS() - err = ccuda.cuGraphBatchMemOpNodeGetParams(chNode, nodeParams_out._ptr) + err = cydriver.cuGraphBatchMemOpNodeGetParams(cyhNode, nodeParams_out._ptr) return (CUresult(err), nodeParams_out) {{endif}} @@ -36465,17 +36465,17 @@ def cuGraphBatchMemOpNodeSetParams(hNode, nodeParams : Optional[CUDA_BATCH_MEM_O -------- :py:obj:`~.cuGraphNodeSetParams`, :py:obj:`~.cuStreamBatchMemOp`, :py:obj:`~.cuGraphAddBatchMemOpNode`, :py:obj:`~.cuGraphBatchMemOpNodeGetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphBatchMemOpNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphBatchMemOpNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -36528,26 +36528,26 @@ def cuGraphExecBatchMemOpNodeSetParams(hGraphExec, hNode, nodeParams : Optional[ -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuStreamBatchMemOp`, :py:obj:`~.cuGraphAddBatchMemOpNode`, :py:obj:`~.cuGraphBatchMemOpNodeGetParams`, :py:obj:`~.cuGraphBatchMemOpNodeSetParams`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif 
isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphExecBatchMemOpNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExecBatchMemOpNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -36631,30 +36631,30 @@ def cuGraphAddMemAllocNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUDA_MEM_ALLOC_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddMemAllocNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUDA_MEM_ALLOC_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddMemAllocNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cynodeParams_ptr) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -36685,17 +36685,17 @@ def cuGraphMemAllocNodeGetParams(hNode): -------- :py:obj:`~.cuGraphAddMemAllocNode`, :py:obj:`~.cuGraphMemFreeNodeGetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef 
CUDA_MEM_ALLOC_NODE_PARAMS params_out = CUDA_MEM_ALLOC_NODE_PARAMS() - err = ccuda.cuGraphMemAllocNodeGetParams(chNode, params_out._ptr) + err = cydriver.cuGraphMemAllocNodeGetParams(cyhNode, params_out._ptr) return (CUresult(err), params_out) {{endif}} @@ -36754,40 +36754,40 @@ def cuGraphAddMemFreeNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | L -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuGraphAddMemAllocNode`, :py:obj:`~.cuGraphMemFreeNodeGetParams`, :py:obj:`~.cuDeviceGraphMemTrim`, :py:obj:`~.cuDeviceGetGraphMemAttribute`, :py:obj:`~.cuDeviceSetGraphMemAttribute`, :py:obj:`~.cuMemAllocAsync`, :py:obj:`~.cuMemFreeAsync`, :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphDestroyNode`, :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddEventRecordNode`, :py:obj:`~.cuGraphAddEventWaitNode`, :py:obj:`~.cuGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphAddMemsetNode` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr + cydptr = pdptr dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - err = ccuda.cuGraphAddMemFreeNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cdptr) - if cdependencies is not NULL: - free(cdependencies) + err = cydriver.cuGraphAddMemFreeNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cydptr) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -36815,17 +36815,17 @@ def cuGraphMemFreeNodeGetParams(hNode): -------- :py:obj:`~.cuGraphAddMemFreeNode`, 
:py:obj:`~.cuGraphMemAllocNodeGetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode + cyhNode = phNode cdef CUdeviceptr dptr_out = CUdeviceptr() - err = ccuda.cuGraphMemFreeNodeGetParams(chNode, dptr_out._ptr) + err = cydriver.cuGraphMemFreeNodeGetParams(cyhNode, dptr_out._ptr) return (CUresult(err), dptr_out) {{endif}} @@ -36853,16 +36853,16 @@ def cuDeviceGraphMemTrim(device): -------- :py:obj:`~.cuGraphAddMemAllocNode`, :py:obj:`~.cuGraphAddMemFreeNode`, :py:obj:`~.cuDeviceSetGraphMemAttribute`, :py:obj:`~.cuDeviceGetGraphMemAttribute` """ - cdef ccuda.CUdevice cdevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - err = ccuda.cuDeviceGraphMemTrim(cdevice) + cydevice = pdevice + err = cydriver.cuDeviceGraphMemTrim(cydevice) return (CUresult(err),) {{endif}} @@ -36907,20 +36907,20 @@ def cuDeviceGetGraphMemAttribute(device, attr not None : CUgraphMem_attribute): -------- :py:obj:`~.cuDeviceSetGraphMemAttribute`, :py:obj:`~.cuGraphAddMemAllocNode`, :py:obj:`~.cuGraphAddMemFreeNode` """ - cdef ccuda.CUdevice cdevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - cdef ccuda.CUgraphMem_attribute cattr = attr.value - cdef utils.HelperCUgraphMem_attribute cvalue = utils.HelperCUgraphMem_attribute(attr, 0, is_getter=True) - cdef void* cvalue_ptr = cvalue.cptr - err = ccuda.cuDeviceGetGraphMemAttribute(cdevice, cattr, cvalue_ptr) - return (CUresult(err), cvalue.pyObj()) + cydevice = pdevice + cdef cydriver.CUgraphMem_attribute cyattr = attr.value + cdef utils.HelperCUgraphMem_attribute cyvalue = utils.HelperCUgraphMem_attribute(attr, 0, is_getter=True) + cdef void* cyvalue_ptr = cyvalue.cptr + err = cydriver.cuDeviceGetGraphMemAttribute(cydevice, cyattr, cyvalue_ptr) + return (CUresult(err), cyvalue.pyObj()) {{endif}} {{if 'cuDeviceSetGraphMemAttribute' in found_functions}} @@ -36957,19 +36957,19 @@ def cuDeviceSetGraphMemAttribute(device, attr not None : CUgraphMem_attribute, v -------- :py:obj:`~.cuDeviceGetGraphMemAttribute`, :py:obj:`~.cuGraphAddMemAllocNode`, :py:obj:`~.cuGraphAddMemFreeNode` """ - cdef ccuda.CUdevice cdevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice - cdef ccuda.CUgraphMem_attribute cattr = attr.value - cdef utils.HelperCUgraphMem_attribute cvalue = utils.HelperCUgraphMem_attribute(attr, value, is_getter=False) - cdef void* cvalue_ptr = cvalue.cptr - err = ccuda.cuDeviceSetGraphMemAttribute(cdevice, cattr, cvalue_ptr) + cydevice = pdevice + cdef cydriver.CUgraphMem_attribute cyattr = attr.value + cdef utils.HelperCUgraphMem_attribute cyvalue = utils.HelperCUgraphMem_attribute(attr, value, is_getter=False) + cdef void* cyvalue_ptr = cyvalue.cptr + err = cydriver.cuDeviceSetGraphMemAttribute(cydevice, cyattr, cyvalue_ptr) return (CUresult(err),) {{endif}} @@ -37003,17 
+37003,17 @@ def cuGraphClone(originalGraph): -------- :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphNodeFindInClone` """ - cdef ccuda.CUgraph coriginalGraph + cdef cydriver.CUgraph cyoriginalGraph if originalGraph is None: - coriginalGraph = 0 + cyoriginalGraph = 0 elif isinstance(originalGraph, (CUgraph,)): poriginalGraph = int(originalGraph) - coriginalGraph = poriginalGraph + cyoriginalGraph = poriginalGraph else: poriginalGraph = int(CUgraph(originalGraph)) - coriginalGraph = poriginalGraph + cyoriginalGraph = poriginalGraph cdef CUgraph phGraphClone = CUgraph() - err = ccuda.cuGraphClone(phGraphClone._ptr, coriginalGraph) + err = cydriver.cuGraphClone(phGraphClone._ptr, cyoriginalGraph) return (CUresult(err), phGraphClone) {{endif}} @@ -37050,26 +37050,26 @@ def cuGraphNodeFindInClone(hOriginalNode, hClonedGraph): -------- :py:obj:`~.cuGraphClone` """ - cdef ccuda.CUgraph chClonedGraph + cdef cydriver.CUgraph cyhClonedGraph if hClonedGraph is None: - chClonedGraph = 0 + cyhClonedGraph = 0 elif isinstance(hClonedGraph, (CUgraph,)): phClonedGraph = int(hClonedGraph) - chClonedGraph = phClonedGraph + cyhClonedGraph = phClonedGraph else: phClonedGraph = int(CUgraph(hClonedGraph)) - chClonedGraph = phClonedGraph - cdef ccuda.CUgraphNode chOriginalNode + cyhClonedGraph = phClonedGraph + cdef cydriver.CUgraphNode cyhOriginalNode if hOriginalNode is None: - chOriginalNode = 0 + cyhOriginalNode = 0 elif isinstance(hOriginalNode, (CUgraphNode,)): phOriginalNode = int(hOriginalNode) - chOriginalNode = phOriginalNode + cyhOriginalNode = phOriginalNode else: phOriginalNode = int(CUgraphNode(hOriginalNode)) - chOriginalNode = phOriginalNode + cyhOriginalNode = phOriginalNode cdef CUgraphNode phNode = CUgraphNode() - err = ccuda.cuGraphNodeFindInClone(phNode._ptr, chOriginalNode, chClonedGraph) + err = cydriver.cuGraphNodeFindInClone(phNode._ptr, cyhOriginalNode, cyhClonedGraph) return (CUresult(err), phNode) {{endif}} @@ -37097,17 +37097,17 @@ def cuGraphNodeGetType(hNode): -------- :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphChildGraphNodeGetGraph`, :py:obj:`~.cuGraphKernelNodeGetParams`, :py:obj:`~.cuGraphKernelNodeSetParams`, :py:obj:`~.cuGraphHostNodeGetParams`, :py:obj:`~.cuGraphHostNodeSetParams`, :py:obj:`~.cuGraphMemcpyNodeGetParams`, :py:obj:`~.cuGraphMemcpyNodeSetParams`, :py:obj:`~.cuGraphMemsetNodeGetParams`, :py:obj:`~.cuGraphMemsetNodeSetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphNodeType typename - err = ccuda.cuGraphNodeGetType(chNode, &typename) + cyhNode = phNode + cdef cydriver.CUgraphNodeType typename + err = cydriver.cuGraphNodeGetType(cyhNode, &typename) return (CUresult(err), CUgraphNodeType(typename)) {{endif}} @@ -37145,26 +37145,26 @@ def cuGraphGetNodes(hGraph, size_t numNodes = 0): :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphGetEdges`, :py:obj:`~.cuGraphNodeGetType`, :py:obj:`~.cuGraphNodeGetDependencies`, :py:obj:`~.cuGraphNodeGetDependentNodes` """ cdef size_t _graph_length = numNodes - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - 
cdef ccuda.CUgraphNode* cnodes = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cynodes = NULL pynodes = [] if _graph_length != 0: - cnodes = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cnodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - err = ccuda.cuGraphGetNodes(chGraph, cnodes, &numNodes) + cynodes = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cynodes is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + err = cydriver.cuGraphGetNodes(cyhGraph, cynodes, &numNodes) if CUresult(err) == CUresult(0): - pynodes = [CUgraphNode(init_value=cnodes[idx]) for idx in range(_graph_length)] - if cnodes is not NULL: - free(cnodes) + pynodes = [CUgraphNode(init_value=cynodes[idx]) for idx in range(_graph_length)] + if cynodes is not NULL: + free(cynodes) return (CUresult(err), pynodes, numNodes) {{endif}} @@ -37202,26 +37202,26 @@ def cuGraphGetRootNodes(hGraph, size_t numRootNodes = 0): :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetEdges`, :py:obj:`~.cuGraphNodeGetType`, :py:obj:`~.cuGraphNodeGetDependencies`, :py:obj:`~.cuGraphNodeGetDependentNodes` """ cdef size_t _graph_length = numRootNodes - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphNode* crootNodes = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cyrootNodes = NULL pyrootNodes = [] if _graph_length != 0: - crootNodes = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if crootNodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - err = ccuda.cuGraphGetRootNodes(chGraph, crootNodes, &numRootNodes) + cyrootNodes = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cyrootNodes is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + err = cydriver.cuGraphGetRootNodes(cyhGraph, cyrootNodes, &numRootNodes) if CUresult(err) == CUresult(0): - pyrootNodes = [CUgraphNode(init_value=crootNodes[idx]) for idx in range(_graph_length)] - if crootNodes is not NULL: - free(crootNodes) + pyrootNodes = [CUgraphNode(init_value=cyrootNodes[idx]) for idx in range(_graph_length)] + if cyrootNodes is not NULL: + free(cyrootNodes) return (CUresult(err), pyrootNodes, numRootNodes) {{endif}} @@ -37263,36 +37263,36 @@ def cuGraphGetEdges(hGraph, size_t numEdges = 0): :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphAddDependencies`, :py:obj:`~.cuGraphRemoveDependencies`, :py:obj:`~.cuGraphNodeGetDependencies`, :py:obj:`~.cuGraphNodeGetDependentNodes` """ cdef size_t _graph_length = numEdges - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphNode* cfrom_ = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cyfrom_ = NULL pyfrom_ = [] if _graph_length != 0: - cfrom_ = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cfrom_ is NULL: - raise 
MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - cdef ccuda.CUgraphNode* cto = NULL + cyfrom_ = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + cdef cydriver.CUgraphNode* cyto = NULL pyto = [] if _graph_length != 0: - cto = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - err = ccuda.cuGraphGetEdges(chGraph, cfrom_, cto, &numEdges) + cyto = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + err = cydriver.cuGraphGetEdges(cyhGraph, cyfrom_, cyto, &numEdges) if CUresult(err) == CUresult(0): - pyfrom_ = [CUgraphNode(init_value=cfrom_[idx]) for idx in range(_graph_length)] - if cfrom_ is not NULL: - free(cfrom_) + pyfrom_ = [CUgraphNode(init_value=cyfrom_[idx]) for idx in range(_graph_length)] + if cyfrom_ is not NULL: + free(cyfrom_) if CUresult(err) == CUresult(0): - pyto = [CUgraphNode(init_value=cto[idx]) for idx in range(_graph_length)] - if cto is not NULL: - free(cto) + pyto = [CUgraphNode(init_value=cyto[idx]) for idx in range(_graph_length)] + if cyto is not NULL: + free(cyto) return (CUresult(err), pyfrom_, pyto, numEdges) {{endif}} @@ -37341,46 +37341,46 @@ def cuGraphGetEdges_v2(hGraph, size_t numEdges = 0): :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphAddDependencies`, :py:obj:`~.cuGraphRemoveDependencies`, :py:obj:`~.cuGraphNodeGetDependencies`, :py:obj:`~.cuGraphNodeGetDependentNodes` """ cdef size_t _graph_length = numEdges - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphNode* cfrom_ = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cyfrom_ = NULL pyfrom_ = [] if _graph_length != 0: - cfrom_ = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - cdef ccuda.CUgraphNode* cto = NULL + cyfrom_ = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + cdef cydriver.CUgraphNode* cyto = NULL pyto = [] if _graph_length != 0: - cto = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - cdef ccuda.CUgraphEdgeData* cedgeData = NULL + cyto = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + cdef cydriver.CUgraphEdgeData* cyedgeData = NULL pyedgeData = [] if _graph_length != 0: - cedgeData = calloc(_graph_length, sizeof(ccuda.CUgraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + 
str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) - err = ccuda.cuGraphGetEdges_v2(chGraph, cfrom_, cto, cedgeData, &numEdges) + cyedgeData = calloc(_graph_length, sizeof(cydriver.CUgraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) + err = cydriver.cuGraphGetEdges_v2(cyhGraph, cyfrom_, cyto, cyedgeData, &numEdges) if CUresult(err) == CUresult(0): - pyfrom_ = [CUgraphNode(init_value=cfrom_[idx]) for idx in range(_graph_length)] - if cfrom_ is not NULL: - free(cfrom_) + pyfrom_ = [CUgraphNode(init_value=cyfrom_[idx]) for idx in range(_graph_length)] + if cyfrom_ is not NULL: + free(cyfrom_) if CUresult(err) == CUresult(0): - pyto = [CUgraphNode(init_value=cto[idx]) for idx in range(_graph_length)] - if cto is not NULL: - free(cto) + pyto = [CUgraphNode(init_value=cyto[idx]) for idx in range(_graph_length)] + if cyto is not NULL: + free(cyto) if CUresult(err) == CUresult(0): - pyedgeData = [CUgraphEdgeData(_ptr=&cedgeData[idx]) for idx in range(_graph_length)] - if cedgeData is not NULL: - free(cedgeData) + pyedgeData = [CUgraphEdgeData(_ptr=&cyedgeData[idx]) for idx in range(_graph_length)] + if cyedgeData is not NULL: + free(cyedgeData) return (CUresult(err), pyfrom_, pyto, pyedgeData, numEdges) {{endif}} @@ -37419,26 +37419,26 @@ def cuGraphNodeGetDependencies(hNode, size_t numDependencies = 0): :py:obj:`~.cuGraphNodeGetDependentNodes`, :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphGetEdges`, :py:obj:`~.cuGraphAddDependencies`, :py:obj:`~.cuGraphRemoveDependencies` """ cdef size_t _graph_length = numDependencies - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphNode* cdependencies = NULL + cyhNode = phNode + cdef cydriver.CUgraphNode* cydependencies = NULL pydependencies = [] if _graph_length != 0: - cdependencies = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - err = ccuda.cuGraphNodeGetDependencies(chNode, cdependencies, &numDependencies) + cydependencies = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + err = cydriver.cuGraphNodeGetDependencies(cyhNode, cydependencies, &numDependencies) if CUresult(err) == CUresult(0): - pydependencies = [CUgraphNode(init_value=cdependencies[idx]) for idx in range(_graph_length)] - if cdependencies is not NULL: - free(cdependencies) + pydependencies = [CUgraphNode(init_value=cydependencies[idx]) for idx in range(_graph_length)] + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), pydependencies, numDependencies) {{endif}} @@ -37484,36 +37484,36 @@ def cuGraphNodeGetDependencies_v2(hNode, size_t numDependencies = 0): :py:obj:`~.cuGraphNodeGetDependentNodes`, :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphGetEdges`, :py:obj:`~.cuGraphAddDependencies`, :py:obj:`~.cuGraphRemoveDependencies` """ cdef size_t _graph_length = numDependencies - cdef ccuda.CUgraphNode chNode + cdef 
cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphNode* cdependencies = NULL + cyhNode = phNode + cdef cydriver.CUgraphNode* cydependencies = NULL pydependencies = [] if _graph_length != 0: - cdependencies = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - cdef ccuda.CUgraphEdgeData* cedgeData = NULL + cydependencies = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + cdef cydriver.CUgraphEdgeData* cyedgeData = NULL pyedgeData = [] if _graph_length != 0: - cedgeData = calloc(_graph_length, sizeof(ccuda.CUgraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) - err = ccuda.cuGraphNodeGetDependencies_v2(chNode, cdependencies, cedgeData, &numDependencies) + cyedgeData = calloc(_graph_length, sizeof(cydriver.CUgraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) + err = cydriver.cuGraphNodeGetDependencies_v2(cyhNode, cydependencies, cyedgeData, &numDependencies) if CUresult(err) == CUresult(0): - pydependencies = [CUgraphNode(init_value=cdependencies[idx]) for idx in range(_graph_length)] - if cdependencies is not NULL: - free(cdependencies) + pydependencies = [CUgraphNode(init_value=cydependencies[idx]) for idx in range(_graph_length)] + if cydependencies is not NULL: + free(cydependencies) if CUresult(err) == CUresult(0): - pyedgeData = [CUgraphEdgeData(_ptr=&cedgeData[idx]) for idx in range(_graph_length)] - if cedgeData is not NULL: - free(cedgeData) + pyedgeData = [CUgraphEdgeData(_ptr=&cyedgeData[idx]) for idx in range(_graph_length)] + if cyedgeData is not NULL: + free(cyedgeData) return (CUresult(err), pydependencies, pyedgeData, numDependencies) {{endif}} @@ -37552,26 +37552,26 @@ def cuGraphNodeGetDependentNodes(hNode, size_t numDependentNodes = 0): :py:obj:`~.cuGraphNodeGetDependencies`, :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphGetEdges`, :py:obj:`~.cuGraphAddDependencies`, :py:obj:`~.cuGraphRemoveDependencies` """ cdef size_t _graph_length = numDependentNodes - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphNode* cdependentNodes = NULL + cyhNode = phNode + cdef cydriver.CUgraphNode* cydependentNodes = NULL pydependentNodes = [] if _graph_length != 0: - cdependentNodes = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cdependentNodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - err = ccuda.cuGraphNodeGetDependentNodes(chNode, cdependentNodes, &numDependentNodes) + cydependentNodes = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cydependentNodes is NULL: + raise 
MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + err = cydriver.cuGraphNodeGetDependentNodes(cyhNode, cydependentNodes, &numDependentNodes) if CUresult(err) == CUresult(0): - pydependentNodes = [CUgraphNode(init_value=cdependentNodes[idx]) for idx in range(_graph_length)] - if cdependentNodes is not NULL: - free(cdependentNodes) + pydependentNodes = [CUgraphNode(init_value=cydependentNodes[idx]) for idx in range(_graph_length)] + if cydependentNodes is not NULL: + free(cydependentNodes) return (CUresult(err), pydependentNodes, numDependentNodes) {{endif}} @@ -37617,36 +37617,36 @@ def cuGraphNodeGetDependentNodes_v2(hNode, size_t numDependentNodes = 0): :py:obj:`~.cuGraphNodeGetDependencies`, :py:obj:`~.cuGraphGetNodes`, :py:obj:`~.cuGraphGetRootNodes`, :py:obj:`~.cuGraphGetEdges`, :py:obj:`~.cuGraphAddDependencies`, :py:obj:`~.cuGraphRemoveDependencies` """ cdef size_t _graph_length = numDependentNodes - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphNode* cdependentNodes = NULL + cyhNode = phNode + cdef cydriver.CUgraphNode* cydependentNodes = NULL pydependentNodes = [] if _graph_length != 0: - cdependentNodes = calloc(_graph_length, sizeof(ccuda.CUgraphNode)) - if cdependentNodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphNode))) - cdef ccuda.CUgraphEdgeData* cedgeData = NULL + cydependentNodes = calloc(_graph_length, sizeof(cydriver.CUgraphNode)) + if cydependentNodes is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphNode))) + cdef cydriver.CUgraphEdgeData* cyedgeData = NULL pyedgeData = [] if _graph_length != 0: - cedgeData = calloc(_graph_length, sizeof(ccuda.CUgraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) - err = ccuda.cuGraphNodeGetDependentNodes_v2(chNode, cdependentNodes, cedgeData, &numDependentNodes) + cyedgeData = calloc(_graph_length, sizeof(cydriver.CUgraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) + err = cydriver.cuGraphNodeGetDependentNodes_v2(cyhNode, cydependentNodes, cyedgeData, &numDependentNodes) if CUresult(err) == CUresult(0): - pydependentNodes = [CUgraphNode(init_value=cdependentNodes[idx]) for idx in range(_graph_length)] - if cdependentNodes is not NULL: - free(cdependentNodes) + pydependentNodes = [CUgraphNode(init_value=cydependentNodes[idx]) for idx in range(_graph_length)] + if cydependentNodes is not NULL: + free(cydependentNodes) if CUresult(err) == CUresult(0): - pyedgeData = [CUgraphEdgeData(_ptr=&cedgeData[idx]) for idx in range(_graph_length)] - if cedgeData is not NULL: - free(cedgeData) + pyedgeData = [CUgraphEdgeData(_ptr=&cyedgeData[idx]) for idx in range(_graph_length)] + if cyedgeData is not NULL: + free(cyedgeData) return (CUresult(err), pydependentNodes, pyedgeData, numDependentNodes) {{endif}} @@ -37685,40 +37685,40 @@ def cuGraphAddDependencies(hGraph, from_ : Optional[Tuple[CUgraphNode] | List[CU """ to = [] if to is None else to if 
not all(isinstance(_x, (CUgraphNode,)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") from_ = [] if from_ is None else from_ if not all(isinstance(_x, (CUgraphNode,)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphNode* cfrom_ = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), sizeof(ccuda.CUgraphNode)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyfrom_ = calloc(len(from_), sizeof(cydriver.CUgraphNode)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(from_)): - cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccuda.CUgraphNode* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cydriver.CUgraphNode* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccuda.CUgraphNode)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyto = calloc(len(to), sizeof(cydriver.CUgraphNode)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] - err = ccuda.cuGraphAddDependencies(chGraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) + cyto[idx] = (to[idx])._ptr[0] + err = cydriver.cuGraphAddDependencies(cyhGraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is not NULL: + free(cyto) return (CUresult(err),) {{endif}} @@ -37760,52 +37760,52 @@ def cuGraphAddDependencies_v2(hGraph, from_ : Optional[Tuple[CUgraphNode] | List """ edgeData = [] if edgeData is None else edgeData if not all(isinstance(_x, (CUgraphEdgeData,)) for _x in edgeData): - raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[ccuda.CUgraphEdgeData,] or List[ccuda.CUgraphEdgeData,]") + raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[cydriver.CUgraphEdgeData,] or List[cydriver.CUgraphEdgeData,]") to = [] if to is None else to if not all(isinstance(_x, (CUgraphNode,)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") from_ = [] if from_ is None else from_ if not all(isinstance(_x, 
(CUgraphNode,)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphNode* cfrom_ = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), sizeof(ccuda.CUgraphNode)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyfrom_ = calloc(len(from_), sizeof(cydriver.CUgraphNode)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(from_)): - cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccuda.CUgraphNode* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cydriver.CUgraphNode* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccuda.CUgraphNode)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyto = calloc(len(to), sizeof(cydriver.CUgraphNode)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] - cdef ccuda.CUgraphEdgeData* cedgeData = NULL + cyto[idx] = (to[idx])._ptr[0] + cdef cydriver.CUgraphEdgeData* cyedgeData = NULL if len(edgeData) > 0: - cedgeData = calloc(len(edgeData), sizeof(ccuda.CUgraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) + cyedgeData = calloc(len(edgeData), sizeof(cydriver.CUgraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) for idx in range(len(edgeData)): - string.memcpy(&cedgeData[idx], (edgeData[idx])._ptr, sizeof(ccuda.CUgraphEdgeData)) - err = ccuda.cuGraphAddDependencies_v2(chGraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, (edgeData[0])._ptr if len(edgeData) == 1 else cedgeData, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) - if cedgeData is not NULL: - free(cedgeData) + string.memcpy(&cyedgeData[idx], (edgeData[idx])._ptr, sizeof(cydriver.CUgraphEdgeData)) + err = cydriver.cuGraphAddDependencies_v2(cyhGraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, (edgeData[0])._ptr if len(edgeData) == 1 else cyedgeData, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is not NULL: + free(cyto) + if cyedgeData is not NULL: + free(cyedgeData) return (CUresult(err),) {{endif}} @@ -37848,40 +37848,40 @@ def cuGraphRemoveDependencies(hGraph, from_ : Optional[Tuple[CUgraphNode] | List """ to = [] if to is None else to if not all(isinstance(_x, (CUgraphNode,)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or 
List[ccuda.CUgraphNode,]") + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") from_ = [] if from_ is None else from_ if not all(isinstance(_x, (CUgraphNode,)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphNode* cfrom_ = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), sizeof(ccuda.CUgraphNode)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyfrom_ = calloc(len(from_), sizeof(cydriver.CUgraphNode)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(from_)): - cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccuda.CUgraphNode* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cydriver.CUgraphNode* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccuda.CUgraphNode)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyto = calloc(len(to), sizeof(cydriver.CUgraphNode)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] - err = ccuda.cuGraphRemoveDependencies(chGraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) + cyto[idx] = (to[idx])._ptr[0] + err = cydriver.cuGraphRemoveDependencies(cyhGraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is not NULL: + free(cyto) return (CUresult(err),) {{endif}} @@ -37929,52 +37929,52 @@ def cuGraphRemoveDependencies_v2(hGraph, from_ : Optional[Tuple[CUgraphNode] | L """ edgeData = [] if edgeData is None else edgeData if not all(isinstance(_x, (CUgraphEdgeData,)) for _x in edgeData): - raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[ccuda.CUgraphEdgeData,] or List[ccuda.CUgraphEdgeData,]") + raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[cydriver.CUgraphEdgeData,] or List[cydriver.CUgraphEdgeData,]") to = [] if to is None else to if not all(isinstance(_x, (CUgraphNode,)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") from_ = [] if from_ is None else from_ if not all(isinstance(_x, (CUgraphNode,)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or 
List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphNode* cfrom_ = NULL + cyhGraph = phGraph + cdef cydriver.CUgraphNode* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), sizeof(ccuda.CUgraphNode)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyfrom_ = calloc(len(from_), sizeof(cydriver.CUgraphNode)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(from_)): - cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccuda.CUgraphNode* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cydriver.CUgraphNode* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccuda.CUgraphNode)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cyto = calloc(len(to), sizeof(cydriver.CUgraphNode)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] - cdef ccuda.CUgraphEdgeData* cedgeData = NULL + cyto[idx] = (to[idx])._ptr[0] + cdef cydriver.CUgraphEdgeData* cyedgeData = NULL if len(edgeData) > 0: - cedgeData = calloc(len(edgeData), sizeof(ccuda.CUgraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) + cyedgeData = calloc(len(edgeData), sizeof(cydriver.CUgraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) for idx in range(len(edgeData)): - string.memcpy(&cedgeData[idx], (edgeData[idx])._ptr, sizeof(ccuda.CUgraphEdgeData)) - err = ccuda.cuGraphRemoveDependencies_v2(chGraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, (edgeData[0])._ptr if len(edgeData) == 1 else cedgeData, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) - if cedgeData is not NULL: - free(cedgeData) + string.memcpy(&cyedgeData[idx], (edgeData[idx])._ptr, sizeof(cydriver.CUgraphEdgeData)) + err = cydriver.cuGraphRemoveDependencies_v2(cyhGraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, (edgeData[0])._ptr if len(edgeData) == 1 else cyedgeData, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is not NULL: + free(cyto) + if cyedgeData is not NULL: + free(cyedgeData) return (CUresult(err),) {{endif}} @@ -38004,16 +38004,16 @@ def cuGraphDestroyNode(hNode): -------- :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphAddEmptyNode`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphAddMemsetNode` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, 
(CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - err = ccuda.cuGraphDestroyNode(chNode) + cyhNode = phNode + err = cydriver.cuGraphDestroyNode(cyhNode) return (CUresult(err),) {{endif}} @@ -38106,17 +38106,17 @@ def cuGraphInstantiate(hGraph, unsigned long long flags): -------- :py:obj:`~.cuGraphInstantiate`, :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphUpload`, :py:obj:`~.cuGraphLaunch`, :py:obj:`~.cuGraphExecDestroy` """ - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphExec phGraphExec = CUgraphExec() - err = ccuda.cuGraphInstantiate(phGraphExec._ptr, chGraph, flags) + err = cydriver.cuGraphInstantiate(phGraphExec._ptr, cyhGraph, flags) return (CUresult(err), phGraphExec) {{endif}} @@ -38249,18 +38249,18 @@ def cuGraphInstantiateWithParams(hGraph, instantiateParams : Optional[CUDA_GRAPH -------- :py:obj:`~.cuGraphCreate`, :py:obj:`~.cuGraphInstantiate`, :py:obj:`~.cuGraphExecDestroy` """ - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphExec phGraphExec = CUgraphExec() - cdef ccuda.CUDA_GRAPH_INSTANTIATE_PARAMS* cinstantiateParams_ptr = instantiateParams._ptr if instantiateParams != None else NULL - err = ccuda.cuGraphInstantiateWithParams(phGraphExec._ptr, chGraph, cinstantiateParams_ptr) + cdef cydriver.CUDA_GRAPH_INSTANTIATE_PARAMS* cyinstantiateParams_ptr = instantiateParams._ptr if instantiateParams != None else NULL + err = cydriver.cuGraphInstantiateWithParams(phGraphExec._ptr, cyhGraph, cyinstantiateParams_ptr) return (CUresult(err), phGraphExec) {{endif}} @@ -38291,17 +38291,17 @@ def cuGraphExecGetFlags(hGraphExec): -------- :py:obj:`~.cuGraphInstantiate`, :py:obj:`~.cuGraphInstantiateWithParams` """ - cdef ccuda.CUgraphExec chGraphExec + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec cdef cuuint64_t flags = cuuint64_t() - err = ccuda.cuGraphExecGetFlags(chGraphExec, flags._ptr) + err = cydriver.cuGraphExecGetFlags(cyhGraphExec, flags._ptr) return (CUresult(err), flags) {{endif}} @@ -38364,26 +38364,26 @@ def cuGraphExecKernelNodeSetParams(hGraphExec, hNode, nodeParams : Optional[CUDA -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddKernelNode`, :py:obj:`~.cuGraphKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode 
cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUDA_KERNEL_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphExecKernelNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cydriver.CUDA_KERNEL_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExecKernelNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -38432,35 +38432,35 @@ def cuGraphExecMemcpyNodeSetParams(hGraphExec, hNode, copyParams : Optional[CUDA -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddMemcpyNode`, :py:obj:`~.cuGraphMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - cdef ccuda.CUgraphNode chNode + cyctx = pctx + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUDA_MEMCPY3D* ccopyParams_ptr = copyParams._ptr if copyParams != None else NULL - err = ccuda.cuGraphExecMemcpyNodeSetParams(chGraphExec, chNode, ccopyParams_ptr, cctx) + cyhGraphExec = phGraphExec + cdef cydriver.CUDA_MEMCPY3D* cycopyParams_ptr = copyParams._ptr if copyParams != None else NULL + err = cydriver.cuGraphExecMemcpyNodeSetParams(cyhGraphExec, cyhNode, cycopyParams_ptr, cyctx) return (CUresult(err),) {{endif}} @@ -38514,35 +38514,35 @@ def cuGraphExecMemsetNodeSetParams(hGraphExec, hNode, memsetParams : Optional[CU -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddMemsetNode`, :py:obj:`~.cuGraphMemsetNodeSetParams`, :py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, 
:py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - cdef ccuda.CUgraphNode chNode + cyctx = pctx + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUDA_MEMSET_NODE_PARAMS* cmemsetParams_ptr = memsetParams._ptr if memsetParams != None else NULL - err = ccuda.cuGraphExecMemsetNodeSetParams(chGraphExec, chNode, cmemsetParams_ptr, cctx) + cyhGraphExec = phGraphExec + cdef cydriver.CUDA_MEMSET_NODE_PARAMS* cymemsetParams_ptr = memsetParams._ptr if memsetParams != None else NULL + err = cydriver.cuGraphExecMemsetNodeSetParams(cyhGraphExec, cyhNode, cymemsetParams_ptr, cyctx) return (CUresult(err),) {{endif}} @@ -38579,26 +38579,26 @@ def cuGraphExecHostNodeSetParams(hGraphExec, hNode, nodeParams : Optional[CUDA_H -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddHostNode`, :py:obj:`~.cuGraphHostNodeSetParams`, :py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUDA_HOST_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphExecHostNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cydriver.CUDA_HOST_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExecHostNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -38643,34 +38643,34 @@ def cuGraphExecChildGraphNodeSetParams(hGraphExec, hNode, childGraph): -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddChildGraphNode`, :py:obj:`~.cuGraphChildGraphNodeGetGraph`, 
:py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraph cchildGraph + cdef cydriver.CUgraph cychildGraph if childGraph is None: - cchildGraph = 0 + cychildGraph = 0 elif isinstance(childGraph, (CUgraph,)): pchildGraph = int(childGraph) - cchildGraph = pchildGraph + cychildGraph = pchildGraph else: pchildGraph = int(CUgraph(childGraph)) - cchildGraph = pchildGraph - cdef ccuda.CUgraphNode chNode + cychildGraph = pchildGraph + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - err = ccuda.cuGraphExecChildGraphNodeSetParams(chGraphExec, chNode, cchildGraph) + cyhGraphExec = phGraphExec + err = cydriver.cuGraphExecChildGraphNodeSetParams(cyhGraphExec, cyhNode, cychildGraph) return (CUresult(err),) {{endif}} @@ -38708,34 +38708,34 @@ def cuGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event): -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddEventRecordNode`, :py:obj:`~.cuGraphEventRecordNodeGetEvent`, :py:obj:`~.cuGraphEventWaitNodeSetEvent`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent - cdef ccuda.CUgraphNode chNode + cyevent = pevent + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - err = ccuda.cuGraphExecEventRecordNodeSetEvent(chGraphExec, chNode, cevent) + cyhGraphExec = phGraphExec + err = cydriver.cuGraphExecEventRecordNodeSetEvent(cyhGraphExec, 
cyhNode, cyevent) return (CUresult(err),) {{endif}} @@ -38773,34 +38773,34 @@ def cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event): -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddEventWaitNode`, :py:obj:`~.cuGraphEventWaitNodeGetEvent`, :py:obj:`~.cuGraphEventRecordNodeSetEvent`, :py:obj:`~.cuEventRecordWithFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUevent cevent + cdef cydriver.CUevent cyevent if event is None: - cevent = 0 + cyevent = 0 elif isinstance(event, (CUevent,)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(CUevent(event)) - cevent = pevent - cdef ccuda.CUgraphNode chNode + cyevent = pevent + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - err = ccuda.cuGraphExecEventWaitNodeSetEvent(chGraphExec, chNode, cevent) + cyhGraphExec = phGraphExec + err = cydriver.cuGraphExecEventWaitNodeSetEvent(cyhGraphExec, cyhNode, cyevent) return (CUresult(err),) {{endif}} @@ -38842,26 +38842,26 @@ def cuGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, nodePara -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cuImportExternalSemaphore`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync`, :py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = 
ccuda.cuGraphExecExternalSemaphoresSignalNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExecExternalSemaphoresSignalNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -38903,26 +38903,26 @@ def cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, nodeParams -------- :py:obj:`~.cuGraphExecNodeSetParams`, :py:obj:`~.cuGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cuImportExternalSemaphore`, :py:obj:`~.cuSignalExternalSemaphoresAsync`, :py:obj:`~.cuWaitExternalSemaphoresAsync`, :py:obj:`~.cuGraphExecKernelNodeSetParams`, :py:obj:`~.cuGraphExecMemcpyNodeSetParams`, :py:obj:`~.cuGraphExecMemsetNodeSetParams`, :py:obj:`~.cuGraphExecHostNodeSetParams`, :py:obj:`~.cuGraphExecChildGraphNodeSetParams`, :py:obj:`~.cuGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cuGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cuGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphExecExternalSemaphoresWaitNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExecExternalSemaphoresWaitNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -38974,25 +38974,25 @@ def cuGraphNodeSetEnabled(hGraphExec, hNode, unsigned int isEnabled): ----- Currently only kernel, memset and memcpy nodes are supported. """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - err = ccuda.cuGraphNodeSetEnabled(chGraphExec, chNode, isEnabled) + cyhGraphExec = phGraphExec + err = cydriver.cuGraphNodeSetEnabled(cyhGraphExec, cyhNode, isEnabled) return (CUresult(err),) {{endif}} @@ -39033,26 +39033,26 @@ def cuGraphNodeGetEnabled(hGraphExec, hNode): This function will not reflect device-side updates for device-updatable kernel nodes. 
""" - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec cdef unsigned int isEnabled = 0 - err = ccuda.cuGraphNodeGetEnabled(chGraphExec, chNode, &isEnabled) + err = cydriver.cuGraphNodeGetEnabled(cyhGraphExec, cyhNode, &isEnabled) return (CUresult(err), isEnabled) {{endif}} @@ -39084,25 +39084,25 @@ def cuGraphUpload(hGraphExec, hStream): -------- :py:obj:`~.cuGraphInstantiate`, :py:obj:`~.cuGraphLaunch`, :py:obj:`~.cuGraphExecDestroy` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUgraphExec chGraphExec + cyhStream = phStream + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - err = ccuda.cuGraphUpload(chGraphExec, chStream) + cyhGraphExec = phGraphExec + err = cydriver.cuGraphUpload(cyhGraphExec, cyhStream) return (CUresult(err),) {{endif}} @@ -39139,25 +39139,25 @@ def cuGraphLaunch(hGraphExec, hStream): -------- :py:obj:`~.cuGraphInstantiate`, :py:obj:`~.cuGraphUpload`, :py:obj:`~.cuGraphExecDestroy` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUgraphExec chGraphExec + cyhStream = phStream + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - err = ccuda.cuGraphLaunch(chGraphExec, chStream) + cyhGraphExec = phGraphExec + err = cydriver.cuGraphLaunch(cyhGraphExec, cyhStream) return (CUresult(err),) {{endif}} @@ -39185,16 +39185,16 @@ def cuGraphExecDestroy(hGraphExec): -------- :py:obj:`~.cuGraphInstantiate`, :py:obj:`~.cuGraphUpload`, :py:obj:`~.cuGraphLaunch` """ - cdef ccuda.CUgraphExec chGraphExec + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - err = ccuda.cuGraphExecDestroy(chGraphExec) + cyhGraphExec = phGraphExec + err = cydriver.cuGraphExecDestroy(cyhGraphExec) return (CUresult(err),) {{endif}} @@ 
-39220,16 +39220,16 @@ def cuGraphDestroy(hGraph): -------- :py:obj:`~.cuGraphCreate` """ - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - err = ccuda.cuGraphDestroy(chGraph) + cyhGraph = phGraph + err = cydriver.cuGraphDestroy(cyhGraph) return (CUresult(err),) {{endif}} @@ -39389,26 +39389,26 @@ def cuGraphExecUpdate(hGraphExec, hGraph): -------- :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - cdef ccuda.CUgraphExec chGraphExec + cyhGraph = phGraph + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec cdef CUgraphExecUpdateResultInfo resultInfo = CUgraphExecUpdateResultInfo() - err = ccuda.cuGraphExecUpdate(chGraphExec, chGraph, resultInfo._ptr) + err = cydriver.cuGraphExecUpdate(cyhGraphExec, cyhGraph, resultInfo._ptr) return (CUresult(err), resultInfo) {{endif}} @@ -39438,25 +39438,25 @@ def cuGraphKernelNodeCopyAttributes(dst, src): -------- :py:obj:`~.CUaccessPolicyWindow` """ - cdef ccuda.CUgraphNode csrc + cdef cydriver.CUgraphNode cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (CUgraphNode,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(CUgraphNode(src)) - csrc = psrc - cdef ccuda.CUgraphNode cdst + cysrc = psrc + cdef cydriver.CUgraphNode cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (CUgraphNode,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(CUgraphNode(dst)) - cdst = pdst - err = ccuda.cuGraphKernelNodeCopyAttributes(cdst, csrc) + cydst = pdst + err = cydriver.cuGraphKernelNodeCopyAttributes(cydst, cysrc) return (CUresult(err),) {{endif}} @@ -39487,18 +39487,18 @@ def cuGraphKernelNodeGetAttribute(hNode, attr not None : CUkernelNodeAttrID): -------- :py:obj:`~.CUaccessPolicyWindow` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUkernelNodeAttrID cattr = attr.value + cyhNode = phNode + cdef cydriver.CUkernelNodeAttrID cyattr = attr.value cdef CUkernelNodeAttrValue value_out = CUkernelNodeAttrValue() - err = ccuda.cuGraphKernelNodeGetAttribute(chNode, cattr, value_out._ptr) + err = cydriver.cuGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._ptr) return (CUresult(err), value_out) {{endif}} @@ -39529,18 +39529,18 @@ def cuGraphKernelNodeSetAttribute(hNode, attr not None : CUkernelNodeAttrID, val -------- :py:obj:`~.CUaccessPolicyWindow` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef 
ccuda.CUkernelNodeAttrID cattr = attr.value - cdef ccuda.CUkernelNodeAttrValue* cvalue_ptr = value._ptr if value != None else NULL - err = ccuda.cuGraphKernelNodeSetAttribute(chNode, cattr, cvalue_ptr) + cyhNode = phNode + cdef cydriver.CUkernelNodeAttrID cyattr = attr.value + cdef cydriver.CUkernelNodeAttrValue* cyvalue_ptr = value._ptr if value != None else NULL + err = cydriver.cuGraphKernelNodeSetAttribute(cyhNode, cyattr, cyvalue_ptr) return (CUresult(err),) {{endif}} @@ -39571,16 +39571,16 @@ def cuGraphDebugDotPrint(hGraph, char* path, unsigned int flags): CUresult :py:obj:`~.CUDA_SUCCESS`, :py:obj:`~.CUDA_ERROR_INVALID_VALUE`, :py:obj:`~.CUDA_ERROR_OPERATING_SYSTEM` """ - cdef ccuda.CUgraph chGraph + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph - err = ccuda.cuGraphDebugDotPrint(chGraph, path, flags) + cyhGraph = phGraph + err = cydriver.cuGraphDebugDotPrint(cyhGraph, path, flags) return (CUresult(err),) {{endif}} @@ -39628,19 +39628,19 @@ def cuUserObjectCreate(ptr, destroy, unsigned int initialRefcount, unsigned int -------- :py:obj:`~.cuUserObjectRetain`, :py:obj:`~.cuUserObjectRelease`, :py:obj:`~.cuGraphRetainUserObject`, :py:obj:`~.cuGraphReleaseUserObject`, :py:obj:`~.cuGraphCreate` """ - cdef ccuda.CUhostFn cdestroy + cdef cydriver.CUhostFn cydestroy if destroy is None: - cdestroy = 0 + cydestroy = 0 elif isinstance(destroy, (CUhostFn,)): pdestroy = int(destroy) - cdestroy = pdestroy + cydestroy = pdestroy else: pdestroy = int(CUhostFn(destroy)) - cdestroy = pdestroy + cydestroy = pdestroy cdef CUuserObject object_out = CUuserObject() - cptr = utils.HelperInputVoidPtr(ptr) - cdef void* cptr_ptr = cptr.cptr - err = ccuda.cuUserObjectCreate(object_out._ptr, cptr_ptr, cdestroy, initialRefcount, flags) + cyptr = utils.HelperInputVoidPtr(ptr) + cdef void* cyptr_ptr = cyptr.cptr + err = cydriver.cuUserObjectCreate(object_out._ptr, cyptr_ptr, cydestroy, initialRefcount, flags) return (CUresult(err), object_out) {{endif}} @@ -39673,16 +39673,16 @@ def cuUserObjectRetain(object, unsigned int count): -------- :py:obj:`~.cuUserObjectCreate`, :py:obj:`~.cuUserObjectRelease`, :py:obj:`~.cuGraphRetainUserObject`, :py:obj:`~.cuGraphReleaseUserObject`, :py:obj:`~.cuGraphCreate` """ - cdef ccuda.CUuserObject cobject + cdef cydriver.CUuserObject cyobject if object is None: - cobject = 0 + cyobject = 0 elif isinstance(object, (CUuserObject,)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(CUuserObject(object)) - cobject = pobject - err = ccuda.cuUserObjectRetain(cobject, count) + cyobject = pobject + err = cydriver.cuUserObjectRetain(cyobject, count) return (CUresult(err),) {{endif}} @@ -39718,16 +39718,16 @@ def cuUserObjectRelease(object, unsigned int count): -------- :py:obj:`~.cuUserObjectCreate`, :py:obj:`~.cuUserObjectRetain`, :py:obj:`~.cuGraphRetainUserObject`, :py:obj:`~.cuGraphReleaseUserObject`, :py:obj:`~.cuGraphCreate` """ - cdef ccuda.CUuserObject cobject + cdef cydriver.CUuserObject cyobject if object is None: - cobject = 0 + cyobject = 0 elif isinstance(object, (CUuserObject,)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(CUuserObject(object)) - cobject = pobject - err = ccuda.cuUserObjectRelease(cobject, count) + cyobject = pobject + err = cydriver.cuUserObjectRelease(cyobject, count) return (CUresult(err),) 
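[Note on the pattern in the surrounding hunks, not part of the generated diff: every change here is the same mechanical rename — the `ccuda` cimport namespace becomes `cydriver`, and intermediate C variables gain a `cy` prefix (`chGraph` → `cyhGraph`, `cnodeParams_ptr` → `cynodeParams_ptr`) — while the Python-visible signatures and return conventions are untouched. A minimal caller-side sketch of that unchanged contract under the new module layout; it assumes `cuInit` has been called and a context is current (setup not shown), and the claim that the old `from cuda import cuda` import path keeps working rests on the compatibility shims added elsewhere in this patch:

    from cuda.bindings import driver  # new layout; the legacy `from cuda import cuda` path is shimmed

    err, graph = driver.cuGraphCreate(0)               # wrappers return (CUresult, *outputs)
    assert err == driver.CUresult.CUDA_SUCCESS
    err, nodes, count = driver.cuGraphGetNodes(graph)  # array outputs come back as Python lists
    err, = driver.cuGraphDestroy(graph)                # void driver APIs return a one-element tuple
]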
{{endif}} @@ -39766,25 +39766,25 @@ def cuGraphRetainUserObject(graph, object, unsigned int count, unsigned int flag -------- :py:obj:`~.cuUserObjectCreate`, :py:obj:`~.cuUserObjectRetain`, :py:obj:`~.cuUserObjectRelease`, :py:obj:`~.cuGraphReleaseUserObject`, :py:obj:`~.cuGraphCreate` """ - cdef ccuda.CUuserObject cobject + cdef cydriver.CUuserObject cyobject if object is None: - cobject = 0 + cyobject = 0 elif isinstance(object, (CUuserObject,)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(CUuserObject(object)) - cobject = pobject - cdef ccuda.CUgraph cgraph + cyobject = pobject + cdef cydriver.CUgraph cygraph if graph is None: - cgraph = 0 + cygraph = 0 elif isinstance(graph, (CUgraph,)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(CUgraph(graph)) - cgraph = pgraph - err = ccuda.cuGraphRetainUserObject(cgraph, cobject, count, flags) + cygraph = pgraph + err = cydriver.cuGraphRetainUserObject(cygraph, cyobject, count, flags) return (CUresult(err),) {{endif}} @@ -39818,25 +39818,25 @@ def cuGraphReleaseUserObject(graph, object, unsigned int count): -------- :py:obj:`~.cuUserObjectCreate`, :py:obj:`~.cuUserObjectRetain`, :py:obj:`~.cuUserObjectRelease`, :py:obj:`~.cuGraphRetainUserObject`, :py:obj:`~.cuGraphCreate` """ - cdef ccuda.CUuserObject cobject + cdef cydriver.CUuserObject cyobject if object is None: - cobject = 0 + cyobject = 0 elif isinstance(object, (CUuserObject,)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(CUuserObject(object)) - cobject = pobject - cdef ccuda.CUgraph cgraph + cyobject = pobject + cdef cydriver.CUgraph cygraph if graph is None: - cgraph = 0 + cygraph = 0 elif isinstance(graph, (CUgraph,)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(CUgraph(graph)) - cgraph = pgraph - err = ccuda.cuGraphReleaseUserObject(cgraph, cobject, count) + cygraph = pgraph + err = cydriver.cuGraphReleaseUserObject(cygraph, cyobject, count) return (CUresult(err),) {{endif}} @@ -39889,30 +39889,30 @@ def cuGraphAddNode(hGraph, dependencies : Optional[Tuple[CUgraphNode] | List[CUg """ dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = 
(dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccuda.CUgraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddNode(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) + cdef cydriver.CUgraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddNode(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, cynodeParams_ptr) + if cydependencies is not NULL: + free(cydependencies) return (CUresult(err), phGraphNode) {{endif}} @@ -39968,43 +39968,43 @@ def cuGraphAddNode_v2(hGraph, dependencies : Optional[Tuple[CUgraphNode] | List[ """ dependencyData = [] if dependencyData is None else dependencyData if not all(isinstance(_x, (CUgraphEdgeData,)) for _x in dependencyData): - raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[ccuda.CUgraphEdgeData,] or List[ccuda.CUgraphEdgeData,]") + raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[cydriver.CUgraphEdgeData,] or List[cydriver.CUgraphEdgeData,]") dependencies = [] if dependencies is None else dependencies if not all(isinstance(_x, (CUgraphNode,)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccuda.CUgraphNode,] or List[ccuda.CUgraphNode,]") - cdef ccuda.CUgraph chGraph + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cydriver.CUgraphNode,] or List[cydriver.CUgraphNode,]") + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphNode phGraphNode = CUgraphNode() - cdef ccuda.CUgraphNode* cdependencies = NULL + cdef cydriver.CUgraphNode* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccuda.CUgraphNode)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccuda.CUgraphNode))) + cydependencies = calloc(len(dependencies), sizeof(cydriver.CUgraphNode)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cydriver.CUgraphNode))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] - cdef ccuda.CUgraphEdgeData* cdependencyData = NULL + cydependencies[idx] = (dependencies[idx])._ptr[0] + cdef cydriver.CUgraphEdgeData* cydependencyData = NULL if len(dependencyData) > 0: - cdependencyData = calloc(len(dependencyData), sizeof(ccuda.CUgraphEdgeData)) - if cdependencyData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(ccuda.CUgraphEdgeData))) + cydependencyData = calloc(len(dependencyData), sizeof(cydriver.CUgraphEdgeData)) + if cydependencyData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cydriver.CUgraphEdgeData))) for idx in 
range(len(dependencyData)): - string.memcpy(&cdependencyData[idx], (dependencyData[idx])._ptr, sizeof(ccuda.CUgraphEdgeData)) + string.memcpy(&cydependencyData[idx], (dependencyData[idx])._ptr, sizeof(cydriver.CUgraphEdgeData)) if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) if numDependencies > len(dependencyData): raise RuntimeError("List is too small: " + str(len(dependencyData)) + " < " + str(numDependencies)) - cdef ccuda.CUgraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphAddNode_v2(phGraphNode._ptr, chGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cdependencyData, numDependencies, cnodeParams_ptr) - if cdependencies is not NULL: - free(cdependencies) - if cdependencyData is not NULL: - free(cdependencyData) + cdef cydriver.CUgraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphAddNode_v2(phGraphNode._ptr, cyhGraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cydependencyData, numDependencies, cynodeParams_ptr) + if cydependencies is not NULL: + free(cydependencies) + if cydependencyData is not NULL: + free(cydependencyData) return (CUresult(err), phGraphNode) {{endif}} @@ -40038,17 +40038,17 @@ def cuGraphNodeSetParams(hNode, nodeParams : Optional[CUgraphNodeParams]): -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuGraphExecNodeSetParams` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cydriver.CUgraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphNodeSetParams(cyhNode, cynodeParams_ptr) return (CUresult(err),) {{endif}} @@ -40090,26 +40090,26 @@ def cuGraphExecNodeSetParams(hGraphExec, hNode, nodeParams : Optional[CUgraphNod -------- :py:obj:`~.cuGraphAddNode`, :py:obj:`~.cuGraphNodeSetParams` :py:obj:`~.cuGraphExecUpdate`, :py:obj:`~.cuGraphInstantiate` """ - cdef ccuda.CUgraphNode chNode + cdef cydriver.CUgraphNode cyhNode if hNode is None: - chNode = 0 + cyhNode = 0 elif isinstance(hNode, (CUgraphNode,)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(CUgraphNode(hNode)) - chNode = phNode - cdef ccuda.CUgraphExec chGraphExec + cyhNode = phNode + cdef cydriver.CUgraphExec cyhGraphExec if hGraphExec is None: - chGraphExec = 0 + cyhGraphExec = 0 elif isinstance(hGraphExec, (CUgraphExec,)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(CUgraphExec(hGraphExec)) - chGraphExec = phGraphExec - cdef ccuda.CUgraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccuda.cuGraphExecNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cydriver.CUgraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cydriver.cuGraphExecNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) 
return (CUresult(err),) {{endif}} @@ -40152,26 +40152,26 @@ def cuGraphConditionalHandleCreate(hGraph, ctx, unsigned int defaultLaunchValue, -------- :py:obj:`~.cuGraphAddNode` """ - cdef ccuda.CUcontext cctx + cdef cydriver.CUcontext cyctx if ctx is None: - cctx = 0 + cyctx = 0 elif isinstance(ctx, (CUcontext,)): pctx = int(ctx) - cctx = pctx + cyctx = pctx else: pctx = int(CUcontext(ctx)) - cctx = pctx - cdef ccuda.CUgraph chGraph + cyctx = pctx + cdef cydriver.CUgraph cyhGraph if hGraph is None: - chGraph = 0 + cyhGraph = 0 elif isinstance(hGraph, (CUgraph,)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(CUgraph(hGraph)) - chGraph = phGraph + cyhGraph = phGraph cdef CUgraphConditionalHandle pHandle_out = CUgraphConditionalHandle() - err = ccuda.cuGraphConditionalHandleCreate(pHandle_out._ptr, chGraph, cctx, defaultLaunchValue, flags) + err = cydriver.cuGraphConditionalHandleCreate(pHandle_out._ptr, cyhGraph, cyctx, defaultLaunchValue, flags) return (CUresult(err), pHandle_out) {{endif}} @@ -40210,17 +40210,17 @@ def cuOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dyna -------- :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor` """ - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef int numBlocks = 0 - err = ccuda.cuOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cfunc, blockSize, dynamicSMemSize) + err = cydriver.cuOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc, blockSize, dynamicSMemSize) return (CUresult(err), numBlocks) {{endif}} @@ -40277,17 +40277,17 @@ def cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, si -------- :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` """ - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef int numBlocks = 0 - err = ccuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(&numBlocks, cfunc, blockSize, dynamicSMemSize, flags) + err = cydriver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(&numBlocks, cyfunc, blockSize, dynamicSMemSize, flags) return (CUresult(err), numBlocks) {{endif}} @@ -40353,27 +40353,27 @@ def cuOccupancyMaxPotentialBlockSize(func, blockSizeToDynamicSMemSize, size_t dy -------- :py:obj:`~.cudaOccupancyMaxPotentialBlockSize` """ - cdef ccuda.CUoccupancyB2DSize cblockSizeToDynamicSMemSize + cdef cydriver.CUoccupancyB2DSize cyblockSizeToDynamicSMemSize if blockSizeToDynamicSMemSize is None: - cblockSizeToDynamicSMemSize = 0 + cyblockSizeToDynamicSMemSize = 0 elif isinstance(blockSizeToDynamicSMemSize, (CUoccupancyB2DSize,)): pblockSizeToDynamicSMemSize = int(blockSizeToDynamicSMemSize) - cblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize + cyblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize else: pblockSizeToDynamicSMemSize = int(CUoccupancyB2DSize(blockSizeToDynamicSMemSize)) - cblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize - cdef ccuda.CUfunction cfunc + cyblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - 
cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef int minGridSize = 0 cdef int blockSize = 0 - err = ccuda.cuOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cfunc, cblockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit) + err = cydriver.cuOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cyfunc, cyblockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit) return (CUresult(err), minGridSize, blockSize) {{endif}} @@ -40438,27 +40438,27 @@ def cuOccupancyMaxPotentialBlockSizeWithFlags(func, blockSizeToDynamicSMemSize, -------- :py:obj:`~.cudaOccupancyMaxPotentialBlockSizeWithFlags` """ - cdef ccuda.CUoccupancyB2DSize cblockSizeToDynamicSMemSize + cdef cydriver.CUoccupancyB2DSize cyblockSizeToDynamicSMemSize if blockSizeToDynamicSMemSize is None: - cblockSizeToDynamicSMemSize = 0 + cyblockSizeToDynamicSMemSize = 0 elif isinstance(blockSizeToDynamicSMemSize, (CUoccupancyB2DSize,)): pblockSizeToDynamicSMemSize = int(blockSizeToDynamicSMemSize) - cblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize + cyblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize else: pblockSizeToDynamicSMemSize = int(CUoccupancyB2DSize(blockSizeToDynamicSMemSize)) - cblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize - cdef ccuda.CUfunction cfunc + cyblockSizeToDynamicSMemSize = pblockSizeToDynamicSMemSize + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef int minGridSize = 0 cdef int blockSize = 0 - err = ccuda.cuOccupancyMaxPotentialBlockSizeWithFlags(&minGridSize, &blockSize, cfunc, cblockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit, flags) + err = cydriver.cuOccupancyMaxPotentialBlockSizeWithFlags(&minGridSize, &blockSize, cyfunc, cyblockSizeToDynamicSMemSize, dynamicSMemSize, blockSizeLimit, flags) return (CUresult(err), minGridSize, blockSize) {{endif}} @@ -40493,17 +40493,17 @@ def cuOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize): dynamicSmemSize : int Returned maximum dynamic shared memory """ - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef size_t dynamicSmemSize = 0 - err = ccuda.cuOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cfunc, numBlocks, blockSize) + err = cydriver.cuOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc, numBlocks, blockSize) return (CUresult(err), dynamicSmemSize) {{endif}} @@ -40550,18 +40550,18 @@ def cuOccupancyMaxPotentialClusterSize(func, config : Optional[CUlaunchConfig]): -------- :py:obj:`~.cudaFuncGetAttributes`, :py:obj:`~.cuFuncGetAttribute` """ - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef int clusterSize = 0 - cdef ccuda.CUlaunchConfig* cconfig_ptr = config._ptr if config != None else NULL - err = ccuda.cuOccupancyMaxPotentialClusterSize(&clusterSize, cfunc, cconfig_ptr) + cdef cydriver.CUlaunchConfig* cyconfig_ptr = config._ptr if config != None else NULL + err = 
cydriver.cuOccupancyMaxPotentialClusterSize(&clusterSize, cyfunc, cyconfig_ptr) return (CUresult(err), clusterSize) {{endif}} @@ -40608,18 +40608,18 @@ def cuOccupancyMaxActiveClusters(func, config : Optional[CUlaunchConfig]): -------- :py:obj:`~.cudaFuncGetAttributes`, :py:obj:`~.cuFuncGetAttribute` """ - cdef ccuda.CUfunction cfunc + cdef cydriver.CUfunction cyfunc if func is None: - cfunc = 0 + cyfunc = 0 elif isinstance(func, (CUfunction,)): pfunc = int(func) - cfunc = pfunc + cyfunc = pfunc else: pfunc = int(CUfunction(func)) - cfunc = pfunc + cyfunc = pfunc cdef int numClusters = 0 - cdef ccuda.CUlaunchConfig* cconfig_ptr = config._ptr if config != None else NULL - err = ccuda.cuOccupancyMaxActiveClusters(&numClusters, cfunc, cconfig_ptr) + cdef cydriver.CUlaunchConfig* cyconfig_ptr = config._ptr if config != None else NULL + err = cydriver.cuOccupancyMaxActiveClusters(&numClusters, cyfunc, cyconfig_ptr) return (CUresult(err), numClusters) {{endif}} @@ -40655,25 +40655,25 @@ def cuTexRefSetArray(hTexRef, hArray, unsigned int Flags): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUarray chArray + cdef cydriver.CUarray cyhArray if hArray is None: - chArray = 0 + cyhArray = 0 elif isinstance(hArray, (CUarray,)): phArray = int(hArray) - chArray = phArray + cyhArray = phArray else: phArray = int(CUarray(hArray)) - chArray = phArray - cdef ccuda.CUtexref chTexRef + cyhArray = phArray + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefSetArray(chTexRef, chArray, Flags) + cyhTexRef = phTexRef + err = cydriver.cuTexRefSetArray(cyhTexRef, cyhArray, Flags) return (CUresult(err),) {{endif}} @@ -40709,25 +40709,25 @@ def cuTexRefSetMipmappedArray(hTexRef, hMipmappedArray, unsigned int Flags): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUmipmappedArray chMipmappedArray + cdef cydriver.CUmipmappedArray cyhMipmappedArray if hMipmappedArray is None: - chMipmappedArray = 0 + cyhMipmappedArray = 0 elif isinstance(hMipmappedArray, (CUmipmappedArray,)): phMipmappedArray = int(hMipmappedArray) - chMipmappedArray = phMipmappedArray + cyhMipmappedArray = phMipmappedArray else: phMipmappedArray = int(CUmipmappedArray(hMipmappedArray)) - chMipmappedArray = phMipmappedArray - cdef ccuda.CUtexref chTexRef + cyhMipmappedArray = phMipmappedArray + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefSetMipmappedArray(chTexRef, 
chMipmappedArray, Flags) + cyhTexRef = phTexRef + err = cydriver.cuTexRefSetMipmappedArray(cyhTexRef, cyhMipmappedArray, Flags) return (CUresult(err),) {{endif}} @@ -40782,26 +40782,26 @@ def cuTexRefSetAddress(hTexRef, dptr, size_t numbytes): -------- :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - cdef ccuda.CUtexref chTexRef + cydptr = pdptr + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef size_t ByteOffset = 0 - err = ccuda.cuTexRefSetAddress(&ByteOffset, chTexRef, cdptr, numbytes) + err = cydriver.cuTexRefSetAddress(&ByteOffset, cyhTexRef, cydptr, numbytes) return (CUresult(err), ByteOffset) {{endif}} @@ -40864,26 +40864,26 @@ def cuTexRefSetAddress2D(hTexRef, desc : Optional[CUDA_ARRAY_DESCRIPTOR], dptr, -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUdeviceptr cdptr + cdef cydriver.CUdeviceptr cydptr if dptr is None: - cdptr = 0 + cydptr = 0 elif isinstance(dptr, (CUdeviceptr,)): pdptr = int(dptr) - cdptr = pdptr + cydptr = pdptr else: pdptr = int(CUdeviceptr(dptr)) - cdptr = pdptr - cdef ccuda.CUtexref chTexRef + cydptr = pdptr + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUDA_ARRAY_DESCRIPTOR* cdesc_ptr = desc._ptr if desc != None else NULL - err = ccuda.cuTexRefSetAddress2D(chTexRef, cdesc_ptr, cdptr, Pitch) + cyhTexRef = phTexRef + cdef cydriver.CUDA_ARRAY_DESCRIPTOR* cydesc_ptr = desc._ptr if desc != None else NULL + err = cydriver.cuTexRefSetAddress2D(cyhTexRef, cydesc_ptr, cydptr, Pitch) return (CUresult(err),) {{endif}} @@ -40919,17 +40919,17 @@ def cuTexRefSetFormat(hTexRef, fmt not None : CUarray_format, int NumPackedCompo -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat`, :py:obj:`~.cudaCreateChannelDesc` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif 
isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUarray_format cfmt = fmt.value - err = ccuda.cuTexRefSetFormat(chTexRef, cfmt, NumPackedComponents) + cyhTexRef = phTexRef + cdef cydriver.CUarray_format cyfmt = fmt.value + err = cydriver.cuTexRefSetFormat(cyhTexRef, cyfmt, NumPackedComponents) return (CUresult(err),) {{endif}} @@ -40972,17 +40972,17 @@ def cuTexRefSetAddressMode(hTexRef, int dim, am not None : CUaddress_mode): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUaddress_mode cam = am.value - err = ccuda.cuTexRefSetAddressMode(chTexRef, dim, cam) + cyhTexRef = phTexRef + cdef cydriver.CUaddress_mode cyam = am.value + err = cydriver.cuTexRefSetAddressMode(cyhTexRef, dim, cyam) return (CUresult(err),) {{endif}} @@ -41019,17 +41019,17 @@ def cuTexRefSetFilterMode(hTexRef, fm not None : CUfilter_mode): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUfilter_mode cfm = fm.value - err = ccuda.cuTexRefSetFilterMode(chTexRef, cfm) + cyhTexRef = phTexRef + cdef cydriver.CUfilter_mode cyfm = fm.value + err = cydriver.cuTexRefSetFilterMode(cyhTexRef, cyfm) return (CUresult(err),) {{endif}} @@ -41066,17 +41066,17 @@ def cuTexRefSetMipmapFilterMode(hTexRef, fm not None : CUfilter_mode): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUfilter_mode cfm = fm.value - err = ccuda.cuTexRefSetMipmapFilterMode(chTexRef, cfm) + cyhTexRef = phTexRef + cdef cydriver.CUfilter_mode cyfm = fm.value + err = cydriver.cuTexRefSetMipmapFilterMode(cyhTexRef, cyfm) 
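# --- usage sketch (editorial note, not part of the patch): the occupancy
# wrappers above (cuOccupancyMaxActiveBlocksPerMultiprocessor and friends) and
# the deprecated cuTexRefSet*/cuTexRefGet* wrappers here receive the same
# mechanical ccuda -> cydriver rename; only the cimport target and local
# variable prefixes change. A hedged occupancy query through the new layout;
# `func` stands for an already-loaded CUfunction and the block size is an
# arbitrary example value.
from cuda.bindings import driver

err, num_blocks = driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(func, 256, 0)
err, min_grid, best_block = driver.cuOccupancyMaxPotentialBlockSize(func, None, 0, 0)
# --- end sketch ---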
return (CUresult(err),) {{endif}} @@ -41111,16 +41111,16 @@ def cuTexRefSetMipmapLevelBias(hTexRef, float bias): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefSetMipmapLevelBias(chTexRef, bias) + cyhTexRef = phTexRef + err = cydriver.cuTexRefSetMipmapLevelBias(cyhTexRef, bias) return (CUresult(err),) {{endif}} @@ -41157,16 +41157,16 @@ def cuTexRefSetMipmapLevelClamp(hTexRef, float minMipmapLevelClamp, float maxMip -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefSetMipmapLevelClamp(chTexRef, minMipmapLevelClamp, maxMipmapLevelClamp) + cyhTexRef = phTexRef + err = cydriver.cuTexRefSetMipmapLevelClamp(cyhTexRef, minMipmapLevelClamp, maxMipmapLevelClamp) return (CUresult(err),) {{endif}} @@ -41200,16 +41200,16 @@ def cuTexRefSetMaxAnisotropy(hTexRef, unsigned int maxAniso): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefSetMaxAnisotropy(chTexRef, maxAniso) + cyhTexRef = phTexRef + err = cydriver.cuTexRefSetMaxAnisotropy(cyhTexRef, maxAniso) return (CUresult(err),) {{endif}} @@ -41248,16 +41248,16 @@ def cuTexRefSetBorderColor(hTexRef, float pBorderColor): -------- :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetBorderColor` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefSetBorderColor(chTexRef, &pBorderColor) + cyhTexRef = phTexRef + err = 
cydriver.cuTexRefSetBorderColor(cyhTexRef, &pBorderColor) return (CUresult(err),) {{endif}} @@ -41306,16 +41306,16 @@ def cuTexRefSetFlags(hTexRef, unsigned int Flags): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefSetFlags(chTexRef, Flags) + cyhTexRef = phTexRef + err = cydriver.cuTexRefSetFlags(cyhTexRef, Flags) return (CUresult(err),) {{endif}} @@ -41347,17 +41347,17 @@ def cuTexRefGetAddress(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef CUdeviceptr pdptr = CUdeviceptr() - err = ccuda.cuTexRefGetAddress(pdptr._ptr, chTexRef) + err = cydriver.cuTexRefGetAddress(pdptr._ptr, cyhTexRef) return (CUresult(err), pdptr) {{endif}} @@ -41389,17 +41389,17 @@ def cuTexRefGetArray(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef CUarray phArray = CUarray() - err = ccuda.cuTexRefGetArray(phArray._ptr, chTexRef) + err = cydriver.cuTexRefGetArray(phArray._ptr, cyhTexRef) return (CUresult(err), phArray) {{endif}} @@ -41432,17 +41432,17 @@ def cuTexRefGetMipmappedArray(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, 
(CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef CUmipmappedArray phMipmappedArray = CUmipmappedArray() - err = ccuda.cuTexRefGetMipmappedArray(phMipmappedArray._ptr, chTexRef) + err = cydriver.cuTexRefGetMipmappedArray(phMipmappedArray._ptr, cyhTexRef) return (CUresult(err), phMipmappedArray) {{endif}} @@ -41476,17 +41476,17 @@ def cuTexRefGetAddressMode(hTexRef, int dim): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUaddress_mode pam - err = ccuda.cuTexRefGetAddressMode(&pam, chTexRef, dim) + cyhTexRef = phTexRef + cdef cydriver.CUaddress_mode pam + err = cydriver.cuTexRefGetAddressMode(&pam, cyhTexRef, dim) return (CUresult(err), CUaddress_mode(pam)) {{endif}} @@ -41517,17 +41517,17 @@ def cuTexRefGetFilterMode(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUfilter_mode pfm - err = ccuda.cuTexRefGetFilterMode(&pfm, chTexRef) + cyhTexRef = phTexRef + cdef cydriver.CUfilter_mode pfm + err = cydriver.cuTexRefGetFilterMode(&pfm, cyhTexRef) return (CUresult(err), CUfilter_mode(pfm)) {{endif}} @@ -41561,18 +41561,18 @@ def cuTexRefGetFormat(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUarray_format pFormat + cyhTexRef = phTexRef + cdef cydriver.CUarray_format pFormat cdef int pNumChannels = 0 - err = ccuda.cuTexRefGetFormat(&pFormat, &pNumChannels, chTexRef) + err = cydriver.cuTexRefGetFormat(&pFormat, &pNumChannels, cyhTexRef) return (CUresult(err), CUarray_format(pFormat), pNumChannels) {{endif}} @@ -41603,17 +41603,17 @@ 
def cuTexRefGetMipmapFilterMode(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - cdef ccuda.CUfilter_mode pfm - err = ccuda.cuTexRefGetMipmapFilterMode(&pfm, chTexRef) + cyhTexRef = phTexRef + cdef cydriver.CUfilter_mode pfm + err = cydriver.cuTexRefGetMipmapFilterMode(&pfm, cyhTexRef) return (CUresult(err), CUfilter_mode(pfm)) {{endif}} @@ -41645,17 +41645,17 @@ def cuTexRefGetMipmapLevelBias(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef float pbias = 0 - err = ccuda.cuTexRefGetMipmapLevelBias(&pbias, chTexRef) + err = cydriver.cuTexRefGetMipmapLevelBias(&pbias, cyhTexRef) return (CUresult(err), pbias) {{endif}} @@ -41689,18 +41689,18 @@ def cuTexRefGetMipmapLevelClamp(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef float pminMipmapLevelClamp = 0 cdef float pmaxMipmapLevelClamp = 0 - err = ccuda.cuTexRefGetMipmapLevelClamp(&pminMipmapLevelClamp, &pmaxMipmapLevelClamp, chTexRef) + err = cydriver.cuTexRefGetMipmapLevelClamp(&pminMipmapLevelClamp, &pmaxMipmapLevelClamp, cyhTexRef) return (CUresult(err), pminMipmapLevelClamp, pmaxMipmapLevelClamp) {{endif}} @@ -41731,17 +41731,17 @@ def cuTexRefGetMaxAnisotropy(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFlags`, :py:obj:`~.cuTexRefGetFormat` """ - cdef 
ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef int pmaxAniso = 0 - err = ccuda.cuTexRefGetMaxAnisotropy(&pmaxAniso, chTexRef) + err = cydriver.cuTexRefGetMaxAnisotropy(&pmaxAniso, cyhTexRef) return (CUresult(err), pmaxAniso) {{endif}} @@ -41775,17 +41775,17 @@ def cuTexRefGetBorderColor(hTexRef): -------- :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetBorderColor` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef float pBorderColor = 0 - err = ccuda.cuTexRefGetBorderColor(&pBorderColor, chTexRef) + err = cydriver.cuTexRefGetBorderColor(&pBorderColor, cyhTexRef) return (CUresult(err), pBorderColor) {{endif}} @@ -41815,17 +41815,17 @@ def cuTexRefGetFlags(hTexRef): -------- :py:obj:`~.cuTexRefSetAddress`, :py:obj:`~.cuTexRefSetAddress2D`, :py:obj:`~.cuTexRefSetAddressMode`, :py:obj:`~.cuTexRefSetArray`, :py:obj:`~.cuTexRefSetFilterMode`, :py:obj:`~.cuTexRefSetFlags`, :py:obj:`~.cuTexRefSetFormat`, :py:obj:`~.cuTexRefGetAddress`, :py:obj:`~.cuTexRefGetAddressMode`, :py:obj:`~.cuTexRefGetArray`, :py:obj:`~.cuTexRefGetFilterMode`, :py:obj:`~.cuTexRefGetFormat` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef + cyhTexRef = phTexRef cdef unsigned int pFlags = 0 - err = ccuda.cuTexRefGetFlags(&pFlags, chTexRef) + err = cydriver.cuTexRefGetFlags(&pFlags, cyhTexRef) return (CUresult(err), pFlags) {{endif}} @@ -41856,7 +41856,7 @@ def cuTexRefCreate(): :py:obj:`~.cuTexRefDestroy` """ cdef CUtexref pTexRef = CUtexref() - err = ccuda.cuTexRefCreate(pTexRef._ptr) + err = cydriver.cuTexRefCreate(pTexRef._ptr) return (CUresult(err), pTexRef) {{endif}} @@ -41884,16 +41884,16 @@ def cuTexRefDestroy(hTexRef): -------- :py:obj:`~.cuTexRefCreate` """ - cdef ccuda.CUtexref chTexRef + cdef cydriver.CUtexref cyhTexRef if hTexRef is None: - chTexRef = 0 + cyhTexRef = 0 elif isinstance(hTexRef, (CUtexref,)): phTexRef = int(hTexRef) - chTexRef = phTexRef + cyhTexRef = phTexRef else: phTexRef = int(CUtexref(hTexRef)) - chTexRef = phTexRef - err = ccuda.cuTexRefDestroy(chTexRef) + cyhTexRef = phTexRef + err = cydriver.cuTexRefDestroy(cyhTexRef) return (CUresult(err),) {{endif}} @@ -41930,25 +41930,25 @@ def cuSurfRefSetArray(hSurfRef, hArray, unsigned int Flags): -------- :py:obj:`~.cuModuleGetSurfRef`, :py:obj:`~.cuSurfRefGetArray` """ - cdef ccuda.CUarray chArray + cdef cydriver.CUarray cyhArray if hArray is None: - chArray = 0 + cyhArray = 0 elif isinstance(hArray, (CUarray,)): phArray = int(hArray) - chArray = phArray + cyhArray = phArray else: phArray = int(CUarray(hArray)) - chArray = phArray - cdef ccuda.CUsurfref chSurfRef + cyhArray = phArray + cdef cydriver.CUsurfref cyhSurfRef if hSurfRef is None: - chSurfRef = 0 + cyhSurfRef = 0 elif isinstance(hSurfRef, (CUsurfref,)): phSurfRef = int(hSurfRef) - 
chSurfRef = phSurfRef + cyhSurfRef = phSurfRef else: phSurfRef = int(CUsurfref(hSurfRef)) - chSurfRef = phSurfRef - err = ccuda.cuSurfRefSetArray(chSurfRef, chArray, Flags) + cyhSurfRef = phSurfRef + err = cydriver.cuSurfRefSetArray(cyhSurfRef, cyhArray, Flags) return (CUresult(err),) {{endif}} @@ -41980,17 +41980,17 @@ def cuSurfRefGetArray(hSurfRef): -------- :py:obj:`~.cuModuleGetSurfRef`, :py:obj:`~.cuSurfRefSetArray` """ - cdef ccuda.CUsurfref chSurfRef + cdef cydriver.CUsurfref cyhSurfRef if hSurfRef is None: - chSurfRef = 0 + cyhSurfRef = 0 elif isinstance(hSurfRef, (CUsurfref,)): phSurfRef = int(hSurfRef) - chSurfRef = phSurfRef + cyhSurfRef = phSurfRef else: phSurfRef = int(CUsurfref(hSurfRef)) - chSurfRef = phSurfRef + cyhSurfRef = phSurfRef cdef CUarray phArray = CUarray() - err = ccuda.cuSurfRefGetArray(phArray._ptr, chSurfRef) + err = cydriver.cuSurfRefGetArray(phArray._ptr, cyhSurfRef) return (CUresult(err), phArray) {{endif}} @@ -42224,10 +42224,10 @@ def cuTexObjectCreate(pResDesc : Optional[CUDA_RESOURCE_DESC], pTexDesc : Option :py:obj:`~.cuTexObjectDestroy`, :py:obj:`~.cudaCreateTextureObject` """ cdef CUtexObject pTexObject = CUtexObject() - cdef ccuda.CUDA_RESOURCE_DESC* cpResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL - cdef ccuda.CUDA_TEXTURE_DESC* cpTexDesc_ptr = pTexDesc._ptr if pTexDesc != None else NULL - cdef ccuda.CUDA_RESOURCE_VIEW_DESC* cpResViewDesc_ptr = pResViewDesc._ptr if pResViewDesc != None else NULL - err = ccuda.cuTexObjectCreate(pTexObject._ptr, cpResDesc_ptr, cpTexDesc_ptr, cpResViewDesc_ptr) + cdef cydriver.CUDA_RESOURCE_DESC* cypResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL + cdef cydriver.CUDA_TEXTURE_DESC* cypTexDesc_ptr = pTexDesc._ptr if pTexDesc != None else NULL + cdef cydriver.CUDA_RESOURCE_VIEW_DESC* cypResViewDesc_ptr = pResViewDesc._ptr if pResViewDesc != None else NULL + err = cydriver.cuTexObjectCreate(pTexObject._ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) return (CUresult(err), pTexObject) {{endif}} @@ -42253,16 +42253,16 @@ def cuTexObjectDestroy(texObject): -------- :py:obj:`~.cuTexObjectCreate`, :py:obj:`~.cudaDestroyTextureObject` """ - cdef ccuda.CUtexObject ctexObject + cdef cydriver.CUtexObject cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (CUtexObject,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(CUtexObject(texObject)) - ctexObject = ptexObject - err = ccuda.cuTexObjectDestroy(ctexObject) + cytexObject = ptexObject + err = cydriver.cuTexObjectDestroy(cytexObject) return (CUresult(err),) {{endif}} @@ -42291,17 +42291,17 @@ def cuTexObjectGetResourceDesc(texObject): -------- :py:obj:`~.cuTexObjectCreate`, :py:obj:`~.cudaGetTextureObjectResourceDesc`, """ - cdef ccuda.CUtexObject ctexObject + cdef cydriver.CUtexObject cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (CUtexObject,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(CUtexObject(texObject)) - ctexObject = ptexObject + cytexObject = ptexObject cdef CUDA_RESOURCE_DESC pResDesc = CUDA_RESOURCE_DESC() - err = ccuda.cuTexObjectGetResourceDesc(pResDesc._ptr, ctexObject) + err = cydriver.cuTexObjectGetResourceDesc(pResDesc._ptr, cytexObject) return (CUresult(err), pResDesc) {{endif}} @@ -42330,17 +42330,17 @@ def cuTexObjectGetTextureDesc(texObject): -------- :py:obj:`~.cuTexObjectCreate`, 
:py:obj:`~.cudaGetTextureObjectTextureDesc` """ - cdef ccuda.CUtexObject ctexObject + cdef cydriver.CUtexObject cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (CUtexObject,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(CUtexObject(texObject)) - ctexObject = ptexObject + cytexObject = ptexObject cdef CUDA_TEXTURE_DESC pTexDesc = CUDA_TEXTURE_DESC() - err = ccuda.cuTexObjectGetTextureDesc(pTexDesc._ptr, ctexObject) + err = cydriver.cuTexObjectGetTextureDesc(pTexDesc._ptr, cytexObject) return (CUresult(err), pTexDesc) {{endif}} @@ -42370,17 +42370,17 @@ def cuTexObjectGetResourceViewDesc(texObject): -------- :py:obj:`~.cuTexObjectCreate`, :py:obj:`~.cudaGetTextureObjectResourceViewDesc` """ - cdef ccuda.CUtexObject ctexObject + cdef cydriver.CUtexObject cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (CUtexObject,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(CUtexObject(texObject)) - ctexObject = ptexObject + cytexObject = ptexObject cdef CUDA_RESOURCE_VIEW_DESC pResViewDesc = CUDA_RESOURCE_VIEW_DESC() - err = ccuda.cuTexObjectGetResourceViewDesc(pResViewDesc._ptr, ctexObject) + err = cydriver.cuTexObjectGetResourceViewDesc(pResViewDesc._ptr, cytexObject) return (CUresult(err), pResViewDesc) {{endif}} @@ -42419,8 +42419,8 @@ def cuSurfObjectCreate(pResDesc : Optional[CUDA_RESOURCE_DESC]): :py:obj:`~.cuSurfObjectDestroy`, :py:obj:`~.cudaCreateSurfaceObject` """ cdef CUsurfObject pSurfObject = CUsurfObject() - cdef ccuda.CUDA_RESOURCE_DESC* cpResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL - err = ccuda.cuSurfObjectCreate(pSurfObject._ptr, cpResDesc_ptr) + cdef cydriver.CUDA_RESOURCE_DESC* cypResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL + err = cydriver.cuSurfObjectCreate(pSurfObject._ptr, cypResDesc_ptr) return (CUresult(err), pSurfObject) {{endif}} @@ -42446,16 +42446,16 @@ def cuSurfObjectDestroy(surfObject): -------- :py:obj:`~.cuSurfObjectCreate`, :py:obj:`~.cudaDestroySurfaceObject` """ - cdef ccuda.CUsurfObject csurfObject + cdef cydriver.CUsurfObject cysurfObject if surfObject is None: - csurfObject = 0 + cysurfObject = 0 elif isinstance(surfObject, (CUsurfObject,)): psurfObject = int(surfObject) - csurfObject = psurfObject + cysurfObject = psurfObject else: psurfObject = int(CUsurfObject(surfObject)) - csurfObject = psurfObject - err = ccuda.cuSurfObjectDestroy(csurfObject) + cysurfObject = psurfObject + err = cydriver.cuSurfObjectDestroy(cysurfObject) return (CUresult(err),) {{endif}} @@ -42484,17 +42484,17 @@ def cuSurfObjectGetResourceDesc(surfObject): -------- :py:obj:`~.cuSurfObjectCreate`, :py:obj:`~.cudaGetSurfaceObjectResourceDesc` """ - cdef ccuda.CUsurfObject csurfObject + cdef cydriver.CUsurfObject cysurfObject if surfObject is None: - csurfObject = 0 + cysurfObject = 0 elif isinstance(surfObject, (CUsurfObject,)): psurfObject = int(surfObject) - csurfObject = psurfObject + cysurfObject = psurfObject else: psurfObject = int(CUsurfObject(surfObject)) - csurfObject = psurfObject + cysurfObject = psurfObject cdef CUDA_RESOURCE_DESC pResDesc = CUDA_RESOURCE_DESC() - err = ccuda.cuSurfObjectGetResourceDesc(pResDesc._ptr, csurfObject) + err = cydriver.cuSurfObjectGetResourceDesc(pResDesc._ptr, cysurfObject) return (CUresult(err), pResDesc) {{endif}} @@ -42660,74 +42660,74 @@ def cuTensorMapEncodeTiled(tensorDataType not None : 
CUtensorMapDataType, tensor """ elementStrides = [] if elementStrides is None else elementStrides if not all(isinstance(_x, (cuuint32_t,)) for _x in elementStrides): - raise TypeError("Argument 'elementStrides' is not instance of type (expected Tuple[ccuda.cuuint32_t,] or List[ccuda.cuuint32_t,]") + raise TypeError("Argument 'elementStrides' is not instance of type (expected Tuple[cydriver.cuuint32_t,] or List[cydriver.cuuint32_t,]") boxDim = [] if boxDim is None else boxDim if not all(isinstance(_x, (cuuint32_t,)) for _x in boxDim): - raise TypeError("Argument 'boxDim' is not instance of type (expected Tuple[ccuda.cuuint32_t,] or List[ccuda.cuuint32_t,]") + raise TypeError("Argument 'boxDim' is not instance of type (expected Tuple[cydriver.cuuint32_t,] or List[cydriver.cuuint32_t,]") globalStrides = [] if globalStrides is None else globalStrides if not all(isinstance(_x, (cuuint64_t,)) for _x in globalStrides): - raise TypeError("Argument 'globalStrides' is not instance of type (expected Tuple[ccuda.cuuint64_t,] or List[ccuda.cuuint64_t,]") + raise TypeError("Argument 'globalStrides' is not instance of type (expected Tuple[cydriver.cuuint64_t,] or List[cydriver.cuuint64_t,]") globalDim = [] if globalDim is None else globalDim if not all(isinstance(_x, (cuuint64_t,)) for _x in globalDim): - raise TypeError("Argument 'globalDim' is not instance of type (expected Tuple[ccuda.cuuint64_t,] or List[ccuda.cuuint64_t,]") - cdef ccuda.cuuint32_t ctensorRank + raise TypeError("Argument 'globalDim' is not instance of type (expected Tuple[cydriver.cuuint64_t,] or List[cydriver.cuuint64_t,]") + cdef cydriver.cuuint32_t cytensorRank if tensorRank is None: - ctensorRank = 0 + cytensorRank = 0 elif isinstance(tensorRank, (cuuint32_t,)): ptensorRank = int(tensorRank) - ctensorRank = ptensorRank + cytensorRank = ptensorRank else: ptensorRank = int(cuuint32_t(tensorRank)) - ctensorRank = ptensorRank + cytensorRank = ptensorRank cdef CUtensorMap tensorMap = CUtensorMap() - cdef ccuda.CUtensorMapDataType ctensorDataType = tensorDataType.value - cglobalAddress = utils.HelperInputVoidPtr(globalAddress) - cdef void* cglobalAddress_ptr = cglobalAddress.cptr - cdef ccuda.cuuint64_t* cglobalDim = NULL + cdef cydriver.CUtensorMapDataType cytensorDataType = tensorDataType.value + cyglobalAddress = utils.HelperInputVoidPtr(globalAddress) + cdef void* cyglobalAddress_ptr = cyglobalAddress.cptr + cdef cydriver.cuuint64_t* cyglobalDim = NULL if len(globalDim) > 0: - cglobalDim = calloc(len(globalDim), sizeof(ccuda.cuuint64_t)) - if cglobalDim is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(globalDim)) + 'x' + str(sizeof(ccuda.cuuint64_t))) + cyglobalDim = calloc(len(globalDim), sizeof(cydriver.cuuint64_t)) + if cyglobalDim is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(globalDim)) + 'x' + str(sizeof(cydriver.cuuint64_t))) else: for idx in range(len(globalDim)): - cglobalDim[idx] = (globalDim[idx])._ptr[0] - cdef ccuda.cuuint64_t* cglobalStrides = NULL + cyglobalDim[idx] = (globalDim[idx])._ptr[0] + cdef cydriver.cuuint64_t* cyglobalStrides = NULL if len(globalStrides) > 0: - cglobalStrides = calloc(len(globalStrides), sizeof(ccuda.cuuint64_t)) - if cglobalStrides is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(globalStrides)) + 'x' + str(sizeof(ccuda.cuuint64_t))) + cyglobalStrides = calloc(len(globalStrides), sizeof(cydriver.cuuint64_t)) + if cyglobalStrides is NULL: + raise MemoryError('Failed to allocate length x 
size memory: ' + str(len(globalStrides)) + 'x' + str(sizeof(cydriver.cuuint64_t))) else: for idx in range(len(globalStrides)): - cglobalStrides[idx] = (globalStrides[idx])._ptr[0] - cdef ccuda.cuuint32_t* cboxDim = NULL + cyglobalStrides[idx] = (globalStrides[idx])._ptr[0] + cdef cydriver.cuuint32_t* cyboxDim = NULL if len(boxDim) > 0: - cboxDim = calloc(len(boxDim), sizeof(ccuda.cuuint32_t)) - if cboxDim is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(boxDim)) + 'x' + str(sizeof(ccuda.cuuint32_t))) + cyboxDim = calloc(len(boxDim), sizeof(cydriver.cuuint32_t)) + if cyboxDim is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(boxDim)) + 'x' + str(sizeof(cydriver.cuuint32_t))) else: for idx in range(len(boxDim)): - cboxDim[idx] = (boxDim[idx])._ptr[0] - cdef ccuda.cuuint32_t* celementStrides = NULL + cyboxDim[idx] = (boxDim[idx])._ptr[0] + cdef cydriver.cuuint32_t* cyelementStrides = NULL if len(elementStrides) > 0: - celementStrides = calloc(len(elementStrides), sizeof(ccuda.cuuint32_t)) - if celementStrides is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(elementStrides)) + 'x' + str(sizeof(ccuda.cuuint32_t))) + cyelementStrides = calloc(len(elementStrides), sizeof(cydriver.cuuint32_t)) + if cyelementStrides is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(elementStrides)) + 'x' + str(sizeof(cydriver.cuuint32_t))) else: for idx in range(len(elementStrides)): - celementStrides[idx] = (elementStrides[idx])._ptr[0] - cdef ccuda.CUtensorMapInterleave cinterleave = interleave.value - cdef ccuda.CUtensorMapSwizzle cswizzle = swizzle.value - cdef ccuda.CUtensorMapL2promotion cl2Promotion = l2Promotion.value - cdef ccuda.CUtensorMapFloatOOBfill coobFill = oobFill.value - err = ccuda.cuTensorMapEncodeTiled(tensorMap._ptr, ctensorDataType, ctensorRank, cglobalAddress_ptr, (globalDim[0])._ptr if len(globalDim) == 1 else cglobalDim, (globalStrides[0])._ptr if len(globalStrides) == 1 else cglobalStrides, (boxDim[0])._ptr if len(boxDim) == 1 else cboxDim, (elementStrides[0])._ptr if len(elementStrides) == 1 else celementStrides, cinterleave, cswizzle, cl2Promotion, coobFill) - if cglobalDim is not NULL: - free(cglobalDim) - if cglobalStrides is not NULL: - free(cglobalStrides) - if cboxDim is not NULL: - free(cboxDim) - if celementStrides is not NULL: - free(celementStrides) + cyelementStrides[idx] = (elementStrides[idx])._ptr[0] + cdef cydriver.CUtensorMapInterleave cyinterleave = interleave.value + cdef cydriver.CUtensorMapSwizzle cyswizzle = swizzle.value + cdef cydriver.CUtensorMapL2promotion cyl2Promotion = l2Promotion.value + cdef cydriver.CUtensorMapFloatOOBfill cyoobFill = oobFill.value + err = cydriver.cuTensorMapEncodeTiled(tensorMap._ptr, cytensorDataType, cytensorRank, cyglobalAddress_ptr, (globalDim[0])._ptr if len(globalDim) == 1 else cyglobalDim, (globalStrides[0])._ptr if len(globalStrides) == 1 else cyglobalStrides, (boxDim[0])._ptr if len(boxDim) == 1 else cyboxDim, (elementStrides[0])._ptr if len(elementStrides) == 1 else cyelementStrides, cyinterleave, cyswizzle, cyl2Promotion, cyoobFill) + if cyglobalDim is not NULL: + free(cyglobalDim) + if cyglobalStrides is not NULL: + free(cyglobalStrides) + if cyboxDim is not NULL: + free(cyboxDim) + if cyelementStrides is not NULL: + free(cyelementStrides) return (CUresult(err), tensorMap) {{endif}} @@ -42923,25 +42923,25 @@ def cuTensorMapEncodeIm2col(tensorDataType not None : CUtensorMapDataType, tenso """ 
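# --- usage sketch (editorial note, not part of the patch): as the hunks above
# show, cuTensorMapEncodeTiled marshals every Python list argument into a
# calloc'd cydriver array and frees it after the call; cuTensorMapEncodeIm2col
# below follows the same pattern. A hedged sketch of the tiled encode for a
# 1024x1024 float32 tensor at device pointer `dptr`; all sizes and enum
# choices are illustrative assumptions.
from cuda.bindings import driver
from cuda.bindings.driver import cuuint32_t, cuuint64_t

err, tmap = driver.cuTensorMapEncodeTiled(
    driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32,
    cuuint32_t(2), dptr,
    [cuuint64_t(1024), cuuint64_t(1024)],  # globalDim
    [cuuint64_t(1024 * 4)],                # globalStrides: rank-1 entries, in bytes
    [cuuint32_t(64), cuuint32_t(64)],      # boxDim (shared-memory tile)
    [cuuint32_t(1), cuuint32_t(1)],        # elementStrides
    driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_NONE,
    driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_NONE,
    driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_NONE,
    driver.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE)
# --- end sketch ---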
elementStrides = [] if elementStrides is None else elementStrides if not all(isinstance(_x, (cuuint32_t,)) for _x in elementStrides): - raise TypeError("Argument 'elementStrides' is not instance of type (expected Tuple[ccuda.cuuint32_t,] or List[ccuda.cuuint32_t,]") - cdef ccuda.cuuint32_t cpixelsPerColumn + raise TypeError("Argument 'elementStrides' is not instance of type (expected Tuple[cydriver.cuuint32_t,] or List[cydriver.cuuint32_t,]") + cdef cydriver.cuuint32_t cypixelsPerColumn if pixelsPerColumn is None: - cpixelsPerColumn = 0 + cypixelsPerColumn = 0 elif isinstance(pixelsPerColumn, (cuuint32_t,)): ppixelsPerColumn = int(pixelsPerColumn) - cpixelsPerColumn = ppixelsPerColumn + cypixelsPerColumn = ppixelsPerColumn else: ppixelsPerColumn = int(cuuint32_t(pixelsPerColumn)) - cpixelsPerColumn = ppixelsPerColumn - cdef ccuda.cuuint32_t cchannelsPerPixel + cypixelsPerColumn = ppixelsPerColumn + cdef cydriver.cuuint32_t cychannelsPerPixel if channelsPerPixel is None: - cchannelsPerPixel = 0 + cychannelsPerPixel = 0 elif isinstance(channelsPerPixel, (cuuint32_t,)): pchannelsPerPixel = int(channelsPerPixel) - cchannelsPerPixel = pchannelsPerPixel + cychannelsPerPixel = pchannelsPerPixel else: pchannelsPerPixel = int(cuuint32_t(channelsPerPixel)) - cchannelsPerPixel = pchannelsPerPixel + cychannelsPerPixel = pchannelsPerPixel pixelBoxUpperCorner = [] if pixelBoxUpperCorner is None else pixelBoxUpperCorner if not all(isinstance(_x, (int)) for _x in pixelBoxUpperCorner): raise TypeError("Argument 'pixelBoxUpperCorner' is not instance of type (expected Tuple[int] or List[int]") @@ -42950,60 +42950,60 @@ def cuTensorMapEncodeIm2col(tensorDataType not None : CUtensorMapDataType, tenso raise TypeError("Argument 'pixelBoxLowerCorner' is not instance of type (expected Tuple[int] or List[int]") globalStrides = [] if globalStrides is None else globalStrides if not all(isinstance(_x, (cuuint64_t,)) for _x in globalStrides): - raise TypeError("Argument 'globalStrides' is not instance of type (expected Tuple[ccuda.cuuint64_t,] or List[ccuda.cuuint64_t,]") + raise TypeError("Argument 'globalStrides' is not instance of type (expected Tuple[cydriver.cuuint64_t,] or List[cydriver.cuuint64_t,]") globalDim = [] if globalDim is None else globalDim if not all(isinstance(_x, (cuuint64_t,)) for _x in globalDim): - raise TypeError("Argument 'globalDim' is not instance of type (expected Tuple[ccuda.cuuint64_t,] or List[ccuda.cuuint64_t,]") - cdef ccuda.cuuint32_t ctensorRank + raise TypeError("Argument 'globalDim' is not instance of type (expected Tuple[cydriver.cuuint64_t,] or List[cydriver.cuuint64_t,]") + cdef cydriver.cuuint32_t cytensorRank if tensorRank is None: - ctensorRank = 0 + cytensorRank = 0 elif isinstance(tensorRank, (cuuint32_t,)): ptensorRank = int(tensorRank) - ctensorRank = ptensorRank + cytensorRank = ptensorRank else: ptensorRank = int(cuuint32_t(tensorRank)) - ctensorRank = ptensorRank + cytensorRank = ptensorRank cdef CUtensorMap tensorMap = CUtensorMap() - cdef ccuda.CUtensorMapDataType ctensorDataType = tensorDataType.value - cglobalAddress = utils.HelperInputVoidPtr(globalAddress) - cdef void* cglobalAddress_ptr = cglobalAddress.cptr - cdef ccuda.cuuint64_t* cglobalDim = NULL + cdef cydriver.CUtensorMapDataType cytensorDataType = tensorDataType.value + cyglobalAddress = utils.HelperInputVoidPtr(globalAddress) + cdef void* cyglobalAddress_ptr = cyglobalAddress.cptr + cdef cydriver.cuuint64_t* cyglobalDim = NULL if len(globalDim) > 0: - cglobalDim = calloc(len(globalDim), 
sizeof(ccuda.cuuint64_t)) - if cglobalDim is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(globalDim)) + 'x' + str(sizeof(ccuda.cuuint64_t))) + cyglobalDim = calloc(len(globalDim), sizeof(cydriver.cuuint64_t)) + if cyglobalDim is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(globalDim)) + 'x' + str(sizeof(cydriver.cuuint64_t))) else: for idx in range(len(globalDim)): - cglobalDim[idx] = (globalDim[idx])._ptr[0] - cdef ccuda.cuuint64_t* cglobalStrides = NULL + cyglobalDim[idx] = (globalDim[idx])._ptr[0] + cdef cydriver.cuuint64_t* cyglobalStrides = NULL if len(globalStrides) > 0: - cglobalStrides = calloc(len(globalStrides), sizeof(ccuda.cuuint64_t)) - if cglobalStrides is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(globalStrides)) + 'x' + str(sizeof(ccuda.cuuint64_t))) + cyglobalStrides = calloc(len(globalStrides), sizeof(cydriver.cuuint64_t)) + if cyglobalStrides is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(globalStrides)) + 'x' + str(sizeof(cydriver.cuuint64_t))) else: for idx in range(len(globalStrides)): - cglobalStrides[idx] = (globalStrides[idx])._ptr[0] - cdef vector[int] cpixelBoxLowerCorner = pixelBoxLowerCorner - cdef vector[int] cpixelBoxUpperCorner = pixelBoxUpperCorner - cdef ccuda.cuuint32_t* celementStrides = NULL + cyglobalStrides[idx] = (globalStrides[idx])._ptr[0] + cdef vector[int] cypixelBoxLowerCorner = pixelBoxLowerCorner + cdef vector[int] cypixelBoxUpperCorner = pixelBoxUpperCorner + cdef cydriver.cuuint32_t* cyelementStrides = NULL if len(elementStrides) > 0: - celementStrides = calloc(len(elementStrides), sizeof(ccuda.cuuint32_t)) - if celementStrides is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(elementStrides)) + 'x' + str(sizeof(ccuda.cuuint32_t))) + cyelementStrides = calloc(len(elementStrides), sizeof(cydriver.cuuint32_t)) + if cyelementStrides is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(elementStrides)) + 'x' + str(sizeof(cydriver.cuuint32_t))) else: for idx in range(len(elementStrides)): - celementStrides[idx] = (elementStrides[idx])._ptr[0] - cdef ccuda.CUtensorMapInterleave cinterleave = interleave.value - cdef ccuda.CUtensorMapSwizzle cswizzle = swizzle.value - cdef ccuda.CUtensorMapL2promotion cl2Promotion = l2Promotion.value - cdef ccuda.CUtensorMapFloatOOBfill coobFill = oobFill.value - err = ccuda.cuTensorMapEncodeIm2col(tensorMap._ptr, ctensorDataType, ctensorRank, cglobalAddress_ptr, (globalDim[0])._ptr if len(globalDim) == 1 else cglobalDim, (globalStrides[0])._ptr if len(globalStrides) == 1 else cglobalStrides, cpixelBoxLowerCorner.data(), cpixelBoxUpperCorner.data(), cchannelsPerPixel, cpixelsPerColumn, (elementStrides[0])._ptr if len(elementStrides) == 1 else celementStrides, cinterleave, cswizzle, cl2Promotion, coobFill) - if cglobalDim is not NULL: - free(cglobalDim) - if cglobalStrides is not NULL: - free(cglobalStrides) - if celementStrides is not NULL: - free(celementStrides) + cyelementStrides[idx] = (elementStrides[idx])._ptr[0] + cdef cydriver.CUtensorMapInterleave cyinterleave = interleave.value + cdef cydriver.CUtensorMapSwizzle cyswizzle = swizzle.value + cdef cydriver.CUtensorMapL2promotion cyl2Promotion = l2Promotion.value + cdef cydriver.CUtensorMapFloatOOBfill cyoobFill = oobFill.value + err = cydriver.cuTensorMapEncodeIm2col(tensorMap._ptr, cytensorDataType, cytensorRank, cyglobalAddress_ptr, (globalDim[0])._ptr if 
len(globalDim) == 1 else cyglobalDim, (globalStrides[0])._ptr if len(globalStrides) == 1 else cyglobalStrides, cypixelBoxLowerCorner.data(), cypixelBoxUpperCorner.data(), cychannelsPerPixel, cypixelsPerColumn, (elementStrides[0])._ptr if len(elementStrides) == 1 else cyelementStrides, cyinterleave, cyswizzle, cyl2Promotion, cyoobFill) + if cyglobalDim is not NULL: + free(cyglobalDim) + if cyglobalStrides is not NULL: + free(cyglobalStrides) + if cyelementStrides is not NULL: + free(cyelementStrides) return (CUresult(err), tensorMap) {{endif}} @@ -43037,10 +43037,10 @@ def cuTensorMapReplaceAddress(tensorMap : Optional[CUtensorMap], globalAddress): -------- :py:obj:`~.cuTensorMapEncodeTiled`, :py:obj:`~.cuTensorMapEncodeIm2col` """ - cdef ccuda.CUtensorMap* ctensorMap_ptr = tensorMap._ptr if tensorMap != None else NULL - cglobalAddress = utils.HelperInputVoidPtr(globalAddress) - cdef void* cglobalAddress_ptr = cglobalAddress.cptr - err = ccuda.cuTensorMapReplaceAddress(ctensorMap_ptr, cglobalAddress_ptr) + cdef cydriver.CUtensorMap* cytensorMap_ptr = tensorMap._ptr if tensorMap != None else NULL + cyglobalAddress = utils.HelperInputVoidPtr(globalAddress) + cdef void* cyglobalAddress_ptr = cyglobalAddress.cptr + err = cydriver.cuTensorMapReplaceAddress(cytensorMap_ptr, cyglobalAddress_ptr) return (CUresult(err),) {{endif}} @@ -43076,26 +43076,26 @@ def cuDeviceCanAccessPeer(dev, peerDev): -------- :py:obj:`~.cuCtxEnablePeerAccess`, :py:obj:`~.cuCtxDisablePeerAccess`, :py:obj:`~.cudaDeviceCanAccessPeer` """ - cdef ccuda.CUdevice cpeerDev + cdef cydriver.CUdevice cypeerDev if peerDev is None: - cpeerDev = 0 + cypeerDev = 0 elif isinstance(peerDev, (CUdevice,)): ppeerDev = int(peerDev) - cpeerDev = ppeerDev + cypeerDev = ppeerDev else: ppeerDev = int(CUdevice(peerDev)) - cpeerDev = ppeerDev - cdef ccuda.CUdevice cdev + cypeerDev = ppeerDev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev + cydev = pdev cdef int canAccessPeer = 0 - err = ccuda.cuDeviceCanAccessPeer(&canAccessPeer, cdev, cpeerDev) + err = cydriver.cuDeviceCanAccessPeer(&canAccessPeer, cydev, cypeerDev) return (CUresult(err), canAccessPeer) {{endif}} @@ -43156,16 +43156,16 @@ def cuCtxEnablePeerAccess(peerContext, unsigned int Flags): -------- :py:obj:`~.cuDeviceCanAccessPeer`, :py:obj:`~.cuCtxDisablePeerAccess`, :py:obj:`~.cudaDeviceEnablePeerAccess` """ - cdef ccuda.CUcontext cpeerContext + cdef cydriver.CUcontext cypeerContext if peerContext is None: - cpeerContext = 0 + cypeerContext = 0 elif isinstance(peerContext, (CUcontext,)): ppeerContext = int(peerContext) - cpeerContext = ppeerContext + cypeerContext = ppeerContext else: ppeerContext = int(CUcontext(peerContext)) - cpeerContext = ppeerContext - err = ccuda.cuCtxEnablePeerAccess(cpeerContext, Flags) + cypeerContext = ppeerContext + err = cydriver.cuCtxEnablePeerAccess(cypeerContext, Flags) return (CUresult(err),) {{endif}} @@ -43196,16 +43196,16 @@ def cuCtxDisablePeerAccess(peerContext): -------- :py:obj:`~.cuDeviceCanAccessPeer`, :py:obj:`~.cuCtxEnablePeerAccess`, :py:obj:`~.cudaDeviceDisablePeerAccess` """ - cdef ccuda.CUcontext cpeerContext + cdef cydriver.CUcontext cypeerContext if peerContext is None: - cpeerContext = 0 + cypeerContext = 0 elif isinstance(peerContext, (CUcontext,)): ppeerContext = int(peerContext) - cpeerContext = ppeerContext + cypeerContext = ppeerContext else: ppeerContext = 
int(CUcontext(peerContext)) - cpeerContext = ppeerContext - err = ccuda.cuCtxDisablePeerAccess(cpeerContext) + cypeerContext = ppeerContext + err = cydriver.cuCtxDisablePeerAccess(cypeerContext) return (CUresult(err),) {{endif}} @@ -43258,27 +43258,27 @@ def cuDeviceGetP2PAttribute(attrib not None : CUdevice_P2PAttribute, srcDevice, -------- :py:obj:`~.cuCtxEnablePeerAccess`, :py:obj:`~.cuCtxDisablePeerAccess`, :py:obj:`~.cuDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceGetP2PAttribute` """ - cdef ccuda.CUdevice cdstDevice + cdef cydriver.CUdevice cydstDevice if dstDevice is None: - cdstDevice = 0 + cydstDevice = 0 elif isinstance(dstDevice, (CUdevice,)): pdstDevice = int(dstDevice) - cdstDevice = pdstDevice + cydstDevice = pdstDevice else: pdstDevice = int(CUdevice(dstDevice)) - cdstDevice = pdstDevice - cdef ccuda.CUdevice csrcDevice + cydstDevice = pdstDevice + cdef cydriver.CUdevice cysrcDevice if srcDevice is None: - csrcDevice = 0 + cysrcDevice = 0 elif isinstance(srcDevice, (CUdevice,)): psrcDevice = int(srcDevice) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice else: psrcDevice = int(CUdevice(srcDevice)) - csrcDevice = psrcDevice + cysrcDevice = psrcDevice cdef int value = 0 - cdef ccuda.CUdevice_P2PAttribute cattrib = attrib.value - err = ccuda.cuDeviceGetP2PAttribute(&value, cattrib, csrcDevice, cdstDevice) + cdef cydriver.CUdevice_P2PAttribute cyattrib = attrib.value + err = cydriver.cuDeviceGetP2PAttribute(&value, cyattrib, cysrcDevice, cydstDevice) return (CUresult(err), value) {{endif}} @@ -43308,16 +43308,16 @@ def cuGraphicsUnregisterResource(resource): -------- :py:obj:`~.cuGraphicsD3D9RegisterResource`, :py:obj:`~.cuGraphicsD3D10RegisterResource`, :py:obj:`~.cuGraphicsD3D11RegisterResource`, :py:obj:`~.cuGraphicsGLRegisterBuffer`, :py:obj:`~.cuGraphicsGLRegisterImage`, :py:obj:`~.cudaGraphicsUnregisterResource` """ - cdef ccuda.CUgraphicsResource cresource + cdef cydriver.CUgraphicsResource cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (CUgraphicsResource,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(CUgraphicsResource(resource)) - cresource = presource - err = ccuda.cuGraphicsUnregisterResource(cresource) + cyresource = presource + err = cydriver.cuGraphicsUnregisterResource(cyresource) return (CUresult(err),) {{endif}} @@ -43363,17 +43363,17 @@ def cuGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsig -------- :py:obj:`~.cuGraphicsResourceGetMappedPointer`, :py:obj:`~.cudaGraphicsSubResourceGetMappedArray` """ - cdef ccuda.CUgraphicsResource cresource + cdef cydriver.CUgraphicsResource cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (CUgraphicsResource,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(CUgraphicsResource(resource)) - cresource = presource + cyresource = presource cdef CUarray pArray = CUarray() - err = ccuda.cuGraphicsSubResourceGetMappedArray(pArray._ptr, cresource, arrayIndex, mipLevel) + err = cydriver.cuGraphicsSubResourceGetMappedArray(pArray._ptr, cyresource, arrayIndex, mipLevel) return (CUresult(err), pArray) {{endif}} @@ -43408,17 +43408,17 @@ def cuGraphicsResourceGetMappedMipmappedArray(resource): -------- :py:obj:`~.cuGraphicsResourceGetMappedPointer`, :py:obj:`~.cudaGraphicsResourceGetMappedMipmappedArray` """ - cdef ccuda.CUgraphicsResource cresource + cdef cydriver.CUgraphicsResource cyresource if resource is None: - 
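A minimal sketch of how the renamed peer-to-peer entry points are called from Python (illustrative only; assumes two visible CUDA devices and elides error checking):

    from cuda.bindings import driver

    driver.cuInit(0)
    err, dev0 = driver.cuDeviceGet(0)
    err, dev1 = driver.cuDeviceGet(1)
    # canAccessPeer is 1 if dev0 can map dev1's memory, 0 otherwise
    err, can = driver.cuDeviceCanAccessPeer(dev0, dev1)
    if can:
        err, ctx0 = driver.cuCtxCreate(0, dev0)
        err, ctx1 = driver.cuCtxCreate(0, dev1)
        err, = driver.cuCtxSetCurrent(ctx0)
        err, = driver.cuCtxEnablePeerAccess(ctx1, 0)  # Flags is reserved, must be 0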
cresource = 0 + cyresource = 0 elif isinstance(resource, (CUgraphicsResource,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(CUgraphicsResource(resource)) - cresource = presource + cyresource = presource cdef CUmipmappedArray pMipmappedArray = CUmipmappedArray() - err = ccuda.cuGraphicsResourceGetMappedMipmappedArray(pMipmappedArray._ptr, cresource) + err = cydriver.cuGraphicsResourceGetMappedMipmappedArray(pMipmappedArray._ptr, cyresource) return (CUresult(err), pMipmappedArray) {{endif}} @@ -43452,18 +43452,18 @@ def cuGraphicsResourceGetMappedPointer(resource): pSize : int None """ - cdef ccuda.CUgraphicsResource cresource + cdef cydriver.CUgraphicsResource cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (CUgraphicsResource,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(CUgraphicsResource(resource)) - cresource = presource + cyresource = presource cdef CUdeviceptr pDevPtr = CUdeviceptr() cdef size_t pSize = 0 - err = ccuda.cuGraphicsResourceGetMappedPointer(pDevPtr._ptr, &pSize, cresource) + err = cydriver.cuGraphicsResourceGetMappedPointer(pDevPtr._ptr, &pSize, cyresource) return (CUresult(err), pDevPtr, pSize) {{endif}} @@ -43513,16 +43513,16 @@ def cuGraphicsResourceSetMapFlags(resource, unsigned int flags): -------- :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cudaGraphicsResourceSetMapFlags` """ - cdef ccuda.CUgraphicsResource cresource + cdef cydriver.CUgraphicsResource cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (CUgraphicsResource,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(CUgraphicsResource(resource)) - cresource = presource - err = ccuda.cuGraphicsResourceSetMapFlags(cresource, flags) + cyresource = presource + err = cydriver.cuGraphicsResourceSetMapFlags(cyresource, flags) return (CUresult(err),) {{endif}} @@ -43566,26 +43566,26 @@ def cuGraphicsMapResources(unsigned int count, resources, hStream): -------- :py:obj:`~.cuGraphicsResourceGetMappedPointer`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray`, :py:obj:`~.cuGraphicsUnmapResources`, :py:obj:`~.cudaGraphicsMapResources` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUgraphicsResource *cresources + cyhStream = phStream + cdef cydriver.CUgraphicsResource *cyresources if resources is None: - cresources = NULL + cyresources = NULL elif isinstance(resources, (CUgraphicsResource,)): presources = resources.getPtr() - cresources = presources + cyresources = presources elif isinstance(resources, (int)): - cresources = resources + cyresources = resources else: - raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) - err = ccuda.cuGraphicsMapResources(count, cresources, chStream) + raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) + err = cydriver.cuGraphicsMapResources(count, cyresources, cyhStream) return (CUresult(err),) {{endif}} @@ -43627,26 +43627,26 @@ def cuGraphicsUnmapResources(unsigned int count, resources, hStream): -------- :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cudaGraphicsUnmapResources` """ - 
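For orientation, a small sketch of the map/unmap flow these wrappers expose. `res` is an assumption here: a CUgraphicsResource previously registered through one of the register calls (e.g. cuGraphicsGLRegisterBuffer); `0` selects the default stream:

    from cuda.bindings import driver

    err, = driver.cuGraphicsMapResources(1, res, 0)
    err, dptr, nbytes = driver.cuGraphicsResourceGetMappedPointer(res)
    # ... dptr/nbytes are now valid for memcpy or kernel use ...
    err, = driver.cuGraphicsUnmapResources(1, res, 0)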
cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream - cdef ccuda.CUgraphicsResource *cresources + cyhStream = phStream + cdef cydriver.CUgraphicsResource *cyresources if resources is None: - cresources = NULL + cyresources = NULL elif isinstance(resources, (CUgraphicsResource,)): presources = resources.getPtr() - cresources = presources + cyresources = presources elif isinstance(resources, (int)): - cresources = resources + cyresources = resources else: - raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) - err = ccuda.cuGraphicsUnmapResources(count, cresources, chStream) + raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) + err = cydriver.cuGraphicsUnmapResources(count, cyresources, cyhStream) return (CUresult(err),) {{endif}} @@ -43737,18 +43737,18 @@ def cuGetProcAddress(char* symbol, int cudaVersion, flags): -------- :py:obj:`~.cudaGetDriverEntryPoint` """ - cdef ccuda.cuuint64_t cflags + cdef cydriver.cuuint64_t cyflags if flags is None: - cflags = 0 + cyflags = 0 elif isinstance(flags, (cuuint64_t,)): pflags = int(flags) - cflags = pflags + cyflags = pflags else: pflags = int(cuuint64_t(flags)) - cflags = pflags + cyflags = pflags cdef void_ptr pfn = 0 - cdef ccuda.CUdriverProcAddressQueryResult symbolStatus - err = ccuda.cuGetProcAddress(symbol, &pfn, cudaVersion, cflags, &symbolStatus) + cdef cydriver.CUdriverProcAddressQueryResult symbolStatus + err = cydriver.cuGetProcAddress(symbol, &pfn, cudaVersion, cyflags, &symbolStatus) return (CUresult(err), pfn, CUdriverProcAddressQueryResult(symbolStatus)) {{endif}} @@ -43861,12 +43861,12 @@ def cuCoredumpGetAttribute(attrib not None : CUcoredumpSettings): -------- :py:obj:`~.cuCoredumpGetAttributeGlobal`, :py:obj:`~.cuCoredumpSetAttribute`, :py:obj:`~.cuCoredumpSetAttributeGlobal` """ - cdef ccuda.CUcoredumpSettings cattrib = attrib.value - cdef utils.HelperCUcoredumpSettings cvalue = utils.HelperCUcoredumpSettings(attrib, 0, is_getter=True) - cdef void* cvalue_ptr = cvalue.cptr - cdef size_t size = cvalue.size() - err = ccuda.cuCoredumpGetAttribute(cattrib, cvalue_ptr, &size) - return (CUresult(err), cvalue.pyObj()) + cdef cydriver.CUcoredumpSettings cyattrib = attrib.value + cdef utils.HelperCUcoredumpSettings cyvalue = utils.HelperCUcoredumpSettings(attrib, 0, is_getter=True) + cdef void* cyvalue_ptr = cyvalue.cptr + cdef size_t size = cyvalue.size() + err = cydriver.cuCoredumpGetAttribute(cyattrib, cyvalue_ptr, &size) + return (CUresult(err), cyvalue.pyObj()) {{endif}} {{if 'cuCoredumpGetAttributeGlobal' in found_functions}} @@ -43974,12 +43974,12 @@ def cuCoredumpGetAttributeGlobal(attrib not None : CUcoredumpSettings): -------- :py:obj:`~.cuCoredumpGetAttribute`, :py:obj:`~.cuCoredumpSetAttribute`, :py:obj:`~.cuCoredumpSetAttributeGlobal` """ - cdef ccuda.CUcoredumpSettings cattrib = attrib.value - cdef utils.HelperCUcoredumpSettings cvalue = utils.HelperCUcoredumpSettings(attrib, 0, is_getter=True) - cdef void* cvalue_ptr = cvalue.cptr - cdef size_t size = cvalue.size() - err = ccuda.cuCoredumpGetAttributeGlobal(cattrib, cvalue_ptr, &size) - return (CUresult(err), cvalue.pyObj()) + cdef cydriver.CUcoredumpSettings cyattrib = attrib.value + cdef utils.HelperCUcoredumpSettings cyvalue = 
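A sketch of looking up a raw driver entry point through the renamed binding; `12060` requests the CUDA 12.6 ABI and flags `0` selects the default search behavior:

    from cuda.bindings import driver

    err, pfn, status = driver.cuGetProcAddress(b"cuDeviceGetCount", 12060, 0)
    if status == driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS:
        print(hex(pfn))  # raw C function pointer; only callable from C/Cython code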
utils.HelperCUcoredumpSettings(attrib, 0, is_getter=True) + cdef void* cyvalue_ptr = cyvalue.cptr + cdef size_t size = cyvalue.size() + err = cydriver.cuCoredumpGetAttributeGlobal(cyattrib, cyvalue_ptr, &size) + return (CUresult(err), cyvalue.pyObj()) {{endif}} {{if 'cuCoredumpSetAttribute' in found_functions}} @@ -44094,11 +44094,11 @@ def cuCoredumpSetAttribute(attrib not None : CUcoredumpSettings, value): -------- :py:obj:`~.cuCoredumpGetAttributeGlobal`, :py:obj:`~.cuCoredumpGetAttribute`, :py:obj:`~.cuCoredumpSetAttributeGlobal` """ - cdef ccuda.CUcoredumpSettings cattrib = attrib.value - cdef utils.HelperCUcoredumpSettings cvalue = utils.HelperCUcoredumpSettings(attrib, value, is_getter=False) - cdef void* cvalue_ptr = cvalue.cptr - cdef size_t size = cvalue.size() - err = ccuda.cuCoredumpSetAttribute(cattrib, cvalue_ptr, &size) + cdef cydriver.CUcoredumpSettings cyattrib = attrib.value + cdef utils.HelperCUcoredumpSettings cyvalue = utils.HelperCUcoredumpSettings(attrib, value, is_getter=False) + cdef void* cyvalue_ptr = cyvalue.cptr + cdef size_t size = cyvalue.size() + err = cydriver.cuCoredumpSetAttribute(cyattrib, cyvalue_ptr, &size) return (CUresult(err),) {{endif}} @@ -44219,11 +44219,11 @@ def cuCoredumpSetAttributeGlobal(attrib not None : CUcoredumpSettings, value): -------- :py:obj:`~.cuCoredumpGetAttribute`, :py:obj:`~.cuCoredumpGetAttributeGlobal`, :py:obj:`~.cuCoredumpSetAttribute` """ - cdef ccuda.CUcoredumpSettings cattrib = attrib.value - cdef utils.HelperCUcoredumpSettings cvalue = utils.HelperCUcoredumpSettings(attrib, value, is_getter=False) - cdef void* cvalue_ptr = cvalue.cptr - cdef size_t size = cvalue.size() - err = ccuda.cuCoredumpSetAttributeGlobal(cattrib, cvalue_ptr, &size) + cdef cydriver.CUcoredumpSettings cyattrib = attrib.value + cdef utils.HelperCUcoredumpSettings cyvalue = utils.HelperCUcoredumpSettings(attrib, value, is_getter=False) + cdef void* cyvalue_ptr = cyvalue.cptr + cdef size_t size = cyvalue.size() + err = cydriver.cuCoredumpSetAttributeGlobal(cyattrib, cyvalue_ptr, &size) return (CUresult(err),) {{endif}} @@ -44246,8 +44246,8 @@ def cuGetExportTable(pExportTableId : Optional[CUuuid]): None """ cdef void_ptr ppExportTable = 0 - cdef ccuda.CUuuid* cpExportTableId_ptr = pExportTableId._ptr if pExportTableId != None else NULL - err = ccuda.cuGetExportTable(&ppExportTable, cpExportTableId_ptr) + cdef cydriver.CUuuid* cypExportTableId_ptr = pExportTableId._ptr if pExportTableId != None else NULL + err = cydriver.cuGetExportTable(&ppExportTable, cypExportTableId_ptr) return (CUresult(err), ppExportTable) {{endif}} @@ -44303,26 +44303,26 @@ def cuGreenCtxCreate(desc, dev, unsigned int flags): -------- :py:obj:`~.cuGreenCtxDestroy`, :py:obj:`~.cuCtxFromGreenCtx`, :py:obj:`~.cuCtxSetCurrent`, :py:obj:`~.cuCtxPushCurrent`, :py:obj:`~.cuDevResourceGenerateDesc`, :py:obj:`~.cuDevicePrimaryCtxRetain`, :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuCtxCreate_v3` """ - cdef ccuda.CUdevice cdev + cdef cydriver.CUdevice cydev if dev is None: - cdev = 0 + cydev = 0 elif isinstance(dev, (CUdevice,)): pdev = int(dev) - cdev = pdev + cydev = pdev else: pdev = int(CUdevice(dev)) - cdev = pdev - cdef ccuda.CUdevResourceDesc cdesc + cydev = pdev + cdef cydriver.CUdevResourceDesc cydesc if desc is None: - cdesc = 0 + cydesc = 0 elif isinstance(desc, (CUdevResourceDesc,)): pdesc = int(desc) - cdesc = pdesc + cydesc = pdesc else: pdesc = int(CUdevResourceDesc(desc)) - cdesc = pdesc + cydesc = pdesc cdef CUgreenCtx phCtx = CUgreenCtx() - err = 
ccuda.cuGreenCtxCreate(phCtx._ptr, cdesc, cdev, flags) + err = cydriver.cuGreenCtxCreate(phCtx._ptr, cydesc, cydev, flags) return (CUresult(err), phCtx) {{endif}} @@ -44351,16 +44351,16 @@ def cuGreenCtxDestroy(hCtx): -------- :py:obj:`~.cuGreenCtxCreate`, :py:obj:`~.cuCtxDestroy` """ - cdef ccuda.CUgreenCtx chCtx + cdef cydriver.CUgreenCtx cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUgreenCtx,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUgreenCtx(hCtx)) - chCtx = phCtx - err = ccuda.cuGreenCtxDestroy(chCtx) + cyhCtx = phCtx + err = cydriver.cuGreenCtxDestroy(cyhCtx) return (CUresult(err),) {{endif}} @@ -44397,17 +44397,17 @@ def cuCtxFromGreenCtx(hCtx): -------- :py:obj:`~.cuGreenCtxCreate` """ - cdef ccuda.CUgreenCtx chCtx + cdef cydriver.CUgreenCtx cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUgreenCtx,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUgreenCtx(hCtx)) - chCtx = phCtx + cyhCtx = phCtx cdef CUcontext pContext = CUcontext() - err = ccuda.cuCtxFromGreenCtx(pContext._ptr, chCtx) + err = cydriver.cuCtxFromGreenCtx(pContext._ptr, cyhCtx) return (CUresult(err), pContext) {{endif}} @@ -44441,18 +44441,18 @@ def cuDeviceGetDevResource(device, typename not None : CUdevResourceType): -------- :py:obj:`~.cuDevResourceGenerateDesc` """ - cdef ccuda.CUdevice cdevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice + cydevice = pdevice cdef CUdevResource resource = CUdevResource() - cdef ccuda.CUdevResourceType ctypename = typename.value - err = ccuda.cuDeviceGetDevResource(cdevice, resource._ptr, ctypename) + cdef cydriver.CUdevResourceType cytypename = typename.value + err = cydriver.cuDeviceGetDevResource(cydevice, resource._ptr, cytypename) return (CUresult(err), resource) {{endif}} @@ -44483,18 +44483,18 @@ def cuCtxGetDevResource(hCtx, typename not None : CUdevResourceType): -------- :py:obj:`~.cuDevResourceGenerateDesc` """ - cdef ccuda.CUcontext chCtx + cdef cydriver.CUcontext cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUcontext,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUcontext(hCtx)) - chCtx = phCtx + cyhCtx = phCtx cdef CUdevResource resource = CUdevResource() - cdef ccuda.CUdevResourceType ctypename = typename.value - err = ccuda.cuCtxGetDevResource(chCtx, resource._ptr, ctypename) + cdef cydriver.CUdevResourceType cytypename = typename.value + err = cydriver.cuCtxGetDevResource(cyhCtx, resource._ptr, cytypename) return (CUresult(err), resource) {{endif}} @@ -44525,18 +44525,18 @@ def cuGreenCtxGetDevResource(hCtx, typename not None : CUdevResourceType): -------- :py:obj:`~.cuDevResourceGenerateDesc` """ - cdef ccuda.CUgreenCtx chCtx + cdef cydriver.CUgreenCtx cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUgreenCtx,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUgreenCtx(hCtx)) - chCtx = phCtx + cyhCtx = phCtx cdef CUdevResource resource = CUdevResource() - cdef ccuda.CUdevResourceType ctypename = typename.value - err = ccuda.cuGreenCtxGetDevResource(chCtx, resource._ptr, ctypename) + cdef cydriver.CUdevResourceType cytypename = typename.value + err = cydriver.cuGreenCtxGetDevResource(cyhCtx, resource._ptr, cytypename) return (CUresult(err), resource) {{endif}} @@ 
-44637,22 +44637,22 @@ def cuDevSmResourceSplitByCount(unsigned int nbGroups, input_ : Optional[CUdevRe
     --------
     :py:obj:`~.cuGreenCtxGetDevResource`, :py:obj:`~.cuCtxGetDevResource`, :py:obj:`~.cuDeviceGetDevResource`
     """
-    cdef ccuda.CUdevResource* cresult = NULL
+    cdef cydriver.CUdevResource* cyresult = NULL
     pyresult = [CUdevResource() for idx in range(nbGroups)]
     if nbGroups != 0:
-        cresult = <ccuda.CUdevResource*>calloc(nbGroups, sizeof(ccuda.CUdevResource))
-        if cresult is NULL:
-            raise MemoryError('Failed to allocate length x size memory: ' + str(nbGroups) + 'x' + str(sizeof(ccuda.CUdevResource)))
-    cdef unsigned int cnbGroups = nbGroups
-    cdef ccuda.CUdevResource* cinput__ptr = input_._ptr if input_ != None else NULL
+        cyresult = <cydriver.CUdevResource*>calloc(nbGroups, sizeof(cydriver.CUdevResource))
+        if cyresult is NULL:
+            raise MemoryError('Failed to allocate length x size memory: ' + str(nbGroups) + 'x' + str(sizeof(cydriver.CUdevResource)))
+    cdef unsigned int cynbGroups = nbGroups
+    cdef cydriver.CUdevResource* cyinput__ptr = input_._ptr if input_ != None else NULL
     cdef CUdevResource remaining = CUdevResource()
-    err = ccuda.cuDevSmResourceSplitByCount(cresult, &cnbGroups, cinput__ptr, remaining._ptr, useFlags, minCount)
+    err = cydriver.cuDevSmResourceSplitByCount(cyresult, &cynbGroups, cyinput__ptr, remaining._ptr, useFlags, minCount)
     if CUresult(err) == CUresult(0):
         for idx in range(nbGroups):
-            string.memcpy((pyresult[idx])._ptr, &cresult[idx], sizeof(ccuda.CUdevResource))
-    if cresult is not NULL:
-        free(cresult)
-    return (CUresult(err), pyresult, cnbGroups, remaining)
+            string.memcpy((pyresult[idx])._ptr, &cyresult[idx], sizeof(cydriver.CUdevResource))
+    if cyresult is not NULL:
+        free(cyresult)
+    return (CUresult(err), pyresult, cynbGroups, remaining)
 {{endif}}
 
 {{if 'cuDevResourceGenerateDesc' in found_functions}}
@@ -44701,19 +44701,19 @@ def cuDevResourceGenerateDesc(resources : Optional[Tuple[CUdevResource] | List[C
     """
     resources = [] if resources is None else resources
     if not all(isinstance(_x, (CUdevResource,)) for _x in resources):
-        raise TypeError("Argument 'resources' is not instance of type (expected Tuple[ccuda.CUdevResource,] or List[ccuda.CUdevResource,]")
+        raise TypeError("Argument 'resources' is not instance of type (expected Tuple[cydriver.CUdevResource,] or List[cydriver.CUdevResource,]")
     cdef CUdevResourceDesc phDesc = CUdevResourceDesc()
-    cdef ccuda.CUdevResource* cresources = NULL
+    cdef cydriver.CUdevResource* cyresources = NULL
     if len(resources) > 0:
-        cresources = <ccuda.CUdevResource*>calloc(len(resources), sizeof(ccuda.CUdevResource))
-        if cresources is NULL:
-            raise MemoryError('Failed to allocate length x size memory: ' + str(len(resources)) + 'x' + str(sizeof(ccuda.CUdevResource)))
+        cyresources = <cydriver.CUdevResource*>calloc(len(resources), sizeof(cydriver.CUdevResource))
+        if cyresources is NULL:
+            raise MemoryError('Failed to allocate length x size memory: ' + str(len(resources)) + 'x' + str(sizeof(cydriver.CUdevResource)))
         for idx in range(len(resources)):
-            string.memcpy(&cresources[idx], (resources[idx])._ptr, sizeof(ccuda.CUdevResource))
+            string.memcpy(&cyresources[idx], (resources[idx])._ptr, sizeof(cydriver.CUdevResource))
     if nbResources > len(resources):
         raise RuntimeError("List is too small: " + str(len(resources)) + " < " + str(nbResources))
-    err = ccuda.cuDevResourceGenerateDesc(phDesc._ptr, (resources[0])._ptr if len(resources) == 1 else cresources, nbResources)
-    if cresources is not NULL:
-        free(cresources)
+    err = cydriver.cuDevResourceGenerateDesc(phDesc._ptr, (resources[0])._ptr if len(resources) == 1 else
cyresources, nbResources) + if cyresources is not NULL: + free(cyresources) return (CUresult(err), phDesc) {{endif}} @@ -44751,25 +44751,25 @@ def cuGreenCtxRecordEvent(hCtx, hEvent): ----- The API will return :py:obj:`~.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED` if the specified green context `hCtx` has a stream in the capture mode. In such a case, the call will invalidate all the conflicting captures. """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - cdef ccuda.CUgreenCtx chCtx + cyhEvent = phEvent + cdef cydriver.CUgreenCtx cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUgreenCtx,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUgreenCtx(hCtx)) - chCtx = phCtx - err = ccuda.cuGreenCtxRecordEvent(chCtx, chEvent) + cyhCtx = phCtx + err = cydriver.cuGreenCtxRecordEvent(cyhCtx, cyhEvent) return (CUresult(err),) {{endif}} @@ -44807,25 +44807,25 @@ def cuGreenCtxWaitEvent(hCtx, hEvent): The API will return :py:obj:`~.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED` and invalidate the capture if the specified event `hEvent` is part of an ongoing capture sequence or if the specified green context `hCtx` has a stream in the capture mode. """ - cdef ccuda.CUevent chEvent + cdef cydriver.CUevent cyhEvent if hEvent is None: - chEvent = 0 + cyhEvent = 0 elif isinstance(hEvent, (CUevent,)): phEvent = int(hEvent) - chEvent = phEvent + cyhEvent = phEvent else: phEvent = int(CUevent(hEvent)) - chEvent = phEvent - cdef ccuda.CUgreenCtx chCtx + cyhEvent = phEvent + cdef cydriver.CUgreenCtx cyhCtx if hCtx is None: - chCtx = 0 + cyhCtx = 0 elif isinstance(hCtx, (CUgreenCtx,)): phCtx = int(hCtx) - chCtx = phCtx + cyhCtx = phCtx else: phCtx = int(CUgreenCtx(hCtx)) - chCtx = phCtx - err = ccuda.cuGreenCtxWaitEvent(chCtx, chEvent) + cyhCtx = phCtx + err = cydriver.cuGreenCtxWaitEvent(cyhCtx, cyhEvent) return (CUresult(err),) {{endif}} @@ -44873,17 +44873,17 @@ def cuStreamGetGreenCtx(hStream): -------- :py:obj:`~.cuStreamDestroy`, :py:obj:`~.cuStreamCreate`, :py:obj:`~.cuStreamCreateWithPriority`, :py:obj:`~.cuStreamGetCtx_v2`, :py:obj:`~.cuGreenCtxStreamCreate`, :py:obj:`~.cuStreamGetPriority`, :py:obj:`~.cuStreamGetFlags`, :py:obj:`~.cuStreamWaitEvent`, :py:obj:`~.cuStreamQuery`, :py:obj:`~.cuStreamSynchronize`, :py:obj:`~.cuStreamAddCallback`, :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags` """ - cdef ccuda.CUstream chStream + cdef cydriver.CUstream cyhStream if hStream is None: - chStream = 0 + cyhStream = 0 elif isinstance(hStream, (CUstream,)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(CUstream(hStream)) - chStream = phStream + cyhStream = phStream cdef CUgreenCtx phCtx = CUgreenCtx() - err = ccuda.cuStreamGetGreenCtx(chStream, phCtx._ptr) + err = cydriver.cuStreamGetGreenCtx(cyhStream, phCtx._ptr) return (CUresult(err), phCtx) {{endif}} @@ -44946,17 +44946,17 @@ def cuGreenCtxStreamCreate(greenCtx, unsigned int flags, int priority): ----- In the current implementation, only compute kernels launched in priority streams are affected by the stream's priority. Stream priorities have no effect on host-to-device and device-to-host memory operations. 
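A sketch of the green-context workflow the renamed wrappers above cover, from SM partitioning through stream creation (assumes a CUDA 12.6 driver; error checking elided; the SM counts are arbitrary):

    from cuda.bindings import driver

    driver.cuInit(0)
    err, dev = driver.cuDeviceGet(0)
    err, sm = driver.cuDeviceGetDevResource(
        dev, driver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM)
    # Carve one group of at least 8 SMs out of the device's SM resource.
    err, groups, nb, remaining = driver.cuDevSmResourceSplitByCount(1, sm, 0, 8)
    err, desc = driver.cuDevResourceGenerateDesc(groups, 1)
    err, gctx = driver.cuGreenCtxCreate(
        desc, dev, driver.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM)
    err, stream = driver.cuGreenCtxStreamCreate(
        gctx, driver.CUstream_flags.CU_STREAM_NON_BLOCKING, 0)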
""" - cdef ccuda.CUgreenCtx cgreenCtx + cdef cydriver.CUgreenCtx cygreenCtx if greenCtx is None: - cgreenCtx = 0 + cygreenCtx = 0 elif isinstance(greenCtx, (CUgreenCtx,)): pgreenCtx = int(greenCtx) - cgreenCtx = pgreenCtx + cygreenCtx = pgreenCtx else: pgreenCtx = int(CUgreenCtx(greenCtx)) - cgreenCtx = pgreenCtx + cygreenCtx = pgreenCtx cdef CUstream phStream = CUstream() - err = ccuda.cuGreenCtxStreamCreate(phStream._ptr, cgreenCtx, flags, priority) + err = cydriver.cuGreenCtxStreamCreate(phStream._ptr, cygreenCtx, flags, priority) return (CUresult(err), phStream) {{endif}} @@ -44983,7 +44983,7 @@ def cuProfilerStart(): -------- :py:obj:`~.cuProfilerInitialize`, :py:obj:`~.cuProfilerStop`, :py:obj:`~.cudaProfilerStart` """ - err = ccuda.cuProfilerStart() + err = cydriver.cuProfilerStart() return (CUresult(err),) {{endif}} @@ -45010,7 +45010,7 @@ def cuProfilerStop(): -------- :py:obj:`~.cuProfilerInitialize`, :py:obj:`~.cuProfilerStart`, :py:obj:`~.cudaProfilerStop` """ - err = ccuda.cuProfilerStop() + err = cydriver.cuProfilerStop() return (CUresult(err),) {{endif}} @@ -45074,17 +45074,17 @@ def cuGraphicsEGLRegisterImage(image, unsigned int flags): -------- :py:obj:`~.cuGraphicsEGLRegisterImage`, :py:obj:`~.cuGraphicsUnregisterResource`, :py:obj:`~.cuGraphicsResourceSetMapFlags`, :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cuGraphicsUnmapResources`, :py:obj:`~.cudaGraphicsEGLRegisterImage` """ - cdef ccuda.EGLImageKHR cimage + cdef cydriver.EGLImageKHR cyimage if image is None: - cimage = 0 + cyimage = 0 elif isinstance(image, (EGLImageKHR,)): pimage = int(image) - cimage = pimage + cyimage = pimage else: pimage = int(EGLImageKHR(image)) - cimage = pimage + cyimage = pimage cdef CUgraphicsResource pCudaResource = CUgraphicsResource() - err = ccuda.cuGraphicsEGLRegisterImage(pCudaResource._ptr, cimage, flags) + err = cydriver.cuGraphicsEGLRegisterImage(pCudaResource._ptr, cyimage, flags) return (CUresult(err), pCudaResource) {{endif}} @@ -45115,17 +45115,17 @@ def cuEGLStreamConsumerConnect(stream): -------- :py:obj:`~.cuEGLStreamConsumerConnect`, :py:obj:`~.cuEGLStreamConsumerDisconnect`, :py:obj:`~.cuEGLStreamConsumerAcquireFrame`, :py:obj:`~.cuEGLStreamConsumerReleaseFrame`, :py:obj:`~.cudaEGLStreamConsumerConnect` """ - cdef ccuda.EGLStreamKHR cstream + cdef cydriver.EGLStreamKHR cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (EGLStreamKHR,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(EGLStreamKHR(stream)) - cstream = pstream + cystream = pstream cdef CUeglStreamConnection conn = CUeglStreamConnection() - err = ccuda.cuEGLStreamConsumerConnect(conn._ptr, cstream) + err = cydriver.cuEGLStreamConsumerConnect(conn._ptr, cystream) return (CUresult(err), conn) {{endif}} @@ -45160,17 +45160,17 @@ def cuEGLStreamConsumerConnectWithFlags(stream, unsigned int flags): -------- :py:obj:`~.cuEGLStreamConsumerConnect`, :py:obj:`~.cuEGLStreamConsumerDisconnect`, :py:obj:`~.cuEGLStreamConsumerAcquireFrame`, :py:obj:`~.cuEGLStreamConsumerReleaseFrame`, :py:obj:`~.cudaEGLStreamConsumerConnectWithFlags` """ - cdef ccuda.EGLStreamKHR cstream + cdef cydriver.EGLStreamKHR cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (EGLStreamKHR,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(EGLStreamKHR(stream)) - cstream = pstream + cystream = pstream cdef CUeglStreamConnection conn = CUeglStreamConnection() - err = 
ccuda.cuEGLStreamConsumerConnectWithFlags(conn._ptr, cstream, flags) + err = cydriver.cuEGLStreamConsumerConnectWithFlags(conn._ptr, cystream, flags) return (CUresult(err), conn) {{endif}} @@ -45196,17 +45196,17 @@ def cuEGLStreamConsumerDisconnect(conn): -------- :py:obj:`~.cuEGLStreamConsumerConnect`, :py:obj:`~.cuEGLStreamConsumerDisconnect`, :py:obj:`~.cuEGLStreamConsumerAcquireFrame`, :py:obj:`~.cuEGLStreamConsumerReleaseFrame`, :py:obj:`~.cudaEGLStreamConsumerDisconnect` """ - cdef ccuda.CUeglStreamConnection *cconn + cdef cydriver.CUeglStreamConnection *cyconn if conn is None: - cconn = NULL + cyconn = NULL elif isinstance(conn, (CUeglStreamConnection,)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccuda.cuEGLStreamConsumerDisconnect(cconn) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cydriver.cuEGLStreamConsumerDisconnect(cyconn) return (CUresult(err),) {{endif}} @@ -45247,37 +45247,37 @@ def cuEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, unsigned int t -------- :py:obj:`~.cuEGLStreamConsumerConnect`, :py:obj:`~.cuEGLStreamConsumerDisconnect`, :py:obj:`~.cuEGLStreamConsumerAcquireFrame`, :py:obj:`~.cuEGLStreamConsumerReleaseFrame`, :py:obj:`~.cudaEGLStreamConsumerAcquireFrame` """ - cdef ccuda.CUstream *cpStream + cdef cydriver.CUstream *cypStream if pStream is None: - cpStream = NULL + cypStream = NULL elif isinstance(pStream, (CUstream,)): ppStream = pStream.getPtr() - cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccuda.CUgraphicsResource *cpCudaResource + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cydriver.CUgraphicsResource *cypCudaResource if pCudaResource is None: - cpCudaResource = NULL + cypCudaResource = NULL elif isinstance(pCudaResource, (CUgraphicsResource,)): ppCudaResource = pCudaResource.getPtr() - cpCudaResource = ppCudaResource + cypCudaResource = ppCudaResource elif isinstance(pCudaResource, (int)): - cpCudaResource = pCudaResource + cypCudaResource = pCudaResource else: - raise TypeError("Argument 'pCudaResource' is not instance of type (expected , found " + str(type(pCudaResource))) - cdef ccuda.CUeglStreamConnection *cconn + raise TypeError("Argument 'pCudaResource' is not instance of type (expected , found " + str(type(pCudaResource))) + cdef cydriver.CUeglStreamConnection *cyconn if conn is None: - cconn = NULL + cyconn = NULL elif isinstance(conn, (CUeglStreamConnection,)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccuda.cuEGLStreamConsumerAcquireFrame(cconn, cpCudaResource, cpStream, timeout) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cydriver.cuEGLStreamConsumerAcquireFrame(cyconn, cypCudaResource, cypStream, timeout) return (CUresult(err),) {{endif}} @@ -45311,36 +45311,36 @@ def cuEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream): -------- :py:obj:`~.cuEGLStreamConsumerConnect`, 
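A sketch of the consumer side of an EGL stream with the renamed wrappers. `egl_stream` is an assumption: an EGLStreamKHR handle created through EGL; the timeout is in microseconds:

    from cuda.bindings import driver

    err, conn = driver.cuEGLStreamConsumerConnect(egl_stream)
    resource = driver.CUgraphicsResource()
    stream = driver.CUstream()
    # Wait up to 16 ms for the producer to present a frame.
    err, = driver.cuEGLStreamConsumerAcquireFrame(conn, resource, stream, 16000)
    # ... map `resource` and consume the frame ...
    err, = driver.cuEGLStreamConsumerReleaseFrame(conn, resource, stream)
    err, = driver.cuEGLStreamConsumerDisconnect(conn)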
:py:obj:`~.cuEGLStreamConsumerDisconnect`, :py:obj:`~.cuEGLStreamConsumerAcquireFrame`, :py:obj:`~.cuEGLStreamConsumerReleaseFrame`, :py:obj:`~.cudaEGLStreamConsumerReleaseFrame` """ - cdef ccuda.CUstream *cpStream + cdef cydriver.CUstream *cypStream if pStream is None: - cpStream = NULL + cypStream = NULL elif isinstance(pStream, (CUstream,)): ppStream = pStream.getPtr() - cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccuda.CUgraphicsResource cpCudaResource + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cydriver.CUgraphicsResource cypCudaResource if pCudaResource is None: - cpCudaResource = 0 + cypCudaResource = 0 elif isinstance(pCudaResource, (CUgraphicsResource,)): ppCudaResource = int(pCudaResource) - cpCudaResource = ppCudaResource + cypCudaResource = ppCudaResource else: ppCudaResource = int(CUgraphicsResource(pCudaResource)) - cpCudaResource = ppCudaResource - cdef ccuda.CUeglStreamConnection *cconn + cypCudaResource = ppCudaResource + cdef cydriver.CUeglStreamConnection *cyconn if conn is None: - cconn = NULL + cyconn = NULL elif isinstance(conn, (CUeglStreamConnection,)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccuda.cuEGLStreamConsumerReleaseFrame(cconn, cpCudaResource, cpStream) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cydriver.cuEGLStreamConsumerReleaseFrame(cyconn, cypCudaResource, cypStream) return (CUresult(err),) {{endif}} @@ -45375,35 +45375,35 @@ def cuEGLStreamProducerConnect(stream, width, height): -------- :py:obj:`~.cuEGLStreamProducerConnect`, :py:obj:`~.cuEGLStreamProducerDisconnect`, :py:obj:`~.cuEGLStreamProducerPresentFrame`, :py:obj:`~.cudaEGLStreamProducerConnect` """ - cdef ccuda.EGLint cheight + cdef cydriver.EGLint cyheight if height is None: - cheight = 0 + cyheight = 0 elif isinstance(height, (EGLint,)): pheight = int(height) - cheight = pheight + cyheight = pheight else: pheight = int(EGLint(height)) - cheight = pheight - cdef ccuda.EGLint cwidth + cyheight = pheight + cdef cydriver.EGLint cywidth if width is None: - cwidth = 0 + cywidth = 0 elif isinstance(width, (EGLint,)): pwidth = int(width) - cwidth = pwidth + cywidth = pwidth else: pwidth = int(EGLint(width)) - cwidth = pwidth - cdef ccuda.EGLStreamKHR cstream + cywidth = pwidth + cdef cydriver.EGLStreamKHR cystream if stream is None: - cstream = 0 + cystream = 0 elif isinstance(stream, (EGLStreamKHR,)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(EGLStreamKHR(stream)) - cstream = pstream + cystream = pstream cdef CUeglStreamConnection conn = CUeglStreamConnection() - err = ccuda.cuEGLStreamProducerConnect(conn._ptr, cstream, cwidth, cheight) + err = cydriver.cuEGLStreamProducerConnect(conn._ptr, cystream, cywidth, cyheight) return (CUresult(err), conn) {{endif}} @@ -45429,17 +45429,17 @@ def cuEGLStreamProducerDisconnect(conn): -------- :py:obj:`~.cuEGLStreamProducerConnect`, :py:obj:`~.cuEGLStreamProducerDisconnect`, :py:obj:`~.cuEGLStreamProducerPresentFrame`, :py:obj:`~.cudaEGLStreamProducerDisconnect` """ - cdef ccuda.CUeglStreamConnection *cconn + cdef 
cydriver.CUeglStreamConnection *cyconn if conn is None: - cconn = NULL + cyconn = NULL elif isinstance(conn, (CUeglStreamConnection,)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccuda.cuEGLStreamProducerDisconnect(cconn) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cydriver.cuEGLStreamProducerDisconnect(cyconn) return (CUresult(err),) {{endif}} @@ -45488,27 +45488,27 @@ def cuEGLStreamProducerPresentFrame(conn, eglframe not None : CUeglFrame, pStrea -------- :py:obj:`~.cuEGLStreamProducerConnect`, :py:obj:`~.cuEGLStreamProducerDisconnect`, :py:obj:`~.cuEGLStreamProducerReturnFrame`, :py:obj:`~.cudaEGLStreamProducerPresentFrame` """ - cdef ccuda.CUstream *cpStream + cdef cydriver.CUstream *cypStream if pStream is None: - cpStream = NULL + cypStream = NULL elif isinstance(pStream, (CUstream,)): ppStream = pStream.getPtr() - cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccuda.CUeglStreamConnection *cconn + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cydriver.CUeglStreamConnection *cyconn if conn is None: - cconn = NULL + cyconn = NULL elif isinstance(conn, (CUeglStreamConnection,)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccuda.cuEGLStreamProducerPresentFrame(cconn, eglframe._ptr[0], cpStream) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cydriver.cuEGLStreamProducerPresentFrame(cyconn, eglframe._ptr[0], cypStream) return (CUresult(err),) {{endif}} @@ -45541,28 +45541,28 @@ def cuEGLStreamProducerReturnFrame(conn, eglframe : Optional[CUeglFrame], pStrea -------- :py:obj:`~.cuEGLStreamProducerConnect`, :py:obj:`~.cuEGLStreamProducerDisconnect`, :py:obj:`~.cuEGLStreamProducerPresentFrame`, :py:obj:`~.cudaEGLStreamProducerReturnFrame` """ - cdef ccuda.CUstream *cpStream + cdef cydriver.CUstream *cypStream if pStream is None: - cpStream = NULL + cypStream = NULL elif isinstance(pStream, (CUstream,)): ppStream = pStream.getPtr() - cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccuda.CUeglStreamConnection *cconn + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cydriver.CUeglStreamConnection *cyconn if conn is None: - cconn = NULL + cyconn = NULL elif isinstance(conn, (CUeglStreamConnection,)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - cdef ccuda.CUeglFrame* ceglframe_ptr = eglframe._ptr if eglframe != None else NULL - err = ccuda.cuEGLStreamProducerReturnFrame(cconn, ceglframe_ptr, cpStream) + raise TypeError("Argument 'conn' is not 
instance of type (expected , found " + str(type(conn))) + cdef cydriver.CUeglFrame* cyeglframe_ptr = eglframe._ptr if eglframe != None else NULL + err = cydriver.cuEGLStreamProducerReturnFrame(cyconn, cyeglframe_ptr, cypStream) return (CUresult(err),) {{endif}} @@ -45599,17 +45599,17 @@ def cuGraphicsResourceGetMappedEglFrame(resource, unsigned int index, unsigned i eglFrame : :py:obj:`~.CUeglFrame` None """ - cdef ccuda.CUgraphicsResource cresource + cdef cydriver.CUgraphicsResource cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (CUgraphicsResource,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(CUgraphicsResource(resource)) - cresource = presource + cyresource = presource cdef CUeglFrame eglFrame = CUeglFrame() - err = ccuda.cuGraphicsResourceGetMappedEglFrame(eglFrame._ptr, cresource, index, mipLevel) + err = cydriver.cuGraphicsResourceGetMappedEglFrame(eglFrame._ptr, cyresource, index, mipLevel) return (CUresult(err), eglFrame) {{endif}} @@ -45656,17 +45656,17 @@ def cuEventCreateFromEGLSync(eglSync, unsigned int flags): -------- :py:obj:`~.cuEventQuery`, :py:obj:`~.cuEventSynchronize`, :py:obj:`~.cuEventDestroy` """ - cdef ccuda.EGLSyncKHR ceglSync + cdef cydriver.EGLSyncKHR cyeglSync if eglSync is None: - ceglSync = 0 + cyeglSync = 0 elif isinstance(eglSync, (EGLSyncKHR,)): peglSync = int(eglSync) - ceglSync = peglSync + cyeglSync = peglSync else: peglSync = int(EGLSyncKHR(eglSync)) - ceglSync = peglSync + cyeglSync = peglSync cdef CUevent phEvent = CUevent() - err = ccuda.cuEventCreateFromEGLSync(phEvent._ptr, ceglSync, flags) + err = cydriver.cuEventCreateFromEGLSync(phEvent._ptr, cyeglSync, flags) return (CUresult(err), phEvent) {{endif}} @@ -45711,17 +45711,17 @@ def cuGraphicsGLRegisterBuffer(buffer, unsigned int Flags): -------- :py:obj:`~.cuGraphicsUnregisterResource`, :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cuGraphicsResourceGetMappedPointer`, :py:obj:`~.cudaGraphicsGLRegisterBuffer` """ - cdef ccuda.GLuint cbuffer + cdef cydriver.GLuint cybuffer if buffer is None: - cbuffer = 0 + cybuffer = 0 elif isinstance(buffer, (GLuint,)): pbuffer = int(buffer) - cbuffer = pbuffer + cybuffer = pbuffer else: pbuffer = int(GLuint(buffer)) - cbuffer = pbuffer + cybuffer = pbuffer cdef CUgraphicsResource pCudaResource = CUgraphicsResource() - err = ccuda.cuGraphicsGLRegisterBuffer(pCudaResource._ptr, cbuffer, Flags) + err = cydriver.cuGraphicsGLRegisterBuffer(pCudaResource._ptr, cybuffer, Flags) return (CUresult(err), pCudaResource) {{endif}} @@ -45801,26 +45801,26 @@ def cuGraphicsGLRegisterImage(image, target, unsigned int Flags): -------- :py:obj:`~.cuGraphicsUnregisterResource`, :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray`, :py:obj:`~.cudaGraphicsGLRegisterImage` """ - cdef ccuda.GLenum ctarget + cdef cydriver.GLenum cytarget if target is None: - ctarget = 0 + cytarget = 0 elif isinstance(target, (GLenum,)): ptarget = int(target) - ctarget = ptarget + cytarget = ptarget else: ptarget = int(GLenum(target)) - ctarget = ptarget - cdef ccuda.GLuint cimage + cytarget = ptarget + cdef cydriver.GLuint cyimage if image is None: - cimage = 0 + cyimage = 0 elif isinstance(image, (GLuint,)): pimage = int(image) - cimage = pimage + cyimage = pimage else: pimage = int(GLuint(image)) - cimage = pimage + cyimage = pimage cdef CUgraphicsResource pCudaResource = CUgraphicsResource() - err = ccuda.cuGraphicsGLRegisterImage(pCudaResource._ptr, cimage, ctarget, 
Flags) + err = cydriver.cuGraphicsGLRegisterImage(pCudaResource._ptr, cyimage, cytarget, Flags) return (CUresult(err), pCudaResource) {{endif}} @@ -45875,18 +45875,18 @@ def cuGLGetDevices(unsigned int cudaDeviceCount, deviceList not None : CUGLDevic """ cdef unsigned int pCudaDeviceCount = 0 - cdef ccuda.CUdevice* cpCudaDevices = NULL + cdef cydriver.CUdevice* cypCudaDevices = NULL pypCudaDevices = [] if cudaDeviceCount != 0: - cpCudaDevices = calloc(cudaDeviceCount, sizeof(ccuda.CUdevice)) - if cpCudaDevices is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(cudaDeviceCount) + 'x' + str(sizeof(ccuda.CUdevice))) - cdef ccuda.CUGLDeviceList cdeviceList = deviceList.value - err = ccuda.cuGLGetDevices(&pCudaDeviceCount, cpCudaDevices, cudaDeviceCount, cdeviceList) + cypCudaDevices = calloc(cudaDeviceCount, sizeof(cydriver.CUdevice)) + if cypCudaDevices is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(cudaDeviceCount) + 'x' + str(sizeof(cydriver.CUdevice))) + cdef cydriver.CUGLDeviceList cydeviceList = deviceList.value + err = cydriver.cuGLGetDevices(&pCudaDeviceCount, cypCudaDevices, cudaDeviceCount, cydeviceList) if CUresult(err) == CUresult(0): - pypCudaDevices = [CUdevice(init_value=cpCudaDevices[idx]) for idx in range(cudaDeviceCount)] - if cpCudaDevices is not NULL: - free(cpCudaDevices) + pypCudaDevices = [CUdevice(init_value=cypCudaDevices[idx]) for idx in range(cudaDeviceCount)] + if cypCudaDevices is not NULL: + free(cypCudaDevices) return (CUresult(err), pCudaDeviceCount, pypCudaDevices) {{endif}} @@ -45917,27 +45917,27 @@ def cuVDPAUGetDevice(vdpDevice, vdpGetProcAddress): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuVDPAUCtxCreate`, :py:obj:`~.cuGraphicsVDPAURegisterVideoSurface`, :py:obj:`~.cuGraphicsVDPAURegisterOutputSurface`, :py:obj:`~.cuGraphicsUnregisterResource`, :py:obj:`~.cuGraphicsResourceSetMapFlags`, :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cuGraphicsUnmapResources`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray`, :py:obj:`~.cudaVDPAUGetDevice` """ - cdef ccuda.VdpGetProcAddress *cvdpGetProcAddress + cdef cydriver.VdpGetProcAddress *cyvdpGetProcAddress if vdpGetProcAddress is None: - cvdpGetProcAddress = NULL + cyvdpGetProcAddress = NULL elif isinstance(vdpGetProcAddress, (VdpGetProcAddress,)): pvdpGetProcAddress = vdpGetProcAddress.getPtr() - cvdpGetProcAddress = pvdpGetProcAddress + cyvdpGetProcAddress = pvdpGetProcAddress elif isinstance(vdpGetProcAddress, (int)): - cvdpGetProcAddress = vdpGetProcAddress + cyvdpGetProcAddress = vdpGetProcAddress else: - raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) - cdef ccuda.VdpDevice cvdpDevice + raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) + cdef cydriver.VdpDevice cyvdpDevice if vdpDevice is None: - cvdpDevice = 0 + cyvdpDevice = 0 elif isinstance(vdpDevice, (VdpDevice,)): pvdpDevice = int(vdpDevice) - cvdpDevice = pvdpDevice + cyvdpDevice = pvdpDevice else: pvdpDevice = int(VdpDevice(vdpDevice)) - cvdpDevice = pvdpDevice + cyvdpDevice = pvdpDevice cdef CUdevice pDevice = CUdevice() - err = ccuda.cuVDPAUGetDevice(pDevice._ptr, cvdpDevice, cvdpGetProcAddress) + err = cydriver.cuVDPAUGetDevice(pDevice._ptr, cyvdpDevice, cyvdpGetProcAddress) return (CUresult(err), pDevice) {{endif}} @@ -45975,36 +45975,36 @@ def cuVDPAUCtxCreate(unsigned int flags, device, vdpDevice, vdpGetProcAddress): -------- 
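A sketch of enumerating the CUDA devices behind the current OpenGL context via the renamed wrapper (requires a GL context to be current on the calling thread; `8` is an arbitrary upper bound on the entries returned):

    from cuda.bindings import driver

    err, count, devices = driver.cuGLGetDevices(
        8, driver.CUGLDeviceList.CU_GL_DEVICE_LIST_ALL)
    print(count, [int(d) for d in devices[:count]])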
:py:obj:`~.cuCtxCreate`, :py:obj:`~.cuGraphicsVDPAURegisterVideoSurface`, :py:obj:`~.cuGraphicsVDPAURegisterOutputSurface`, :py:obj:`~.cuGraphicsUnregisterResource`, :py:obj:`~.cuGraphicsResourceSetMapFlags`, :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cuGraphicsUnmapResources`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray`, :py:obj:`~.cuVDPAUGetDevice` """ - cdef ccuda.VdpGetProcAddress *cvdpGetProcAddress + cdef cydriver.VdpGetProcAddress *cyvdpGetProcAddress if vdpGetProcAddress is None: - cvdpGetProcAddress = NULL + cyvdpGetProcAddress = NULL elif isinstance(vdpGetProcAddress, (VdpGetProcAddress,)): pvdpGetProcAddress = vdpGetProcAddress.getPtr() - cvdpGetProcAddress = pvdpGetProcAddress + cyvdpGetProcAddress = pvdpGetProcAddress elif isinstance(vdpGetProcAddress, (int)): - cvdpGetProcAddress = vdpGetProcAddress + cyvdpGetProcAddress = vdpGetProcAddress else: - raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) - cdef ccuda.VdpDevice cvdpDevice + raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) + cdef cydriver.VdpDevice cyvdpDevice if vdpDevice is None: - cvdpDevice = 0 + cyvdpDevice = 0 elif isinstance(vdpDevice, (VdpDevice,)): pvdpDevice = int(vdpDevice) - cvdpDevice = pvdpDevice + cyvdpDevice = pvdpDevice else: pvdpDevice = int(VdpDevice(vdpDevice)) - cvdpDevice = pvdpDevice - cdef ccuda.CUdevice cdevice + cyvdpDevice = pvdpDevice + cdef cydriver.CUdevice cydevice if device is None: - cdevice = 0 + cydevice = 0 elif isinstance(device, (CUdevice,)): pdevice = int(device) - cdevice = pdevice + cydevice = pdevice else: pdevice = int(CUdevice(device)) - cdevice = pdevice + cydevice = pdevice cdef CUcontext pCtx = CUcontext() - err = ccuda.cuVDPAUCtxCreate(pCtx._ptr, flags, cdevice, cvdpDevice, cvdpGetProcAddress) + err = cydriver.cuVDPAUCtxCreate(pCtx._ptr, flags, cydevice, cyvdpDevice, cyvdpGetProcAddress) return (CUresult(err), pCtx) {{endif}} @@ -46055,17 +46055,17 @@ def cuGraphicsVDPAURegisterVideoSurface(vdpSurface, unsigned int flags): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuVDPAUCtxCreate`, :py:obj:`~.cuGraphicsVDPAURegisterOutputSurface`, :py:obj:`~.cuGraphicsUnregisterResource`, :py:obj:`~.cuGraphicsResourceSetMapFlags`, :py:obj:`~.cuGraphicsMapResources`, :py:obj:`~.cuGraphicsUnmapResources`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray`, :py:obj:`~.cuVDPAUGetDevice`, :py:obj:`~.cudaGraphicsVDPAURegisterVideoSurface` """ - cdef ccuda.VdpVideoSurface cvdpSurface + cdef cydriver.VdpVideoSurface cyvdpSurface if vdpSurface is None: - cvdpSurface = 0 + cyvdpSurface = 0 elif isinstance(vdpSurface, (VdpVideoSurface,)): pvdpSurface = int(vdpSurface) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface else: pvdpSurface = int(VdpVideoSurface(vdpSurface)) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface cdef CUgraphicsResource pCudaResource = CUgraphicsResource() - err = ccuda.cuGraphicsVDPAURegisterVideoSurface(pCudaResource._ptr, cvdpSurface, flags) + err = cydriver.cuGraphicsVDPAURegisterVideoSurface(pCudaResource._ptr, cyvdpSurface, flags) return (CUresult(err), pCudaResource) {{endif}} @@ -46116,17 +46116,17 @@ def cuGraphicsVDPAURegisterOutputSurface(vdpSurface, unsigned int flags): -------- :py:obj:`~.cuCtxCreate`, :py:obj:`~.cuVDPAUCtxCreate`, :py:obj:`~.cuGraphicsVDPAURegisterVideoSurface`, :py:obj:`~.cuGraphicsUnregisterResource`, :py:obj:`~.cuGraphicsResourceSetMapFlags`, :py:obj:`~.cuGraphicsMapResources`, 
:py:obj:`~.cuGraphicsUnmapResources`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray`, :py:obj:`~.cuVDPAUGetDevice`, :py:obj:`~.cudaGraphicsVDPAURegisterOutputSurface` """ - cdef ccuda.VdpOutputSurface cvdpSurface + cdef cydriver.VdpOutputSurface cyvdpSurface if vdpSurface is None: - cvdpSurface = 0 + cyvdpSurface = 0 elif isinstance(vdpSurface, (VdpOutputSurface,)): pvdpSurface = int(vdpSurface) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface else: pvdpSurface = int(VdpOutputSurface(vdpSurface)) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface cdef CUgraphicsResource pCudaResource = CUgraphicsResource() - err = ccuda.cuGraphicsVDPAURegisterOutputSurface(pCudaResource._ptr, cvdpSurface, flags) + err = cydriver.cuGraphicsVDPAURegisterOutputSurface(pCudaResource._ptr, cyvdpSurface, flags) return (CUresult(err), pCudaResource) {{endif}} @@ -46146,722 +46146,722 @@ def sizeof(objType): """ {{if 'cuuint32_t' in found_types}} if objType == cuuint32_t: - return sizeof(ccuda.cuuint32_t){{endif}} + return sizeof(cydriver.cuuint32_t){{endif}} {{if 'cuuint64_t' in found_types}} if objType == cuuint64_t: - return sizeof(ccuda.cuuint64_t){{endif}} + return sizeof(cydriver.cuuint64_t){{endif}} {{if 'CUdeviceptr_v2' in found_types}} if objType == CUdeviceptr_v2: - return sizeof(ccuda.CUdeviceptr_v2){{endif}} + return sizeof(cydriver.CUdeviceptr_v2){{endif}} {{if 'CUdeviceptr' in found_types}} if objType == CUdeviceptr: - return sizeof(ccuda.CUdeviceptr){{endif}} + return sizeof(cydriver.CUdeviceptr){{endif}} {{if 'CUdevice_v1' in found_types}} if objType == CUdevice_v1: - return sizeof(ccuda.CUdevice_v1){{endif}} + return sizeof(cydriver.CUdevice_v1){{endif}} {{if 'CUdevice' in found_types}} if objType == CUdevice: - return sizeof(ccuda.CUdevice){{endif}} + return sizeof(cydriver.CUdevice){{endif}} {{if 'CUcontext' in found_types}} if objType == CUcontext: - return sizeof(ccuda.CUcontext){{endif}} + return sizeof(cydriver.CUcontext){{endif}} {{if 'CUmodule' in found_types}} if objType == CUmodule: - return sizeof(ccuda.CUmodule){{endif}} + return sizeof(cydriver.CUmodule){{endif}} {{if 'CUfunction' in found_types}} if objType == CUfunction: - return sizeof(ccuda.CUfunction){{endif}} + return sizeof(cydriver.CUfunction){{endif}} {{if 'CUlibrary' in found_types}} if objType == CUlibrary: - return sizeof(ccuda.CUlibrary){{endif}} + return sizeof(cydriver.CUlibrary){{endif}} {{if 'CUkernel' in found_types}} if objType == CUkernel: - return sizeof(ccuda.CUkernel){{endif}} + return sizeof(cydriver.CUkernel){{endif}} {{if 'CUarray' in found_types}} if objType == CUarray: - return sizeof(ccuda.CUarray){{endif}} + return sizeof(cydriver.CUarray){{endif}} {{if 'CUmipmappedArray' in found_types}} if objType == CUmipmappedArray: - return sizeof(ccuda.CUmipmappedArray){{endif}} + return sizeof(cydriver.CUmipmappedArray){{endif}} {{if 'CUtexref' in found_types}} if objType == CUtexref: - return sizeof(ccuda.CUtexref){{endif}} + return sizeof(cydriver.CUtexref){{endif}} {{if 'CUsurfref' in found_types}} if objType == CUsurfref: - return sizeof(ccuda.CUsurfref){{endif}} + return sizeof(cydriver.CUsurfref){{endif}} {{if 'CUevent' in found_types}} if objType == CUevent: - return sizeof(ccuda.CUevent){{endif}} + return sizeof(cydriver.CUevent){{endif}} {{if 'CUstream' in found_types}} if objType == CUstream: - return sizeof(ccuda.CUstream){{endif}} + return sizeof(cydriver.CUstream){{endif}} {{if 'CUgraphicsResource' in found_types}} if objType == CUgraphicsResource: - return 
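The `sizeof` helper rewritten below simply forwards to the compiled C `sizeof` of the matching `cydriver` type; a sketch of how it is called:

    from cuda.bindings import driver

    print(driver.sizeof(driver.CUdeviceptr))     # pointer-sized, typically 8
    print(driver.sizeof(driver.CUipcMemHandle))  # 64, per CU_IPC_HANDLE_SIZE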
sizeof(ccuda.CUgraphicsResource){{endif}} + return sizeof(cydriver.CUgraphicsResource){{endif}} {{if 'CUtexObject_v1' in found_types}} if objType == CUtexObject_v1: - return sizeof(ccuda.CUtexObject_v1){{endif}} + return sizeof(cydriver.CUtexObject_v1){{endif}} {{if 'CUtexObject' in found_types}} if objType == CUtexObject: - return sizeof(ccuda.CUtexObject){{endif}} + return sizeof(cydriver.CUtexObject){{endif}} {{if 'CUsurfObject_v1' in found_types}} if objType == CUsurfObject_v1: - return sizeof(ccuda.CUsurfObject_v1){{endif}} + return sizeof(cydriver.CUsurfObject_v1){{endif}} {{if 'CUsurfObject' in found_types}} if objType == CUsurfObject: - return sizeof(ccuda.CUsurfObject){{endif}} + return sizeof(cydriver.CUsurfObject){{endif}} {{if 'CUexternalMemory' in found_types}} if objType == CUexternalMemory: - return sizeof(ccuda.CUexternalMemory){{endif}} + return sizeof(cydriver.CUexternalMemory){{endif}} {{if 'CUexternalSemaphore' in found_types}} if objType == CUexternalSemaphore: - return sizeof(ccuda.CUexternalSemaphore){{endif}} + return sizeof(cydriver.CUexternalSemaphore){{endif}} {{if 'CUgraph' in found_types}} if objType == CUgraph: - return sizeof(ccuda.CUgraph){{endif}} + return sizeof(cydriver.CUgraph){{endif}} {{if 'CUgraphNode' in found_types}} if objType == CUgraphNode: - return sizeof(ccuda.CUgraphNode){{endif}} + return sizeof(cydriver.CUgraphNode){{endif}} {{if 'CUgraphExec' in found_types}} if objType == CUgraphExec: - return sizeof(ccuda.CUgraphExec){{endif}} + return sizeof(cydriver.CUgraphExec){{endif}} {{if 'CUmemoryPool' in found_types}} if objType == CUmemoryPool: - return sizeof(ccuda.CUmemoryPool){{endif}} + return sizeof(cydriver.CUmemoryPool){{endif}} {{if 'CUuserObject' in found_types}} if objType == CUuserObject: - return sizeof(ccuda.CUuserObject){{endif}} + return sizeof(cydriver.CUuserObject){{endif}} {{if 'CUgraphConditionalHandle' in found_types}} if objType == CUgraphConditionalHandle: - return sizeof(ccuda.CUgraphConditionalHandle){{endif}} + return sizeof(cydriver.CUgraphConditionalHandle){{endif}} {{if 'CUgraphDeviceNode' in found_types}} if objType == CUgraphDeviceNode: - return sizeof(ccuda.CUgraphDeviceNode){{endif}} + return sizeof(cydriver.CUgraphDeviceNode){{endif}} {{if 'CUasyncCallbackHandle' in found_types}} if objType == CUasyncCallbackHandle: - return sizeof(ccuda.CUasyncCallbackHandle){{endif}} + return sizeof(cydriver.CUasyncCallbackHandle){{endif}} {{if 'CUgreenCtx' in found_types}} if objType == CUgreenCtx: - return sizeof(ccuda.CUgreenCtx){{endif}} + return sizeof(cydriver.CUgreenCtx){{endif}} {{if 'struct CUuuid_st' in found_types}} if objType == CUuuid_st: - return sizeof(ccuda.CUuuid_st){{endif}} + return sizeof(cydriver.CUuuid_st){{endif}} {{if 'CUuuid' in found_types}} if objType == CUuuid: - return sizeof(ccuda.CUuuid){{endif}} + return sizeof(cydriver.CUuuid){{endif}} {{if 'struct CUmemFabricHandle_st' in found_types}} if objType == CUmemFabricHandle_st: - return sizeof(ccuda.CUmemFabricHandle_st){{endif}} + return sizeof(cydriver.CUmemFabricHandle_st){{endif}} {{if 'CUmemFabricHandle_v1' in found_types}} if objType == CUmemFabricHandle_v1: - return sizeof(ccuda.CUmemFabricHandle_v1){{endif}} + return sizeof(cydriver.CUmemFabricHandle_v1){{endif}} {{if 'CUmemFabricHandle' in found_types}} if objType == CUmemFabricHandle: - return sizeof(ccuda.CUmemFabricHandle){{endif}} + return sizeof(cydriver.CUmemFabricHandle){{endif}} {{if 'struct CUipcEventHandle_st' in found_types}} if objType == CUipcEventHandle_st: - return 
sizeof(ccuda.CUipcEventHandle_st){{endif}} + return sizeof(cydriver.CUipcEventHandle_st){{endif}} {{if 'CUipcEventHandle_v1' in found_types}} if objType == CUipcEventHandle_v1: - return sizeof(ccuda.CUipcEventHandle_v1){{endif}} + return sizeof(cydriver.CUipcEventHandle_v1){{endif}} {{if 'CUipcEventHandle' in found_types}} if objType == CUipcEventHandle: - return sizeof(ccuda.CUipcEventHandle){{endif}} + return sizeof(cydriver.CUipcEventHandle){{endif}} {{if 'struct CUipcMemHandle_st' in found_types}} if objType == CUipcMemHandle_st: - return sizeof(ccuda.CUipcMemHandle_st){{endif}} + return sizeof(cydriver.CUipcMemHandle_st){{endif}} {{if 'CUipcMemHandle_v1' in found_types}} if objType == CUipcMemHandle_v1: - return sizeof(ccuda.CUipcMemHandle_v1){{endif}} + return sizeof(cydriver.CUipcMemHandle_v1){{endif}} {{if 'CUipcMemHandle' in found_types}} if objType == CUipcMemHandle: - return sizeof(ccuda.CUipcMemHandle){{endif}} + return sizeof(cydriver.CUipcMemHandle){{endif}} {{if 'union CUstreamBatchMemOpParams_union' in found_types}} if objType == CUstreamBatchMemOpParams_union: - return sizeof(ccuda.CUstreamBatchMemOpParams_union){{endif}} + return sizeof(cydriver.CUstreamBatchMemOpParams_union){{endif}} {{if 'CUstreamBatchMemOpParams_v1' in found_types}} if objType == CUstreamBatchMemOpParams_v1: - return sizeof(ccuda.CUstreamBatchMemOpParams_v1){{endif}} + return sizeof(cydriver.CUstreamBatchMemOpParams_v1){{endif}} {{if 'CUstreamBatchMemOpParams' in found_types}} if objType == CUstreamBatchMemOpParams: - return sizeof(ccuda.CUstreamBatchMemOpParams){{endif}} + return sizeof(cydriver.CUstreamBatchMemOpParams){{endif}} {{if 'struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st' in found_types}} if objType == CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st: - return sizeof(ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st){{endif}} + return sizeof(cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st){{endif}} {{if 'CUDA_BATCH_MEM_OP_NODE_PARAMS_v1' in found_types}} if objType == CUDA_BATCH_MEM_OP_NODE_PARAMS_v1: - return sizeof(ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1){{endif}} {{if 'CUDA_BATCH_MEM_OP_NODE_PARAMS' in found_types}} if objType == CUDA_BATCH_MEM_OP_NODE_PARAMS: - return sizeof(ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS){{endif}} {{if 'struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st' in found_types}} if objType == CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st: - return sizeof(ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st){{endif}} + return sizeof(cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st){{endif}} {{if 'CUDA_BATCH_MEM_OP_NODE_PARAMS_v2' in found_types}} if objType == CUDA_BATCH_MEM_OP_NODE_PARAMS_v2: - return sizeof(ccuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2){{endif}} + return sizeof(cydriver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2){{endif}} {{if 'struct CUasyncNotificationInfo_st' in found_types}} if objType == CUasyncNotificationInfo_st: - return sizeof(ccuda.CUasyncNotificationInfo_st){{endif}} + return sizeof(cydriver.CUasyncNotificationInfo_st){{endif}} {{if 'CUasyncNotificationInfo' in found_types}} if objType == CUasyncNotificationInfo: - return sizeof(ccuda.CUasyncNotificationInfo){{endif}} + return sizeof(cydriver.CUasyncNotificationInfo){{endif}} {{if 'CUasyncCallback' in found_types}} if objType == CUasyncCallback: - return sizeof(ccuda.CUasyncCallback){{endif}} + return sizeof(cydriver.CUasyncCallback){{endif}} {{if 'struct CUdevprop_st' in found_types}} if objType == CUdevprop_st: - return 
sizeof(ccuda.CUdevprop_st){{endif}} + return sizeof(cydriver.CUdevprop_st){{endif}} {{if 'CUdevprop_v1' in found_types}} if objType == CUdevprop_v1: - return sizeof(ccuda.CUdevprop_v1){{endif}} + return sizeof(cydriver.CUdevprop_v1){{endif}} {{if 'CUdevprop' in found_types}} if objType == CUdevprop: - return sizeof(ccuda.CUdevprop){{endif}} + return sizeof(cydriver.CUdevprop){{endif}} {{if 'CUlinkState' in found_types}} if objType == CUlinkState: - return sizeof(ccuda.CUlinkState){{endif}} + return sizeof(cydriver.CUlinkState){{endif}} {{if 'CUhostFn' in found_types}} if objType == CUhostFn: - return sizeof(ccuda.CUhostFn){{endif}} + return sizeof(cydriver.CUhostFn){{endif}} {{if 'struct CUaccessPolicyWindow_st' in found_types}} if objType == CUaccessPolicyWindow_st: - return sizeof(ccuda.CUaccessPolicyWindow_st){{endif}} + return sizeof(cydriver.CUaccessPolicyWindow_st){{endif}} {{if 'CUaccessPolicyWindow_v1' in found_types}} if objType == CUaccessPolicyWindow_v1: - return sizeof(ccuda.CUaccessPolicyWindow_v1){{endif}} + return sizeof(cydriver.CUaccessPolicyWindow_v1){{endif}} {{if 'CUaccessPolicyWindow' in found_types}} if objType == CUaccessPolicyWindow: - return sizeof(ccuda.CUaccessPolicyWindow){{endif}} + return sizeof(cydriver.CUaccessPolicyWindow){{endif}} {{if 'struct CUDA_KERNEL_NODE_PARAMS_st' in found_types}} if objType == CUDA_KERNEL_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_KERNEL_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_KERNEL_NODE_PARAMS_st){{endif}} {{if 'CUDA_KERNEL_NODE_PARAMS_v1' in found_types}} if objType == CUDA_KERNEL_NODE_PARAMS_v1: - return sizeof(ccuda.CUDA_KERNEL_NODE_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_KERNEL_NODE_PARAMS_v1){{endif}} {{if 'struct CUDA_KERNEL_NODE_PARAMS_v2_st' in found_types}} if objType == CUDA_KERNEL_NODE_PARAMS_v2_st: - return sizeof(ccuda.CUDA_KERNEL_NODE_PARAMS_v2_st){{endif}} + return sizeof(cydriver.CUDA_KERNEL_NODE_PARAMS_v2_st){{endif}} {{if 'CUDA_KERNEL_NODE_PARAMS_v2' in found_types}} if objType == CUDA_KERNEL_NODE_PARAMS_v2: - return sizeof(ccuda.CUDA_KERNEL_NODE_PARAMS_v2){{endif}} + return sizeof(cydriver.CUDA_KERNEL_NODE_PARAMS_v2){{endif}} {{if 'CUDA_KERNEL_NODE_PARAMS' in found_types}} if objType == CUDA_KERNEL_NODE_PARAMS: - return sizeof(ccuda.CUDA_KERNEL_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_KERNEL_NODE_PARAMS){{endif}} {{if 'struct CUDA_KERNEL_NODE_PARAMS_v3_st' in found_types}} if objType == CUDA_KERNEL_NODE_PARAMS_v3_st: - return sizeof(ccuda.CUDA_KERNEL_NODE_PARAMS_v3_st){{endif}} + return sizeof(cydriver.CUDA_KERNEL_NODE_PARAMS_v3_st){{endif}} {{if 'CUDA_KERNEL_NODE_PARAMS_v3' in found_types}} if objType == CUDA_KERNEL_NODE_PARAMS_v3: - return sizeof(ccuda.CUDA_KERNEL_NODE_PARAMS_v3){{endif}} + return sizeof(cydriver.CUDA_KERNEL_NODE_PARAMS_v3){{endif}} {{if 'struct CUDA_MEMSET_NODE_PARAMS_st' in found_types}} if objType == CUDA_MEMSET_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_MEMSET_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_MEMSET_NODE_PARAMS_st){{endif}} {{if 'CUDA_MEMSET_NODE_PARAMS_v1' in found_types}} if objType == CUDA_MEMSET_NODE_PARAMS_v1: - return sizeof(ccuda.CUDA_MEMSET_NODE_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_MEMSET_NODE_PARAMS_v1){{endif}} {{if 'CUDA_MEMSET_NODE_PARAMS' in found_types}} if objType == CUDA_MEMSET_NODE_PARAMS: - return sizeof(ccuda.CUDA_MEMSET_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_MEMSET_NODE_PARAMS){{endif}} {{if 'struct CUDA_MEMSET_NODE_PARAMS_v2_st' in found_types}} if objType == 
CUDA_MEMSET_NODE_PARAMS_v2_st: - return sizeof(ccuda.CUDA_MEMSET_NODE_PARAMS_v2_st){{endif}} + return sizeof(cydriver.CUDA_MEMSET_NODE_PARAMS_v2_st){{endif}} {{if 'CUDA_MEMSET_NODE_PARAMS_v2' in found_types}} if objType == CUDA_MEMSET_NODE_PARAMS_v2: - return sizeof(ccuda.CUDA_MEMSET_NODE_PARAMS_v2){{endif}} + return sizeof(cydriver.CUDA_MEMSET_NODE_PARAMS_v2){{endif}} {{if 'struct CUDA_HOST_NODE_PARAMS_st' in found_types}} if objType == CUDA_HOST_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_HOST_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_HOST_NODE_PARAMS_st){{endif}} {{if 'CUDA_HOST_NODE_PARAMS_v1' in found_types}} if objType == CUDA_HOST_NODE_PARAMS_v1: - return sizeof(ccuda.CUDA_HOST_NODE_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_HOST_NODE_PARAMS_v1){{endif}} {{if 'CUDA_HOST_NODE_PARAMS' in found_types}} if objType == CUDA_HOST_NODE_PARAMS: - return sizeof(ccuda.CUDA_HOST_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_HOST_NODE_PARAMS){{endif}} {{if 'struct CUDA_HOST_NODE_PARAMS_v2_st' in found_types}} if objType == CUDA_HOST_NODE_PARAMS_v2_st: - return sizeof(ccuda.CUDA_HOST_NODE_PARAMS_v2_st){{endif}} + return sizeof(cydriver.CUDA_HOST_NODE_PARAMS_v2_st){{endif}} {{if 'CUDA_HOST_NODE_PARAMS_v2' in found_types}} if objType == CUDA_HOST_NODE_PARAMS_v2: - return sizeof(ccuda.CUDA_HOST_NODE_PARAMS_v2){{endif}} + return sizeof(cydriver.CUDA_HOST_NODE_PARAMS_v2){{endif}} {{if 'struct CUDA_CONDITIONAL_NODE_PARAMS' in found_types}} if objType == CUDA_CONDITIONAL_NODE_PARAMS: - return sizeof(ccuda.CUDA_CONDITIONAL_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_CONDITIONAL_NODE_PARAMS){{endif}} {{if 'struct CUgraphEdgeData_st' in found_types}} if objType == CUgraphEdgeData_st: - return sizeof(ccuda.CUgraphEdgeData_st){{endif}} + return sizeof(cydriver.CUgraphEdgeData_st){{endif}} {{if 'CUgraphEdgeData' in found_types}} if objType == CUgraphEdgeData: - return sizeof(ccuda.CUgraphEdgeData){{endif}} + return sizeof(cydriver.CUgraphEdgeData){{endif}} {{if 'struct CUDA_GRAPH_INSTANTIATE_PARAMS_st' in found_types}} if objType == CUDA_GRAPH_INSTANTIATE_PARAMS_st: - return sizeof(ccuda.CUDA_GRAPH_INSTANTIATE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_GRAPH_INSTANTIATE_PARAMS_st){{endif}} {{if 'CUDA_GRAPH_INSTANTIATE_PARAMS' in found_types}} if objType == CUDA_GRAPH_INSTANTIATE_PARAMS: - return sizeof(ccuda.CUDA_GRAPH_INSTANTIATE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_GRAPH_INSTANTIATE_PARAMS){{endif}} {{if 'struct CUlaunchMemSyncDomainMap_st' in found_types}} if objType == CUlaunchMemSyncDomainMap_st: - return sizeof(ccuda.CUlaunchMemSyncDomainMap_st){{endif}} + return sizeof(cydriver.CUlaunchMemSyncDomainMap_st){{endif}} {{if 'CUlaunchMemSyncDomainMap' in found_types}} if objType == CUlaunchMemSyncDomainMap: - return sizeof(ccuda.CUlaunchMemSyncDomainMap){{endif}} + return sizeof(cydriver.CUlaunchMemSyncDomainMap){{endif}} {{if 'union CUlaunchAttributeValue_union' in found_types}} if objType == CUlaunchAttributeValue_union: - return sizeof(ccuda.CUlaunchAttributeValue_union){{endif}} + return sizeof(cydriver.CUlaunchAttributeValue_union){{endif}} {{if 'CUlaunchAttributeValue' in found_types}} if objType == CUlaunchAttributeValue: - return sizeof(ccuda.CUlaunchAttributeValue){{endif}} + return sizeof(cydriver.CUlaunchAttributeValue){{endif}} {{if 'struct CUlaunchAttribute_st' in found_types}} if objType == CUlaunchAttribute_st: - return sizeof(ccuda.CUlaunchAttribute_st){{endif}} + return sizeof(cydriver.CUlaunchAttribute_st){{endif}} {{if 'CUlaunchAttribute' 
in found_types}} if objType == CUlaunchAttribute: - return sizeof(ccuda.CUlaunchAttribute){{endif}} + return sizeof(cydriver.CUlaunchAttribute){{endif}} {{if 'struct CUlaunchConfig_st' in found_types}} if objType == CUlaunchConfig_st: - return sizeof(ccuda.CUlaunchConfig_st){{endif}} + return sizeof(cydriver.CUlaunchConfig_st){{endif}} {{if 'CUlaunchConfig' in found_types}} if objType == CUlaunchConfig: - return sizeof(ccuda.CUlaunchConfig){{endif}} + return sizeof(cydriver.CUlaunchConfig){{endif}} {{if 'CUkernelNodeAttrValue_v1' in found_types}} if objType == CUkernelNodeAttrValue_v1: - return sizeof(ccuda.CUkernelNodeAttrValue_v1){{endif}} + return sizeof(cydriver.CUkernelNodeAttrValue_v1){{endif}} {{if 'CUkernelNodeAttrValue' in found_types}} if objType == CUkernelNodeAttrValue: - return sizeof(ccuda.CUkernelNodeAttrValue){{endif}} + return sizeof(cydriver.CUkernelNodeAttrValue){{endif}} {{if 'CUstreamAttrValue_v1' in found_types}} if objType == CUstreamAttrValue_v1: - return sizeof(ccuda.CUstreamAttrValue_v1){{endif}} + return sizeof(cydriver.CUstreamAttrValue_v1){{endif}} {{if 'CUstreamAttrValue' in found_types}} if objType == CUstreamAttrValue: - return sizeof(ccuda.CUstreamAttrValue){{endif}} + return sizeof(cydriver.CUstreamAttrValue){{endif}} {{if 'struct CUexecAffinitySmCount_st' in found_types}} if objType == CUexecAffinitySmCount_st: - return sizeof(ccuda.CUexecAffinitySmCount_st){{endif}} + return sizeof(cydriver.CUexecAffinitySmCount_st){{endif}} {{if 'CUexecAffinitySmCount_v1' in found_types}} if objType == CUexecAffinitySmCount_v1: - return sizeof(ccuda.CUexecAffinitySmCount_v1){{endif}} + return sizeof(cydriver.CUexecAffinitySmCount_v1){{endif}} {{if 'CUexecAffinitySmCount' in found_types}} if objType == CUexecAffinitySmCount: - return sizeof(ccuda.CUexecAffinitySmCount){{endif}} + return sizeof(cydriver.CUexecAffinitySmCount){{endif}} {{if 'struct CUexecAffinityParam_st' in found_types}} if objType == CUexecAffinityParam_st: - return sizeof(ccuda.CUexecAffinityParam_st){{endif}} + return sizeof(cydriver.CUexecAffinityParam_st){{endif}} {{if 'CUexecAffinityParam_v1' in found_types}} if objType == CUexecAffinityParam_v1: - return sizeof(ccuda.CUexecAffinityParam_v1){{endif}} + return sizeof(cydriver.CUexecAffinityParam_v1){{endif}} {{if 'CUexecAffinityParam' in found_types}} if objType == CUexecAffinityParam: - return sizeof(ccuda.CUexecAffinityParam){{endif}} + return sizeof(cydriver.CUexecAffinityParam){{endif}} {{if 'struct CUctxCigParam_st' in found_types}} if objType == CUctxCigParam_st: - return sizeof(ccuda.CUctxCigParam_st){{endif}} + return sizeof(cydriver.CUctxCigParam_st){{endif}} {{if 'CUctxCigParam' in found_types}} if objType == CUctxCigParam: - return sizeof(ccuda.CUctxCigParam){{endif}} + return sizeof(cydriver.CUctxCigParam){{endif}} {{if 'struct CUctxCreateParams_st' in found_types}} if objType == CUctxCreateParams_st: - return sizeof(ccuda.CUctxCreateParams_st){{endif}} + return sizeof(cydriver.CUctxCreateParams_st){{endif}} {{if 'CUctxCreateParams' in found_types}} if objType == CUctxCreateParams: - return sizeof(ccuda.CUctxCreateParams){{endif}} + return sizeof(cydriver.CUctxCreateParams){{endif}} {{if 'struct CUlibraryHostUniversalFunctionAndDataTable_st' in found_types}} if objType == CUlibraryHostUniversalFunctionAndDataTable_st: - return sizeof(ccuda.CUlibraryHostUniversalFunctionAndDataTable_st){{endif}} + return sizeof(cydriver.CUlibraryHostUniversalFunctionAndDataTable_st){{endif}} {{if 'CUlibraryHostUniversalFunctionAndDataTable' in 
found_types}} if objType == CUlibraryHostUniversalFunctionAndDataTable: - return sizeof(ccuda.CUlibraryHostUniversalFunctionAndDataTable){{endif}} + return sizeof(cydriver.CUlibraryHostUniversalFunctionAndDataTable){{endif}} {{if 'CUstreamCallback' in found_types}} if objType == CUstreamCallback: - return sizeof(ccuda.CUstreamCallback){{endif}} + return sizeof(cydriver.CUstreamCallback){{endif}} {{if 'CUoccupancyB2DSize' in found_types}} if objType == CUoccupancyB2DSize: - return sizeof(ccuda.CUoccupancyB2DSize){{endif}} + return sizeof(cydriver.CUoccupancyB2DSize){{endif}} {{if 'struct CUDA_MEMCPY2D_st' in found_types}} if objType == CUDA_MEMCPY2D_st: - return sizeof(ccuda.CUDA_MEMCPY2D_st){{endif}} + return sizeof(cydriver.CUDA_MEMCPY2D_st){{endif}} {{if 'CUDA_MEMCPY2D_v2' in found_types}} if objType == CUDA_MEMCPY2D_v2: - return sizeof(ccuda.CUDA_MEMCPY2D_v2){{endif}} + return sizeof(cydriver.CUDA_MEMCPY2D_v2){{endif}} {{if 'CUDA_MEMCPY2D' in found_types}} if objType == CUDA_MEMCPY2D: - return sizeof(ccuda.CUDA_MEMCPY2D){{endif}} + return sizeof(cydriver.CUDA_MEMCPY2D){{endif}} {{if 'struct CUDA_MEMCPY3D_st' in found_types}} if objType == CUDA_MEMCPY3D_st: - return sizeof(ccuda.CUDA_MEMCPY3D_st){{endif}} + return sizeof(cydriver.CUDA_MEMCPY3D_st){{endif}} {{if 'CUDA_MEMCPY3D_v2' in found_types}} if objType == CUDA_MEMCPY3D_v2: - return sizeof(ccuda.CUDA_MEMCPY3D_v2){{endif}} + return sizeof(cydriver.CUDA_MEMCPY3D_v2){{endif}} {{if 'CUDA_MEMCPY3D' in found_types}} if objType == CUDA_MEMCPY3D: - return sizeof(ccuda.CUDA_MEMCPY3D){{endif}} + return sizeof(cydriver.CUDA_MEMCPY3D){{endif}} {{if 'struct CUDA_MEMCPY3D_PEER_st' in found_types}} if objType == CUDA_MEMCPY3D_PEER_st: - return sizeof(ccuda.CUDA_MEMCPY3D_PEER_st){{endif}} + return sizeof(cydriver.CUDA_MEMCPY3D_PEER_st){{endif}} {{if 'CUDA_MEMCPY3D_PEER_v1' in found_types}} if objType == CUDA_MEMCPY3D_PEER_v1: - return sizeof(ccuda.CUDA_MEMCPY3D_PEER_v1){{endif}} + return sizeof(cydriver.CUDA_MEMCPY3D_PEER_v1){{endif}} {{if 'CUDA_MEMCPY3D_PEER' in found_types}} if objType == CUDA_MEMCPY3D_PEER: - return sizeof(ccuda.CUDA_MEMCPY3D_PEER){{endif}} + return sizeof(cydriver.CUDA_MEMCPY3D_PEER){{endif}} {{if 'struct CUDA_MEMCPY_NODE_PARAMS_st' in found_types}} if objType == CUDA_MEMCPY_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_MEMCPY_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_MEMCPY_NODE_PARAMS_st){{endif}} {{if 'CUDA_MEMCPY_NODE_PARAMS' in found_types}} if objType == CUDA_MEMCPY_NODE_PARAMS: - return sizeof(ccuda.CUDA_MEMCPY_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_MEMCPY_NODE_PARAMS){{endif}} {{if 'struct CUDA_ARRAY_DESCRIPTOR_st' in found_types}} if objType == CUDA_ARRAY_DESCRIPTOR_st: - return sizeof(ccuda.CUDA_ARRAY_DESCRIPTOR_st){{endif}} + return sizeof(cydriver.CUDA_ARRAY_DESCRIPTOR_st){{endif}} {{if 'CUDA_ARRAY_DESCRIPTOR_v2' in found_types}} if objType == CUDA_ARRAY_DESCRIPTOR_v2: - return sizeof(ccuda.CUDA_ARRAY_DESCRIPTOR_v2){{endif}} + return sizeof(cydriver.CUDA_ARRAY_DESCRIPTOR_v2){{endif}} {{if 'CUDA_ARRAY_DESCRIPTOR' in found_types}} if objType == CUDA_ARRAY_DESCRIPTOR: - return sizeof(ccuda.CUDA_ARRAY_DESCRIPTOR){{endif}} + return sizeof(cydriver.CUDA_ARRAY_DESCRIPTOR){{endif}} {{if 'struct CUDA_ARRAY3D_DESCRIPTOR_st' in found_types}} if objType == CUDA_ARRAY3D_DESCRIPTOR_st: - return sizeof(ccuda.CUDA_ARRAY3D_DESCRIPTOR_st){{endif}} + return sizeof(cydriver.CUDA_ARRAY3D_DESCRIPTOR_st){{endif}} {{if 'CUDA_ARRAY3D_DESCRIPTOR_v2' in found_types}} if objType == CUDA_ARRAY3D_DESCRIPTOR_v2: - return 
sizeof(ccuda.CUDA_ARRAY3D_DESCRIPTOR_v2){{endif}} + return sizeof(cydriver.CUDA_ARRAY3D_DESCRIPTOR_v2){{endif}} {{if 'CUDA_ARRAY3D_DESCRIPTOR' in found_types}} if objType == CUDA_ARRAY3D_DESCRIPTOR: - return sizeof(ccuda.CUDA_ARRAY3D_DESCRIPTOR){{endif}} + return sizeof(cydriver.CUDA_ARRAY3D_DESCRIPTOR){{endif}} {{if 'struct CUDA_ARRAY_SPARSE_PROPERTIES_st' in found_types}} if objType == CUDA_ARRAY_SPARSE_PROPERTIES_st: - return sizeof(ccuda.CUDA_ARRAY_SPARSE_PROPERTIES_st){{endif}} + return sizeof(cydriver.CUDA_ARRAY_SPARSE_PROPERTIES_st){{endif}} {{if 'CUDA_ARRAY_SPARSE_PROPERTIES_v1' in found_types}} if objType == CUDA_ARRAY_SPARSE_PROPERTIES_v1: - return sizeof(ccuda.CUDA_ARRAY_SPARSE_PROPERTIES_v1){{endif}} + return sizeof(cydriver.CUDA_ARRAY_SPARSE_PROPERTIES_v1){{endif}} {{if 'CUDA_ARRAY_SPARSE_PROPERTIES' in found_types}} if objType == CUDA_ARRAY_SPARSE_PROPERTIES: - return sizeof(ccuda.CUDA_ARRAY_SPARSE_PROPERTIES){{endif}} + return sizeof(cydriver.CUDA_ARRAY_SPARSE_PROPERTIES){{endif}} {{if 'struct CUDA_ARRAY_MEMORY_REQUIREMENTS_st' in found_types}} if objType == CUDA_ARRAY_MEMORY_REQUIREMENTS_st: - return sizeof(ccuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_st){{endif}} + return sizeof(cydriver.CUDA_ARRAY_MEMORY_REQUIREMENTS_st){{endif}} {{if 'CUDA_ARRAY_MEMORY_REQUIREMENTS_v1' in found_types}} if objType == CUDA_ARRAY_MEMORY_REQUIREMENTS_v1: - return sizeof(ccuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1){{endif}} + return sizeof(cydriver.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1){{endif}} {{if 'CUDA_ARRAY_MEMORY_REQUIREMENTS' in found_types}} if objType == CUDA_ARRAY_MEMORY_REQUIREMENTS: - return sizeof(ccuda.CUDA_ARRAY_MEMORY_REQUIREMENTS){{endif}} + return sizeof(cydriver.CUDA_ARRAY_MEMORY_REQUIREMENTS){{endif}} {{if 'struct CUDA_RESOURCE_DESC_st' in found_types}} if objType == CUDA_RESOURCE_DESC_st: - return sizeof(ccuda.CUDA_RESOURCE_DESC_st){{endif}} + return sizeof(cydriver.CUDA_RESOURCE_DESC_st){{endif}} {{if 'CUDA_RESOURCE_DESC_v1' in found_types}} if objType == CUDA_RESOURCE_DESC_v1: - return sizeof(ccuda.CUDA_RESOURCE_DESC_v1){{endif}} + return sizeof(cydriver.CUDA_RESOURCE_DESC_v1){{endif}} {{if 'CUDA_RESOURCE_DESC' in found_types}} if objType == CUDA_RESOURCE_DESC: - return sizeof(ccuda.CUDA_RESOURCE_DESC){{endif}} + return sizeof(cydriver.CUDA_RESOURCE_DESC){{endif}} {{if 'struct CUDA_TEXTURE_DESC_st' in found_types}} if objType == CUDA_TEXTURE_DESC_st: - return sizeof(ccuda.CUDA_TEXTURE_DESC_st){{endif}} + return sizeof(cydriver.CUDA_TEXTURE_DESC_st){{endif}} {{if 'CUDA_TEXTURE_DESC_v1' in found_types}} if objType == CUDA_TEXTURE_DESC_v1: - return sizeof(ccuda.CUDA_TEXTURE_DESC_v1){{endif}} + return sizeof(cydriver.CUDA_TEXTURE_DESC_v1){{endif}} {{if 'CUDA_TEXTURE_DESC' in found_types}} if objType == CUDA_TEXTURE_DESC: - return sizeof(ccuda.CUDA_TEXTURE_DESC){{endif}} + return sizeof(cydriver.CUDA_TEXTURE_DESC){{endif}} {{if 'struct CUDA_RESOURCE_VIEW_DESC_st' in found_types}} if objType == CUDA_RESOURCE_VIEW_DESC_st: - return sizeof(ccuda.CUDA_RESOURCE_VIEW_DESC_st){{endif}} + return sizeof(cydriver.CUDA_RESOURCE_VIEW_DESC_st){{endif}} {{if 'CUDA_RESOURCE_VIEW_DESC_v1' in found_types}} if objType == CUDA_RESOURCE_VIEW_DESC_v1: - return sizeof(ccuda.CUDA_RESOURCE_VIEW_DESC_v1){{endif}} + return sizeof(cydriver.CUDA_RESOURCE_VIEW_DESC_v1){{endif}} {{if 'CUDA_RESOURCE_VIEW_DESC' in found_types}} if objType == CUDA_RESOURCE_VIEW_DESC: - return sizeof(ccuda.CUDA_RESOURCE_VIEW_DESC){{endif}} + return sizeof(cydriver.CUDA_RESOURCE_VIEW_DESC){{endif}} {{if 'struct CUtensorMap_st' in 
found_types}} if objType == CUtensorMap_st: - return sizeof(ccuda.CUtensorMap_st){{endif}} + return sizeof(cydriver.CUtensorMap_st){{endif}} {{if 'CUtensorMap' in found_types}} if objType == CUtensorMap: - return sizeof(ccuda.CUtensorMap){{endif}} + return sizeof(cydriver.CUtensorMap){{endif}} {{if 'struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st' in found_types}} if objType == CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st: - return sizeof(ccuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st){{endif}} + return sizeof(cydriver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st){{endif}} {{if 'CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1' in found_types}} if objType == CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1: - return sizeof(ccuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1){{endif}} + return sizeof(cydriver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1){{endif}} {{if 'CUDA_POINTER_ATTRIBUTE_P2P_TOKENS' in found_types}} if objType == CUDA_POINTER_ATTRIBUTE_P2P_TOKENS: - return sizeof(ccuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS){{endif}} + return sizeof(cydriver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS){{endif}} {{if 'struct CUDA_LAUNCH_PARAMS_st' in found_types}} if objType == CUDA_LAUNCH_PARAMS_st: - return sizeof(ccuda.CUDA_LAUNCH_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_LAUNCH_PARAMS_st){{endif}} {{if 'CUDA_LAUNCH_PARAMS_v1' in found_types}} if objType == CUDA_LAUNCH_PARAMS_v1: - return sizeof(ccuda.CUDA_LAUNCH_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_LAUNCH_PARAMS_v1){{endif}} {{if 'CUDA_LAUNCH_PARAMS' in found_types}} if objType == CUDA_LAUNCH_PARAMS: - return sizeof(ccuda.CUDA_LAUNCH_PARAMS){{endif}} + return sizeof(cydriver.CUDA_LAUNCH_PARAMS){{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st){{endif}} {{if 'CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1){{endif}} {{if 'CUDA_EXTERNAL_MEMORY_HANDLE_DESC' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_HANDLE_DESC: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC){{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st){{endif}} {{if 'CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1){{endif}} {{if 'CUDA_EXTERNAL_MEMORY_BUFFER_DESC' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_BUFFER_DESC: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC){{endif}} {{if 'struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st){{endif}} {{if 'CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1: - return 
sizeof(ccuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1){{endif}} {{if 'CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC' in found_types}} if objType == CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC: - return sizeof(ccuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC){{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st){{endif}} {{if 'CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1){{endif}} {{if 'CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC){{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st){{endif}} {{if 'CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1){{endif}} {{if 'CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS){{endif}} {{if 'struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st){{endif}} {{if 'CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1){{endif}} {{if 'CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS' in found_types}} if objType == CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS: - return sizeof(ccuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS){{endif}} + return sizeof(cydriver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS){{endif}} {{if 'struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st' in found_types}} if objType == CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st){{endif}} {{if 'CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1' in found_types}} if objType == CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1: - return sizeof(ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1){{endif}} {{if 'CUDA_EXT_SEM_SIGNAL_NODE_PARAMS' in found_types}} if objType == CUDA_EXT_SEM_SIGNAL_NODE_PARAMS: - return sizeof(ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS){{endif}} {{if 'struct 
CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st' in found_types}} if objType == CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st: - return sizeof(ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st){{endif}} {{if 'CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2' in found_types}} if objType == CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2: - return sizeof(ccuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2){{endif}} {{if 'struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st' in found_types}} if objType == CUDA_EXT_SEM_WAIT_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st){{endif}} {{if 'CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1' in found_types}} if objType == CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1: - return sizeof(ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1){{endif}} {{if 'CUDA_EXT_SEM_WAIT_NODE_PARAMS' in found_types}} if objType == CUDA_EXT_SEM_WAIT_NODE_PARAMS: - return sizeof(ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS){{endif}} {{if 'struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st' in found_types}} if objType == CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st: - return sizeof(ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st){{endif}} {{if 'CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2' in found_types}} if objType == CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2: - return sizeof(ccuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2){{endif}} + return sizeof(cydriver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2){{endif}} {{if 'CUmemGenericAllocationHandle_v1' in found_types}} if objType == CUmemGenericAllocationHandle_v1: - return sizeof(ccuda.CUmemGenericAllocationHandle_v1){{endif}} + return sizeof(cydriver.CUmemGenericAllocationHandle_v1){{endif}} {{if 'CUmemGenericAllocationHandle' in found_types}} if objType == CUmemGenericAllocationHandle: - return sizeof(ccuda.CUmemGenericAllocationHandle){{endif}} + return sizeof(cydriver.CUmemGenericAllocationHandle){{endif}} {{if 'struct CUarrayMapInfo_st' in found_types}} if objType == CUarrayMapInfo_st: - return sizeof(ccuda.CUarrayMapInfo_st){{endif}} + return sizeof(cydriver.CUarrayMapInfo_st){{endif}} {{if 'CUarrayMapInfo_v1' in found_types}} if objType == CUarrayMapInfo_v1: - return sizeof(ccuda.CUarrayMapInfo_v1){{endif}} + return sizeof(cydriver.CUarrayMapInfo_v1){{endif}} {{if 'CUarrayMapInfo' in found_types}} if objType == CUarrayMapInfo: - return sizeof(ccuda.CUarrayMapInfo){{endif}} + return sizeof(cydriver.CUarrayMapInfo){{endif}} {{if 'struct CUmemLocation_st' in found_types}} if objType == CUmemLocation_st: - return sizeof(ccuda.CUmemLocation_st){{endif}} + return sizeof(cydriver.CUmemLocation_st){{endif}} {{if 'CUmemLocation_v1' in found_types}} if objType == CUmemLocation_v1: - return sizeof(ccuda.CUmemLocation_v1){{endif}} + return sizeof(cydriver.CUmemLocation_v1){{endif}} {{if 'CUmemLocation' in found_types}} if objType == CUmemLocation: - return sizeof(ccuda.CUmemLocation){{endif}} + return sizeof(cydriver.CUmemLocation){{endif}} {{if 'struct CUmemAllocationProp_st' in found_types}} if objType == CUmemAllocationProp_st: - return sizeof(ccuda.CUmemAllocationProp_st){{endif}} + return sizeof(cydriver.CUmemAllocationProp_st){{endif}} {{if 'CUmemAllocationProp_v1' in found_types}} if objType == CUmemAllocationProp_v1: - return sizeof(ccuda.CUmemAllocationProp_v1){{endif}} + 
return sizeof(cydriver.CUmemAllocationProp_v1){{endif}} {{if 'CUmemAllocationProp' in found_types}} if objType == CUmemAllocationProp: - return sizeof(ccuda.CUmemAllocationProp){{endif}} + return sizeof(cydriver.CUmemAllocationProp){{endif}} {{if 'struct CUmulticastObjectProp_st' in found_types}} if objType == CUmulticastObjectProp_st: - return sizeof(ccuda.CUmulticastObjectProp_st){{endif}} + return sizeof(cydriver.CUmulticastObjectProp_st){{endif}} {{if 'CUmulticastObjectProp_v1' in found_types}} if objType == CUmulticastObjectProp_v1: - return sizeof(ccuda.CUmulticastObjectProp_v1){{endif}} + return sizeof(cydriver.CUmulticastObjectProp_v1){{endif}} {{if 'CUmulticastObjectProp' in found_types}} if objType == CUmulticastObjectProp: - return sizeof(ccuda.CUmulticastObjectProp){{endif}} + return sizeof(cydriver.CUmulticastObjectProp){{endif}} {{if 'struct CUmemAccessDesc_st' in found_types}} if objType == CUmemAccessDesc_st: - return sizeof(ccuda.CUmemAccessDesc_st){{endif}} + return sizeof(cydriver.CUmemAccessDesc_st){{endif}} {{if 'CUmemAccessDesc_v1' in found_types}} if objType == CUmemAccessDesc_v1: - return sizeof(ccuda.CUmemAccessDesc_v1){{endif}} + return sizeof(cydriver.CUmemAccessDesc_v1){{endif}} {{if 'CUmemAccessDesc' in found_types}} if objType == CUmemAccessDesc: - return sizeof(ccuda.CUmemAccessDesc){{endif}} + return sizeof(cydriver.CUmemAccessDesc){{endif}} {{if 'struct CUgraphExecUpdateResultInfo_st' in found_types}} if objType == CUgraphExecUpdateResultInfo_st: - return sizeof(ccuda.CUgraphExecUpdateResultInfo_st){{endif}} + return sizeof(cydriver.CUgraphExecUpdateResultInfo_st){{endif}} {{if 'CUgraphExecUpdateResultInfo_v1' in found_types}} if objType == CUgraphExecUpdateResultInfo_v1: - return sizeof(ccuda.CUgraphExecUpdateResultInfo_v1){{endif}} + return sizeof(cydriver.CUgraphExecUpdateResultInfo_v1){{endif}} {{if 'CUgraphExecUpdateResultInfo' in found_types}} if objType == CUgraphExecUpdateResultInfo: - return sizeof(ccuda.CUgraphExecUpdateResultInfo){{endif}} + return sizeof(cydriver.CUgraphExecUpdateResultInfo){{endif}} {{if 'struct CUmemPoolProps_st' in found_types}} if objType == CUmemPoolProps_st: - return sizeof(ccuda.CUmemPoolProps_st){{endif}} + return sizeof(cydriver.CUmemPoolProps_st){{endif}} {{if 'CUmemPoolProps_v1' in found_types}} if objType == CUmemPoolProps_v1: - return sizeof(ccuda.CUmemPoolProps_v1){{endif}} + return sizeof(cydriver.CUmemPoolProps_v1){{endif}} {{if 'CUmemPoolProps' in found_types}} if objType == CUmemPoolProps: - return sizeof(ccuda.CUmemPoolProps){{endif}} + return sizeof(cydriver.CUmemPoolProps){{endif}} {{if 'struct CUmemPoolPtrExportData_st' in found_types}} if objType == CUmemPoolPtrExportData_st: - return sizeof(ccuda.CUmemPoolPtrExportData_st){{endif}} + return sizeof(cydriver.CUmemPoolPtrExportData_st){{endif}} {{if 'CUmemPoolPtrExportData_v1' in found_types}} if objType == CUmemPoolPtrExportData_v1: - return sizeof(ccuda.CUmemPoolPtrExportData_v1){{endif}} + return sizeof(cydriver.CUmemPoolPtrExportData_v1){{endif}} {{if 'CUmemPoolPtrExportData' in found_types}} if objType == CUmemPoolPtrExportData: - return sizeof(ccuda.CUmemPoolPtrExportData){{endif}} + return sizeof(cydriver.CUmemPoolPtrExportData){{endif}} {{if 'struct CUDA_MEM_ALLOC_NODE_PARAMS_v1_st' in found_types}} if objType == CUDA_MEM_ALLOC_NODE_PARAMS_v1_st: - return sizeof(ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st){{endif}} + return sizeof(cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st){{endif}} {{if 'CUDA_MEM_ALLOC_NODE_PARAMS_v1' in found_types}} if objType == 
CUDA_MEM_ALLOC_NODE_PARAMS_v1: - return sizeof(ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1){{endif}} + return sizeof(cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v1){{endif}} {{if 'CUDA_MEM_ALLOC_NODE_PARAMS' in found_types}} if objType == CUDA_MEM_ALLOC_NODE_PARAMS: - return sizeof(ccuda.CUDA_MEM_ALLOC_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_MEM_ALLOC_NODE_PARAMS){{endif}} {{if 'struct CUDA_MEM_ALLOC_NODE_PARAMS_v2_st' in found_types}} if objType == CUDA_MEM_ALLOC_NODE_PARAMS_v2_st: - return sizeof(ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st){{endif}} + return sizeof(cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st){{endif}} {{if 'CUDA_MEM_ALLOC_NODE_PARAMS_v2' in found_types}} if objType == CUDA_MEM_ALLOC_NODE_PARAMS_v2: - return sizeof(ccuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2){{endif}} + return sizeof(cydriver.CUDA_MEM_ALLOC_NODE_PARAMS_v2){{endif}} {{if 'struct CUDA_MEM_FREE_NODE_PARAMS_st' in found_types}} if objType == CUDA_MEM_FREE_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_MEM_FREE_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_MEM_FREE_NODE_PARAMS_st){{endif}} {{if 'CUDA_MEM_FREE_NODE_PARAMS' in found_types}} if objType == CUDA_MEM_FREE_NODE_PARAMS: - return sizeof(ccuda.CUDA_MEM_FREE_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_MEM_FREE_NODE_PARAMS){{endif}} {{if 'struct CUDA_CHILD_GRAPH_NODE_PARAMS_st' in found_types}} if objType == CUDA_CHILD_GRAPH_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_CHILD_GRAPH_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_CHILD_GRAPH_NODE_PARAMS_st){{endif}} {{if 'CUDA_CHILD_GRAPH_NODE_PARAMS' in found_types}} if objType == CUDA_CHILD_GRAPH_NODE_PARAMS: - return sizeof(ccuda.CUDA_CHILD_GRAPH_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_CHILD_GRAPH_NODE_PARAMS){{endif}} {{if 'struct CUDA_EVENT_RECORD_NODE_PARAMS_st' in found_types}} if objType == CUDA_EVENT_RECORD_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_EVENT_RECORD_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_EVENT_RECORD_NODE_PARAMS_st){{endif}} {{if 'CUDA_EVENT_RECORD_NODE_PARAMS' in found_types}} if objType == CUDA_EVENT_RECORD_NODE_PARAMS: - return sizeof(ccuda.CUDA_EVENT_RECORD_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_EVENT_RECORD_NODE_PARAMS){{endif}} {{if 'struct CUDA_EVENT_WAIT_NODE_PARAMS_st' in found_types}} if objType == CUDA_EVENT_WAIT_NODE_PARAMS_st: - return sizeof(ccuda.CUDA_EVENT_WAIT_NODE_PARAMS_st){{endif}} + return sizeof(cydriver.CUDA_EVENT_WAIT_NODE_PARAMS_st){{endif}} {{if 'CUDA_EVENT_WAIT_NODE_PARAMS' in found_types}} if objType == CUDA_EVENT_WAIT_NODE_PARAMS: - return sizeof(ccuda.CUDA_EVENT_WAIT_NODE_PARAMS){{endif}} + return sizeof(cydriver.CUDA_EVENT_WAIT_NODE_PARAMS){{endif}} {{if 'struct CUgraphNodeParams_st' in found_types}} if objType == CUgraphNodeParams_st: - return sizeof(ccuda.CUgraphNodeParams_st){{endif}} + return sizeof(cydriver.CUgraphNodeParams_st){{endif}} {{if 'CUgraphNodeParams' in found_types}} if objType == CUgraphNodeParams: - return sizeof(ccuda.CUgraphNodeParams){{endif}} + return sizeof(cydriver.CUgraphNodeParams){{endif}} {{if 'CUdevResourceDesc' in found_types}} if objType == CUdevResourceDesc: - return sizeof(ccuda.CUdevResourceDesc){{endif}} + return sizeof(cydriver.CUdevResourceDesc){{endif}} {{if 'struct CUdevSmResource_st' in found_types}} if objType == CUdevSmResource_st: - return sizeof(ccuda.CUdevSmResource_st){{endif}} + return sizeof(cydriver.CUdevSmResource_st){{endif}} {{if 'CUdevSmResource' in found_types}} if objType == CUdevSmResource: - return sizeof(ccuda.CUdevSmResource){{endif}} + return 
sizeof(cydriver.CUdevSmResource){{endif}} {{if 'struct CUdevResource_st' in found_types}} if objType == CUdevResource_st: - return sizeof(ccuda.CUdevResource_st){{endif}} + return sizeof(cydriver.CUdevResource_st){{endif}} {{if 'struct CUdevResource_st' in found_types}} if objType == CUdevResource_v1: - return sizeof(ccuda.CUdevResource_v1){{endif}} + return sizeof(cydriver.CUdevResource_v1){{endif}} {{if 'struct CUdevResource_st' in found_types}} if objType == CUdevResource: - return sizeof(ccuda.CUdevResource){{endif}} + return sizeof(cydriver.CUdevResource){{endif}} {{if True}} if objType == CUeglFrame_st: - return sizeof(ccuda.CUeglFrame_st){{endif}} + return sizeof(cydriver.CUeglFrame_st){{endif}} {{if True}} if objType == CUeglFrame_v1: - return sizeof(ccuda.CUeglFrame_v1){{endif}} + return sizeof(cydriver.CUeglFrame_v1){{endif}} {{if True}} if objType == CUeglFrame: - return sizeof(ccuda.CUeglFrame){{endif}} + return sizeof(cydriver.CUeglFrame){{endif}} {{if True}} if objType == CUeglStreamConnection: - return sizeof(ccuda.CUeglStreamConnection){{endif}} + return sizeof(cydriver.CUeglStreamConnection){{endif}} {{if True}} if objType == GLenum: - return sizeof(ccuda.GLenum){{endif}} + return sizeof(cydriver.GLenum){{endif}} {{if True}} if objType == GLuint: - return sizeof(ccuda.GLuint){{endif}} + return sizeof(cydriver.GLuint){{endif}} {{if True}} if objType == EGLImageKHR: - return sizeof(ccuda.EGLImageKHR){{endif}} + return sizeof(cydriver.EGLImageKHR){{endif}} {{if True}} if objType == EGLStreamKHR: - return sizeof(ccuda.EGLStreamKHR){{endif}} + return sizeof(cydriver.EGLStreamKHR){{endif}} {{if True}} if objType == EGLint: - return sizeof(ccuda.EGLint){{endif}} + return sizeof(cydriver.EGLint){{endif}} {{if True}} if objType == EGLSyncKHR: - return sizeof(ccuda.EGLSyncKHR){{endif}} + return sizeof(cydriver.EGLSyncKHR){{endif}} {{if True}} if objType == VdpDevice: - return sizeof(ccuda.VdpDevice){{endif}} + return sizeof(cydriver.VdpDevice){{endif}} {{if True}} if objType == VdpGetProcAddress: - return sizeof(ccuda.VdpGetProcAddress){{endif}} + return sizeof(cydriver.VdpGetProcAddress){{endif}} {{if True}} if objType == VdpVideoSurface: - return sizeof(ccuda.VdpVideoSurface){{endif}} + return sizeof(cydriver.VdpVideoSurface){{endif}} {{if True}} if objType == VdpOutputSurface: - return sizeof(ccuda.VdpOutputSurface){{endif}} + return sizeof(cydriver.VdpOutputSurface){{endif}} raise TypeError("Unknown type: " + str(objType)) diff --git a/examples/0_Introduction/clock_nvrtc_test.py b/cuda/cuda/bindings/examples/0_Introduction/clock_nvrtc_test.py similarity index 95% rename from examples/0_Introduction/clock_nvrtc_test.py rename to cuda/cuda/bindings/examples/0_Introduction/clock_nvrtc_test.py index ce13cb52..663cadef 100644 --- a/examples/0_Introduction/clock_nvrtc_test.py +++ b/cuda/cuda/bindings/examples/0_Introduction/clock_nvrtc_test.py @@ -7,8 +7,8 @@ # is strictly prohibited. 
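The dispatcher above is a purely mechanical rename: every branch that returned sizeof(ccuda.X) now returns sizeof(cydriver.X), following the ccuda → cydriver module rename. Assuming the enclosing helper is the module-level sizeof() exposed by the driver bindings (its def line falls outside this hunk, so the name is an assumption), a minimal usage sketch looks like:

    from cuda.bindings import driver

    # Query the C-level size of a driver handle or struct type; an
    # unrecognized type raises TypeError("Unknown type: ..."), matching the
    # final branch of the dispatcher above.
    print(driver.sizeof(driver.CUgraph))
    print(driver.sizeof(driver.CUDA_MEMCPY3D))
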
import numpy as np from cuda import cuda -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDevice +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice clock_nvrtc = '''\ extern "C" __global__ void timedReduction(const float *hinput, float *output, clock_t *timer) diff --git a/examples/0_Introduction/simpleCubemapTexture_test.py b/cuda/cuda/bindings/examples/0_Introduction/simpleCubemapTexture_test.py similarity index 98% rename from examples/0_Introduction/simpleCubemapTexture_test.py rename to cuda/cuda/bindings/examples/0_Introduction/simpleCubemapTexture_test.py index 90933452..dd8f48d2 100644 --- a/examples/0_Introduction/simpleCubemapTexture_test.py +++ b/cuda/cuda/bindings/examples/0_Introduction/simpleCubemapTexture_test.py @@ -11,8 +11,8 @@ import sys import time from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDevice +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice simpleCubemapTexture = '''\ extern "C" diff --git a/examples/0_Introduction/simpleP2P_test.py b/cuda/cuda/bindings/examples/0_Introduction/simpleP2P_test.py similarity index 98% rename from examples/0_Introduction/simpleP2P_test.py rename to cuda/cuda/bindings/examples/0_Introduction/simpleP2P_test.py index b5a2a408..2165851c 100644 --- a/examples/0_Introduction/simpleP2P_test.py +++ b/cuda/cuda/bindings/examples/0_Introduction/simpleP2P_test.py @@ -9,8 +9,8 @@ import numpy as np import sys from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors simplep2p = '''\ extern "C" diff --git a/examples/0_Introduction/simpleZeroCopy_test.py b/cuda/cuda/bindings/examples/0_Introduction/simpleZeroCopy_test.py similarity index 96% rename from examples/0_Introduction/simpleZeroCopy_test.py rename to cuda/cuda/bindings/examples/0_Introduction/simpleZeroCopy_test.py index 79f1ba53..8b85173b 100644 --- a/examples/0_Introduction/simpleZeroCopy_test.py +++ b/cuda/cuda/bindings/examples/0_Introduction/simpleZeroCopy_test.py @@ -11,9 +11,9 @@ import random as rnd import sys from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors -from examples.common.helper_string import checkCmdLineFlag +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors +from cuda.bindings.examples.common.helper_string import checkCmdLineFlag simpleZeroCopy = '''\ extern "C" diff --git a/examples/0_Introduction/systemWideAtomics_test.py b/cuda/cuda/bindings/examples/0_Introduction/systemWideAtomics_test.py similarity index 97% rename from examples/0_Introduction/systemWideAtomics_test.py rename to cuda/cuda/bindings/examples/0_Introduction/systemWideAtomics_test.py index feeb0631..f0fbaf60 100644 --- a/examples/0_Introduction/systemWideAtomics_test.py +++ b/cuda/cuda/bindings/examples/0_Introduction/systemWideAtomics_test.py @@ -10,8 +10,8 @@ import sys import os from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDevice +from cuda.bindings.examples.common import common 
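The example renames in these hunks are equally mechanical: every test that imported from the top-level examples package now imports from cuda.bindings.examples, while `from cuda import cuda` keeps working through the compatibility shims. A minimal sketch of how a relocated test drives its kernel, reusing the clock_nvrtc source string defined in the test above and assuming KernelHelper's getFunction accessor behaves as the example tests use it (only its __init__(code, devID) is visible in the common.py hunk further down):

    from cuda import cuda
    from cuda.bindings.examples.common import common
    from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice

    checkCudaErrors(cuda.cuInit(0))
    dev_id = findCudaDevice()
    # KernelHelper compiles the CUDA C source string through NVRTC for the
    # selected device; getFunction retrieves the loaded kernel by name.
    helper = common.KernelHelper(clock_nvrtc, dev_id)
    kernel = helper.getFunction(b'timedReduction')
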
+from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice systemWideAtomics = '''\ #define LOOP_NUM 50 diff --git a/examples/0_Introduction/vectorAddDrv_test.py b/cuda/cuda/bindings/examples/0_Introduction/vectorAddDrv_test.py similarity index 96% rename from examples/0_Introduction/vectorAddDrv_test.py rename to cuda/cuda/bindings/examples/0_Introduction/vectorAddDrv_test.py index 1661037d..4b854576 100644 --- a/examples/0_Introduction/vectorAddDrv_test.py +++ b/cuda/cuda/bindings/examples/0_Introduction/vectorAddDrv_test.py @@ -9,8 +9,8 @@ import math import numpy as np from cuda import cuda -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDeviceDRV +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDeviceDRV vectorAddDrv = '''\ /* Vector addition: C = A + B. diff --git a/examples/0_Introduction/vectorAddMMAP_test.py b/cuda/cuda/bindings/examples/0_Introduction/vectorAddMMAP_test.py similarity index 98% rename from examples/0_Introduction/vectorAddMMAP_test.py rename to cuda/cuda/bindings/examples/0_Introduction/vectorAddMMAP_test.py index 18e549a0..c9357da7 100644 --- a/examples/0_Introduction/vectorAddMMAP_test.py +++ b/cuda/cuda/bindings/examples/0_Introduction/vectorAddMMAP_test.py @@ -10,8 +10,8 @@ import numpy as np import sys from cuda import cuda -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDeviceDRV +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDeviceDRV vectorAddMMAP = '''\ /* Vector addition: C = A + B. diff --git a/examples/2_Concepts_and_Techniques/streamOrderedAllocation_test.py b/cuda/cuda/bindings/examples/2_Concepts_and_Techniques/streamOrderedAllocation_test.py similarity index 97% rename from examples/2_Concepts_and_Techniques/streamOrderedAllocation_test.py rename to cuda/cuda/bindings/examples/2_Concepts_and_Techniques/streamOrderedAllocation_test.py index 0e589bb8..d1b1a676 100644 --- a/examples/2_Concepts_and_Techniques/streamOrderedAllocation_test.py +++ b/cuda/cuda/bindings/examples/2_Concepts_and_Techniques/streamOrderedAllocation_test.py @@ -11,9 +11,9 @@ import random as rnd import sys from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDevice -from examples.common.helper_string import checkCmdLineFlag +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice +from cuda.bindings.examples.common.helper_string import checkCmdLineFlag streamOrderedAllocation = '''\ /* Add two vectors on the GPU */ diff --git a/examples/3_CUDA_Features/globalToShmemAsyncCopy_test.py b/cuda/cuda/bindings/examples/3_CUDA_Features/globalToShmemAsyncCopy_test.py similarity index 99% rename from examples/3_CUDA_Features/globalToShmemAsyncCopy_test.py rename to cuda/cuda/bindings/examples/3_CUDA_Features/globalToShmemAsyncCopy_test.py index 3fcfdde5..8121a143 100644 --- a/examples/3_CUDA_Features/globalToShmemAsyncCopy_test.py +++ b/cuda/cuda/bindings/examples/3_CUDA_Features/globalToShmemAsyncCopy_test.py @@ -12,9 +12,9 @@ import pytest from cuda import cuda, cudart from enum import Enum -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDevice -from examples.common.helper_string 
import checkCmdLineFlag, getCmdLineArgumentInt +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice +from cuda.bindings.examples.common.helper_string import checkCmdLineFlag, getCmdLineArgumentInt blockSize = 16 class kernels(Enum): diff --git a/examples/3_CUDA_Features/simpleCudaGraphs_test.py b/cuda/cuda/bindings/examples/3_CUDA_Features/simpleCudaGraphs_test.py similarity index 99% rename from examples/3_CUDA_Features/simpleCudaGraphs_test.py rename to cuda/cuda/bindings/examples/3_CUDA_Features/simpleCudaGraphs_test.py index ca654f68..86c1c4c8 100644 --- a/examples/3_CUDA_Features/simpleCudaGraphs_test.py +++ b/cuda/cuda/bindings/examples/3_CUDA_Features/simpleCudaGraphs_test.py @@ -10,8 +10,8 @@ import pytest import random as rnd from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDevice +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice THREADS_PER_BLOCK = 512 GRAPH_LAUNCH_ITERATIONS = 3 diff --git a/examples/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py b/cuda/cuda/bindings/examples/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py similarity index 98% rename from examples/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py rename to cuda/cuda/bindings/examples/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py index f4a849f9..604d7d0d 100644 --- a/examples/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py +++ b/cuda/cuda/bindings/examples/4_CUDA_Libraries/conjugateGradientMultiBlockCG_test.py @@ -10,8 +10,8 @@ import numpy as np import sys from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors, findCudaDevice +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors, findCudaDevice from random import random conjugateGradientMultiBlockCG = '''\ diff --git a/examples/__init__.py b/cuda/cuda/bindings/examples/__init__.py similarity index 100% rename from examples/__init__.py rename to cuda/cuda/bindings/examples/__init__.py diff --git a/examples/common/common.py b/cuda/cuda/bindings/examples/common/common.py similarity index 97% rename from examples/common/common.py rename to cuda/cuda/bindings/examples/common/common.py index 5667982d..63816054 100644 --- a/examples/common/common.py +++ b/cuda/cuda/bindings/examples/common/common.py @@ -9,7 +9,7 @@ import numpy as np import os from cuda import cuda, cudart, nvrtc -from examples.common.helper_cuda import checkCudaErrors +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors class KernelHelper: def __init__(self, code, devID): diff --git a/examples/common/helper_cuda.py b/cuda/cuda/bindings/examples/common/helper_cuda.py similarity index 94% rename from examples/common/helper_cuda.py rename to cuda/cuda/bindings/examples/common/helper_cuda.py index 712c9c78..b38e7a93 100644 --- a/examples/common/helper_cuda.py +++ b/cuda/cuda/bindings/examples/common/helper_cuda.py @@ -6,7 +6,7 @@ # this software and related documentation outside the terms of the EULA # is strictly prohibited. 
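The helper modules themselves change only their intra-package imports, as the helper_cuda.py hunk below shows; their public surface is unchanged. A minimal sketch of the error-checking idiom all of these tests rely on, under the new layout:

    from cuda import cudart
    from cuda.bindings.examples.common.helper_cuda import checkCudaErrors

    # Binding calls return (error, *values) tuples; checkCudaErrors raises on
    # a non-zero error code and hands back the remaining values.
    num_devices = checkCudaErrors(cudart.cudaGetDeviceCount())
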
from cuda import cuda, cudart, nvrtc -from examples.common.helper_string import getCmdLineArgumentInt, checkCmdLineFlag +from cuda.bindings.examples.common.helper_string import getCmdLineArgumentInt, checkCmdLineFlag def _cudaGetErrorEnum(error): if isinstance(error, cuda.CUresult): diff --git a/examples/common/helper_string.py b/cuda/cuda/bindings/examples/common/helper_string.py similarity index 100% rename from examples/common/helper_string.py rename to cuda/cuda/bindings/examples/common/helper_string.py diff --git a/examples/extra/isoFDModelling_test.py b/cuda/cuda/bindings/examples/extra/isoFDModelling_test.py similarity index 99% rename from examples/extra/isoFDModelling_test.py rename to cuda/cuda/bindings/examples/extra/isoFDModelling_test.py index 995c3ec7..73d822c3 100644 --- a/examples/extra/isoFDModelling_test.py +++ b/cuda/cuda/bindings/examples/extra/isoFDModelling_test.py @@ -8,8 +8,8 @@ import numpy as np import time from cuda import cuda, cudart -from examples.common import common -from examples.common.helper_cuda import checkCudaErrors +from cuda.bindings.examples.common import common +from cuda.bindings.examples.common.helper_cuda import checkCudaErrors isoPropagator = '''\ extern "C" diff --git a/examples/extra/jit_program_test.py b/cuda/cuda/bindings/examples/extra/jit_program_test.py similarity index 100% rename from examples/extra/jit_program_test.py rename to cuda/cuda/bindings/examples/extra/jit_program_test.py diff --git a/examples/extra/numba_emm_plugin.py b/cuda/cuda/bindings/examples/extra/numba_emm_plugin.py similarity index 100% rename from examples/extra/numba_emm_plugin.py rename to cuda/cuda/bindings/examples/extra/numba_emm_plugin.py diff --git a/examples/setup.cfg b/cuda/cuda/bindings/examples/setup.cfg similarity index 100% rename from examples/setup.cfg rename to cuda/cuda/bindings/examples/setup.cfg diff --git a/cuda/nvrtc.pxd.in b/cuda/cuda/bindings/nvrtc.pxd.in similarity index 83% rename from cuda/nvrtc.pxd.in rename to cuda/cuda/bindings/nvrtc.pxd.in index 10731987..fdd23e2f 100644 --- a/cuda/nvrtc.pxd.in +++ b/cuda/cuda/bindings/nvrtc.pxd.in @@ -5,8 +5,8 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. -cimport cuda.cnvrtc as cnvrtc -cimport cuda._lib.utils as utils +cimport cuda.bindings.cynvrtc as cynvrtc +cimport cuda.bindings._lib.utils as utils {{if 'nvrtcProgram' in found_types}} @@ -21,6 +21,6 @@ cdef class nvrtcProgram: Get memory address of class instance """ - cdef cnvrtc.nvrtcProgram __val - cdef cnvrtc.nvrtcProgram* _ptr + cdef cynvrtc.nvrtcProgram __val + cdef cynvrtc.nvrtcProgram* _ptr {{endif}} diff --git a/cuda/nvrtc.pyx.in b/cuda/cuda/bindings/nvrtc.pyx.in similarity index 80% rename from cuda/nvrtc.pyx.in rename to cuda/cuda/bindings/nvrtc.pyx.in index 89e28931..73e2adeb 100644 --- a/cuda/nvrtc.pyx.in +++ b/cuda/cuda/bindings/nvrtc.pyx.in @@ -50,31 +50,31 @@ class nvrtcResult(IntEnum): NVRTC API functions return nvrtcResult to indicate the call result. 
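The nvrtc module gets the same treatment: the cimport moves from cuda.cnvrtc to cuda.bindings.cynvrtc and local c-prefixed variables become cy-prefixed, while the Python-level signatures stay untouched. A minimal version probe using the tuple-return convention visible in the hunks below:

    from cuda.bindings import nvrtc

    err, major, minor = nvrtc.nvrtcVersion()
    if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
        # nvrtcGetErrorString itself returns (NVRTC_SUCCESS, message_bytes)
        _, msg = nvrtc.nvrtcGetErrorString(err)
        raise RuntimeError(msg.decode())
    print(f"NVRTC {major}.{minor}")
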
""" {{if 'NVRTC_SUCCESS' in found_values}} - NVRTC_SUCCESS = cnvrtc.nvrtcResult.NVRTC_SUCCESS{{endif}} + NVRTC_SUCCESS = cynvrtc.nvrtcResult.NVRTC_SUCCESS{{endif}} {{if 'NVRTC_ERROR_OUT_OF_MEMORY' in found_values}} - NVRTC_ERROR_OUT_OF_MEMORY = cnvrtc.nvrtcResult.NVRTC_ERROR_OUT_OF_MEMORY{{endif}} + NVRTC_ERROR_OUT_OF_MEMORY = cynvrtc.nvrtcResult.NVRTC_ERROR_OUT_OF_MEMORY{{endif}} {{if 'NVRTC_ERROR_PROGRAM_CREATION_FAILURE' in found_values}} - NVRTC_ERROR_PROGRAM_CREATION_FAILURE = cnvrtc.nvrtcResult.NVRTC_ERROR_PROGRAM_CREATION_FAILURE{{endif}} + NVRTC_ERROR_PROGRAM_CREATION_FAILURE = cynvrtc.nvrtcResult.NVRTC_ERROR_PROGRAM_CREATION_FAILURE{{endif}} {{if 'NVRTC_ERROR_INVALID_INPUT' in found_values}} - NVRTC_ERROR_INVALID_INPUT = cnvrtc.nvrtcResult.NVRTC_ERROR_INVALID_INPUT{{endif}} + NVRTC_ERROR_INVALID_INPUT = cynvrtc.nvrtcResult.NVRTC_ERROR_INVALID_INPUT{{endif}} {{if 'NVRTC_ERROR_INVALID_PROGRAM' in found_values}} - NVRTC_ERROR_INVALID_PROGRAM = cnvrtc.nvrtcResult.NVRTC_ERROR_INVALID_PROGRAM{{endif}} + NVRTC_ERROR_INVALID_PROGRAM = cynvrtc.nvrtcResult.NVRTC_ERROR_INVALID_PROGRAM{{endif}} {{if 'NVRTC_ERROR_INVALID_OPTION' in found_values}} - NVRTC_ERROR_INVALID_OPTION = cnvrtc.nvrtcResult.NVRTC_ERROR_INVALID_OPTION{{endif}} + NVRTC_ERROR_INVALID_OPTION = cynvrtc.nvrtcResult.NVRTC_ERROR_INVALID_OPTION{{endif}} {{if 'NVRTC_ERROR_COMPILATION' in found_values}} - NVRTC_ERROR_COMPILATION = cnvrtc.nvrtcResult.NVRTC_ERROR_COMPILATION{{endif}} + NVRTC_ERROR_COMPILATION = cynvrtc.nvrtcResult.NVRTC_ERROR_COMPILATION{{endif}} {{if 'NVRTC_ERROR_BUILTIN_OPERATION_FAILURE' in found_values}} - NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = cnvrtc.nvrtcResult.NVRTC_ERROR_BUILTIN_OPERATION_FAILURE{{endif}} + NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = cynvrtc.nvrtcResult.NVRTC_ERROR_BUILTIN_OPERATION_FAILURE{{endif}} {{if 'NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION' in found_values}} - NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = cnvrtc.nvrtcResult.NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION{{endif}} + NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = cynvrtc.nvrtcResult.NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION{{endif}} {{if 'NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION' in found_values}} - NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = cnvrtc.nvrtcResult.NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION{{endif}} + NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = cynvrtc.nvrtcResult.NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION{{endif}} {{if 'NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID' in found_values}} - NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = cnvrtc.nvrtcResult.NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID{{endif}} + NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = cynvrtc.nvrtcResult.NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID{{endif}} {{if 'NVRTC_ERROR_INTERNAL_ERROR' in found_values}} - NVRTC_ERROR_INTERNAL_ERROR = cnvrtc.nvrtcResult.NVRTC_ERROR_INTERNAL_ERROR{{endif}} + NVRTC_ERROR_INTERNAL_ERROR = cynvrtc.nvrtcResult.NVRTC_ERROR_INTERNAL_ERROR{{endif}} {{if 'NVRTC_ERROR_TIME_FILE_WRITE_FAILED' in found_values}} - NVRTC_ERROR_TIME_FILE_WRITE_FAILED = cnvrtc.nvrtcResult.NVRTC_ERROR_TIME_FILE_WRITE_FAILED{{endif}} + NVRTC_ERROR_TIME_FILE_WRITE_FAILED = cynvrtc.nvrtcResult.NVRTC_ERROR_TIME_FILE_WRITE_FAILED{{endif}} {{endif}} {{if 'nvrtcProgram' in found_types}} @@ -92,9 +92,9 @@ cdef class nvrtcProgram: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = 
_ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -125,8 +125,8 @@ def nvrtcGetErrorString(result not None : nvrtcResult): bytes Message string for the given :py:obj:`~.nvrtcResult` code. """ - cdef cnvrtc.nvrtcResult cresult = result.value - err = cnvrtc.nvrtcGetErrorString(cresult) + cdef cynvrtc.nvrtcResult cyresult = result.value + err = cynvrtc.nvrtcGetErrorString(cyresult) return (nvrtcResult.NVRTC_SUCCESS, err) {{endif}} @@ -148,7 +148,7 @@ def nvrtcVersion(): """ cdef int major = 0 cdef int minor = 0 - err = cnvrtc.nvrtcVersion(&major, &minor) + err = cynvrtc.nvrtcVersion(&major, &minor) return (nvrtcResult(err), major, minor) {{endif}} @@ -169,7 +169,7 @@ def nvrtcGetNumSupportedArchs(): number of supported architectures. """ cdef int numArchs = 0 - err = cnvrtc.nvrtcGetNumSupportedArchs(&numArchs) + err = cynvrtc.nvrtcGetNumSupportedArchs(&numArchs) return (nvrtcResult(err), numArchs) {{endif}} @@ -193,7 +193,7 @@ def nvrtcGetSupportedArchs(): _, s = nvrtcGetNumSupportedArchs() supportedArchs.resize(s) - err = cnvrtc.nvrtcGetSupportedArchs(supportedArchs.data()) + err = cynvrtc.nvrtcGetSupportedArchs(supportedArchs.data()) return (nvrtcResult(err), supportedArchs) {{endif}} @@ -246,9 +246,9 @@ def nvrtcCreateProgram(char* src, char* name, int numHeaders, headers : Optional cdef nvrtcProgram prog = nvrtcProgram() if numHeaders > len(headers): raise RuntimeError("List is too small: " + str(len(headers)) + " < " + str(numHeaders)) if numHeaders > len(includeNames): raise RuntimeError("List is too small: " + str(len(includeNames)) + " < " + str(numHeaders)) - cdef vector[const char*] cheaders = headers - cdef vector[const char*] cincludeNames = includeNames - err = cnvrtc.nvrtcCreateProgram(prog._ptr, src, name, numHeaders, cheaders.data(), cincludeNames.data()) + cdef vector[const char*] cyheaders = headers + cdef vector[const char*] cyincludeNames = includeNames + err = cynvrtc.nvrtcCreateProgram(prog._ptr, src, name, numHeaders, cyheaders.data(), cyincludeNames.data()) return (nvrtcResult(err), prog) {{endif}} @@ -273,17 +273,17 @@ def nvrtcDestroyProgram(prog): -------- :py:obj:`~.nvrtcCreateProgram` """ - cdef cnvrtc.nvrtcProgram *cprog + cdef cynvrtc.nvrtcProgram *cyprog if prog is None: - cprog = NULL + cyprog = NULL elif isinstance(prog, (nvrtcProgram,)): pprog = prog.getPtr() - cprog = pprog + cyprog = pprog elif isinstance(prog, (int)): - cprog = prog + cyprog = prog else: raise TypeError("Argument 'prog' is not instance of type (expected , found " + str(type(prog))) - err = cnvrtc.nvrtcDestroyProgram(cprog) + err = cynvrtc.nvrtcDestroyProgram(cyprog) return (nvrtcResult(err),) {{endif}} @@ -321,18 +321,18 @@ def nvrtcCompileProgram(prog, int numOptions, options : Optional[Tuple[bytes] | options = [] if options is None else options if not all(isinstance(_x, (bytes)) for _x in options): raise TypeError("Argument 'options' is not instance of type (expected Tuple[bytes] or List[bytes]") - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog if numOptions > len(options): raise RuntimeError("List is too small: " + str(len(options)) + " < " + str(numOptions)) - cdef vector[const char*] coptions = options - err = cnvrtc.nvrtcCompileProgram(cprog, numOptions, coptions.data()) + cdef vector[const char*] cyoptions = options + err = 
cynvrtc.nvrtcCompileProgram(cyprog, numOptions, cyoptions.data()) return (nvrtcResult(err),) {{endif}} @@ -360,17 +360,17 @@ def nvrtcGetPTXSize(prog): -------- :py:obj:`~.nvrtcGetPTX` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog cdef size_t ptxSizeRet = 0 - err = cnvrtc.nvrtcGetPTXSize(cprog, &ptxSizeRet) + err = cynvrtc.nvrtcGetPTXSize(cyprog, &ptxSizeRet) return (nvrtcResult(err), ptxSizeRet) {{endif}} @@ -398,16 +398,16 @@ def nvrtcGetPTX(prog, char* ptx): -------- :py:obj:`~.nvrtcGetPTXSize` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog - err = cnvrtc.nvrtcGetPTX(cprog, ptx) + cyprog = pprog + err = cynvrtc.nvrtcGetPTX(cyprog, ptx) return (nvrtcResult(err),) {{endif}} @@ -435,17 +435,17 @@ def nvrtcGetCUBINSize(prog): -------- :py:obj:`~.nvrtcGetCUBIN` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog cdef size_t cubinSizeRet = 0 - err = cnvrtc.nvrtcGetCUBINSize(cprog, &cubinSizeRet) + err = cynvrtc.nvrtcGetCUBINSize(cyprog, &cubinSizeRet) return (nvrtcResult(err), cubinSizeRet) {{endif}} @@ -473,16 +473,16 @@ def nvrtcGetCUBIN(prog, char* cubin): -------- :py:obj:`~.nvrtcGetCUBINSize` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog - err = cnvrtc.nvrtcGetCUBIN(cprog, cubin) + cyprog = pprog + err = cynvrtc.nvrtcGetCUBIN(cyprog, cubin) return (nvrtcResult(err),) {{endif}} @@ -504,17 +504,17 @@ def nvrtcGetNVVMSize(prog): nvvmSizeRet : int None """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog cdef size_t nvvmSizeRet = 0 - err = cnvrtc.nvrtcGetNVVMSize(cprog, &nvvmSizeRet) + err = cynvrtc.nvrtcGetNVVMSize(cyprog, &nvvmSizeRet) return (nvrtcResult(err), nvvmSizeRet) {{endif}} @@ -536,16 +536,16 @@ def nvrtcGetNVVM(prog, char* nvvm): nvrtcResult """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog - err = cnvrtc.nvrtcGetNVVM(cprog, nvvm) + cyprog = pprog + err = cynvrtc.nvrtcGetNVVM(cyprog, nvvm) return (nvrtcResult(err),) {{endif}} @@ -573,17 +573,17 @@ def nvrtcGetLTOIRSize(prog): -------- :py:obj:`~.nvrtcGetLTOIR` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog cdef size_t 
LTOIRSizeRet = 0 - err = cnvrtc.nvrtcGetLTOIRSize(cprog, &LTOIRSizeRet) + err = cynvrtc.nvrtcGetLTOIRSize(cyprog, &LTOIRSizeRet) return (nvrtcResult(err), LTOIRSizeRet) {{endif}} @@ -611,16 +611,16 @@ def nvrtcGetLTOIR(prog, char* LTOIR): -------- :py:obj:`~.nvrtcGetLTOIRSize` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog - err = cnvrtc.nvrtcGetLTOIR(cprog, LTOIR) + cyprog = pprog + err = cynvrtc.nvrtcGetLTOIR(cyprog, LTOIR) return (nvrtcResult(err),) {{endif}} @@ -648,17 +648,17 @@ def nvrtcGetOptiXIRSize(prog): -------- :py:obj:`~.nvrtcGetOptiXIR` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog cdef size_t optixirSizeRet = 0 - err = cnvrtc.nvrtcGetOptiXIRSize(cprog, &optixirSizeRet) + err = cynvrtc.nvrtcGetOptiXIRSize(cyprog, &optixirSizeRet) return (nvrtcResult(err), optixirSizeRet) {{endif}} @@ -686,16 +686,16 @@ def nvrtcGetOptiXIR(prog, char* optixir): -------- :py:obj:`~.nvrtcGetOptiXIRSize` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog - err = cnvrtc.nvrtcGetOptiXIR(cprog, optixir) + cyprog = pprog + err = cynvrtc.nvrtcGetOptiXIR(cyprog, optixir) return (nvrtcResult(err),) {{endif}} @@ -726,17 +726,17 @@ def nvrtcGetProgramLogSize(prog): -------- :py:obj:`~.nvrtcGetProgramLog` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog cdef size_t logSizeRet = 0 - err = cnvrtc.nvrtcGetProgramLogSize(cprog, &logSizeRet) + err = cynvrtc.nvrtcGetProgramLogSize(cyprog, &logSizeRet) return (nvrtcResult(err), logSizeRet) {{endif}} @@ -764,16 +764,16 @@ def nvrtcGetProgramLog(prog, char* log): -------- :py:obj:`~.nvrtcGetProgramLogSize` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog - err = cnvrtc.nvrtcGetProgramLog(cprog, log) + cyprog = pprog + err = cynvrtc.nvrtcGetProgramLog(cyprog, log) return (nvrtcResult(err),) {{endif}} @@ -806,16 +806,16 @@ def nvrtcAddNameExpression(prog, char* name_expression): -------- :py:obj:`~.nvrtcGetLoweredName` """ - cdef cnvrtc.nvrtcProgram cprog + cdef cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog - err = cnvrtc.nvrtcAddNameExpression(cprog, name_expression) + cyprog = pprog + err = cynvrtc.nvrtcAddNameExpression(cyprog, name_expression) return (nvrtcResult(err),) {{endif}} @@ -848,17 +848,17 @@ def nvrtcGetLoweredName(prog, char* name_expression): -------- nvrtcAddNameExpression """ - cdef cnvrtc.nvrtcProgram cprog + cdef 
cynvrtc.nvrtcProgram cyprog if prog is None: - cprog = 0 + cyprog = 0 elif isinstance(prog, (nvrtcProgram,)): pprog = int(prog) - cprog = pprog + cyprog = pprog else: pprog = int(nvrtcProgram(prog)) - cprog = pprog + cyprog = pprog cdef const char* lowered_name = NULL - err = cnvrtc.nvrtcGetLoweredName(cprog, name_expression, &lowered_name) + err = cynvrtc.nvrtcGetLoweredName(cyprog, name_expression, &lowered_name) return (nvrtcResult(err), lowered_name) {{endif}} @@ -878,5 +878,5 @@ def sizeof(objType): """ {{if 'nvrtcProgram' in found_types}} if objType == nvrtcProgram: - return sizeof(cnvrtc.nvrtcProgram){{endif}} + return sizeof(cynvrtc.nvrtcProgram){{endif}} raise TypeError("Unknown type: " + str(objType)) diff --git a/cuda/cudart.pxd.in b/cuda/cuda/bindings/runtime.pxd.in similarity index 89% rename from cuda/cudart.pxd.in rename to cuda/cuda/bindings/runtime.pxd.in index 9db3286a..a3727e18 100644 --- a/cuda/cudart.pxd.in +++ b/cuda/cuda/bindings/runtime.pxd.in @@ -5,9 +5,9 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. -cimport cuda.ccudart as ccudart -cimport cuda._lib.utils as utils -cimport cuda.cuda as cuda +cimport cuda.bindings.cyruntime as cyruntime +cimport cuda.bindings._lib.utils as utils +cimport cuda.bindings.driver as driver {{if 'cudaArray_t' in found_types}} @@ -22,8 +22,8 @@ cdef class cudaArray_t: Get memory address of class instance """ - cdef ccudart.cudaArray_t __val - cdef ccudart.cudaArray_t* _ptr + cdef cyruntime.cudaArray_t __val + cdef cyruntime.cudaArray_t* _ptr {{endif}} {{if 'cudaArray_const_t' in found_types}} @@ -39,8 +39,8 @@ cdef class cudaArray_const_t: Get memory address of class instance """ - cdef ccudart.cudaArray_const_t __val - cdef ccudart.cudaArray_const_t* _ptr + cdef cyruntime.cudaArray_const_t __val + cdef cyruntime.cudaArray_const_t* _ptr {{endif}} {{if 'cudaMipmappedArray_t' in found_types}} @@ -56,8 +56,8 @@ cdef class cudaMipmappedArray_t: Get memory address of class instance """ - cdef ccudart.cudaMipmappedArray_t __val - cdef ccudart.cudaMipmappedArray_t* _ptr + cdef cyruntime.cudaMipmappedArray_t __val + cdef cyruntime.cudaMipmappedArray_t* _ptr {{endif}} {{if 'cudaMipmappedArray_const_t' in found_types}} @@ -73,8 +73,8 @@ cdef class cudaMipmappedArray_const_t: Get memory address of class instance """ - cdef ccudart.cudaMipmappedArray_const_t __val - cdef ccudart.cudaMipmappedArray_const_t* _ptr + cdef cyruntime.cudaMipmappedArray_const_t __val + cdef cyruntime.cudaMipmappedArray_const_t* _ptr {{endif}} {{if 'cudaGraphicsResource_t' in found_types}} @@ -90,8 +90,8 @@ cdef class cudaGraphicsResource_t: Get memory address of class instance """ - cdef ccudart.cudaGraphicsResource_t __val - cdef ccudart.cudaGraphicsResource_t* _ptr + cdef cyruntime.cudaGraphicsResource_t __val + cdef cyruntime.cudaGraphicsResource_t* _ptr {{endif}} {{if 'cudaExternalMemory_t' in found_types}} @@ -107,8 +107,8 @@ cdef class cudaExternalMemory_t: Get memory address of class instance """ - cdef ccudart.cudaExternalMemory_t __val - cdef ccudart.cudaExternalMemory_t* _ptr + cdef cyruntime.cudaExternalMemory_t __val + cdef cyruntime.cudaExternalMemory_t* _ptr {{endif}} {{if 'cudaExternalSemaphore_t' in found_types}} @@ -124,8 +124,8 @@ cdef class cudaExternalSemaphore_t: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphore_t __val - cdef ccudart.cudaExternalSemaphore_t* _ptr + cdef 
cyruntime.cudaExternalSemaphore_t __val + cdef cyruntime.cudaExternalSemaphore_t* _ptr {{endif}} {{if 'cudaKernel_t' in found_types}} @@ -141,8 +141,8 @@ cdef class cudaKernel_t: Get memory address of class instance """ - cdef ccudart.cudaKernel_t __val - cdef ccudart.cudaKernel_t* _ptr + cdef cyruntime.cudaKernel_t __val + cdef cyruntime.cudaKernel_t* _ptr {{endif}} {{if 'cudaGraphDeviceNode_t' in found_types}} @@ -158,8 +158,8 @@ cdef class cudaGraphDeviceNode_t: Get memory address of class instance """ - cdef ccudart.cudaGraphDeviceNode_t __val - cdef ccudart.cudaGraphDeviceNode_t* _ptr + cdef cyruntime.cudaGraphDeviceNode_t __val + cdef cyruntime.cudaGraphDeviceNode_t* _ptr {{endif}} {{if 'cudaAsyncCallbackHandle_t' in found_types}} @@ -175,8 +175,8 @@ cdef class cudaAsyncCallbackHandle_t: Get memory address of class instance """ - cdef ccudart.cudaAsyncCallbackHandle_t __val - cdef ccudart.cudaAsyncCallbackHandle_t* _ptr + cdef cyruntime.cudaAsyncCallbackHandle_t __val + cdef cyruntime.cudaAsyncCallbackHandle_t* _ptr {{endif}} {{if True}} @@ -190,8 +190,8 @@ cdef class EGLImageKHR: Get memory address of class instance """ - cdef ccudart.EGLImageKHR __val - cdef ccudart.EGLImageKHR* _ptr + cdef cyruntime.EGLImageKHR __val + cdef cyruntime.EGLImageKHR* _ptr {{endif}} {{if True}} @@ -205,8 +205,8 @@ cdef class EGLStreamKHR: Get memory address of class instance """ - cdef ccudart.EGLStreamKHR __val - cdef ccudart.EGLStreamKHR* _ptr + cdef cyruntime.EGLStreamKHR __val + cdef cyruntime.EGLStreamKHR* _ptr {{endif}} {{if True}} @@ -220,8 +220,8 @@ cdef class EGLSyncKHR: Get memory address of class instance """ - cdef ccudart.EGLSyncKHR __val - cdef ccudart.EGLSyncKHR* _ptr + cdef cyruntime.EGLSyncKHR __val + cdef cyruntime.EGLSyncKHR* _ptr {{endif}} {{if 'cudaHostFn_t' in found_types}} @@ -235,8 +235,8 @@ cdef class cudaHostFn_t: Get memory address of class instance """ - cdef ccudart.cudaHostFn_t __val - cdef ccudart.cudaHostFn_t* _ptr + cdef cyruntime.cudaHostFn_t __val + cdef cyruntime.cudaHostFn_t* _ptr {{endif}} {{if 'cudaAsyncCallback' in found_types}} @@ -250,8 +250,8 @@ cdef class cudaAsyncCallback: Get memory address of class instance """ - cdef ccudart.cudaAsyncCallback __val - cdef ccudart.cudaAsyncCallback* _ptr + cdef cyruntime.cudaAsyncCallback __val + cdef cyruntime.cudaAsyncCallback* _ptr {{endif}} {{if 'cudaStreamCallback_t' in found_types}} @@ -265,8 +265,8 @@ cdef class cudaStreamCallback_t: Get memory address of class instance """ - cdef ccudart.cudaStreamCallback_t __val - cdef ccudart.cudaStreamCallback_t* _ptr + cdef cyruntime.cudaStreamCallback_t __val + cdef cyruntime.cudaStreamCallback_t* _ptr {{endif}} {{if 'struct dim3' in found_types}} @@ -288,8 +288,8 @@ cdef class dim3: Get memory address of class instance """ - cdef ccudart.dim3 __val - cdef ccudart.dim3* _ptr + cdef cyruntime.dim3 __val + cdef cyruntime.dim3* _ptr {{endif}} {{if 'struct cudaChannelFormatDesc' in found_types}} @@ -316,8 +316,8 @@ cdef class cudaChannelFormatDesc: Get memory address of class instance """ - cdef ccudart.cudaChannelFormatDesc __val - cdef ccudart.cudaChannelFormatDesc* _ptr + cdef cyruntime.cudaChannelFormatDesc __val + cdef cyruntime.cudaChannelFormatDesc* _ptr {{endif}} {{if 'struct cudaArraySparseProperties' in found_types}} @@ -338,7 +338,7 @@ cdef class anon_struct0: Get memory address of class instance """ - cdef ccudart.cudaArraySparseProperties* _ptr + cdef cyruntime.cudaArraySparseProperties* _ptr {{endif}} {{if 'struct cudaArraySparseProperties' in found_types}} @@ 
-365,8 +365,8 @@ cdef class cudaArraySparseProperties: Get memory address of class instance """ - cdef ccudart.cudaArraySparseProperties __val - cdef ccudart.cudaArraySparseProperties* _ptr + cdef cyruntime.cudaArraySparseProperties __val + cdef cyruntime.cudaArraySparseProperties* _ptr cdef anon_struct0 _tileExtent {{endif}} {{if 'struct cudaArrayMemoryRequirements' in found_types}} @@ -390,8 +390,8 @@ cdef class cudaArrayMemoryRequirements: Get memory address of class instance """ - cdef ccudart.cudaArrayMemoryRequirements __val - cdef ccudart.cudaArrayMemoryRequirements* _ptr + cdef cyruntime.cudaArrayMemoryRequirements __val + cdef cyruntime.cudaArrayMemoryRequirements* _ptr {{endif}} {{if 'struct cudaPitchedPtr' in found_types}} @@ -416,8 +416,8 @@ cdef class cudaPitchedPtr: Get memory address of class instance """ - cdef ccudart.cudaPitchedPtr __val - cdef ccudart.cudaPitchedPtr* _ptr + cdef cyruntime.cudaPitchedPtr __val + cdef cyruntime.cudaPitchedPtr* _ptr {{endif}} {{if 'struct cudaExtent' in found_types}} @@ -441,8 +441,8 @@ cdef class cudaExtent: Get memory address of class instance """ - cdef ccudart.cudaExtent __val - cdef ccudart.cudaExtent* _ptr + cdef cyruntime.cudaExtent __val + cdef cyruntime.cudaExtent* _ptr {{endif}} {{if 'struct cudaPos' in found_types}} @@ -465,8 +465,8 @@ cdef class cudaPos: Get memory address of class instance """ - cdef ccudart.cudaPos __val - cdef ccudart.cudaPos* _ptr + cdef cyruntime.cudaPos __val + cdef cyruntime.cudaPos* _ptr {{endif}} {{if 'struct cudaMemcpy3DParms' in found_types}} @@ -499,8 +499,8 @@ cdef class cudaMemcpy3DParms: Get memory address of class instance """ - cdef ccudart.cudaMemcpy3DParms __val - cdef ccudart.cudaMemcpy3DParms* _ptr + cdef cyruntime.cudaMemcpy3DParms __val + cdef cyruntime.cudaMemcpy3DParms* _ptr cdef cudaArray_t _srcArray cdef cudaPos _srcPos cdef cudaPitchedPtr _srcPtr @@ -530,8 +530,8 @@ cdef class cudaMemcpyNodeParams: Get memory address of class instance """ - cdef ccudart.cudaMemcpyNodeParams __val - cdef ccudart.cudaMemcpyNodeParams* _ptr + cdef cyruntime.cudaMemcpyNodeParams __val + cdef cyruntime.cudaMemcpyNodeParams* _ptr cdef cudaMemcpy3DParms _copyParams {{endif}} {{if 'struct cudaMemcpy3DPeerParms' in found_types}} @@ -567,8 +567,8 @@ cdef class cudaMemcpy3DPeerParms: Get memory address of class instance """ - cdef ccudart.cudaMemcpy3DPeerParms __val - cdef ccudart.cudaMemcpy3DPeerParms* _ptr + cdef cyruntime.cudaMemcpy3DPeerParms __val + cdef cyruntime.cudaMemcpy3DPeerParms* _ptr cdef cudaArray_t _srcArray cdef cudaPos _srcPos cdef cudaPitchedPtr _srcPtr @@ -604,8 +604,8 @@ cdef class cudaMemsetParams: Get memory address of class instance """ - cdef ccudart.cudaMemsetParams __val - cdef ccudart.cudaMemsetParams* _ptr + cdef cyruntime.cudaMemsetParams __val + cdef cyruntime.cudaMemsetParams* _ptr {{endif}} {{if 'struct cudaMemsetParamsV2' in found_types}} @@ -634,8 +634,8 @@ cdef class cudaMemsetParamsV2: Get memory address of class instance """ - cdef ccudart.cudaMemsetParamsV2 __val - cdef ccudart.cudaMemsetParamsV2* _ptr + cdef cyruntime.cudaMemsetParamsV2 __val + cdef cyruntime.cudaMemsetParamsV2* _ptr {{endif}} {{if 'struct cudaAccessPolicyWindow' in found_types}} @@ -673,8 +673,8 @@ cdef class cudaAccessPolicyWindow: Get memory address of class instance """ - cdef ccudart.cudaAccessPolicyWindow __val - cdef ccudart.cudaAccessPolicyWindow* _ptr + cdef cyruntime.cudaAccessPolicyWindow __val + cdef cyruntime.cudaAccessPolicyWindow* _ptr {{endif}} {{if 'struct cudaHostNodeParams' in 
found_types}} @@ -695,8 +695,8 @@ cdef class cudaHostNodeParams: Get memory address of class instance """ - cdef ccudart.cudaHostNodeParams __val - cdef ccudart.cudaHostNodeParams* _ptr + cdef cyruntime.cudaHostNodeParams __val + cdef cyruntime.cudaHostNodeParams* _ptr cdef cudaHostFn_t _fn {{endif}} {{if 'struct cudaHostNodeParamsV2' in found_types}} @@ -718,8 +718,8 @@ cdef class cudaHostNodeParamsV2: Get memory address of class instance """ - cdef ccudart.cudaHostNodeParamsV2 __val - cdef ccudart.cudaHostNodeParamsV2* _ptr + cdef cyruntime.cudaHostNodeParamsV2 __val + cdef cyruntime.cudaHostNodeParamsV2* _ptr cdef cudaHostFn_t _fn {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -737,7 +737,7 @@ cdef class anon_struct1: Get memory address of class instance """ - cdef ccudart.cudaResourceDesc* _ptr + cdef cyruntime.cudaResourceDesc* _ptr cdef cudaArray_t _array {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -755,7 +755,7 @@ cdef class anon_struct2: Get memory address of class instance """ - cdef ccudart.cudaResourceDesc* _ptr + cdef cyruntime.cudaResourceDesc* _ptr cdef cudaMipmappedArray_t _mipmap {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -777,7 +777,7 @@ cdef class anon_struct3: Get memory address of class instance """ - cdef ccudart.cudaResourceDesc* _ptr + cdef cyruntime.cudaResourceDesc* _ptr cdef cudaChannelFormatDesc _desc {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -803,7 +803,7 @@ cdef class anon_struct4: Get memory address of class instance """ - cdef ccudart.cudaResourceDesc* _ptr + cdef cyruntime.cudaResourceDesc* _ptr cdef cudaChannelFormatDesc _desc {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -827,7 +827,7 @@ cdef class anon_union0: Get memory address of class instance """ - cdef ccudart.cudaResourceDesc* _ptr + cdef cyruntime.cudaResourceDesc* _ptr cdef anon_struct1 _array cdef anon_struct2 _mipmap cdef anon_struct3 _linear @@ -852,8 +852,8 @@ cdef class cudaResourceDesc: Get memory address of class instance """ - cdef ccudart.cudaResourceDesc* _val_ptr - cdef ccudart.cudaResourceDesc* _ptr + cdef cyruntime.cudaResourceDesc* _val_ptr + cdef cyruntime.cudaResourceDesc* _ptr cdef anon_union0 _res {{endif}} {{if 'struct cudaResourceViewDesc' in found_types}} @@ -887,8 +887,8 @@ cdef class cudaResourceViewDesc: Get memory address of class instance """ - cdef ccudart.cudaResourceViewDesc __val - cdef ccudart.cudaResourceViewDesc* _ptr + cdef cyruntime.cudaResourceViewDesc __val + cdef cyruntime.cudaResourceViewDesc* _ptr {{endif}} {{if 'struct cudaPointerAttributes' in found_types}} @@ -924,8 +924,8 @@ cdef class cudaPointerAttributes: Get memory address of class instance """ - cdef ccudart.cudaPointerAttributes __val - cdef ccudart.cudaPointerAttributes* _ptr + cdef cyruntime.cudaPointerAttributes __val + cdef cyruntime.cudaPointerAttributes* _ptr {{endif}} {{if 'struct cudaFuncAttributes' in found_types}} @@ -1013,8 +1013,8 @@ cdef class cudaFuncAttributes: Get memory address of class instance """ - cdef ccudart.cudaFuncAttributes __val - cdef ccudart.cudaFuncAttributes* _ptr + cdef cyruntime.cudaFuncAttributes __val + cdef cyruntime.cudaFuncAttributes* _ptr {{endif}} {{if 'struct cudaMemLocation' in found_types}} @@ -1038,8 +1038,8 @@ cdef class cudaMemLocation: Get memory address of class instance """ - cdef ccudart.cudaMemLocation __val - cdef ccudart.cudaMemLocation* _ptr + cdef cyruntime.cudaMemLocation __val + cdef cyruntime.cudaMemLocation* _ptr {{endif}} {{if 'struct cudaMemAccessDesc' in 
found_types}} @@ -1060,8 +1060,8 @@ cdef class cudaMemAccessDesc: Get memory address of class instance """ - cdef ccudart.cudaMemAccessDesc __val - cdef ccudart.cudaMemAccessDesc* _ptr + cdef cyruntime.cudaMemAccessDesc __val + cdef cyruntime.cudaMemAccessDesc* _ptr cdef cudaMemLocation _location {{endif}} {{if 'struct cudaMemPoolProps' in found_types}} @@ -1099,8 +1099,8 @@ cdef class cudaMemPoolProps: Get memory address of class instance """ - cdef ccudart.cudaMemPoolProps __val - cdef ccudart.cudaMemPoolProps* _ptr + cdef cyruntime.cudaMemPoolProps __val + cdef cyruntime.cudaMemPoolProps* _ptr cdef cudaMemLocation _location {{endif}} {{if 'struct cudaMemPoolPtrExportData' in found_types}} @@ -1120,8 +1120,8 @@ cdef class cudaMemPoolPtrExportData: Get memory address of class instance """ - cdef ccudart.cudaMemPoolPtrExportData __val - cdef ccudart.cudaMemPoolPtrExportData* _ptr + cdef cyruntime.cudaMemPoolPtrExportData __val + cdef cyruntime.cudaMemPoolPtrExportData* _ptr {{endif}} {{if 'struct cudaMemAllocNodeParams' in found_types}} @@ -1152,11 +1152,11 @@ cdef class cudaMemAllocNodeParams: Get memory address of class instance """ - cdef ccudart.cudaMemAllocNodeParams __val - cdef ccudart.cudaMemAllocNodeParams* _ptr + cdef cyruntime.cudaMemAllocNodeParams __val + cdef cyruntime.cudaMemAllocNodeParams* _ptr cdef cudaMemPoolProps _poolProps cdef size_t _accessDescs_length - cdef ccudart.cudaMemAccessDesc* _accessDescs + cdef cyruntime.cudaMemAccessDesc* _accessDescs {{endif}} {{if 'struct cudaMemAllocNodeParamsV2' in found_types}} @@ -1188,11 +1188,11 @@ cdef class cudaMemAllocNodeParamsV2: Get memory address of class instance """ - cdef ccudart.cudaMemAllocNodeParamsV2 __val - cdef ccudart.cudaMemAllocNodeParamsV2* _ptr + cdef cyruntime.cudaMemAllocNodeParamsV2 __val + cdef cyruntime.cudaMemAllocNodeParamsV2* _ptr cdef cudaMemPoolProps _poolProps cdef size_t _accessDescs_length - cdef ccudart.cudaMemAccessDesc* _accessDescs + cdef cyruntime.cudaMemAccessDesc* _accessDescs {{endif}} {{if 'struct cudaMemFreeNodeParams' in found_types}} @@ -1212,8 +1212,8 @@ cdef class cudaMemFreeNodeParams: Get memory address of class instance """ - cdef ccudart.cudaMemFreeNodeParams __val - cdef ccudart.cudaMemFreeNodeParams* _ptr + cdef cyruntime.cudaMemFreeNodeParams __val + cdef cyruntime.cudaMemFreeNodeParams* _ptr {{endif}} {{if 'struct CUuuid_st' in found_types}} @@ -1230,8 +1230,8 @@ cdef class CUuuid_st: Get memory address of class instance """ - cdef ccudart.CUuuid_st __val - cdef ccudart.CUuuid_st* _ptr + cdef cyruntime.CUuuid_st __val + cdef cyruntime.CUuuid_st* _ptr {{endif}} {{if 'struct cudaDeviceProp' in found_types}} @@ -1462,8 +1462,8 @@ cdef class cudaDeviceProp: Get memory address of class instance """ - cdef ccudart.cudaDeviceProp __val - cdef ccudart.cudaDeviceProp* _ptr + cdef cyruntime.cudaDeviceProp __val + cdef cyruntime.cudaDeviceProp* _ptr cdef cudaUUID_t _uuid {{endif}} {{if 'struct cudaIpcEventHandle_st' in found_types}} @@ -1483,8 +1483,8 @@ cdef class cudaIpcEventHandle_st: Get memory address of class instance """ - cdef ccudart.cudaIpcEventHandle_st __val - cdef ccudart.cudaIpcEventHandle_st* _ptr + cdef cyruntime.cudaIpcEventHandle_st __val + cdef cyruntime.cudaIpcEventHandle_st* _ptr {{endif}} {{if 'struct cudaIpcMemHandle_st' in found_types}} @@ -1503,8 +1503,8 @@ cdef class cudaIpcMemHandle_st: Get memory address of class instance """ - cdef ccudart.cudaIpcMemHandle_st __val - cdef ccudart.cudaIpcMemHandle_st* _ptr + cdef cyruntime.cudaIpcMemHandle_st __val + cdef 
cyruntime.cudaIpcMemHandle_st* _ptr {{endif}} {{if 'struct cudaMemFabricHandle_st' in found_types}} @@ -1521,8 +1521,8 @@ cdef class cudaMemFabricHandle_st: Get memory address of class instance """ - cdef ccudart.cudaMemFabricHandle_st __val - cdef ccudart.cudaMemFabricHandle_st* _ptr + cdef cyruntime.cudaMemFabricHandle_st __val + cdef cyruntime.cudaMemFabricHandle_st* _ptr {{endif}} {{if 'struct cudaExternalMemoryHandleDesc' in found_types}} @@ -1541,7 +1541,7 @@ cdef class anon_struct5: Get memory address of class instance """ - cdef ccudart.cudaExternalMemoryHandleDesc* _ptr + cdef cyruntime.cudaExternalMemoryHandleDesc* _ptr {{endif}} {{if 'struct cudaExternalMemoryHandleDesc' in found_types}} @@ -1562,7 +1562,7 @@ cdef class anon_union1: Get memory address of class instance """ - cdef ccudart.cudaExternalMemoryHandleDesc* _ptr + cdef cyruntime.cudaExternalMemoryHandleDesc* _ptr cdef anon_struct5 _win32 {{endif}} {{if 'struct cudaExternalMemoryHandleDesc' in found_types}} @@ -1588,8 +1588,8 @@ cdef class cudaExternalMemoryHandleDesc: Get memory address of class instance """ - cdef ccudart.cudaExternalMemoryHandleDesc* _val_ptr - cdef ccudart.cudaExternalMemoryHandleDesc* _ptr + cdef cyruntime.cudaExternalMemoryHandleDesc* _val_ptr + cdef cyruntime.cudaExternalMemoryHandleDesc* _ptr cdef anon_union1 _handle {{endif}} {{if 'struct cudaExternalMemoryBufferDesc' in found_types}} @@ -1613,8 +1613,8 @@ cdef class cudaExternalMemoryBufferDesc: Get memory address of class instance """ - cdef ccudart.cudaExternalMemoryBufferDesc __val - cdef ccudart.cudaExternalMemoryBufferDesc* _ptr + cdef cyruntime.cudaExternalMemoryBufferDesc __val + cdef cyruntime.cudaExternalMemoryBufferDesc* _ptr {{endif}} {{if 'struct cudaExternalMemoryMipmappedArrayDesc' in found_types}} @@ -1643,8 +1643,8 @@ cdef class cudaExternalMemoryMipmappedArrayDesc: Get memory address of class instance """ - cdef ccudart.cudaExternalMemoryMipmappedArrayDesc __val - cdef ccudart.cudaExternalMemoryMipmappedArrayDesc* _ptr + cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc __val + cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* _ptr cdef cudaChannelFormatDesc _formatDesc cdef cudaExtent _extent {{endif}} @@ -1665,7 +1665,7 @@ cdef class anon_struct6: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreHandleDesc* _ptr + cdef cyruntime.cudaExternalSemaphoreHandleDesc* _ptr {{endif}} {{if 'struct cudaExternalSemaphoreHandleDesc' in found_types}} @@ -1686,7 +1686,7 @@ cdef class anon_union2: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreHandleDesc* _ptr + cdef cyruntime.cudaExternalSemaphoreHandleDesc* _ptr cdef anon_struct6 _win32 {{endif}} {{if 'struct cudaExternalSemaphoreHandleDesc' in found_types}} @@ -1710,8 +1710,8 @@ cdef class cudaExternalSemaphoreHandleDesc: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreHandleDesc* _val_ptr - cdef ccudart.cudaExternalSemaphoreHandleDesc* _ptr + cdef cyruntime.cudaExternalSemaphoreHandleDesc* _val_ptr + cdef cyruntime.cudaExternalSemaphoreHandleDesc* _ptr cdef anon_union2 _handle {{endif}} {{if 'struct cudaExternalSemaphoreSignalParams' in found_types}} @@ -1729,7 +1729,7 @@ cdef class anon_struct13: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreSignalParams* _ptr + cdef cyruntime.cudaExternalSemaphoreSignalParams* _ptr {{endif}} {{if 'struct cudaExternalSemaphoreSignalParams' in found_types}} @@ -1748,7 +1748,7 @@ cdef class anon_union5: Get memory address of class 
instance """ - cdef ccudart.cudaExternalSemaphoreSignalParams* _ptr + cdef cyruntime.cudaExternalSemaphoreSignalParams* _ptr {{endif}} {{if 'struct cudaExternalSemaphoreSignalParams' in found_types}} @@ -1765,7 +1765,7 @@ cdef class anon_struct14: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreSignalParams* _ptr + cdef cyruntime.cudaExternalSemaphoreSignalParams* _ptr {{endif}} {{if 'struct cudaExternalSemaphoreSignalParams' in found_types}} @@ -1788,7 +1788,7 @@ cdef class anon_struct15: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreSignalParams* _ptr + cdef cyruntime.cudaExternalSemaphoreSignalParams* _ptr cdef anon_struct13 _fence cdef anon_union5 _nvSciSync cdef anon_struct14 _keyedMutex @@ -1821,8 +1821,8 @@ cdef class cudaExternalSemaphoreSignalParams: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreSignalParams __val - cdef ccudart.cudaExternalSemaphoreSignalParams* _ptr + cdef cyruntime.cudaExternalSemaphoreSignalParams __val + cdef cyruntime.cudaExternalSemaphoreSignalParams* _ptr cdef anon_struct15 _params {{endif}} {{if 'struct cudaExternalSemaphoreWaitParams' in found_types}} @@ -1840,7 +1840,7 @@ cdef class anon_struct16: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreWaitParams* _ptr + cdef cyruntime.cudaExternalSemaphoreWaitParams* _ptr {{endif}} {{if 'struct cudaExternalSemaphoreWaitParams' in found_types}} @@ -1859,7 +1859,7 @@ cdef class anon_union6: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreWaitParams* _ptr + cdef cyruntime.cudaExternalSemaphoreWaitParams* _ptr {{endif}} {{if 'struct cudaExternalSemaphoreWaitParams' in found_types}} @@ -1878,7 +1878,7 @@ cdef class anon_struct17: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreWaitParams* _ptr + cdef cyruntime.cudaExternalSemaphoreWaitParams* _ptr {{endif}} {{if 'struct cudaExternalSemaphoreWaitParams' in found_types}} @@ -1901,7 +1901,7 @@ cdef class anon_struct18: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreWaitParams* _ptr + cdef cyruntime.cudaExternalSemaphoreWaitParams* _ptr cdef anon_struct16 _fence cdef anon_union6 _nvSciSync cdef anon_struct17 _keyedMutex @@ -1934,8 +1934,8 @@ cdef class cudaExternalSemaphoreWaitParams: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreWaitParams __val - cdef ccudart.cudaExternalSemaphoreWaitParams* _ptr + cdef cyruntime.cudaExternalSemaphoreWaitParams __val + cdef cyruntime.cudaExternalSemaphoreWaitParams* _ptr cdef anon_struct18 _params {{endif}} {{if 'struct cudaKernelNodeParams' in found_types}} @@ -1965,11 +1965,11 @@ cdef class cudaKernelNodeParams: Get memory address of class instance """ - cdef ccudart.cudaKernelNodeParams __val - cdef ccudart.cudaKernelNodeParams* _ptr + cdef cyruntime.cudaKernelNodeParams __val + cdef cyruntime.cudaKernelNodeParams* _ptr cdef dim3 _gridDim cdef dim3 _blockDim - cdef utils.HelperKernelParams _ckernelParams + cdef utils.HelperKernelParams _cykernelParams {{endif}} {{if 'struct cudaKernelNodeParamsV2' in found_types}} @@ -1998,11 +1998,11 @@ cdef class cudaKernelNodeParamsV2: Get memory address of class instance """ - cdef ccudart.cudaKernelNodeParamsV2 __val - cdef ccudart.cudaKernelNodeParamsV2* _ptr + cdef cyruntime.cudaKernelNodeParamsV2 __val + cdef cyruntime.cudaKernelNodeParamsV2* _ptr cdef dim3 _gridDim cdef dim3 _blockDim - cdef utils.HelperKernelParams _ckernelParams + cdef 
utils.HelperKernelParams _cykernelParams {{endif}} {{if 'struct cudaExternalSemaphoreSignalNodeParams' in found_types}} @@ -2026,13 +2026,13 @@ cdef class cudaExternalSemaphoreSignalNodeParams: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreSignalNodeParams __val - cdef ccudart.cudaExternalSemaphoreSignalNodeParams* _ptr + cdef cyruntime.cudaExternalSemaphoreSignalNodeParams __val + cdef cyruntime.cudaExternalSemaphoreSignalNodeParams* _ptr cdef size_t _extSemArray_length - cdef ccudart.cudaExternalSemaphore_t* _extSemArray + cdef cyruntime.cudaExternalSemaphore_t* _extSemArray cdef size_t _paramsArray_length - cdef ccudart.cudaExternalSemaphoreSignalParams* _paramsArray + cdef cyruntime.cudaExternalSemaphoreSignalParams* _paramsArray {{endif}} {{if 'struct cudaExternalSemaphoreSignalNodeParamsV2' in found_types}} @@ -2057,13 +2057,13 @@ cdef class cudaExternalSemaphoreSignalNodeParamsV2: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreSignalNodeParamsV2 __val - cdef ccudart.cudaExternalSemaphoreSignalNodeParamsV2* _ptr + cdef cyruntime.cudaExternalSemaphoreSignalNodeParamsV2 __val + cdef cyruntime.cudaExternalSemaphoreSignalNodeParamsV2* _ptr cdef size_t _extSemArray_length - cdef ccudart.cudaExternalSemaphore_t* _extSemArray + cdef cyruntime.cudaExternalSemaphore_t* _extSemArray cdef size_t _paramsArray_length - cdef ccudart.cudaExternalSemaphoreSignalParams* _paramsArray + cdef cyruntime.cudaExternalSemaphoreSignalParams* _paramsArray {{endif}} {{if 'struct cudaExternalSemaphoreWaitNodeParams' in found_types}} @@ -2088,13 +2088,13 @@ cdef class cudaExternalSemaphoreWaitNodeParams: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreWaitNodeParams __val - cdef ccudart.cudaExternalSemaphoreWaitNodeParams* _ptr + cdef cyruntime.cudaExternalSemaphoreWaitNodeParams __val + cdef cyruntime.cudaExternalSemaphoreWaitNodeParams* _ptr cdef size_t _extSemArray_length - cdef ccudart.cudaExternalSemaphore_t* _extSemArray + cdef cyruntime.cudaExternalSemaphore_t* _extSemArray cdef size_t _paramsArray_length - cdef ccudart.cudaExternalSemaphoreWaitParams* _paramsArray + cdef cyruntime.cudaExternalSemaphoreWaitParams* _paramsArray {{endif}} {{if 'struct cudaExternalSemaphoreWaitNodeParamsV2' in found_types}} @@ -2119,13 +2119,13 @@ cdef class cudaExternalSemaphoreWaitNodeParamsV2: Get memory address of class instance """ - cdef ccudart.cudaExternalSemaphoreWaitNodeParamsV2 __val - cdef ccudart.cudaExternalSemaphoreWaitNodeParamsV2* _ptr + cdef cyruntime.cudaExternalSemaphoreWaitNodeParamsV2 __val + cdef cyruntime.cudaExternalSemaphoreWaitNodeParamsV2* _ptr cdef size_t _extSemArray_length - cdef ccudart.cudaExternalSemaphore_t* _extSemArray + cdef cyruntime.cudaExternalSemaphore_t* _extSemArray cdef size_t _paramsArray_length - cdef ccudart.cudaExternalSemaphoreWaitParams* _paramsArray + cdef cyruntime.cudaExternalSemaphoreWaitParams* _paramsArray {{endif}} {{if 'struct cudaConditionalNodeParams' in found_types}} @@ -2161,11 +2161,11 @@ cdef class cudaConditionalNodeParams: Get memory address of class instance """ - cdef ccudart.cudaConditionalNodeParams __val - cdef ccudart.cudaConditionalNodeParams* _ptr + cdef cyruntime.cudaConditionalNodeParams __val + cdef cyruntime.cudaConditionalNodeParams* _ptr cdef cudaGraphConditionalHandle _handle cdef size_t _phGraph_out_length - cdef ccudart.cudaGraph_t* _phGraph_out + cdef cyruntime.cudaGraph_t* _phGraph_out {{endif}} {{if 'struct cudaChildGraphNodeParams' in found_types}} 
@@ -2186,8 +2186,8 @@ cdef class cudaChildGraphNodeParams: Get memory address of class instance """ - cdef ccudart.cudaChildGraphNodeParams __val - cdef ccudart.cudaChildGraphNodeParams* _ptr + cdef cyruntime.cudaChildGraphNodeParams __val + cdef cyruntime.cudaChildGraphNodeParams* _ptr cdef cudaGraph_t _graph {{endif}} {{if 'struct cudaEventRecordNodeParams' in found_types}} @@ -2207,8 +2207,8 @@ cdef class cudaEventRecordNodeParams: Get memory address of class instance """ - cdef ccudart.cudaEventRecordNodeParams __val - cdef ccudart.cudaEventRecordNodeParams* _ptr + cdef cyruntime.cudaEventRecordNodeParams __val + cdef cyruntime.cudaEventRecordNodeParams* _ptr cdef cudaEvent_t _event {{endif}} {{if 'struct cudaEventWaitNodeParams' in found_types}} @@ -2228,8 +2228,8 @@ cdef class cudaEventWaitNodeParams: Get memory address of class instance """ - cdef ccudart.cudaEventWaitNodeParams __val - cdef ccudart.cudaEventWaitNodeParams* _ptr + cdef cyruntime.cudaEventWaitNodeParams __val + cdef cyruntime.cudaEventWaitNodeParams* _ptr cdef cudaEvent_t _event {{endif}} {{if 'struct cudaGraphNodeParams' in found_types}} @@ -2279,8 +2279,8 @@ cdef class cudaGraphNodeParams: Get memory address of class instance """ - cdef ccudart.cudaGraphNodeParams* _val_ptr - cdef ccudart.cudaGraphNodeParams* _ptr + cdef cyruntime.cudaGraphNodeParams* _val_ptr + cdef cyruntime.cudaGraphNodeParams* _ptr cdef cudaKernelNodeParamsV2 _kernel cdef cudaMemcpyNodeParams _memcpy cdef cudaMemsetParamsV2 _memset @@ -2336,8 +2336,8 @@ cdef class cudaGraphEdgeData_st: Get memory address of class instance """ - cdef ccudart.cudaGraphEdgeData_st __val - cdef ccudart.cudaGraphEdgeData_st* _ptr + cdef cyruntime.cudaGraphEdgeData_st __val + cdef cyruntime.cudaGraphEdgeData_st* _ptr {{endif}} {{if 'struct cudaGraphInstantiateParams_st' in found_types}} @@ -2362,8 +2362,8 @@ cdef class cudaGraphInstantiateParams_st: Get memory address of class instance """ - cdef ccudart.cudaGraphInstantiateParams_st __val - cdef ccudart.cudaGraphInstantiateParams_st* _ptr + cdef cyruntime.cudaGraphInstantiateParams_st __val + cdef cyruntime.cudaGraphInstantiateParams_st* _ptr cdef cudaStream_t _uploadStream cdef cudaGraphNode_t _errNode_out {{endif}} @@ -2391,8 +2391,8 @@ cdef class cudaGraphExecUpdateResultInfo_st: Get memory address of class instance """ - cdef ccudart.cudaGraphExecUpdateResultInfo_st __val - cdef ccudart.cudaGraphExecUpdateResultInfo_st* _ptr + cdef cyruntime.cudaGraphExecUpdateResultInfo_st __val + cdef cyruntime.cudaGraphExecUpdateResultInfo_st* _ptr cdef cudaGraphNode_t _errorNode cdef cudaGraphNode_t _errorFromNode {{endif}} @@ -2415,7 +2415,7 @@ cdef class anon_struct19: Get memory address of class instance """ - cdef ccudart.cudaGraphKernelNodeUpdate* _ptr + cdef cyruntime.cudaGraphKernelNodeUpdate* _ptr {{endif}} {{if 'struct cudaGraphKernelNodeUpdate' in found_types}} @@ -2436,7 +2436,7 @@ cdef class anon_union8: Get memory address of class instance """ - cdef ccudart.cudaGraphKernelNodeUpdate* _ptr + cdef cyruntime.cudaGraphKernelNodeUpdate* _ptr cdef dim3 _gridDim cdef anon_struct19 _param {{endif}} @@ -2463,8 +2463,8 @@ cdef class cudaGraphKernelNodeUpdate: Get memory address of class instance """ - cdef ccudart.cudaGraphKernelNodeUpdate* _val_ptr - cdef ccudart.cudaGraphKernelNodeUpdate* _ptr + cdef cyruntime.cudaGraphKernelNodeUpdate* _val_ptr + cdef cyruntime.cudaGraphKernelNodeUpdate* _ptr cdef cudaGraphDeviceNode_t _node cdef anon_union8 _updateData {{endif}} @@ -2493,8 +2493,8 @@ cdef class 
cudaLaunchMemSyncDomainMap_st: Get memory address of class instance """ - cdef ccudart.cudaLaunchMemSyncDomainMap_st __val - cdef ccudart.cudaLaunchMemSyncDomainMap_st* _ptr + cdef cyruntime.cudaLaunchMemSyncDomainMap_st __val + cdef cyruntime.cudaLaunchMemSyncDomainMap_st* _ptr {{endif}} {{if 'union cudaLaunchAttributeValue' in found_types}} @@ -2515,7 +2515,7 @@ cdef class anon_struct20: Get memory address of class instance """ - cdef ccudart.cudaLaunchAttributeValue* _ptr + cdef cyruntime.cudaLaunchAttributeValue* _ptr {{endif}} {{if 'union cudaLaunchAttributeValue' in found_types}} @@ -2536,7 +2536,7 @@ cdef class anon_struct21: Get memory address of class instance """ - cdef ccudart.cudaLaunchAttributeValue* _ptr + cdef cyruntime.cudaLaunchAttributeValue* _ptr cdef cudaEvent_t _event {{endif}} {{if 'union cudaLaunchAttributeValue' in found_types}} @@ -2556,7 +2556,7 @@ cdef class anon_struct22: Get memory address of class instance """ - cdef ccudart.cudaLaunchAttributeValue* _ptr + cdef cyruntime.cudaLaunchAttributeValue* _ptr cdef cudaEvent_t _event {{endif}} {{if 'union cudaLaunchAttributeValue' in found_types}} @@ -2576,7 +2576,7 @@ cdef class anon_struct23: Get memory address of class instance """ - cdef ccudart.cudaLaunchAttributeValue* _ptr + cdef cyruntime.cudaLaunchAttributeValue* _ptr cdef cudaGraphDeviceNode_t _devNode {{endif}} {{if 'union cudaLaunchAttributeValue' in found_types}} @@ -2652,8 +2652,8 @@ cdef class cudaLaunchAttributeValue: Get memory address of class instance """ - cdef ccudart.cudaLaunchAttributeValue __val - cdef ccudart.cudaLaunchAttributeValue* _ptr + cdef cyruntime.cudaLaunchAttributeValue __val + cdef cyruntime.cudaLaunchAttributeValue* _ptr cdef cudaAccessPolicyWindow _accessPolicyWindow cdef anon_struct20 _clusterDim cdef anon_struct21 _programmaticEvent @@ -2680,8 +2680,8 @@ cdef class cudaLaunchAttribute_st: Get memory address of class instance """ - cdef ccudart.cudaLaunchAttribute_st __val - cdef ccudart.cudaLaunchAttribute_st* _ptr + cdef cyruntime.cudaLaunchAttribute_st __val + cdef cyruntime.cudaLaunchAttribute_st* _ptr cdef cudaLaunchAttributeValue _val {{endif}} {{if 'struct cudaAsyncNotificationInfo' in found_types}} @@ -2699,7 +2699,7 @@ cdef class anon_struct24: Get memory address of class instance """ - cdef ccudart.cudaAsyncNotificationInfo* _ptr + cdef cyruntime.cudaAsyncNotificationInfo* _ptr {{endif}} {{if 'struct cudaAsyncNotificationInfo' in found_types}} @@ -2716,7 +2716,7 @@ cdef class anon_union9: Get memory address of class instance """ - cdef ccudart.cudaAsyncNotificationInfo* _ptr + cdef cyruntime.cudaAsyncNotificationInfo* _ptr cdef anon_struct24 _overBudget {{endif}} {{if 'struct cudaAsyncNotificationInfo' in found_types}} @@ -2738,8 +2738,8 @@ cdef class cudaAsyncNotificationInfo: Get memory address of class instance """ - cdef ccudart.cudaAsyncNotificationInfo* _val_ptr - cdef ccudart.cudaAsyncNotificationInfo* _ptr + cdef cyruntime.cudaAsyncNotificationInfo* _val_ptr + cdef cyruntime.cudaAsyncNotificationInfo* _ptr cdef anon_union9 _info {{endif}} {{if 'struct cudaTextureDesc' in found_types}} @@ -2783,8 +2783,8 @@ cdef class cudaTextureDesc: Get memory address of class instance """ - cdef ccudart.cudaTextureDesc __val - cdef ccudart.cudaTextureDesc* _ptr + cdef cyruntime.cudaTextureDesc __val + cdef cyruntime.cudaTextureDesc* _ptr {{endif}} {{if True}} @@ -2816,8 +2816,8 @@ cdef class cudaEglPlaneDesc_st: Get memory address of class instance """ - cdef ccudart.cudaEglPlaneDesc_st __val - cdef 
ccudart.cudaEglPlaneDesc_st* _ptr + cdef cyruntime.cudaEglPlaneDesc_st __val + cdef cyruntime.cudaEglPlaneDesc_st* _ptr cdef cudaChannelFormatDesc _channelDesc {{endif}} {{if True}} @@ -2837,7 +2837,7 @@ cdef class anon_union10: Get memory address of class instance """ - cdef ccudart.cudaEglFrame_st* _ptr + cdef cyruntime.cudaEglFrame_st* _ptr {{endif}} {{if True}} @@ -2871,8 +2871,8 @@ cdef class cudaEglFrame_st: Get memory address of class instance """ - cdef ccudart.cudaEglFrame_st* _val_ptr - cdef ccudart.cudaEglFrame_st* _ptr + cdef cyruntime.cudaEglFrame_st* _val_ptr + cdef cyruntime.cudaEglFrame_st* _ptr cdef anon_union10 _frame {{endif}} {{if 'CUuuid' in found_types}} @@ -3346,7 +3346,7 @@ cdef class cudaEglFrame(cudaEglFrame_st): {{endif}} {{if 'cudaStream_t' in found_types}} -cdef class cudaStream_t(cuda.CUstream): +cdef class cudaStream_t(driver.CUstream): """ CUDA stream @@ -3362,7 +3362,7 @@ cdef class cudaStream_t(cuda.CUstream): {{if 'cudaEvent_t' in found_types}} -cdef class cudaEvent_t(cuda.CUevent): +cdef class cudaEvent_t(driver.CUevent): """ CUDA event types @@ -3378,7 +3378,7 @@ cdef class cudaEvent_t(cuda.CUevent): {{if 'cudaGraph_t' in found_types}} -cdef class cudaGraph_t(cuda.CUgraph): +cdef class cudaGraph_t(driver.CUgraph): """ CUDA graph @@ -3394,7 +3394,7 @@ cdef class cudaGraph_t(cuda.CUgraph): {{if 'cudaGraphNode_t' in found_types}} -cdef class cudaGraphNode_t(cuda.CUgraphNode): +cdef class cudaGraphNode_t(driver.CUgraphNode): """ CUDA graph node. @@ -3410,7 +3410,7 @@ cdef class cudaGraphNode_t(cuda.CUgraphNode): {{if 'cudaUserObject_t' in found_types}} -cdef class cudaUserObject_t(cuda.CUuserObject): +cdef class cudaUserObject_t(driver.CUuserObject): """ CUDA user object for graphs @@ -3426,7 +3426,7 @@ cdef class cudaUserObject_t(cuda.CUuserObject): {{if 'cudaFunction_t' in found_types}} -cdef class cudaFunction_t(cuda.CUfunction): +cdef class cudaFunction_t(driver.CUfunction): """ CUDA function @@ -3442,7 +3442,7 @@ cdef class cudaFunction_t(cuda.CUfunction): {{if 'cudaMemPool_t' in found_types}} -cdef class cudaMemPool_t(cuda.CUmemoryPool): +cdef class cudaMemPool_t(driver.CUmemoryPool): """ CUDA memory pool @@ -3458,7 +3458,7 @@ cdef class cudaMemPool_t(cuda.CUmemoryPool): {{if 'cudaGraphExec_t' in found_types}} -cdef class cudaGraphExec_t(cuda.CUgraphExec): +cdef class cudaGraphExec_t(driver.CUgraphExec): """ CUDA executable (launchable) graph @@ -3474,7 +3474,7 @@ cdef class cudaGraphExec_t(cuda.CUgraphExec): {{if True}} -cdef class cudaEglStreamConnection(cuda.CUeglStreamConnection): +cdef class cudaEglStreamConnection(driver.CUeglStreamConnection): """ CUDA EGLSream Connection @@ -3501,8 +3501,8 @@ cdef class cudaGraphConditionalHandle: Get memory address of class instance """ - cdef ccudart.cudaGraphConditionalHandle __val - cdef ccudart.cudaGraphConditionalHandle* _ptr + cdef cyruntime.cudaGraphConditionalHandle __val + cdef cyruntime.cudaGraphConditionalHandle* _ptr {{endif}} {{if 'cudaSurfaceObject_t' in found_types}} @@ -3518,8 +3518,8 @@ cdef class cudaSurfaceObject_t: Get memory address of class instance """ - cdef ccudart.cudaSurfaceObject_t __val - cdef ccudart.cudaSurfaceObject_t* _ptr + cdef cyruntime.cudaSurfaceObject_t __val + cdef cyruntime.cudaSurfaceObject_t* _ptr {{endif}} {{if 'cudaTextureObject_t' in found_types}} @@ -3535,8 +3535,8 @@ cdef class cudaTextureObject_t: Get memory address of class instance """ - cdef ccudart.cudaTextureObject_t __val - cdef ccudart.cudaTextureObject_t* _ptr + cdef cyruntime.cudaTextureObject_t __val 
+ cdef cyruntime.cudaTextureObject_t* _ptr {{endif}} {{if True}} @@ -3550,8 +3550,8 @@ cdef class GLenum: Get memory address of class instance """ - cdef ccudart.GLenum __val - cdef ccudart.GLenum* _ptr + cdef cyruntime.GLenum __val + cdef cyruntime.GLenum* _ptr {{endif}} {{if True}} @@ -3565,8 +3565,8 @@ cdef class GLuint: Get memory address of class instance """ - cdef ccudart.GLuint __val - cdef ccudart.GLuint* _ptr + cdef cyruntime.GLuint __val + cdef cyruntime.GLuint* _ptr {{endif}} {{if True}} @@ -3580,8 +3580,8 @@ cdef class EGLint: Get memory address of class instance """ - cdef ccudart.EGLint __val - cdef ccudart.EGLint* _ptr + cdef cyruntime.EGLint __val + cdef cyruntime.EGLint* _ptr {{endif}} {{if True}} @@ -3595,8 +3595,8 @@ cdef class VdpDevice: Get memory address of class instance """ - cdef ccudart.VdpDevice __val - cdef ccudart.VdpDevice* _ptr + cdef cyruntime.VdpDevice __val + cdef cyruntime.VdpDevice* _ptr {{endif}} {{if True}} @@ -3610,8 +3610,8 @@ cdef class VdpGetProcAddress: Get memory address of class instance """ - cdef ccudart.VdpGetProcAddress __val - cdef ccudart.VdpGetProcAddress* _ptr + cdef cyruntime.VdpGetProcAddress __val + cdef cyruntime.VdpGetProcAddress* _ptr {{endif}} {{if True}} @@ -3625,8 +3625,8 @@ cdef class VdpVideoSurface: Get memory address of class instance """ - cdef ccudart.VdpVideoSurface __val - cdef ccudart.VdpVideoSurface* _ptr + cdef cyruntime.VdpVideoSurface __val + cdef cyruntime.VdpVideoSurface* _ptr {{endif}} {{if True}} @@ -3640,6 +3640,6 @@ cdef class VdpOutputSurface: Get memory address of class instance """ - cdef ccudart.VdpOutputSurface __val - cdef ccudart.VdpOutputSurface* _ptr + cdef cyruntime.VdpOutputSurface __val + cdef cyruntime.VdpOutputSurface* _ptr {{endif}} diff --git a/cuda/cudart.pyx.in b/cuda/cuda/bindings/runtime.pyx.in similarity index 80% rename from cuda/cudart.pyx.in rename to cuda/cuda/bindings/runtime.pyx.in index 5b868d98..e735ee44 100644 --- a/cuda/cudart.pyx.in +++ b/cuda/cuda/bindings/runtime.pyx.in @@ -17,7 +17,7 @@ from libc.limits cimport CHAR_MIN from libcpp.vector cimport vector from cpython.buffer cimport PyObject_CheckBuffer, PyObject_GetBuffer, PyBuffer_Release, PyBUF_SIMPLE, PyBUF_ANY_CONTIGUOUS from cpython.bytes cimport PyBytes_FromStringAndSize -import cuda.cuda +import cuda.bindings.driver ctypedef unsigned long long signed_char_ptr ctypedef unsigned long long unsigned_char_ptr @@ -43,40 +43,40 @@ ctypedef unsigned long long double_ptr ctypedef unsigned long long void_ptr #: Default page-locked allocation flag -cudaHostAllocDefault = ccudart.cudaHostAllocDefault +cudaHostAllocDefault = cyruntime.cudaHostAllocDefault #: Pinned memory accessible by all CUDA contexts -cudaHostAllocPortable = ccudart.cudaHostAllocPortable +cudaHostAllocPortable = cyruntime.cudaHostAllocPortable #: Map allocation into device space -cudaHostAllocMapped = ccudart.cudaHostAllocMapped +cudaHostAllocMapped = cyruntime.cudaHostAllocMapped #: Write-combined memory -cudaHostAllocWriteCombined = ccudart.cudaHostAllocWriteCombined +cudaHostAllocWriteCombined = cyruntime.cudaHostAllocWriteCombined #: Default host memory registration flag -cudaHostRegisterDefault = ccudart.cudaHostRegisterDefault +cudaHostRegisterDefault = cyruntime.cudaHostRegisterDefault #: Pinned memory accessible by all CUDA contexts -cudaHostRegisterPortable = ccudart.cudaHostRegisterPortable +cudaHostRegisterPortable = cyruntime.cudaHostRegisterPortable #: Map registered memory into device space -cudaHostRegisterMapped = ccudart.cudaHostRegisterMapped 
+cudaHostRegisterMapped = cyruntime.cudaHostRegisterMapped #: Memory-mapped I/O space -cudaHostRegisterIoMemory = ccudart.cudaHostRegisterIoMemory +cudaHostRegisterIoMemory = cyruntime.cudaHostRegisterIoMemory #: Memory-mapped read-only -cudaHostRegisterReadOnly = ccudart.cudaHostRegisterReadOnly +cudaHostRegisterReadOnly = cyruntime.cudaHostRegisterReadOnly #: Default peer addressing enable flag -cudaPeerAccessDefault = ccudart.cudaPeerAccessDefault +cudaPeerAccessDefault = cyruntime.cudaPeerAccessDefault #: Default stream flag -cudaStreamDefault = ccudart.cudaStreamDefault +cudaStreamDefault = cyruntime.cudaStreamDefault #: Stream does not synchronize with stream 0 (the NULL stream) -cudaStreamNonBlocking = ccudart.cudaStreamNonBlocking +cudaStreamNonBlocking = cyruntime.cudaStreamNonBlocking #: Legacy stream handle #: @@ -84,7 +84,7 @@ cudaStreamNonBlocking = ccudart.cudaStreamNonBlocking #: stream with legacy synchronization behavior. #: #: See details of the \link_sync_behavior -cudaStreamLegacy = ccudart.cudaStreamLegacy +cudaStreamLegacy = cyruntime.cudaStreamLegacy #: Per-thread stream handle #: @@ -92,146 +92,146 @@ cudaStreamLegacy = ccudart.cudaStreamLegacy #: stream with per-thread synchronization behavior. #: #: See details of the \link_sync_behavior -cudaStreamPerThread = ccudart.cudaStreamPerThread +cudaStreamPerThread = cyruntime.cudaStreamPerThread #: Default event flag -cudaEventDefault = ccudart.cudaEventDefault +cudaEventDefault = cyruntime.cudaEventDefault #: Event uses blocking synchronization -cudaEventBlockingSync = ccudart.cudaEventBlockingSync +cudaEventBlockingSync = cyruntime.cudaEventBlockingSync #: Event will not record timing data -cudaEventDisableTiming = ccudart.cudaEventDisableTiming +cudaEventDisableTiming = cyruntime.cudaEventDisableTiming #: Event is suitable for interprocess use. 
cudaEventDisableTiming must be #: set -cudaEventInterprocess = ccudart.cudaEventInterprocess +cudaEventInterprocess = cyruntime.cudaEventInterprocess #: Default event record flag -cudaEventRecordDefault = ccudart.cudaEventRecordDefault +cudaEventRecordDefault = cyruntime.cudaEventRecordDefault #: Event is captured in the graph as an external event node when performing #: stream capture -cudaEventRecordExternal = ccudart.cudaEventRecordExternal +cudaEventRecordExternal = cyruntime.cudaEventRecordExternal #: Default event wait flag -cudaEventWaitDefault = ccudart.cudaEventWaitDefault +cudaEventWaitDefault = cyruntime.cudaEventWaitDefault #: Event is captured in the graph as an external event node when performing #: stream capture -cudaEventWaitExternal = ccudart.cudaEventWaitExternal +cudaEventWaitExternal = cyruntime.cudaEventWaitExternal #: Device flag - Automatic scheduling -cudaDeviceScheduleAuto = ccudart.cudaDeviceScheduleAuto +cudaDeviceScheduleAuto = cyruntime.cudaDeviceScheduleAuto #: Device flag - Spin default scheduling -cudaDeviceScheduleSpin = ccudart.cudaDeviceScheduleSpin +cudaDeviceScheduleSpin = cyruntime.cudaDeviceScheduleSpin #: Device flag - Yield default scheduling -cudaDeviceScheduleYield = ccudart.cudaDeviceScheduleYield +cudaDeviceScheduleYield = cyruntime.cudaDeviceScheduleYield #: Device flag - Use blocking synchronization -cudaDeviceScheduleBlockingSync = ccudart.cudaDeviceScheduleBlockingSync +cudaDeviceScheduleBlockingSync = cyruntime.cudaDeviceScheduleBlockingSync #: Device flag - Use blocking synchronization [Deprecated] -cudaDeviceBlockingSync = ccudart.cudaDeviceBlockingSync +cudaDeviceBlockingSync = cyruntime.cudaDeviceBlockingSync #: Device schedule flags mask -cudaDeviceScheduleMask = ccudart.cudaDeviceScheduleMask +cudaDeviceScheduleMask = cyruntime.cudaDeviceScheduleMask #: Device flag - Support mapped pinned allocations -cudaDeviceMapHost = ccudart.cudaDeviceMapHost +cudaDeviceMapHost = cyruntime.cudaDeviceMapHost #: Device flag - Keep local memory allocation after launch -cudaDeviceLmemResizeToMax = ccudart.cudaDeviceLmemResizeToMax +cudaDeviceLmemResizeToMax = cyruntime.cudaDeviceLmemResizeToMax #: Device flag - Ensure synchronous memory operations on this context will #: synchronize -cudaDeviceSyncMemops = ccudart.cudaDeviceSyncMemops +cudaDeviceSyncMemops = cyruntime.cudaDeviceSyncMemops #: Device flags mask -cudaDeviceMask = ccudart.cudaDeviceMask +cudaDeviceMask = cyruntime.cudaDeviceMask #: Default CUDA array allocation flag -cudaArrayDefault = ccudart.cudaArrayDefault +cudaArrayDefault = cyruntime.cudaArrayDefault #: Must be set in cudaMalloc3DArray to create a layered CUDA array -cudaArrayLayered = ccudart.cudaArrayLayered +cudaArrayLayered = cyruntime.cudaArrayLayered #: Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind #: surfaces to the CUDA array -cudaArraySurfaceLoadStore = ccudart.cudaArraySurfaceLoadStore +cudaArraySurfaceLoadStore = cyruntime.cudaArraySurfaceLoadStore #: Must be set in cudaMalloc3DArray to create a cubemap CUDA array -cudaArrayCubemap = ccudart.cudaArrayCubemap +cudaArrayCubemap = cyruntime.cudaArrayCubemap #: Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform #: texture gather operations on the CUDA array -cudaArrayTextureGather = ccudart.cudaArrayTextureGather +cudaArrayTextureGather = cyruntime.cudaArrayTextureGather #: Must be set in cudaExternalMemoryGetMappedMipmappedArray if the #: mipmapped array is used as a color target in a graphics API -cudaArrayColorAttachment = 
ccudart.cudaArrayColorAttachment +cudaArrayColorAttachment = cyruntime.cudaArrayColorAttachment #: Must be set in cudaMallocArray, cudaMalloc3DArray or #: cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA #: mipmapped array -cudaArraySparse = ccudart.cudaArraySparse +cudaArraySparse = cyruntime.cudaArraySparse #: Must be set in cudaMallocArray, cudaMalloc3DArray or #: cudaMallocMipmappedArray in order to create a deferred mapping CUDA #: array or CUDA mipmapped array -cudaArrayDeferredMapping = ccudart.cudaArrayDeferredMapping +cudaArrayDeferredMapping = cyruntime.cudaArrayDeferredMapping #: Automatically enable peer access between remote devices as needed -cudaIpcMemLazyEnablePeerAccess = ccudart.cudaIpcMemLazyEnablePeerAccess +cudaIpcMemLazyEnablePeerAccess = cyruntime.cudaIpcMemLazyEnablePeerAccess #: Memory can be accessed by any stream on any device -cudaMemAttachGlobal = ccudart.cudaMemAttachGlobal +cudaMemAttachGlobal = cyruntime.cudaMemAttachGlobal #: Memory cannot be accessed by any stream on any device -cudaMemAttachHost = ccudart.cudaMemAttachHost +cudaMemAttachHost = cyruntime.cudaMemAttachHost #: Memory can only be accessed by a single stream on the associated device -cudaMemAttachSingle = ccudart.cudaMemAttachSingle +cudaMemAttachSingle = cyruntime.cudaMemAttachSingle #: Default behavior -cudaOccupancyDefault = ccudart.cudaOccupancyDefault +cudaOccupancyDefault = cyruntime.cudaOccupancyDefault #: Assume global caching is enabled and cannot be automatically turned off -cudaOccupancyDisableCachingOverride = ccudart.cudaOccupancyDisableCachingOverride +cudaOccupancyDisableCachingOverride = cyruntime.cudaOccupancyDisableCachingOverride #: Device id that represents the CPU -cudaCpuDeviceId = ccudart.cudaCpuDeviceId +cudaCpuDeviceId = cyruntime.cudaCpuDeviceId #: Device id that represents an invalid device -cudaInvalidDeviceId = ccudart.cudaInvalidDeviceId +cudaInvalidDeviceId = cyruntime.cudaInvalidDeviceId #: Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice #: call -cudaInitDeviceFlagsAreValid = ccudart.cudaInitDeviceFlagsAreValid +cudaInitDeviceFlagsAreValid = cyruntime.cudaInitDeviceFlagsAreValid #: If set, each kernel launched as part of #: :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` only waits for prior #: work in the stream corresponding to that GPU to complete before the #: kernel begins execution. -cudaCooperativeLaunchMultiDeviceNoPreSync = ccudart.cudaCooperativeLaunchMultiDeviceNoPreSync +cudaCooperativeLaunchMultiDeviceNoPreSync = cyruntime.cudaCooperativeLaunchMultiDeviceNoPreSync #: If set, any subsequent work pushed in a stream that participated in a #: call to :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` will only #: wait for the kernel launched on the GPU corresponding to that stream to #: complete before it begins execution. 
-cudaCooperativeLaunchMultiDeviceNoPostSync = ccudart.cudaCooperativeLaunchMultiDeviceNoPostSync +cudaCooperativeLaunchMultiDeviceNoPostSync = cyruntime.cudaCooperativeLaunchMultiDeviceNoPostSync #: Indicates that the layered sparse CUDA array or CUDA mipmapped array has #: a single mip tail region for all layers -cudaArraySparsePropertiesSingleMipTail = ccudart.cudaArraySparsePropertiesSingleMipTail +cudaArraySparsePropertiesSingleMipTail = cyruntime.cudaArraySparsePropertiesSingleMipTail #: CUDA IPC Handle Size -CUDA_IPC_HANDLE_SIZE = ccudart.CUDA_IPC_HANDLE_SIZE +CUDA_IPC_HANDLE_SIZE = cyruntime.CUDA_IPC_HANDLE_SIZE #: Indicates that the external memory object is a dedicated resource -cudaExternalMemoryDedicated = ccudart.cudaExternalMemoryDedicated +cudaExternalMemoryDedicated = cyruntime.cudaExternalMemoryDedicated #: When the flags parameter of #: :py:obj:`~.cudaExternalSemaphoreSignalParams` contains this flag, it @@ -241,7 +241,7 @@ cudaExternalMemoryDedicated = ccudart.cudaExternalMemoryDedicated #: :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are #: performed by default to ensure data coherency with other importers of #: the same NvSciBuf memory objects. -cudaExternalSemaphoreSignalSkipNvSciBufMemSync = ccudart.cudaExternalSemaphoreSignalSkipNvSciBufMemSync +cudaExternalSemaphoreSignalSkipNvSciBufMemSync = cyruntime.cudaExternalSemaphoreSignalSkipNvSciBufMemSync #: When the flags parameter of #: :py:obj:`~.cudaExternalSemaphoreWaitParams` contains this flag, it @@ -251,94 +251,94 @@ cudaExternalSemaphoreSignalSkipNvSciBufMemSync = ccudart.cudaExternalSemaphoreSi #: :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are #: performed by default to ensure data coherency with other importers of #: the same NvSciBuf memory objects. -cudaExternalSemaphoreWaitSkipNvSciBufMemSync = ccudart.cudaExternalSemaphoreWaitSkipNvSciBufMemSync +cudaExternalSemaphoreWaitSkipNvSciBufMemSync = cyruntime.cudaExternalSemaphoreWaitSkipNvSciBufMemSync #: When the flags parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to #: this, it indicates that the application needs signaler-specific NvSciSyncAttr #: to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. -cudaNvSciSyncAttrSignal = ccudart.cudaNvSciSyncAttrSignal +cudaNvSciSyncAttrSignal = cyruntime.cudaNvSciSyncAttrSignal #: When the flags parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to #: this, it indicates that the application needs waiter-specific NvSciSyncAttr #: to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. -cudaNvSciSyncAttrWait = ccudart.cudaNvSciSyncAttrWait +cudaNvSciSyncAttrWait = cyruntime.cudaNvSciSyncAttrWait #: This port activates when the kernel has finished executing. -cudaGraphKernelNodePortDefault = ccudart.cudaGraphKernelNodePortDefault +cudaGraphKernelNodePortDefault = cyruntime.cudaGraphKernelNodePortDefault #: This port activates when all blocks of the kernel have performed #: cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be #: used with edge type :py:obj:`~.cudaGraphDependencyTypeProgrammatic`. See #: also :py:obj:`~.cudaLaunchAttributeProgrammaticEvent`. -cudaGraphKernelNodePortProgrammatic = ccudart.cudaGraphKernelNodePortProgrammatic +cudaGraphKernelNodePortProgrammatic = cyruntime.cudaGraphKernelNodePortProgrammatic #: This port activates when all blocks of the kernel have begun execution. #: See also :py:obj:`~.cudaLaunchAttributeLaunchCompletionEvent`.
-cudaGraphKernelNodePortLaunchCompletion = ccudart.cudaGraphKernelNodePortLaunchCompletion +cudaGraphKernelNodePortLaunchCompletion = cyruntime.cudaGraphKernelNodePortLaunchCompletion -cudaStreamAttributeAccessPolicyWindow = ccudart.cudaStreamAttributeAccessPolicyWindow +cudaStreamAttributeAccessPolicyWindow = cyruntime.cudaStreamAttributeAccessPolicyWindow -cudaStreamAttributeSynchronizationPolicy = ccudart.cudaStreamAttributeSynchronizationPolicy +cudaStreamAttributeSynchronizationPolicy = cyruntime.cudaStreamAttributeSynchronizationPolicy -cudaStreamAttributeMemSyncDomainMap = ccudart.cudaStreamAttributeMemSyncDomainMap +cudaStreamAttributeMemSyncDomainMap = cyruntime.cudaStreamAttributeMemSyncDomainMap -cudaStreamAttributeMemSyncDomain = ccudart.cudaStreamAttributeMemSyncDomain +cudaStreamAttributeMemSyncDomain = cyruntime.cudaStreamAttributeMemSyncDomain -cudaStreamAttributePriority = ccudart.cudaStreamAttributePriority +cudaStreamAttributePriority = cyruntime.cudaStreamAttributePriority -cudaKernelNodeAttributeAccessPolicyWindow = ccudart.cudaKernelNodeAttributeAccessPolicyWindow +cudaKernelNodeAttributeAccessPolicyWindow = cyruntime.cudaKernelNodeAttributeAccessPolicyWindow -cudaKernelNodeAttributeCooperative = ccudart.cudaKernelNodeAttributeCooperative +cudaKernelNodeAttributeCooperative = cyruntime.cudaKernelNodeAttributeCooperative -cudaKernelNodeAttributePriority = ccudart.cudaKernelNodeAttributePriority +cudaKernelNodeAttributePriority = cyruntime.cudaKernelNodeAttributePriority -cudaKernelNodeAttributeClusterDimension = ccudart.cudaKernelNodeAttributeClusterDimension +cudaKernelNodeAttributeClusterDimension = cyruntime.cudaKernelNodeAttributeClusterDimension -cudaKernelNodeAttributeClusterSchedulingPolicyPreference = ccudart.cudaKernelNodeAttributeClusterSchedulingPolicyPreference +cudaKernelNodeAttributeClusterSchedulingPolicyPreference = cyruntime.cudaKernelNodeAttributeClusterSchedulingPolicyPreference -cudaKernelNodeAttributeMemSyncDomainMap = ccudart.cudaKernelNodeAttributeMemSyncDomainMap +cudaKernelNodeAttributeMemSyncDomainMap = cyruntime.cudaKernelNodeAttributeMemSyncDomainMap -cudaKernelNodeAttributeMemSyncDomain = ccudart.cudaKernelNodeAttributeMemSyncDomain +cudaKernelNodeAttributeMemSyncDomain = cyruntime.cudaKernelNodeAttributeMemSyncDomain -cudaKernelNodeAttributePreferredSharedMemoryCarveout = ccudart.cudaKernelNodeAttributePreferredSharedMemoryCarveout +cudaKernelNodeAttributePreferredSharedMemoryCarveout = cyruntime.cudaKernelNodeAttributePreferredSharedMemoryCarveout -cudaKernelNodeAttributeDeviceUpdatableKernelNode = ccudart.cudaKernelNodeAttributeDeviceUpdatableKernelNode +cudaKernelNodeAttributeDeviceUpdatableKernelNode = cyruntime.cudaKernelNodeAttributeDeviceUpdatableKernelNode -cudaSurfaceType1D = ccudart.cudaSurfaceType1D +cudaSurfaceType1D = cyruntime.cudaSurfaceType1D -cudaSurfaceType2D = ccudart.cudaSurfaceType2D +cudaSurfaceType2D = cyruntime.cudaSurfaceType2D -cudaSurfaceType3D = ccudart.cudaSurfaceType3D +cudaSurfaceType3D = cyruntime.cudaSurfaceType3D -cudaSurfaceTypeCubemap = ccudart.cudaSurfaceTypeCubemap +cudaSurfaceTypeCubemap = cyruntime.cudaSurfaceTypeCubemap -cudaSurfaceType1DLayered = ccudart.cudaSurfaceType1DLayered +cudaSurfaceType1DLayered = cyruntime.cudaSurfaceType1DLayered -cudaSurfaceType2DLayered = ccudart.cudaSurfaceType2DLayered +cudaSurfaceType2DLayered = cyruntime.cudaSurfaceType2DLayered -cudaSurfaceTypeCubemapLayered = ccudart.cudaSurfaceTypeCubemapLayered +cudaSurfaceTypeCubemapLayered = 
cyruntime.cudaSurfaceTypeCubemapLayered -cudaTextureType1D = ccudart.cudaTextureType1D +cudaTextureType1D = cyruntime.cudaTextureType1D -cudaTextureType2D = ccudart.cudaTextureType2D +cudaTextureType2D = cyruntime.cudaTextureType2D -cudaTextureType3D = ccudart.cudaTextureType3D +cudaTextureType3D = cyruntime.cudaTextureType3D -cudaTextureTypeCubemap = ccudart.cudaTextureTypeCubemap +cudaTextureTypeCubemap = cyruntime.cudaTextureTypeCubemap -cudaTextureType1DLayered = ccudart.cudaTextureType1DLayered +cudaTextureType1DLayered = cyruntime.cudaTextureType1DLayered -cudaTextureType2DLayered = ccudart.cudaTextureType2DLayered +cudaTextureType2DLayered = cyruntime.cudaTextureType2DLayered -cudaTextureTypeCubemapLayered = ccudart.cudaTextureTypeCubemapLayered +cudaTextureTypeCubemapLayered = cyruntime.cudaTextureTypeCubemapLayered #: CUDA Runtime API Version -CUDART_VERSION = ccudart.CUDART_VERSION +CUDART_VERSION = cyruntime.CUDART_VERSION -__CUDART_API_VERSION = ccudart.__CUDART_API_VERSION +__CUDART_API_VERSION = cyruntime.__CUDART_API_VERSION #: Maximum number of planes per frame -CUDA_EGL_MAX_PLANES = ccudart.CUDA_EGL_MAX_PLANES +CUDA_EGL_MAX_PLANES = cyruntime.CUDA_EGL_MAX_PLANES {{if 'cudaError' in found_types}} @@ -351,46 +351,46 @@ class cudaError_t(IntEnum): #: The API call returned with no errors. In the case of query calls, #: this also means that the operation being queried is complete (see #: :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`). - cudaSuccess = ccudart.cudaError.cudaSuccess{{endif}} + cudaSuccess = cyruntime.cudaError.cudaSuccess{{endif}} {{if 'cudaErrorInvalidValue' in found_values}} #: This indicates that one or more of the parameters passed to the API #: call is not within an acceptable range of values. - cudaErrorInvalidValue = ccudart.cudaError.cudaErrorInvalidValue{{endif}} + cudaErrorInvalidValue = cyruntime.cudaError.cudaErrorInvalidValue{{endif}} {{if 'cudaErrorMemoryAllocation' in found_values}} #: The API call failed because it was unable to allocate enough memory #: or other resources to perform the requested operation. - cudaErrorMemoryAllocation = ccudart.cudaError.cudaErrorMemoryAllocation{{endif}} + cudaErrorMemoryAllocation = cyruntime.cudaError.cudaErrorMemoryAllocation{{endif}} {{if 'cudaErrorInitializationError' in found_values}} #: The API call failed because the CUDA driver and runtime could not be #: initialized. - cudaErrorInitializationError = ccudart.cudaError.cudaErrorInitializationError{{endif}} + cudaErrorInitializationError = cyruntime.cudaError.cudaErrorInitializationError{{endif}} {{if 'cudaErrorCudartUnloading' in found_values}} #: This indicates that a CUDA Runtime API call cannot be executed #: because it is being called during process shut down, at a point in #: time after CUDA driver has been unloaded. - cudaErrorCudartUnloading = ccudart.cudaError.cudaErrorCudartUnloading{{endif}} + cudaErrorCudartUnloading = cyruntime.cudaError.cudaErrorCudartUnloading{{endif}} {{if 'cudaErrorProfilerDisabled' in found_values}} #: This indicates profiler is not initialized for this run. This can #: happen when the application is running with external profiling tools #: like visual profiler. 
- cudaErrorProfilerDisabled = ccudart.cudaError.cudaErrorProfilerDisabled{{endif}} + cudaErrorProfilerDisabled = cyruntime.cudaError.cudaErrorProfilerDisabled{{endif}} {{if 'cudaErrorProfilerNotInitialized' in found_values}} #: [Deprecated] - cudaErrorProfilerNotInitialized = ccudart.cudaError.cudaErrorProfilerNotInitialized{{endif}} + cudaErrorProfilerNotInitialized = cyruntime.cudaError.cudaErrorProfilerNotInitialized{{endif}} {{if 'cudaErrorProfilerAlreadyStarted' in found_values}} #: [Deprecated] - cudaErrorProfilerAlreadyStarted = ccudart.cudaError.cudaErrorProfilerAlreadyStarted{{endif}} + cudaErrorProfilerAlreadyStarted = cyruntime.cudaError.cudaErrorProfilerAlreadyStarted{{endif}} {{if 'cudaErrorProfilerAlreadyStopped' in found_values}} #: [Deprecated] - cudaErrorProfilerAlreadyStopped = ccudart.cudaError.cudaErrorProfilerAlreadyStopped{{endif}} + cudaErrorProfilerAlreadyStopped = cyruntime.cudaError.cudaErrorProfilerAlreadyStopped{{endif}} {{if 'cudaErrorInvalidConfiguration' in found_values}} #: This indicates that a kernel launch is requesting resources that can @@ -398,139 +398,139 @@ class cudaError_t(IntEnum): #: memory per block than the device supports will trigger this error, #: as will requesting too many threads or blocks. See #: :py:obj:`~.cudaDeviceProp` for more device limitations. - cudaErrorInvalidConfiguration = ccudart.cudaError.cudaErrorInvalidConfiguration{{endif}} + cudaErrorInvalidConfiguration = cyruntime.cudaError.cudaErrorInvalidConfiguration{{endif}} {{if 'cudaErrorInvalidPitchValue' in found_values}} #: This indicates that one or more of the pitch-related parameters #: passed to the API call is not within the acceptable range for pitch. - cudaErrorInvalidPitchValue = ccudart.cudaError.cudaErrorInvalidPitchValue{{endif}} + cudaErrorInvalidPitchValue = cyruntime.cudaError.cudaErrorInvalidPitchValue{{endif}} {{if 'cudaErrorInvalidSymbol' in found_values}} #: This indicates that the symbol name/identifier passed to the API #: call is not a valid name or identifier. - cudaErrorInvalidSymbol = ccudart.cudaError.cudaErrorInvalidSymbol{{endif}} + cudaErrorInvalidSymbol = cyruntime.cudaError.cudaErrorInvalidSymbol{{endif}} {{if 'cudaErrorInvalidHostPointer' in found_values}} #: This indicates that at least one host pointer passed to the API call #: is not a valid host pointer. [Deprecated] - cudaErrorInvalidHostPointer = ccudart.cudaError.cudaErrorInvalidHostPointer{{endif}} + cudaErrorInvalidHostPointer = cyruntime.cudaError.cudaErrorInvalidHostPointer{{endif}} {{if 'cudaErrorInvalidDevicePointer' in found_values}} #: This indicates that at least one device pointer passed to the API #: call is not a valid device pointer. [Deprecated] - cudaErrorInvalidDevicePointer = ccudart.cudaError.cudaErrorInvalidDevicePointer{{endif}} + cudaErrorInvalidDevicePointer = cyruntime.cudaError.cudaErrorInvalidDevicePointer{{endif}} {{if 'cudaErrorInvalidTexture' in found_values}} #: This indicates that the texture passed to the API call is not a #: valid texture. - cudaErrorInvalidTexture = ccudart.cudaError.cudaErrorInvalidTexture{{endif}} + cudaErrorInvalidTexture = cyruntime.cudaError.cudaErrorInvalidTexture{{endif}} {{if 'cudaErrorInvalidTextureBinding' in found_values}} #: This indicates that the texture binding is not valid. This occurs if #: you call :py:obj:`~.cudaGetTextureAlignmentOffset()` with an unbound #: texture. 
- cudaErrorInvalidTextureBinding = ccudart.cudaError.cudaErrorInvalidTextureBinding{{endif}} + cudaErrorInvalidTextureBinding = cyruntime.cudaError.cudaErrorInvalidTextureBinding{{endif}} {{if 'cudaErrorInvalidChannelDescriptor' in found_values}} #: This indicates that the channel descriptor passed to the API call is #: not valid. This occurs if the format is not one of the formats #: specified by :py:obj:`~.cudaChannelFormatKind`, or if one of the #: dimensions is invalid. - cudaErrorInvalidChannelDescriptor = ccudart.cudaError.cudaErrorInvalidChannelDescriptor{{endif}} + cudaErrorInvalidChannelDescriptor = cyruntime.cudaError.cudaErrorInvalidChannelDescriptor{{endif}} {{if 'cudaErrorInvalidMemcpyDirection' in found_values}} #: This indicates that the direction of the memcpy passed to the API #: call is not one of the types specified by #: :py:obj:`~.cudaMemcpyKind`. - cudaErrorInvalidMemcpyDirection = ccudart.cudaError.cudaErrorInvalidMemcpyDirection{{endif}} + cudaErrorInvalidMemcpyDirection = cyruntime.cudaError.cudaErrorInvalidMemcpyDirection{{endif}} {{if 'cudaErrorAddressOfConstant' in found_values}} #: This indicated that the user has taken the address of a constant #: variable, which was forbidden up until the CUDA 3.1 release. #: [Deprecated] - cudaErrorAddressOfConstant = ccudart.cudaError.cudaErrorAddressOfConstant{{endif}} + cudaErrorAddressOfConstant = cyruntime.cudaError.cudaErrorAddressOfConstant{{endif}} {{if 'cudaErrorTextureFetchFailed' in found_values}} #: This indicated that a texture fetch was not able to be performed. #: This was previously used for device emulation of texture operations. #: [Deprecated] - cudaErrorTextureFetchFailed = ccudart.cudaError.cudaErrorTextureFetchFailed{{endif}} + cudaErrorTextureFetchFailed = cyruntime.cudaError.cudaErrorTextureFetchFailed{{endif}} {{if 'cudaErrorTextureNotBound' in found_values}} #: This indicated that a texture was not bound for access. This was #: previously used for device emulation of texture operations. #: [Deprecated] - cudaErrorTextureNotBound = ccudart.cudaError.cudaErrorTextureNotBound{{endif}} + cudaErrorTextureNotBound = cyruntime.cudaError.cudaErrorTextureNotBound{{endif}} {{if 'cudaErrorSynchronizationError' in found_values}} #: This indicated that a synchronization operation had failed. This was #: previously used for some device emulation functions. [Deprecated] - cudaErrorSynchronizationError = ccudart.cudaError.cudaErrorSynchronizationError{{endif}} + cudaErrorSynchronizationError = cyruntime.cudaError.cudaErrorSynchronizationError{{endif}} {{if 'cudaErrorInvalidFilterSetting' in found_values}} #: This indicates that a non-float texture was being accessed with #: linear filtering. This is not supported by CUDA. - cudaErrorInvalidFilterSetting = ccudart.cudaError.cudaErrorInvalidFilterSetting{{endif}} + cudaErrorInvalidFilterSetting = cyruntime.cudaError.cudaErrorInvalidFilterSetting{{endif}} {{if 'cudaErrorInvalidNormSetting' in found_values}} #: This indicates that an attempt was made to read a non-float texture #: as a normalized float. This is not supported by CUDA. - cudaErrorInvalidNormSetting = ccudart.cudaError.cudaErrorInvalidNormSetting{{endif}} + cudaErrorInvalidNormSetting = cyruntime.cudaError.cudaErrorInvalidNormSetting{{endif}} {{if 'cudaErrorMixedDeviceExecution' in found_values}} #: Mixing of device and device emulation code was not allowed. 
#: [Deprecated] - cudaErrorMixedDeviceExecution = ccudart.cudaError.cudaErrorMixedDeviceExecution{{endif}} + cudaErrorMixedDeviceExecution = cyruntime.cudaError.cudaErrorMixedDeviceExecution{{endif}} {{if 'cudaErrorNotYetImplemented' in found_values}} #: This indicates that the API call is not yet implemented. Production #: releases of CUDA will never return this error. [Deprecated] - cudaErrorNotYetImplemented = ccudart.cudaError.cudaErrorNotYetImplemented{{endif}} + cudaErrorNotYetImplemented = cyruntime.cudaError.cudaErrorNotYetImplemented{{endif}} {{if 'cudaErrorMemoryValueTooLarge' in found_values}} #: This indicated that an emulated device pointer exceeded the 32-bit #: address range. [Deprecated] - cudaErrorMemoryValueTooLarge = ccudart.cudaError.cudaErrorMemoryValueTooLarge{{endif}} + cudaErrorMemoryValueTooLarge = cyruntime.cudaError.cudaErrorMemoryValueTooLarge{{endif}} {{if 'cudaErrorStubLibrary' in found_values}} #: This indicates that the CUDA driver that the application has loaded #: is a stub library. Applications that run with the stub rather than a #: real driver loaded will result in CUDA API returning this error. - cudaErrorStubLibrary = ccudart.cudaError.cudaErrorStubLibrary{{endif}} + cudaErrorStubLibrary = cyruntime.cudaError.cudaErrorStubLibrary{{endif}} {{if 'cudaErrorInsufficientDriver' in found_values}} #: This indicates that the installed NVIDIA CUDA driver is older than #: the CUDA runtime library. This is not a supported configuration. #: Users should install an updated NVIDIA display driver to allow the #: application to run. - cudaErrorInsufficientDriver = ccudart.cudaError.cudaErrorInsufficientDriver{{endif}} + cudaErrorInsufficientDriver = cyruntime.cudaError.cudaErrorInsufficientDriver{{endif}} {{if 'cudaErrorCallRequiresNewerDriver' in found_values}} #: This indicates that the API call requires a newer CUDA driver than #: the one currently installed. Users should install an updated NVIDIA #: CUDA driver to allow the API call to succeed. - cudaErrorCallRequiresNewerDriver = ccudart.cudaError.cudaErrorCallRequiresNewerDriver{{endif}} + cudaErrorCallRequiresNewerDriver = cyruntime.cudaError.cudaErrorCallRequiresNewerDriver{{endif}} {{if 'cudaErrorInvalidSurface' in found_values}} #: This indicates that the surface passed to the API call is not a #: valid surface. - cudaErrorInvalidSurface = ccudart.cudaError.cudaErrorInvalidSurface{{endif}} + cudaErrorInvalidSurface = cyruntime.cudaError.cudaErrorInvalidSurface{{endif}} {{if 'cudaErrorDuplicateVariableName' in found_values}} #: This indicates that multiple global or constant variables (across #: separate CUDA source files in the application) share the same string #: name. - cudaErrorDuplicateVariableName = ccudart.cudaError.cudaErrorDuplicateVariableName{{endif}} + cudaErrorDuplicateVariableName = cyruntime.cudaError.cudaErrorDuplicateVariableName{{endif}} {{if 'cudaErrorDuplicateTextureName' in found_values}} #: This indicates that multiple textures (across separate CUDA source #: files in the application) share the same string name. - cudaErrorDuplicateTextureName = ccudart.cudaError.cudaErrorDuplicateTextureName{{endif}} + cudaErrorDuplicateTextureName = cyruntime.cudaError.cudaErrorDuplicateTextureName{{endif}} {{if 'cudaErrorDuplicateSurfaceName' in found_values}} #: This indicates that multiple surfaces (across separate CUDA source #: files in the application) share the same string name. 
- cudaErrorDuplicateSurfaceName = ccudart.cudaError.cudaErrorDuplicateSurfaceName{{endif}} + cudaErrorDuplicateSurfaceName = cyruntime.cudaError.cudaErrorDuplicateSurfaceName{{endif}} {{if 'cudaErrorDevicesUnavailable' in found_values}} #: This indicates that all CUDA devices are busy or unavailable at the @@ -540,7 +540,7 @@ class cudaError_t(IntEnum): #: CUDA kernels have filled up the GPU and are blocking new work from #: starting. They can also be unavailable due to memory constraints on #: a device that already has active CUDA work being performed. - cudaErrorDevicesUnavailable = ccudart.cudaError.cudaErrorDevicesUnavailable{{endif}} + cudaErrorDevicesUnavailable = cyruntime.cudaError.cudaErrorDevicesUnavailable{{endif}} {{if 'cudaErrorIncompatibleDriverContext' in found_values}} #: This indicates that the current context is not compatible with this @@ -552,39 +552,39 @@ class cudaError_t(IntEnum): #: context and the Driver context is not primary, or because the Driver #: context has been destroyed. Please see :py:obj:`~.Interactions`with #: the CUDA Driver API" for more information. - cudaErrorIncompatibleDriverContext = ccudart.cudaError.cudaErrorIncompatibleDriverContext{{endif}} + cudaErrorIncompatibleDriverContext = cyruntime.cudaError.cudaErrorIncompatibleDriverContext{{endif}} {{if 'cudaErrorMissingConfiguration' in found_values}} #: The device function being invoked (usually via #: :py:obj:`~.cudaLaunchKernel()`) was not previously configured via #: the :py:obj:`~.cudaConfigureCall()` function. - cudaErrorMissingConfiguration = ccudart.cudaError.cudaErrorMissingConfiguration{{endif}} + cudaErrorMissingConfiguration = cyruntime.cudaError.cudaErrorMissingConfiguration{{endif}} {{if 'cudaErrorPriorLaunchFailure' in found_values}} #: This indicated that a previous kernel launch failed. This was #: previously used for device emulation of kernel launches. #: [Deprecated] - cudaErrorPriorLaunchFailure = ccudart.cudaError.cudaErrorPriorLaunchFailure{{endif}} + cudaErrorPriorLaunchFailure = cyruntime.cudaError.cudaErrorPriorLaunchFailure{{endif}} {{if 'cudaErrorLaunchMaxDepthExceeded' in found_values}} #: This error indicates that a device runtime grid launch did not occur #: because the depth of the child grid would exceed the maximum #: supported number of nested grid launches. - cudaErrorLaunchMaxDepthExceeded = ccudart.cudaError.cudaErrorLaunchMaxDepthExceeded{{endif}} + cudaErrorLaunchMaxDepthExceeded = cyruntime.cudaError.cudaErrorLaunchMaxDepthExceeded{{endif}} {{if 'cudaErrorLaunchFileScopedTex' in found_values}} #: This error indicates that a grid launch did not occur because the #: kernel uses file-scoped textures which are unsupported by the device #: runtime. Kernels launched via the device runtime only support #: textures created with the Texture Object API's. - cudaErrorLaunchFileScopedTex = ccudart.cudaError.cudaErrorLaunchFileScopedTex{{endif}} + cudaErrorLaunchFileScopedTex = cyruntime.cudaError.cudaErrorLaunchFileScopedTex{{endif}} {{if 'cudaErrorLaunchFileScopedSurf' in found_values}} #: This error indicates that a grid launch did not occur because the #: kernel uses file-scoped surfaces which are unsupported by the device #: runtime. Kernels launched via the device runtime only support #: surfaces created with the Surface Object API's. 
- cudaErrorLaunchFileScopedSurf = ccudart.cudaError.cudaErrorLaunchFileScopedSurf{{endif}} + cudaErrorLaunchFileScopedSurf = cyruntime.cudaError.cudaErrorLaunchFileScopedSurf{{endif}} {{if 'cudaErrorSyncDepthExceeded' in found_values}} #: This error indicates that a call to @@ -602,7 +602,7 @@ class cudaError_t(IntEnum): #: memory that cannot be used for user allocations. Note that #: :py:obj:`~.cudaDeviceSynchronize` made from device runtime is only #: supported on devices of compute capability < 9.0. - cudaErrorSyncDepthExceeded = ccudart.cudaError.cudaErrorSyncDepthExceeded{{endif}} + cudaErrorSyncDepthExceeded = cyruntime.cudaError.cudaErrorSyncDepthExceeded{{endif}} {{if 'cudaErrorLaunchPendingCountExceeded' in found_values}} #: This error indicates that a device runtime grid launch failed @@ -614,27 +614,27 @@ class cudaError_t(IntEnum): #: be issued to the device runtime. Keep in mind that raising the limit #: of pending device runtime launches will require the runtime to #: reserve device memory that cannot be used for user allocations. - cudaErrorLaunchPendingCountExceeded = ccudart.cudaError.cudaErrorLaunchPendingCountExceeded{{endif}} + cudaErrorLaunchPendingCountExceeded = cyruntime.cudaError.cudaErrorLaunchPendingCountExceeded{{endif}} {{if 'cudaErrorInvalidDeviceFunction' in found_values}} #: The requested device function does not exist or is not compiled for #: the proper device architecture. - cudaErrorInvalidDeviceFunction = ccudart.cudaError.cudaErrorInvalidDeviceFunction{{endif}} + cudaErrorInvalidDeviceFunction = cyruntime.cudaError.cudaErrorInvalidDeviceFunction{{endif}} {{if 'cudaErrorNoDevice' in found_values}} #: This indicates that no CUDA-capable devices were detected by the #: installed CUDA driver. - cudaErrorNoDevice = ccudart.cudaError.cudaErrorNoDevice{{endif}} + cudaErrorNoDevice = cyruntime.cudaError.cudaErrorNoDevice{{endif}} {{if 'cudaErrorInvalidDevice' in found_values}} #: This indicates that the device ordinal supplied by the user does not #: correspond to a valid CUDA device or that the action requested is #: invalid for the specified device. - cudaErrorInvalidDevice = ccudart.cudaError.cudaErrorInvalidDevice{{endif}} + cudaErrorInvalidDevice = cyruntime.cudaError.cudaErrorInvalidDevice{{endif}} {{if 'cudaErrorDeviceNotLicensed' in found_values}} #: This indicates that the device doesn't have a valid Grid License. - cudaErrorDeviceNotLicensed = ccudart.cudaError.cudaErrorDeviceNotLicensed{{endif}} + cudaErrorDeviceNotLicensed = cyruntime.cudaError.cudaErrorDeviceNotLicensed{{endif}} {{if 'cudaErrorSoftwareValidityNotEstablished' in found_values}} #: By default, the CUDA runtime may perform a minimal set of self- @@ -642,15 +642,15 @@ class cudaError_t(IntEnum): #: both. Introduced in CUDA 11.2, this error return indicates that at #: least one of these tests has failed and the validity of either the #: runtime or the driver could not be established. - cudaErrorSoftwareValidityNotEstablished = ccudart.cudaError.cudaErrorSoftwareValidityNotEstablished{{endif}} + cudaErrorSoftwareValidityNotEstablished = cyruntime.cudaError.cudaErrorSoftwareValidityNotEstablished{{endif}} {{if 'cudaErrorStartupFailure' in found_values}} #: This indicates an internal startup failure in the CUDA runtime. - cudaErrorStartupFailure = ccudart.cudaError.cudaErrorStartupFailure{{endif}} + cudaErrorStartupFailure = cyruntime.cudaError.cudaErrorStartupFailure{{endif}} {{if 'cudaErrorInvalidKernelImage' in found_values}} #: This indicates that the device kernel image is invalid. 
- cudaErrorInvalidKernelImage = ccudart.cudaError.cudaErrorInvalidKernelImage{{endif}} + cudaErrorInvalidKernelImage = cyruntime.cudaError.cudaErrorInvalidKernelImage{{endif}} {{if 'cudaErrorDeviceUninitialized' in found_values}} #: This most frequently indicates that there is no context bound to the @@ -659,146 +659,146 @@ class cudaError_t(IntEnum): #: :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned #: if a user mixes different API versions (i.e. 3010 context with 3020 #: API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. - cudaErrorDeviceUninitialized = ccudart.cudaError.cudaErrorDeviceUninitialized{{endif}} + cudaErrorDeviceUninitialized = cyruntime.cudaError.cudaErrorDeviceUninitialized{{endif}} {{if 'cudaErrorMapBufferObjectFailed' in found_values}} #: This indicates that the buffer object could not be mapped. - cudaErrorMapBufferObjectFailed = ccudart.cudaError.cudaErrorMapBufferObjectFailed{{endif}} + cudaErrorMapBufferObjectFailed = cyruntime.cudaError.cudaErrorMapBufferObjectFailed{{endif}} {{if 'cudaErrorUnmapBufferObjectFailed' in found_values}} #: This indicates that the buffer object could not be unmapped. - cudaErrorUnmapBufferObjectFailed = ccudart.cudaError.cudaErrorUnmapBufferObjectFailed{{endif}} + cudaErrorUnmapBufferObjectFailed = cyruntime.cudaError.cudaErrorUnmapBufferObjectFailed{{endif}} {{if 'cudaErrorArrayIsMapped' in found_values}} #: This indicates that the specified array is currently mapped and thus #: cannot be destroyed. - cudaErrorArrayIsMapped = ccudart.cudaError.cudaErrorArrayIsMapped{{endif}} + cudaErrorArrayIsMapped = cyruntime.cudaError.cudaErrorArrayIsMapped{{endif}} {{if 'cudaErrorAlreadyMapped' in found_values}} #: This indicates that the resource is already mapped. - cudaErrorAlreadyMapped = ccudart.cudaError.cudaErrorAlreadyMapped{{endif}} + cudaErrorAlreadyMapped = cyruntime.cudaError.cudaErrorAlreadyMapped{{endif}} {{if 'cudaErrorNoKernelImageForDevice' in found_values}} #: This indicates that there is no kernel image available that is #: suitable for the device. This can occur when a user specifies code #: generation options for a particular CUDA source file that do not #: include the corresponding device configuration. - cudaErrorNoKernelImageForDevice = ccudart.cudaError.cudaErrorNoKernelImageForDevice{{endif}} + cudaErrorNoKernelImageForDevice = cyruntime.cudaError.cudaErrorNoKernelImageForDevice{{endif}} {{if 'cudaErrorAlreadyAcquired' in found_values}} #: This indicates that a resource has already been acquired. - cudaErrorAlreadyAcquired = ccudart.cudaError.cudaErrorAlreadyAcquired{{endif}} + cudaErrorAlreadyAcquired = cyruntime.cudaError.cudaErrorAlreadyAcquired{{endif}} {{if 'cudaErrorNotMapped' in found_values}} #: This indicates that a resource is not mapped. - cudaErrorNotMapped = ccudart.cudaError.cudaErrorNotMapped{{endif}} + cudaErrorNotMapped = cyruntime.cudaError.cudaErrorNotMapped{{endif}} {{if 'cudaErrorNotMappedAsArray' in found_values}} #: This indicates that a mapped resource is not available for access as #: an array. - cudaErrorNotMappedAsArray = ccudart.cudaError.cudaErrorNotMappedAsArray{{endif}} + cudaErrorNotMappedAsArray = cyruntime.cudaError.cudaErrorNotMappedAsArray{{endif}} {{if 'cudaErrorNotMappedAsPointer' in found_values}} #: This indicates that a mapped resource is not available for access as #: a pointer. 
- cudaErrorNotMappedAsPointer = ccudart.cudaError.cudaErrorNotMappedAsPointer{{endif}} + cudaErrorNotMappedAsPointer = cyruntime.cudaError.cudaErrorNotMappedAsPointer{{endif}} {{if 'cudaErrorECCUncorrectable' in found_values}} #: This indicates that an uncorrectable ECC error was detected during #: execution. - cudaErrorECCUncorrectable = ccudart.cudaError.cudaErrorECCUncorrectable{{endif}} + cudaErrorECCUncorrectable = cyruntime.cudaError.cudaErrorECCUncorrectable{{endif}} {{if 'cudaErrorUnsupportedLimit' in found_values}} #: This indicates that the :py:obj:`~.cudaLimit` passed to the API call #: is not supported by the active device. - cudaErrorUnsupportedLimit = ccudart.cudaError.cudaErrorUnsupportedLimit{{endif}} + cudaErrorUnsupportedLimit = cyruntime.cudaError.cudaErrorUnsupportedLimit{{endif}} {{if 'cudaErrorDeviceAlreadyInUse' in found_values}} #: This indicates that a call tried to access an exclusive-thread #: device that is already in use by a different thread. - cudaErrorDeviceAlreadyInUse = ccudart.cudaError.cudaErrorDeviceAlreadyInUse{{endif}} + cudaErrorDeviceAlreadyInUse = cyruntime.cudaError.cudaErrorDeviceAlreadyInUse{{endif}} {{if 'cudaErrorPeerAccessUnsupported' in found_values}} #: This error indicates that P2P access is not supported across the #: given devices. - cudaErrorPeerAccessUnsupported = ccudart.cudaError.cudaErrorPeerAccessUnsupported{{endif}} + cudaErrorPeerAccessUnsupported = cyruntime.cudaError.cudaErrorPeerAccessUnsupported{{endif}} {{if 'cudaErrorInvalidPtx' in found_values}} #: A PTX compilation failed. The runtime may fall back to compiling PTX #: if an application does not contain a suitable binary for the current #: device. - cudaErrorInvalidPtx = ccudart.cudaError.cudaErrorInvalidPtx{{endif}} + cudaErrorInvalidPtx = cyruntime.cudaError.cudaErrorInvalidPtx{{endif}} {{if 'cudaErrorInvalidGraphicsContext' in found_values}} #: This indicates an error with the OpenGL or DirectX context. - cudaErrorInvalidGraphicsContext = ccudart.cudaError.cudaErrorInvalidGraphicsContext{{endif}} + cudaErrorInvalidGraphicsContext = cyruntime.cudaError.cudaErrorInvalidGraphicsContext{{endif}} {{if 'cudaErrorNvlinkUncorrectable' in found_values}} #: This indicates that an uncorrectable NVLink error was detected #: during the execution. - cudaErrorNvlinkUncorrectable = ccudart.cudaError.cudaErrorNvlinkUncorrectable{{endif}} + cudaErrorNvlinkUncorrectable = cyruntime.cudaError.cudaErrorNvlinkUncorrectable{{endif}} {{if 'cudaErrorJitCompilerNotFound' in found_values}} #: This indicates that the PTX JIT compiler library was not found. The #: JIT Compiler library is used for PTX compilation. The runtime may #: fall back to compiling PTX if an application does not contain a #: suitable binary for the current device. - cudaErrorJitCompilerNotFound = ccudart.cudaError.cudaErrorJitCompilerNotFound{{endif}} + cudaErrorJitCompilerNotFound = cyruntime.cudaError.cudaErrorJitCompilerNotFound{{endif}} {{if 'cudaErrorUnsupportedPtxVersion' in found_values}} #: This indicates that the provided PTX was compiled with an #: unsupported toolchain. The most common reason for this, is the PTX #: was generated by a compiler newer than what is supported by the CUDA #: driver and PTX JIT compiler. - cudaErrorUnsupportedPtxVersion = ccudart.cudaError.cudaErrorUnsupportedPtxVersion{{endif}} + cudaErrorUnsupportedPtxVersion = cyruntime.cudaError.cudaErrorUnsupportedPtxVersion{{endif}} {{if 'cudaErrorJitCompilationDisabled' in found_values}} #: This indicates that the JIT compilation was disabled. 
The JIT #: compilation compiles PTX. The runtime may fall back to compiling PTX #: if an application does not contain a suitable binary for the current #: device. - cudaErrorJitCompilationDisabled = ccudart.cudaError.cudaErrorJitCompilationDisabled{{endif}} + cudaErrorJitCompilationDisabled = cyruntime.cudaError.cudaErrorJitCompilationDisabled{{endif}} {{if 'cudaErrorUnsupportedExecAffinity' in found_values}} #: This indicates that the provided execution affinity is not supported #: by the device. - cudaErrorUnsupportedExecAffinity = ccudart.cudaError.cudaErrorUnsupportedExecAffinity{{endif}} + cudaErrorUnsupportedExecAffinity = cyruntime.cudaError.cudaErrorUnsupportedExecAffinity{{endif}} {{if 'cudaErrorUnsupportedDevSideSync' in found_values}} #: This indicates that the code to be compiled by the PTX JIT contains an #: unsupported call to cudaDeviceSynchronize. - cudaErrorUnsupportedDevSideSync = ccudart.cudaError.cudaErrorUnsupportedDevSideSync{{endif}} + cudaErrorUnsupportedDevSideSync = cyruntime.cudaError.cudaErrorUnsupportedDevSideSync{{endif}} {{if 'cudaErrorInvalidSource' in found_values}} #: This indicates that the device kernel source is invalid. - cudaErrorInvalidSource = ccudart.cudaError.cudaErrorInvalidSource{{endif}} + cudaErrorInvalidSource = cyruntime.cudaError.cudaErrorInvalidSource{{endif}} {{if 'cudaErrorFileNotFound' in found_values}} #: This indicates that the file specified was not found. - cudaErrorFileNotFound = ccudart.cudaError.cudaErrorFileNotFound{{endif}} + cudaErrorFileNotFound = cyruntime.cudaError.cudaErrorFileNotFound{{endif}} {{if 'cudaErrorSharedObjectSymbolNotFound' in found_values}} #: This indicates that a link to a shared object failed to resolve. - cudaErrorSharedObjectSymbolNotFound = ccudart.cudaError.cudaErrorSharedObjectSymbolNotFound{{endif}} + cudaErrorSharedObjectSymbolNotFound = cyruntime.cudaError.cudaErrorSharedObjectSymbolNotFound{{endif}} {{if 'cudaErrorSharedObjectInitFailed' in found_values}} #: This indicates that initialization of a shared object failed. - cudaErrorSharedObjectInitFailed = ccudart.cudaError.cudaErrorSharedObjectInitFailed{{endif}} + cudaErrorSharedObjectInitFailed = cyruntime.cudaError.cudaErrorSharedObjectInitFailed{{endif}} {{if 'cudaErrorOperatingSystem' in found_values}} #: This error indicates that an OS call failed. - cudaErrorOperatingSystem = ccudart.cudaError.cudaErrorOperatingSystem{{endif}} + cudaErrorOperatingSystem = cyruntime.cudaError.cudaErrorOperatingSystem{{endif}} {{if 'cudaErrorInvalidResourceHandle' in found_values}} #: This indicates that a resource handle passed to the API call was not #: valid. Resource handles are opaque types like #: :py:obj:`~.cudaStream_t` and :py:obj:`~.cudaEvent_t`. - cudaErrorInvalidResourceHandle = ccudart.cudaError.cudaErrorInvalidResourceHandle{{endif}} + cudaErrorInvalidResourceHandle = cyruntime.cudaError.cudaErrorInvalidResourceHandle{{endif}} {{if 'cudaErrorIllegalState' in found_values}} #: This indicates that a resource required by the API call is not in a #: valid state to perform the requested operation. - cudaErrorIllegalState = ccudart.cudaError.cudaErrorIllegalState{{endif}} + cudaErrorIllegalState = cyruntime.cudaError.cudaErrorIllegalState{{endif}} {{if 'cudaErrorLossyQuery' in found_values}} #: This indicates an attempt was made to introspect an object in a way @@ -806,13 +806,13 @@ class cudaError_t(IntEnum): #: either due to the object using functionality newer than the API #: version used to introspect it or omission of optional return #: arguments.
- cudaErrorLossyQuery = ccudart.cudaError.cudaErrorLossyQuery{{endif}} + cudaErrorLossyQuery = cyruntime.cudaError.cudaErrorLossyQuery{{endif}} {{if 'cudaErrorSymbolNotFound' in found_values}} #: This indicates that a named symbol was not found. Examples of #: symbols are global/constant variable names, driver function names, #: texture names, and surface names. - cudaErrorSymbolNotFound = ccudart.cudaError.cudaErrorSymbolNotFound{{endif}} + cudaErrorSymbolNotFound = cyruntime.cudaError.cudaErrorSymbolNotFound{{endif}} {{if 'cudaErrorNotReady' in found_values}} #: This indicates that asynchronous operations issued previously have @@ -820,14 +820,14 @@ class cudaError_t(IntEnum): #: indicated differently than :py:obj:`~.cudaSuccess` (which indicates #: completion). Calls that may return this value include #: :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`. - cudaErrorNotReady = ccudart.cudaError.cudaErrorNotReady{{endif}} + cudaErrorNotReady = cyruntime.cudaError.cudaErrorNotReady{{endif}} {{if 'cudaErrorIllegalAddress' in found_values}} #: The device encountered a load or store instruction on an invalid #: memory address. This leaves the process in an inconsistent state and #: any further CUDA work will return the same error. To continue using #: CUDA, the process must be terminated and relaunched. - cudaErrorIllegalAddress = ccudart.cudaError.cudaErrorIllegalAddress{{endif}} + cudaErrorIllegalAddress = cyruntime.cudaError.cudaErrorIllegalAddress{{endif}} {{if 'cudaErrorLaunchOutOfResources' in found_values}} #: This indicates that a launch did not occur because it did not have @@ -836,7 +836,7 @@ class cudaError_t(IntEnum): #: indicates that the user has attempted to pass too many arguments to #: the device kernel, or the kernel launch specifies too many threads #: for the kernel's register count. - cudaErrorLaunchOutOfResources = ccudart.cudaError.cudaErrorLaunchOutOfResources{{endif}} + cudaErrorLaunchOutOfResources = cyruntime.cudaError.cudaErrorLaunchOutOfResources{{endif}} {{if 'cudaErrorLaunchTimeout' in found_values}} #: This indicates that the device kernel took too long to execute. This @@ -845,25 +845,25 @@ class cudaError_t(IntEnum): #: leaves the process in an inconsistent state and any further CUDA #: work will return the same error. To continue using CUDA, the process #: must be terminated and relaunched. - cudaErrorLaunchTimeout = ccudart.cudaError.cudaErrorLaunchTimeout{{endif}} + cudaErrorLaunchTimeout = cyruntime.cudaError.cudaErrorLaunchTimeout{{endif}} {{if 'cudaErrorLaunchIncompatibleTexturing' in found_values}} #: This error indicates a kernel launch that uses an incompatible #: texturing mode. - cudaErrorLaunchIncompatibleTexturing = ccudart.cudaError.cudaErrorLaunchIncompatibleTexturing{{endif}} + cudaErrorLaunchIncompatibleTexturing = cyruntime.cudaError.cudaErrorLaunchIncompatibleTexturing{{endif}} {{if 'cudaErrorPeerAccessAlreadyEnabled' in found_values}} #: This error indicates that a call to #: :py:obj:`~.cudaDeviceEnablePeerAccess()` is trying to re-enable peer #: addressing from a context which has already had peer addressing #: enabled.
- cudaErrorPeerAccessAlreadyEnabled = ccudart.cudaError.cudaErrorPeerAccessAlreadyEnabled{{endif}} + cudaErrorPeerAccessAlreadyEnabled = cyruntime.cudaError.cudaErrorPeerAccessAlreadyEnabled{{endif}} {{if 'cudaErrorPeerAccessNotEnabled' in found_values}} #: This error indicates that :py:obj:`~.cudaDeviceDisablePeerAccess()` #: is trying to disable peer addressing which has not been enabled yet #: via :py:obj:`~.cudaDeviceEnablePeerAccess()`. - cudaErrorPeerAccessNotEnabled = ccudart.cudaError.cudaErrorPeerAccessNotEnabled{{endif}} + cudaErrorPeerAccessNotEnabled = cyruntime.cudaError.cudaErrorPeerAccessNotEnabled{{endif}} {{if 'cudaErrorSetOnActiveProcess' in found_values}} #: This indicates that the user has called @@ -877,37 +877,37 @@ class cudaError_t(IntEnum): #: operations). This error can also be returned if using runtime/driver #: interoperability and there is an existing :py:obj:`~.CUcontext` #: active on the host thread. - cudaErrorSetOnActiveProcess = ccudart.cudaError.cudaErrorSetOnActiveProcess{{endif}} + cudaErrorSetOnActiveProcess = cyruntime.cudaError.cudaErrorSetOnActiveProcess{{endif}} {{if 'cudaErrorContextIsDestroyed' in found_values}} #: This error indicates that the context current to the calling thread #: has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary #: context which has not yet been initialized. - cudaErrorContextIsDestroyed = ccudart.cudaError.cudaErrorContextIsDestroyed{{endif}} + cudaErrorContextIsDestroyed = cyruntime.cudaError.cudaErrorContextIsDestroyed{{endif}} {{if 'cudaErrorAssert' in found_values}} #: An assert triggered in device code during kernel execution. The #: device cannot be used again. All existing allocations are invalid. #: To continue using CUDA, the process must be terminated and #: relaunched. - cudaErrorAssert = ccudart.cudaError.cudaErrorAssert{{endif}} + cudaErrorAssert = cyruntime.cudaError.cudaErrorAssert{{endif}} {{if 'cudaErrorTooManyPeers' in found_values}} #: This error indicates that the hardware resources required to enable #: peer access have been exhausted for one or more of the devices #: passed to :py:obj:`~.cudaEnablePeerAccess()`. - cudaErrorTooManyPeers = ccudart.cudaError.cudaErrorTooManyPeers{{endif}} + cudaErrorTooManyPeers = cyruntime.cudaError.cudaErrorTooManyPeers{{endif}} {{if 'cudaErrorHostMemoryAlreadyRegistered' in found_values}} #: This error indicates that the memory range passed to #: :py:obj:`~.cudaHostRegister()` has already been registered. - cudaErrorHostMemoryAlreadyRegistered = ccudart.cudaError.cudaErrorHostMemoryAlreadyRegistered{{endif}} + cudaErrorHostMemoryAlreadyRegistered = cyruntime.cudaError.cudaErrorHostMemoryAlreadyRegistered{{endif}} {{if 'cudaErrorHostMemoryNotRegistered' in found_values}} #: This error indicates that the pointer passed to #: :py:obj:`~.cudaHostUnregister()` does not correspond to any #: currently registered memory region. - cudaErrorHostMemoryNotRegistered = ccudart.cudaError.cudaErrorHostMemoryNotRegistered{{endif}} + cudaErrorHostMemoryNotRegistered = cyruntime.cudaError.cudaErrorHostMemoryNotRegistered{{endif}} {{if 'cudaErrorHardwareStackError' in found_values}} #: Device encountered an error in the call stack during kernel @@ -915,14 +915,14 @@ class cudaError_t(IntEnum): #: size limit. This leaves the process in an inconsistent state and any #: further CUDA work will return the same error. To continue using #: CUDA, the process must be terminated and relaunched. 
- cudaErrorHardwareStackError = ccudart.cudaError.cudaErrorHardwareStackError{{endif}} + cudaErrorHardwareStackError = cyruntime.cudaError.cudaErrorHardwareStackError{{endif}} {{if 'cudaErrorIllegalInstruction' in found_values}} #: The device encountered an illegal instruction during kernel #: execution. This leaves the process in an inconsistent state and any #: further CUDA work will return the same error. To continue using #: CUDA, the process must be terminated and relaunched. - cudaErrorIllegalInstruction = ccudart.cudaError.cudaErrorIllegalInstruction{{endif}} + cudaErrorIllegalInstruction = cyruntime.cudaError.cudaErrorIllegalInstruction{{endif}} {{if 'cudaErrorMisalignedAddress' in found_values}} #: The device encountered a load or store instruction on a memory @@ -930,7 +930,7 @@ class cudaError_t(IntEnum): #: inconsistent state and any further CUDA work will return the same #: error. To continue using CUDA, the process must be terminated and #: relaunched. - cudaErrorMisalignedAddress = ccudart.cudaError.cudaErrorMisalignedAddress{{endif}} + cudaErrorMisalignedAddress = cyruntime.cudaError.cudaErrorMisalignedAddress{{endif}} {{if 'cudaErrorInvalidAddressSpace' in found_values}} #: While executing a kernel, the device encountered an instruction @@ -940,14 +940,14 @@ class cudaError_t(IntEnum): #: inconsistent state and any further CUDA work will return the same #: error. To continue using CUDA, the process must be terminated and #: relaunched. - cudaErrorInvalidAddressSpace = ccudart.cudaError.cudaErrorInvalidAddressSpace{{endif}} + cudaErrorInvalidAddressSpace = cyruntime.cudaError.cudaErrorInvalidAddressSpace{{endif}} {{if 'cudaErrorInvalidPc' in found_values}} #: The device encountered an invalid program counter. This leaves the #: process in an inconsistent state and any further CUDA work will #: return the same error. To continue using CUDA, the process must be #: terminated and relaunched. - cudaErrorInvalidPc = ccudart.cudaError.cudaErrorInvalidPc{{endif}} + cudaErrorInvalidPc = cyruntime.cudaError.cudaErrorInvalidPc{{endif}} {{if 'cudaErrorLaunchFailure' in found_values}} #: An exception occurred on the device while executing a kernel. Common @@ -958,7 +958,7 @@ class cudaError_t(IntEnum): #: inconsistent state and any further CUDA work will return the same #: error. To continue using CUDA, the process must be terminated and #: relaunched. - cudaErrorLaunchFailure = ccudart.cudaError.cudaErrorLaunchFailure{{endif}} + cudaErrorLaunchFailure = cyruntime.cudaError.cudaErrorLaunchFailure{{endif}} {{if 'cudaErrorCooperativeLaunchTooLarge' in found_values}} #: This error indicates that the number of blocks launched per grid for @@ -970,16 +970,16 @@ class cudaError_t(IntEnum): #: :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` #: times the number of multiprocessors as specified by the device #: attribute :py:obj:`~.cudaDevAttrMultiProcessorCount`. - cudaErrorCooperativeLaunchTooLarge = ccudart.cudaError.cudaErrorCooperativeLaunchTooLarge{{endif}} + cudaErrorCooperativeLaunchTooLarge = cyruntime.cudaError.cudaErrorCooperativeLaunchTooLarge{{endif}} {{if 'cudaErrorNotPermitted' in found_values}} #: This error indicates the attempted operation is not permitted. - cudaErrorNotPermitted = ccudart.cudaError.cudaErrorNotPermitted{{endif}} + cudaErrorNotPermitted = cyruntime.cudaError.cudaErrorNotPermitted{{endif}} {{if 'cudaErrorNotSupported' in found_values}} #: This error indicates the attempted operation is not supported on the #: current system or device.
- cudaErrorNotSupported = ccudart.cudaError.cudaErrorNotSupported{{endif}} + cudaErrorNotSupported = cyruntime.cudaError.cudaErrorNotSupported{{endif}} {{if 'cudaErrorSystemNotReady' in found_values}} #: This error indicates that the system is not yet ready to start any @@ -987,13 +987,13 @@ class cudaError_t(IntEnum): #: is in a valid state and all required driver daemons are actively #: running. More information about this error can be found in the #: system specific user guide. - cudaErrorSystemNotReady = ccudart.cudaError.cudaErrorSystemNotReady{{endif}} + cudaErrorSystemNotReady = cyruntime.cudaError.cudaErrorSystemNotReady{{endif}} {{if 'cudaErrorSystemDriverMismatch' in found_values}} #: This error indicates that there is a mismatch between the versions #: of the display driver and the CUDA driver. Refer to the #: compatibility documentation for supported versions. - cudaErrorSystemDriverMismatch = ccudart.cudaError.cudaErrorSystemDriverMismatch{{endif}} + cudaErrorSystemDriverMismatch = cyruntime.cudaError.cudaErrorSystemDriverMismatch{{endif}} {{if 'cudaErrorCompatNotSupportedOnDevice' in found_values}} #: This error indicates that the system was upgraded to run with @@ -1002,106 +1002,106 @@ class cudaError_t(IntEnum): #: documentation for the supported hardware matrix or ensure that only #: supported hardware is visible during initialization via the #: CUDA_VISIBLE_DEVICES environment variable. - cudaErrorCompatNotSupportedOnDevice = ccudart.cudaError.cudaErrorCompatNotSupportedOnDevice{{endif}} + cudaErrorCompatNotSupportedOnDevice = cyruntime.cudaError.cudaErrorCompatNotSupportedOnDevice{{endif}} {{if 'cudaErrorMpsConnectionFailed' in found_values}} #: This error indicates that the MPS client failed to connect to the #: MPS control daemon or the MPS server. - cudaErrorMpsConnectionFailed = ccudart.cudaError.cudaErrorMpsConnectionFailed{{endif}} + cudaErrorMpsConnectionFailed = cyruntime.cudaError.cudaErrorMpsConnectionFailed{{endif}} {{if 'cudaErrorMpsRpcFailure' in found_values}} #: This error indicates that the remote procedure call between the MPS #: server and the MPS client failed. - cudaErrorMpsRpcFailure = ccudart.cudaError.cudaErrorMpsRpcFailure{{endif}} + cudaErrorMpsRpcFailure = cyruntime.cudaError.cudaErrorMpsRpcFailure{{endif}} {{if 'cudaErrorMpsServerNotReady' in found_values}} #: This error indicates that the MPS server is not ready to accept new #: MPS client requests. This error can be returned when the MPS server #: is in the process of recovering from a fatal failure. - cudaErrorMpsServerNotReady = ccudart.cudaError.cudaErrorMpsServerNotReady{{endif}} + cudaErrorMpsServerNotReady = cyruntime.cudaError.cudaErrorMpsServerNotReady{{endif}} {{if 'cudaErrorMpsMaxClientsReached' in found_values}} #: This error indicates that the hardware resources required to create #: an MPS client have been exhausted. - cudaErrorMpsMaxClientsReached = ccudart.cudaError.cudaErrorMpsMaxClientsReached{{endif}} + cudaErrorMpsMaxClientsReached = cyruntime.cudaError.cudaErrorMpsMaxClientsReached{{endif}} {{if 'cudaErrorMpsMaxConnectionsReached' in found_values}} #: This error indicates that the hardware resources required to support device #: connections have been exhausted.
- cudaErrorMpsMaxConnectionsReached = ccudart.cudaError.cudaErrorMpsMaxConnectionsReached{{endif}} + cudaErrorMpsMaxConnectionsReached = cyruntime.cudaError.cudaErrorMpsMaxConnectionsReached{{endif}} {{if 'cudaErrorMpsClientTerminated' in found_values}} #: This error indicates that the MPS client has been terminated by the #: server. To continue using CUDA, the process must be terminated and #: relaunched. - cudaErrorMpsClientTerminated = ccudart.cudaError.cudaErrorMpsClientTerminated{{endif}} + cudaErrorMpsClientTerminated = cyruntime.cudaError.cudaErrorMpsClientTerminated{{endif}} {{if 'cudaErrorCdpNotSupported' in found_values}} #: This error indicates that the program is using CUDA Dynamic #: Parallelism, but the current configuration, like MPS, does not #: support it. - cudaErrorCdpNotSupported = ccudart.cudaError.cudaErrorCdpNotSupported{{endif}} + cudaErrorCdpNotSupported = cyruntime.cudaError.cudaErrorCdpNotSupported{{endif}} {{if 'cudaErrorCdpVersionMismatch' in found_values}} #: This error indicates that the program contains an unsupported #: interaction between different versions of CUDA Dynamic Parallelism. - cudaErrorCdpVersionMismatch = ccudart.cudaError.cudaErrorCdpVersionMismatch{{endif}} + cudaErrorCdpVersionMismatch = cyruntime.cudaError.cudaErrorCdpVersionMismatch{{endif}} {{if 'cudaErrorStreamCaptureUnsupported' in found_values}} #: The operation is not permitted when the stream is capturing. - cudaErrorStreamCaptureUnsupported = ccudart.cudaError.cudaErrorStreamCaptureUnsupported{{endif}} + cudaErrorStreamCaptureUnsupported = cyruntime.cudaError.cudaErrorStreamCaptureUnsupported{{endif}} {{if 'cudaErrorStreamCaptureInvalidated' in found_values}} #: The current capture sequence on the stream has been invalidated due #: to a previous error. - cudaErrorStreamCaptureInvalidated = ccudart.cudaError.cudaErrorStreamCaptureInvalidated{{endif}} + cudaErrorStreamCaptureInvalidated = cyruntime.cudaError.cudaErrorStreamCaptureInvalidated{{endif}} {{if 'cudaErrorStreamCaptureMerge' in found_values}} #: The operation would have resulted in a merge of two independent #: capture sequences. - cudaErrorStreamCaptureMerge = ccudart.cudaError.cudaErrorStreamCaptureMerge{{endif}} + cudaErrorStreamCaptureMerge = cyruntime.cudaError.cudaErrorStreamCaptureMerge{{endif}} {{if 'cudaErrorStreamCaptureUnmatched' in found_values}} #: The capture was not initiated in this stream. - cudaErrorStreamCaptureUnmatched = ccudart.cudaError.cudaErrorStreamCaptureUnmatched{{endif}} + cudaErrorStreamCaptureUnmatched = cyruntime.cudaError.cudaErrorStreamCaptureUnmatched{{endif}} {{if 'cudaErrorStreamCaptureUnjoined' in found_values}} #: The capture sequence contains a fork that was not joined to the #: primary stream. - cudaErrorStreamCaptureUnjoined = ccudart.cudaError.cudaErrorStreamCaptureUnjoined{{endif}} + cudaErrorStreamCaptureUnjoined = cyruntime.cudaError.cudaErrorStreamCaptureUnjoined{{endif}} {{if 'cudaErrorStreamCaptureIsolation' in found_values}} #: A dependency would have been created which crosses the capture #: sequence boundary. Only implicit in-stream ordering dependencies are #: allowed to cross the boundary. - cudaErrorStreamCaptureIsolation = ccudart.cudaError.cudaErrorStreamCaptureIsolation{{endif}} + cudaErrorStreamCaptureIsolation = cyruntime.cudaError.cudaErrorStreamCaptureIsolation{{endif}} {{if 'cudaErrorStreamCaptureImplicit' in found_values}} #: The operation would have resulted in a disallowed implicit #: dependency on a current capture sequence from cudaStreamLegacy.
- cudaErrorStreamCaptureImplicit = ccudart.cudaError.cudaErrorStreamCaptureImplicit{{endif}} + cudaErrorStreamCaptureImplicit = cyruntime.cudaError.cudaErrorStreamCaptureImplicit{{endif}} {{if 'cudaErrorCapturedEvent' in found_values}} #: The operation is not permitted on an event which was last recorded #: in a capturing stream. - cudaErrorCapturedEvent = ccudart.cudaError.cudaErrorCapturedEvent{{endif}} + cudaErrorCapturedEvent = cyruntime.cudaError.cudaErrorCapturedEvent{{endif}} {{if 'cudaErrorStreamCaptureWrongThread' in found_values}} #: A stream capture sequence not initiated with the #: :py:obj:`~.cudaStreamCaptureModeRelaxed` argument to #: :py:obj:`~.cudaStreamBeginCapture` was passed to #: :py:obj:`~.cudaStreamEndCapture` in a different thread. - cudaErrorStreamCaptureWrongThread = ccudart.cudaError.cudaErrorStreamCaptureWrongThread{{endif}} + cudaErrorStreamCaptureWrongThread = cyruntime.cudaError.cudaErrorStreamCaptureWrongThread{{endif}} {{if 'cudaErrorTimeout' in found_values}} #: This indicates that the wait operation has timed out. - cudaErrorTimeout = ccudart.cudaError.cudaErrorTimeout{{endif}} + cudaErrorTimeout = cyruntime.cudaError.cudaErrorTimeout{{endif}} {{if 'cudaErrorGraphExecUpdateFailure' in found_values}} #: This error indicates that the graph update was not performed because #: it included changes which violated constraints specific to #: instantiated graph update. - cudaErrorGraphExecUpdateFailure = ccudart.cudaError.cudaErrorGraphExecUpdateFailure{{endif}} + cudaErrorGraphExecUpdateFailure = cyruntime.cudaError.cudaErrorGraphExecUpdateFailure{{endif}} {{if 'cudaErrorExternalDevice' in found_values}} #: This indicates that an async error has occurred in a device outside @@ -1111,33 +1111,33 @@ class cudaError_t(IntEnum): #: the process in an inconsistent state and any further CUDA work will #: return the same error. To continue using CUDA, the process must be #: terminated and relaunched. - cudaErrorExternalDevice = ccudart.cudaError.cudaErrorExternalDevice{{endif}} + cudaErrorExternalDevice = cyruntime.cudaError.cudaErrorExternalDevice{{endif}} {{if 'cudaErrorInvalidClusterSize' in found_values}} #: This indicates that a kernel launch error has occurred due to #: cluster misconfiguration. - cudaErrorInvalidClusterSize = ccudart.cudaError.cudaErrorInvalidClusterSize{{endif}} + cudaErrorInvalidClusterSize = cyruntime.cudaError.cudaErrorInvalidClusterSize{{endif}} {{if 'cudaErrorFunctionNotLoaded' in found_values}} #: Indicates a function handle is not loaded when calling an API that #: requires a loaded function. - cudaErrorFunctionNotLoaded = ccudart.cudaError.cudaErrorFunctionNotLoaded{{endif}} + cudaErrorFunctionNotLoaded = cyruntime.cudaError.cudaErrorFunctionNotLoaded{{endif}} {{if 'cudaErrorInvalidResourceType' in found_values}} #: This error indicates one or more resources passed in are not valid #: resource types for the operation. - cudaErrorInvalidResourceType = ccudart.cudaError.cudaErrorInvalidResourceType{{endif}} + cudaErrorInvalidResourceType = cyruntime.cudaError.cudaErrorInvalidResourceType{{endif}} {{if 'cudaErrorInvalidResourceConfiguration' in found_values}} #: This error indicates one or more resources are insufficient or non- #: applicable for the operation.
- cudaErrorInvalidResourceConfiguration = ccudart.cudaError.cudaErrorInvalidResourceConfiguration{{endif}} + cudaErrorInvalidResourceConfiguration = cyruntime.cudaError.cudaErrorInvalidResourceConfiguration{{endif}} {{if 'cudaErrorUnknown' in found_values}} #: This indicates that an unknown internal error has occurred. - cudaErrorUnknown = ccudart.cudaError.cudaErrorUnknown{{endif}} + cudaErrorUnknown = cyruntime.cudaError.cudaErrorUnknown{{endif}} {{if 'cudaErrorApiFailureBase' in found_values}} - cudaErrorApiFailureBase = ccudart.cudaError.cudaErrorApiFailureBase{{endif}} + cudaErrorApiFailureBase = cyruntime.cudaError.cudaErrorApiFailureBase{{endif}} {{endif}} {{if 'cudaGraphDependencyType_enum' in found_types}} @@ -1149,7 +1149,7 @@ class cudaGraphDependencyType(IntEnum): {{if 'cudaGraphDependencyTypeDefault' in found_values}} #: This is an ordinary dependency. - cudaGraphDependencyTypeDefault = ccudart.cudaGraphDependencyType_enum.cudaGraphDependencyTypeDefault{{endif}} + cudaGraphDependencyTypeDefault = cyruntime.cudaGraphDependencyType_enum.cudaGraphDependencyTypeDefault{{endif}} {{if 'cudaGraphDependencyTypeProgrammatic' in found_values}} #: This dependency type allows the downstream node to use @@ -1157,7 +1157,7 @@ class cudaGraphDependencyType(IntEnum): #: kernel nodes, and must be used with either the #: :py:obj:`~.cudaGraphKernelNodePortProgrammatic` or #: :py:obj:`~.cudaGraphKernelNodePortLaunchCompletion` outgoing port. - cudaGraphDependencyTypeProgrammatic = ccudart.cudaGraphDependencyType_enum.cudaGraphDependencyTypeProgrammatic{{endif}} + cudaGraphDependencyTypeProgrammatic = cyruntime.cudaGraphDependencyType_enum.cudaGraphDependencyTypeProgrammatic{{endif}} {{endif}} {{if 'cudaGraphInstantiateResult' in found_types}} @@ -1168,26 +1168,26 @@ class cudaGraphInstantiateResult(IntEnum): {{if 'cudaGraphInstantiateSuccess' in found_values}} #: Instantiation succeeded - cudaGraphInstantiateSuccess = ccudart.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess{{endif}} + cudaGraphInstantiateSuccess = cyruntime.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess{{endif}} {{if 'cudaGraphInstantiateError' in found_values}} #: Instantiation failed for an unexpected reason which is described in #: the return value of the function - cudaGraphInstantiateError = ccudart.cudaGraphInstantiateResult.cudaGraphInstantiateError{{endif}} + cudaGraphInstantiateError = cyruntime.cudaGraphInstantiateResult.cudaGraphInstantiateError{{endif}} {{if 'cudaGraphInstantiateInvalidStructure' in found_values}} #: Instantiation failed due to invalid structure, such as cycles - cudaGraphInstantiateInvalidStructure = ccudart.cudaGraphInstantiateResult.cudaGraphInstantiateInvalidStructure{{endif}} + cudaGraphInstantiateInvalidStructure = cyruntime.cudaGraphInstantiateResult.cudaGraphInstantiateInvalidStructure{{endif}} {{if 'cudaGraphInstantiateNodeOperationNotSupported' in found_values}} #: Instantiation for device launch failed because the graph contained #: an unsupported operation - cudaGraphInstantiateNodeOperationNotSupported = ccudart.cudaGraphInstantiateResult.cudaGraphInstantiateNodeOperationNotSupported{{endif}} + cudaGraphInstantiateNodeOperationNotSupported = cyruntime.cudaGraphInstantiateResult.cudaGraphInstantiateNodeOperationNotSupported{{endif}} {{if 'cudaGraphInstantiateMultipleDevicesNotSupported' in found_values}} #: Instantiation for device launch failed due to the nodes belonging to #: different contexts - cudaGraphInstantiateMultipleDevicesNotSupported = 
ccudart.cudaGraphInstantiateResult.cudaGraphInstantiateMultipleDevicesNotSupported{{endif}} + cudaGraphInstantiateMultipleDevicesNotSupported = cyruntime.cudaGraphInstantiateResult.cudaGraphInstantiateMultipleDevicesNotSupported{{endif}} {{endif}} {{if 'cudaLaunchMemSyncDomain' in found_types}} @@ -1215,11 +1215,11 @@ class cudaLaunchMemSyncDomain(IntEnum): {{if 'cudaLaunchMemSyncDomainDefault' in found_values}} #: Launch kernels in the default domain - cudaLaunchMemSyncDomainDefault = ccudart.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainDefault{{endif}} + cudaLaunchMemSyncDomainDefault = cyruntime.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainDefault{{endif}} {{if 'cudaLaunchMemSyncDomainRemote' in found_values}} #: Launch kernels in the remote domain - cudaLaunchMemSyncDomainRemote = ccudart.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainRemote{{endif}} + cudaLaunchMemSyncDomainRemote = cyruntime.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainRemote{{endif}} {{endif}} {{if 'cudaLaunchAttributeID' in found_types}} @@ -1231,32 +1231,32 @@ class cudaLaunchAttributeID(IntEnum): {{if 'cudaLaunchAttributeIgnore' in found_values}} #: Ignored entry, for convenient composition - cudaLaunchAttributeIgnore = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeIgnore{{endif}} + cudaLaunchAttributeIgnore = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeIgnore{{endif}} {{if 'cudaLaunchAttributeAccessPolicyWindow' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.accessPolicyWindow`. - cudaLaunchAttributeAccessPolicyWindow = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow{{endif}} + cudaLaunchAttributeAccessPolicyWindow = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow{{endif}} {{if 'cudaLaunchAttributeCooperative' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.cooperative`. - cudaLaunchAttributeCooperative = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeCooperative{{endif}} + cudaLaunchAttributeCooperative = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeCooperative{{endif}} {{if 'cudaLaunchAttributeSynchronizationPolicy' in found_values}} #: Valid for streams. See #: :py:obj:`~.cudaLaunchAttributeValue.syncPolicy`. - cudaLaunchAttributeSynchronizationPolicy = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy{{endif}} + cudaLaunchAttributeSynchronizationPolicy = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy{{endif}} {{if 'cudaLaunchAttributeClusterDimension' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.clusterDim`. - cudaLaunchAttributeClusterDimension = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension{{endif}} + cudaLaunchAttributeClusterDimension = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension{{endif}} {{if 'cudaLaunchAttributeClusterSchedulingPolicyPreference' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference`. - cudaLaunchAttributeClusterSchedulingPolicyPreference = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference{{endif}} + cudaLaunchAttributeClusterSchedulingPolicyPreference = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference{{endif}} {{if 'cudaLaunchAttributeProgrammaticStreamSerialization' in found_values}} #: Valid for launches. 
Setting @@ -1268,7 +1268,7 @@ class cudaLaunchAttributeID(IntEnum): #: The dependent launches can choose to wait on the dependency using #: the programmatic sync (cudaGridDependencySynchronize() or equivalent #: PTX instructions). - cudaLaunchAttributeProgrammaticStreamSerialization = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization{{endif}} + cudaLaunchAttributeProgrammaticStreamSerialization = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization{{endif}} {{if 'cudaLaunchAttributeProgrammaticEvent' in found_values}} #: Valid for launches. Set @@ -1291,22 +1291,22 @@ class cudaLaunchAttributeID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.cudaEventDisableTiming` flag set). - cudaLaunchAttributeProgrammaticEvent = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent{{endif}} + cudaLaunchAttributeProgrammaticEvent = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent{{endif}} {{if 'cudaLaunchAttributePriority' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.priority`. - cudaLaunchAttributePriority = ccudart.cudaLaunchAttributeID.cudaLaunchAttributePriority{{endif}} + cudaLaunchAttributePriority = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributePriority{{endif}} {{if 'cudaLaunchAttributeMemSyncDomainMap' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.memSyncDomainMap`. - cudaLaunchAttributeMemSyncDomainMap = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap{{endif}} + cudaLaunchAttributeMemSyncDomainMap = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap{{endif}} {{if 'cudaLaunchAttributeMemSyncDomain' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.memSyncDomain`. - cudaLaunchAttributeMemSyncDomain = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain{{endif}} + cudaLaunchAttributeMemSyncDomain = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain{{endif}} {{if 'cudaLaunchAttributeLaunchCompletionEvent' in found_values}} #: Valid for launches. Set @@ -1327,7 +1327,7 @@ class cudaLaunchAttributeID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.cudaEventDisableTiming` flag set). - cudaLaunchAttributeLaunchCompletionEvent = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent{{endif}} + cudaLaunchAttributeLaunchCompletionEvent = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent{{endif}} {{if 'cudaLaunchAttributeDeviceUpdatableKernelNode' in found_values}} #: Valid for graph nodes, launches. This attribute is graphs-only, and @@ -1361,7 +1361,7 @@ class cudaLaunchAttributeID(IntEnum): #: graph, if host-side executable graph updates are made to the device- #: updatable nodes, the graph must be uploaded before it is launched #: again. - cudaLaunchAttributeDeviceUpdatableKernelNode = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode{{endif}} + cudaLaunchAttributeDeviceUpdatableKernelNode = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode{{endif}} {{if 'cudaLaunchAttributePreferredSharedMemoryCarveout' in found_values}} #: Valid for launches. 
On devices where the L1 cache and shared memory @@ -1373,7 +1373,7 @@ class cudaLaunchAttributeID(IntEnum): #: :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout`. This is #: only a hint, and the driver can choose a different configuration if #: required for the launch. - cudaLaunchAttributePreferredSharedMemoryCarveout = ccudart.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout{{endif}} + cudaLaunchAttributePreferredSharedMemoryCarveout = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout{{endif}} {{endif}} {{if 'cudaAsyncNotificationType_enum' in found_types}} @@ -1382,83 +1382,83 @@ class cudaAsyncNotificationType(IntEnum): Types of async notification that can occur """ {{if 'cudaAsyncNotificationTypeOverBudget' in found_values}} - cudaAsyncNotificationTypeOverBudget = ccudart.cudaAsyncNotificationType_enum.cudaAsyncNotificationTypeOverBudget{{endif}} + cudaAsyncNotificationTypeOverBudget = cyruntime.cudaAsyncNotificationType_enum.cudaAsyncNotificationTypeOverBudget{{endif}} {{endif}} {{if 'cudaDataType_t' in found_types}} class cudaDataType(IntEnum): """""" {{if 'CUDA_R_32F' in found_values}} - CUDA_R_32F = ccudart.cudaDataType_t.CUDA_R_32F{{endif}} + CUDA_R_32F = cyruntime.cudaDataType_t.CUDA_R_32F{{endif}} {{if 'CUDA_R_64F' in found_values}} - CUDA_R_64F = ccudart.cudaDataType_t.CUDA_R_64F{{endif}} + CUDA_R_64F = cyruntime.cudaDataType_t.CUDA_R_64F{{endif}} {{if 'CUDA_R_16F' in found_values}} - CUDA_R_16F = ccudart.cudaDataType_t.CUDA_R_16F{{endif}} + CUDA_R_16F = cyruntime.cudaDataType_t.CUDA_R_16F{{endif}} {{if 'CUDA_R_8I' in found_values}} - CUDA_R_8I = ccudart.cudaDataType_t.CUDA_R_8I{{endif}} + CUDA_R_8I = cyruntime.cudaDataType_t.CUDA_R_8I{{endif}} {{if 'CUDA_C_32F' in found_values}} - CUDA_C_32F = ccudart.cudaDataType_t.CUDA_C_32F{{endif}} + CUDA_C_32F = cyruntime.cudaDataType_t.CUDA_C_32F{{endif}} {{if 'CUDA_C_64F' in found_values}} - CUDA_C_64F = ccudart.cudaDataType_t.CUDA_C_64F{{endif}} + CUDA_C_64F = cyruntime.cudaDataType_t.CUDA_C_64F{{endif}} {{if 'CUDA_C_16F' in found_values}} - CUDA_C_16F = ccudart.cudaDataType_t.CUDA_C_16F{{endif}} + CUDA_C_16F = cyruntime.cudaDataType_t.CUDA_C_16F{{endif}} {{if 'CUDA_C_8I' in found_values}} - CUDA_C_8I = ccudart.cudaDataType_t.CUDA_C_8I{{endif}} + CUDA_C_8I = cyruntime.cudaDataType_t.CUDA_C_8I{{endif}} {{if 'CUDA_R_8U' in found_values}} - CUDA_R_8U = ccudart.cudaDataType_t.CUDA_R_8U{{endif}} + CUDA_R_8U = cyruntime.cudaDataType_t.CUDA_R_8U{{endif}} {{if 'CUDA_C_8U' in found_values}} - CUDA_C_8U = ccudart.cudaDataType_t.CUDA_C_8U{{endif}} + CUDA_C_8U = cyruntime.cudaDataType_t.CUDA_C_8U{{endif}} {{if 'CUDA_R_32I' in found_values}} - CUDA_R_32I = ccudart.cudaDataType_t.CUDA_R_32I{{endif}} + CUDA_R_32I = cyruntime.cudaDataType_t.CUDA_R_32I{{endif}} {{if 'CUDA_C_32I' in found_values}} - CUDA_C_32I = ccudart.cudaDataType_t.CUDA_C_32I{{endif}} + CUDA_C_32I = cyruntime.cudaDataType_t.CUDA_C_32I{{endif}} {{if 'CUDA_R_32U' in found_values}} - CUDA_R_32U = ccudart.cudaDataType_t.CUDA_R_32U{{endif}} + CUDA_R_32U = cyruntime.cudaDataType_t.CUDA_R_32U{{endif}} {{if 'CUDA_C_32U' in found_values}} - CUDA_C_32U = ccudart.cudaDataType_t.CUDA_C_32U{{endif}} + CUDA_C_32U = cyruntime.cudaDataType_t.CUDA_C_32U{{endif}} {{if 'CUDA_R_16BF' in found_values}} - CUDA_R_16BF = ccudart.cudaDataType_t.CUDA_R_16BF{{endif}} + CUDA_R_16BF = cyruntime.cudaDataType_t.CUDA_R_16BF{{endif}} {{if 'CUDA_C_16BF' in found_values}} - CUDA_C_16BF = ccudart.cudaDataType_t.CUDA_C_16BF{{endif}} + CUDA_C_16BF = 
cyruntime.cudaDataType_t.CUDA_C_16BF{{endif}} {{if 'CUDA_R_4I' in found_values}} - CUDA_R_4I = ccudart.cudaDataType_t.CUDA_R_4I{{endif}} + CUDA_R_4I = cyruntime.cudaDataType_t.CUDA_R_4I{{endif}} {{if 'CUDA_C_4I' in found_values}} - CUDA_C_4I = ccudart.cudaDataType_t.CUDA_C_4I{{endif}} + CUDA_C_4I = cyruntime.cudaDataType_t.CUDA_C_4I{{endif}} {{if 'CUDA_R_4U' in found_values}} - CUDA_R_4U = ccudart.cudaDataType_t.CUDA_R_4U{{endif}} + CUDA_R_4U = cyruntime.cudaDataType_t.CUDA_R_4U{{endif}} {{if 'CUDA_C_4U' in found_values}} - CUDA_C_4U = ccudart.cudaDataType_t.CUDA_C_4U{{endif}} + CUDA_C_4U = cyruntime.cudaDataType_t.CUDA_C_4U{{endif}} {{if 'CUDA_R_16I' in found_values}} - CUDA_R_16I = ccudart.cudaDataType_t.CUDA_R_16I{{endif}} + CUDA_R_16I = cyruntime.cudaDataType_t.CUDA_R_16I{{endif}} {{if 'CUDA_C_16I' in found_values}} - CUDA_C_16I = ccudart.cudaDataType_t.CUDA_C_16I{{endif}} + CUDA_C_16I = cyruntime.cudaDataType_t.CUDA_C_16I{{endif}} {{if 'CUDA_R_16U' in found_values}} - CUDA_R_16U = ccudart.cudaDataType_t.CUDA_R_16U{{endif}} + CUDA_R_16U = cyruntime.cudaDataType_t.CUDA_R_16U{{endif}} {{if 'CUDA_C_16U' in found_values}} - CUDA_C_16U = ccudart.cudaDataType_t.CUDA_C_16U{{endif}} + CUDA_C_16U = cyruntime.cudaDataType_t.CUDA_C_16U{{endif}} {{if 'CUDA_R_64I' in found_values}} - CUDA_R_64I = ccudart.cudaDataType_t.CUDA_R_64I{{endif}} + CUDA_R_64I = cyruntime.cudaDataType_t.CUDA_R_64I{{endif}} {{if 'CUDA_C_64I' in found_values}} - CUDA_C_64I = ccudart.cudaDataType_t.CUDA_C_64I{{endif}} + CUDA_C_64I = cyruntime.cudaDataType_t.CUDA_C_64I{{endif}} {{if 'CUDA_R_64U' in found_values}} - CUDA_R_64U = ccudart.cudaDataType_t.CUDA_R_64U{{endif}} + CUDA_R_64U = cyruntime.cudaDataType_t.CUDA_R_64U{{endif}} {{if 'CUDA_C_64U' in found_values}} - CUDA_C_64U = ccudart.cudaDataType_t.CUDA_C_64U{{endif}} + CUDA_C_64U = cyruntime.cudaDataType_t.CUDA_C_64U{{endif}} {{if 'CUDA_R_8F_E4M3' in found_values}} - CUDA_R_8F_E4M3 = ccudart.cudaDataType_t.CUDA_R_8F_E4M3{{endif}} + CUDA_R_8F_E4M3 = cyruntime.cudaDataType_t.CUDA_R_8F_E4M3{{endif}} {{if 'CUDA_R_8F_E5M2' in found_values}} - CUDA_R_8F_E5M2 = ccudart.cudaDataType_t.CUDA_R_8F_E5M2{{endif}} + CUDA_R_8F_E5M2 = cyruntime.cudaDataType_t.CUDA_R_8F_E5M2{{endif}} {{endif}} {{if 'libraryPropertyType_t' in found_types}} class libraryPropertyType(IntEnum): """""" {{if 'MAJOR_VERSION' in found_values}} - MAJOR_VERSION = ccudart.libraryPropertyType_t.MAJOR_VERSION{{endif}} + MAJOR_VERSION = cyruntime.libraryPropertyType_t.MAJOR_VERSION{{endif}} {{if 'MINOR_VERSION' in found_values}} - MINOR_VERSION = ccudart.libraryPropertyType_t.MINOR_VERSION{{endif}} + MINOR_VERSION = cyruntime.libraryPropertyType_t.MINOR_VERSION{{endif}} {{if 'PATCH_LEVEL' in found_values}} - PATCH_LEVEL = ccudart.libraryPropertyType_t.PATCH_LEVEL{{endif}} + PATCH_LEVEL = cyruntime.libraryPropertyType_t.PATCH_LEVEL{{endif}} {{endif}} {{if True}} @@ -1469,11 +1469,11 @@ class cudaEglFrameType(IntEnum): {{if True}} #: Frame type CUDA array - cudaEglFrameTypeArray = ccudart.cudaEglFrameType_enum.cudaEglFrameTypeArray{{endif}} + cudaEglFrameTypeArray = cyruntime.cudaEglFrameType_enum.cudaEglFrameTypeArray{{endif}} {{if True}} #: Frame type CUDA pointer - cudaEglFrameTypePitch = ccudart.cudaEglFrameType_enum.cudaEglFrameTypePitch{{endif}} + cudaEglFrameTypePitch = cyruntime.cudaEglFrameType_enum.cudaEglFrameTypePitch{{endif}} {{endif}} {{if True}} @@ -1494,11 +1494,11 @@ class cudaEglResourceLocationFlags(IntEnum): {{if True}} #: Resource location sysmem - cudaEglResourceLocationSysmem = 
ccudart.cudaEglResourceLocationFlags_enum.cudaEglResourceLocationSysmem{{endif}} + cudaEglResourceLocationSysmem = cyruntime.cudaEglResourceLocationFlags_enum.cudaEglResourceLocationSysmem{{endif}} {{if True}} #: Resource location vidmem - cudaEglResourceLocationVidmem = ccudart.cudaEglResourceLocationFlags_enum.cudaEglResourceLocationVidmem{{endif}} + cudaEglResourceLocationVidmem = cyruntime.cudaEglResourceLocationFlags_enum.cudaEglResourceLocationVidmem{{endif}} {{endif}} {{if True}} @@ -1511,535 +1511,535 @@ class cudaEglColorFormat(IntEnum): #: Y, U, V in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYUV420Planar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar{{endif}} + cudaEglColorFormatYUV420Planar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) with VU byte ordering, #: width, height ratio same as YUV420Planar. - cudaEglColorFormatYUV420SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar{{endif}} + cudaEglColorFormatYUV420SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar{{endif}} {{if True}} #: Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V #: height = Y height. - cudaEglColorFormatYUV422Planar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV422Planar{{endif}} + cudaEglColorFormatYUV422Planar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV422Planar{{endif}} {{if True}} #: Y, UV in two surfaces with VU byte ordering, width, height ratio #: same as YUV422Planar. - cudaEglColorFormatYUV422SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV422SemiPlanar{{endif}} + cudaEglColorFormatYUV422SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV422SemiPlanar{{endif}} {{if True}} #: R/G/B/A four channels in one surface with BGRA byte ordering. - cudaEglColorFormatARGB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatARGB{{endif}} + cudaEglColorFormatARGB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatARGB{{endif}} {{if True}} #: R/G/B/A four channels in one surface with ABGR byte ordering. - cudaEglColorFormatRGBA = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatRGBA{{endif}} + cudaEglColorFormatRGBA = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatRGBA{{endif}} {{if True}} #: single luminance channel in one surface. - cudaEglColorFormatL = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatL{{endif}} + cudaEglColorFormatL = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatL{{endif}} {{if True}} #: single color channel in one surface. - cudaEglColorFormatR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatR{{endif}} + cudaEglColorFormatR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatR{{endif}} {{if True}} #: Y, U, V in three surfaces, each in a separate surface, U/V width = Y #: width, U/V height = Y height. - cudaEglColorFormatYUV444Planar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV444Planar{{endif}} + cudaEglColorFormatYUV444Planar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV444Planar{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) with VU byte ordering, #: width, height ratio same as YUV444Planar. 
- cudaEglColorFormatYUV444SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV444SemiPlanar{{endif}} + cudaEglColorFormatYUV444SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV444SemiPlanar{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as UYVY in one channel. - cudaEglColorFormatYUYV422 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUYV422{{endif}} + cudaEglColorFormatYUYV422 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUYV422{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as YUYV in one channel. - cudaEglColorFormatUYVY422 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatUYVY422{{endif}} + cudaEglColorFormatUYVY422 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatUYVY422{{endif}} {{if True}} #: R/G/B/A four channels in one surface with RGBA byte ordering. - cudaEglColorFormatABGR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatABGR{{endif}} + cudaEglColorFormatABGR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatABGR{{endif}} {{if True}} #: R/G/B/A four channels in one surface with ARGB byte ordering. - cudaEglColorFormatBGRA = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBGRA{{endif}} + cudaEglColorFormatBGRA = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBGRA{{endif}} {{if True}} #: Alpha color format - one channel in one surface. - cudaEglColorFormatA = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatA{{endif}} + cudaEglColorFormatA = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatA{{endif}} {{if True}} #: R/G color format - two channels in one surface with GR byte ordering - cudaEglColorFormatRG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatRG{{endif}} + cudaEglColorFormatRG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatRG{{endif}} {{if True}} #: Y, U, V, A four channels in one surface, interleaved as VUYA. - cudaEglColorFormatAYUV = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatAYUV{{endif}} + cudaEglColorFormatAYUV = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatAYUV{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V #: width = Y width, U/V height = Y height. - cudaEglColorFormatYVU444SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU444SemiPlanar{{endif}} + cudaEglColorFormatYVU444SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU444SemiPlanar{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V #: width = 1/2 Y width, U/V height = Y height. - cudaEglColorFormatYVU422SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU422SemiPlanar{{endif}} + cudaEglColorFormatYVU422SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU422SemiPlanar{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYVU420SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar{{endif}} + cudaEglColorFormatYVU420SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = Y width, U/V height = Y height. 
- cudaEglColorFormatY10V10U10_444SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_444SemiPlanar{{endif}} + cudaEglColorFormatY10V10U10_444SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_444SemiPlanar{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatY10V10U10_420SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar{{endif}} + cudaEglColorFormatY10V10U10_420SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar{{endif}} {{if True}} #: Y12, V12U12 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = Y width, U/V height = Y height. - cudaEglColorFormatY12V12U12_444SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_444SemiPlanar{{endif}} + cudaEglColorFormatY12V12U12_444SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_444SemiPlanar{{endif}} {{if True}} #: Y12, V12U12 in two surfaces (VU as one surface) with UV byte #: ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatY12V12U12_420SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_420SemiPlanar{{endif}} + cudaEglColorFormatY12V12U12_420SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_420SemiPlanar{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as YVYU in one #: channel. - cudaEglColorFormatVYUY_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatVYUY_ER{{endif}} + cudaEglColorFormatVYUY_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatVYUY_ER{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as YUYV in one #: channel. - cudaEglColorFormatUYVY_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatUYVY_ER{{endif}} + cudaEglColorFormatUYVY_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatUYVY_ER{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as UYVY in one #: channel. - cudaEglColorFormatYUYV_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUYV_ER{{endif}} + cudaEglColorFormatYUYV_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUYV_ER{{endif}} {{if True}} #: Extended Range Y, U, V in one surface, interleaved as VYUY in one #: channel. - cudaEglColorFormatYVYU_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVYU_ER{{endif}} + cudaEglColorFormatYVYU_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVYU_ER{{endif}} {{if True}} #: Extended Range Y, U, V, A four channels in one surface, interleaved #: as AVUY. - cudaEglColorFormatYUVA_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUVA_ER{{endif}} + cudaEglColorFormatYUVA_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUVA_ER{{endif}} {{if True}} #: Extended Range Y, U, V, A four channels in one surface, interleaved #: as VUYA. - cudaEglColorFormatAYUV_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatAYUV_ER{{endif}} + cudaEglColorFormatAYUV_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatAYUV_ER{{endif}} {{if True}} #: Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V #: height = Y height. 
- cudaEglColorFormatYUV444Planar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV444Planar_ER{{endif}} + cudaEglColorFormatYUV444Planar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV444Planar_ER{{endif}} {{if True}} #: Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, #: U/V height = Y height. - cudaEglColorFormatYUV422Planar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV422Planar_ER{{endif}} + cudaEglColorFormatYUV422Planar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV422Planar_ER{{endif}} {{if True}} #: Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - cudaEglColorFormatYUV420Planar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar_ER{{endif}} + cudaEglColorFormatYUV420Planar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar_ER{{endif}} {{if True}} #: Extended Range Y, UV in two surfaces (UV as one surface) with VU #: byte ordering, U/V width = Y width, U/V height = Y height. - cudaEglColorFormatYUV444SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV444SemiPlanar_ER{{endif}} + cudaEglColorFormatYUV444SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV444SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y, UV in two surfaces (UV as one surface) with VU #: byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - cudaEglColorFormatYUV422SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV422SemiPlanar_ER{{endif}} + cudaEglColorFormatYUV422SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV422SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y, UV in two surfaces (UV as one surface) with VU #: byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYUV420SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar_ER{{endif}} + cudaEglColorFormatYUV420SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V #: height = Y height. - cudaEglColorFormatYVU444Planar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU444Planar_ER{{endif}} + cudaEglColorFormatYVU444Planar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU444Planar_ER{{endif}} {{if True}} #: Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, #: U/V height = Y height. - cudaEglColorFormatYVU422Planar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU422Planar_ER{{endif}} + cudaEglColorFormatYVU422Planar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU422Planar_ER{{endif}} {{if True}} #: Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - cudaEglColorFormatYVU420Planar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar_ER{{endif}} + cudaEglColorFormatYVU420Planar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar_ER{{endif}} {{if True}} #: Extended Range Y, VU in two surfaces (VU as one surface) with UV #: byte ordering, U/V width = Y width, U/V height = Y height. 
- cudaEglColorFormatYVU444SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU444SemiPlanar_ER{{endif}} + cudaEglColorFormatYVU444SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU444SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y, VU in two surfaces (VU as one surface) with UV #: byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - cudaEglColorFormatYVU422SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU422SemiPlanar_ER{{endif}} + cudaEglColorFormatYVU422SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU422SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y, VU in two surfaces (VU as one surface) with UV #: byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYVU420SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar_ER{{endif}} + cudaEglColorFormatYVU420SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar_ER{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved RGGB #: ordering. - cudaEglColorFormatBayerRGGB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerRGGB{{endif}} + cudaEglColorFormatBayerRGGB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerRGGB{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved BGGR #: ordering. - cudaEglColorFormatBayerBGGR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerBGGR{{endif}} + cudaEglColorFormatBayerBGGR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerBGGR{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved GRBG #: ordering. - cudaEglColorFormatBayerGRBG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerGRBG{{endif}} + cudaEglColorFormatBayerGRBG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerGRBG{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved GBRG #: ordering. - cudaEglColorFormatBayerGBRG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerGBRG{{endif}} + cudaEglColorFormatBayerGBRG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerGBRG{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved RGGB #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - cudaEglColorFormatBayer10RGGB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer10RGGB{{endif}} + cudaEglColorFormatBayer10RGGB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer10RGGB{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved BGGR #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - cudaEglColorFormatBayer10BGGR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer10BGGR{{endif}} + cudaEglColorFormatBayer10BGGR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer10BGGR{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved GRBG #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - cudaEglColorFormatBayer10GRBG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer10GRBG{{endif}} + cudaEglColorFormatBayer10GRBG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer10GRBG{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved GBRG #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. 
- cudaEglColorFormatBayer10GBRG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer10GBRG{{endif}} + cudaEglColorFormatBayer10GBRG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer10GBRG{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved RGGB #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12RGGB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12RGGB{{endif}} + cudaEglColorFormatBayer12RGGB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12RGGB{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved BGGR #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12BGGR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12BGGR{{endif}} + cudaEglColorFormatBayer12BGGR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12BGGR{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved GRBG #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12GRBG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12GRBG{{endif}} + cudaEglColorFormatBayer12GRBG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12GRBG{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved GBRG #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12GBRG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12GBRG{{endif}} + cudaEglColorFormatBayer12GBRG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12GBRG{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved RGGB #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. - cudaEglColorFormatBayer14RGGB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer14RGGB{{endif}} + cudaEglColorFormatBayer14RGGB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer14RGGB{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved BGGR #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. - cudaEglColorFormatBayer14BGGR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer14BGGR{{endif}} + cudaEglColorFormatBayer14BGGR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer14BGGR{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved GRBG #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. - cudaEglColorFormatBayer14GRBG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer14GRBG{{endif}} + cudaEglColorFormatBayer14GRBG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer14GRBG{{endif}} {{if True}} #: Bayer14 format - one channel in one surface with interleaved GBRG #: ordering. Out of 16 bits, 14 bits used 2 bits No-op. - cudaEglColorFormatBayer14GBRG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer14GBRG{{endif}} + cudaEglColorFormatBayer14GBRG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer14GBRG{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved RGGB #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. - cudaEglColorFormatBayer20RGGB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer20RGGB{{endif}} + cudaEglColorFormatBayer20RGGB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer20RGGB{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved BGGR #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. 
- cudaEglColorFormatBayer20BGGR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer20BGGR{{endif}} + cudaEglColorFormatBayer20BGGR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer20BGGR{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved GRBG #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. - cudaEglColorFormatBayer20GRBG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer20GRBG{{endif}} + cudaEglColorFormatBayer20GRBG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer20GRBG{{endif}} {{if True}} #: Bayer20 format - one channel in one surface with interleaved GBRG #: ordering. Out of 32 bits, 20 bits used 12 bits No-op. - cudaEglColorFormatBayer20GBRG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer20GBRG{{endif}} + cudaEglColorFormatBayer20GBRG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer20GBRG{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = Y #: width, U/V height = Y height. - cudaEglColorFormatYVU444Planar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU444Planar{{endif}} + cudaEglColorFormatYVU444Planar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU444Planar{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = Y height. - cudaEglColorFormatYVU422Planar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU422Planar{{endif}} + cudaEglColorFormatYVU422Planar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU422Planar{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYVU420Planar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar{{endif}} + cudaEglColorFormatYVU420Planar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved RGGB ordering and mapped to opaque integer #: datatype. - cudaEglColorFormatBayerIspRGGB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspRGGB{{endif}} + cudaEglColorFormatBayerIspRGGB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspRGGB{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved BGGR ordering and mapped to opaque integer #: datatype. - cudaEglColorFormatBayerIspBGGR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspBGGR{{endif}} + cudaEglColorFormatBayerIspBGGR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspBGGR{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved GRBG ordering and mapped to opaque integer #: datatype. - cudaEglColorFormatBayerIspGRBG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspGRBG{{endif}} + cudaEglColorFormatBayerIspGRBG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspGRBG{{endif}} {{if True}} #: Nvidia proprietary Bayer ISP format - one channel in one surface #: with interleaved GBRG ordering and mapped to opaque integer #: datatype. - cudaEglColorFormatBayerIspGBRG = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspGBRG{{endif}} + cudaEglColorFormatBayerIspGBRG = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerIspGBRG{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved BCCR #: ordering. 
- cudaEglColorFormatBayerBCCR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerBCCR{{endif}} + cudaEglColorFormatBayerBCCR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerBCCR{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved RCCB #: ordering. - cudaEglColorFormatBayerRCCB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerRCCB{{endif}} + cudaEglColorFormatBayerRCCB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerRCCB{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved CRBC #: ordering. - cudaEglColorFormatBayerCRBC = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerCRBC{{endif}} + cudaEglColorFormatBayerCRBC = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerCRBC{{endif}} {{if True}} #: Bayer format - one channel in one surface with interleaved CBRC #: ordering. - cudaEglColorFormatBayerCBRC = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayerCBRC{{endif}} + cudaEglColorFormatBayerCBRC = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayerCBRC{{endif}} {{if True}} #: Bayer10 format - one channel in one surface with interleaved CCCC #: ordering. Out of 16 bits, 10 bits used 6 bits No-op. - cudaEglColorFormatBayer10CCCC = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer10CCCC{{endif}} + cudaEglColorFormatBayer10CCCC = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer10CCCC{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved BCCR #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12BCCR = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12BCCR{{endif}} + cudaEglColorFormatBayer12BCCR = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12BCCR{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved RCCB #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12RCCB = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12RCCB{{endif}} + cudaEglColorFormatBayer12RCCB = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12RCCB{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved CRBC #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12CRBC = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12CRBC{{endif}} + cudaEglColorFormatBayer12CRBC = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12CRBC{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved CBRC #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12CBRC = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12CBRC{{endif}} + cudaEglColorFormatBayer12CBRC = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12CBRC{{endif}} {{if True}} #: Bayer12 format - one channel in one surface with interleaved CCCC #: ordering. Out of 16 bits, 12 bits used 4 bits No-op. - cudaEglColorFormatBayer12CCCC = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatBayer12CCCC{{endif}} + cudaEglColorFormatBayer12CCCC = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatBayer12CCCC{{endif}} {{if True}} #: Color format for single Y plane. - cudaEglColorFormatY = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY{{endif}} + cudaEglColorFormatY = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. 
- cudaEglColorFormatYUV420SemiPlanar_2020 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar_2020{{endif}} + cudaEglColorFormatYUV420SemiPlanar_2020 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar_2020{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - cudaEglColorFormatYVU420SemiPlanar_2020 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar_2020{{endif}} + cudaEglColorFormatYVU420SemiPlanar_2020 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar_2020{{endif}} {{if True}} #: Y, U, V in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYUV420Planar_2020 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar_2020{{endif}} + cudaEglColorFormatYUV420Planar_2020 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar_2020{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYVU420Planar_2020 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar_2020{{endif}} + cudaEglColorFormatYVU420Planar_2020 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar_2020{{endif}} {{if True}} #: Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - cudaEglColorFormatYUV420SemiPlanar_709 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar_709{{endif}} + cudaEglColorFormatYUV420SemiPlanar_709 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420SemiPlanar_709{{endif}} {{if True}} #: Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, #: U/V height = 1/2 Y height. - cudaEglColorFormatYVU420SemiPlanar_709 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar_709{{endif}} + cudaEglColorFormatYVU420SemiPlanar_709 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420SemiPlanar_709{{endif}} {{if True}} #: Y, U, V in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYUV420Planar_709 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar_709{{endif}} + cudaEglColorFormatYUV420Planar_709 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUV420Planar_709{{endif}} {{if True}} #: Y, V, U in three surfaces, each in a separate surface, U/V width = #: 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatYVU420Planar_709 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar_709{{endif}} + cudaEglColorFormatYVU420Planar_709 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVU420Planar_709{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y #: width, U/V height = 1/2 Y height. - cudaEglColorFormatY10V10U10_420SemiPlanar_709 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_709{{endif}} + cudaEglColorFormatY10V10U10_420SemiPlanar_709 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_709{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y #: width, U/V height = 1/2 Y height. 
- cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_2020{{endif}} + cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_2020{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y #: width, U/V height = Y height. - cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_422SemiPlanar_2020{{endif}} + cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_422SemiPlanar_2020{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y #: width, U/V height = Y height. - cudaEglColorFormatY10V10U10_422SemiPlanar = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_422SemiPlanar{{endif}} + cudaEglColorFormatY10V10U10_422SemiPlanar = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_422SemiPlanar{{endif}} {{if True}} #: Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y #: width, U/V height = Y height. - cudaEglColorFormatY10V10U10_422SemiPlanar_709 = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_422SemiPlanar_709{{endif}} + cudaEglColorFormatY10V10U10_422SemiPlanar_709 = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_422SemiPlanar_709{{endif}} {{if True}} #: Extended Range Color format for single Y plane. - cudaEglColorFormatY_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY_ER{{endif}} + cudaEglColorFormatY_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY_ER{{endif}} {{if True}} #: Extended Range Color format for single Y plane. - cudaEglColorFormatY_709_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY_709_ER{{endif}} + cudaEglColorFormatY_709_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY_709_ER{{endif}} {{if True}} #: Extended Range Color format for single Y10 plane. - cudaEglColorFormatY10_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10_ER{{endif}} + cudaEglColorFormatY10_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10_ER{{endif}} {{if True}} #: Extended Range Color format for single Y10 plane. - cudaEglColorFormatY10_709_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10_709_ER{{endif}} + cudaEglColorFormatY10_709_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10_709_ER{{endif}} {{if True}} #: Extended Range Color format for single Y12 plane. - cudaEglColorFormatY12_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12_ER{{endif}} + cudaEglColorFormatY12_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12_ER{{endif}} {{if True}} #: Extended Range Color format for single Y12 plane. - cudaEglColorFormatY12_709_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12_709_ER{{endif}} + cudaEglColorFormatY12_709_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12_709_ER{{endif}} {{if True}} #: Y, U, V, A four channels in one surface, interleaved as AVUY. - cudaEglColorFormatYUVA = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYUVA{{endif}} + cudaEglColorFormatYUVA = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYUVA{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as YVYU in one channel. 
- cudaEglColorFormatYVYU = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatYVYU{{endif}} + cudaEglColorFormatYVYU = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatYVYU{{endif}} {{if True}} #: Y, U, V in one surface, interleaved as VYUY in one channel. - cudaEglColorFormatVYUY = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatVYUY{{endif}} + cudaEglColorFormatVYUY = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatVYUY{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatY10V10U10_420SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_ER{{endif}} + cudaEglColorFormatY10V10U10_420SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER{{endif}} + cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. - cudaEglColorFormatY10V10U10_444SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_444SemiPlanar_ER{{endif}} + cudaEglColorFormatY10V10U10_444SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_444SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. - cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER{{endif}} + cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatY12V12U12_420SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_420SemiPlanar_ER{{endif}} + cudaEglColorFormatY12V12U12_420SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_420SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = 1/2 Y width, U/V height = 1/2 Y height. - cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER{{endif}} + cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. - cudaEglColorFormatY12V12U12_444SemiPlanar_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_444SemiPlanar_ER{{endif}} + cudaEglColorFormatY12V12U12_444SemiPlanar_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_444SemiPlanar_ER{{endif}} {{if True}} #: Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V #: width = Y width, U/V height = Y height. 
- cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = ccudart.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER{{endif}} + cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = cyruntime.cudaEglColorFormat_enum.cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER{{endif}} {{endif}} {{if 'cudaChannelFormatKind' in found_types}} @@ -2050,141 +2050,141 @@ class cudaChannelFormatKind(IntEnum): {{if 'cudaChannelFormatKindSigned' in found_values}} #: Signed channel format - cudaChannelFormatKindSigned = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSigned{{endif}} + cudaChannelFormatKindSigned = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSigned{{endif}} {{if 'cudaChannelFormatKindUnsigned' in found_values}} #: Unsigned channel format - cudaChannelFormatKindUnsigned = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned{{endif}} + cudaChannelFormatKindUnsigned = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsigned{{endif}} {{if 'cudaChannelFormatKindFloat' in found_values}} #: Float channel format - cudaChannelFormatKindFloat = ccudart.cudaChannelFormatKind.cudaChannelFormatKindFloat{{endif}} + cudaChannelFormatKindFloat = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindFloat{{endif}} {{if 'cudaChannelFormatKindNone' in found_values}} #: No channel format - cudaChannelFormatKindNone = ccudart.cudaChannelFormatKind.cudaChannelFormatKindNone{{endif}} + cudaChannelFormatKindNone = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindNone{{endif}} {{if 'cudaChannelFormatKindNV12' in found_values}} #: Unsigned 8-bit integers, planar 4:2:0 YUV format - cudaChannelFormatKindNV12 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindNV12{{endif}} + cudaChannelFormatKindNV12 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindNV12{{endif}} {{if 'cudaChannelFormatKindUnsignedNormalized8X1' in found_values}} #: 1 channel unsigned 8-bit normalized integer - cudaChannelFormatKindUnsignedNormalized8X1 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X1{{endif}} + cudaChannelFormatKindUnsignedNormalized8X1 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X1{{endif}} {{if 'cudaChannelFormatKindUnsignedNormalized8X2' in found_values}} #: 2 channel unsigned 8-bit normalized integer - cudaChannelFormatKindUnsignedNormalized8X2 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X2{{endif}} + cudaChannelFormatKindUnsignedNormalized8X2 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X2{{endif}} {{if 'cudaChannelFormatKindUnsignedNormalized8X4' in found_values}} #: 4 channel unsigned 8-bit normalized integer - cudaChannelFormatKindUnsignedNormalized8X4 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X4{{endif}} + cudaChannelFormatKindUnsignedNormalized8X4 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X4{{endif}} {{if 'cudaChannelFormatKindUnsignedNormalized16X1' in found_values}} #: 1 channel unsigned 16-bit normalized integer - cudaChannelFormatKindUnsignedNormalized16X1 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X1{{endif}} + cudaChannelFormatKindUnsignedNormalized16X1 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X1{{endif}} {{if 'cudaChannelFormatKindUnsignedNormalized16X2' in found_values}} #: 2 channel unsigned 16-bit normalized integer - cudaChannelFormatKindUnsignedNormalized16X2 = 
ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X2{{endif}} + cudaChannelFormatKindUnsignedNormalized16X2 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X2{{endif}} {{if 'cudaChannelFormatKindUnsignedNormalized16X4' in found_values}} #: 4 channel unsigned 16-bit normalized integer - cudaChannelFormatKindUnsignedNormalized16X4 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X4{{endif}} + cudaChannelFormatKindUnsignedNormalized16X4 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X4{{endif}} {{if 'cudaChannelFormatKindSignedNormalized8X1' in found_values}} #: 1 channel signed 8-bit normalized integer - cudaChannelFormatKindSignedNormalized8X1 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X1{{endif}} + cudaChannelFormatKindSignedNormalized8X1 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X1{{endif}} {{if 'cudaChannelFormatKindSignedNormalized8X2' in found_values}} #: 2 channel signed 8-bit normalized integer - cudaChannelFormatKindSignedNormalized8X2 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X2{{endif}} + cudaChannelFormatKindSignedNormalized8X2 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X2{{endif}} {{if 'cudaChannelFormatKindSignedNormalized8X4' in found_values}} #: 4 channel signed 8-bit normalized integer - cudaChannelFormatKindSignedNormalized8X4 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X4{{endif}} + cudaChannelFormatKindSignedNormalized8X4 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X4{{endif}} {{if 'cudaChannelFormatKindSignedNormalized16X1' in found_values}} #: 1 channel signed 16-bit normalized integer - cudaChannelFormatKindSignedNormalized16X1 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X1{{endif}} + cudaChannelFormatKindSignedNormalized16X1 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X1{{endif}} {{if 'cudaChannelFormatKindSignedNormalized16X2' in found_values}} #: 2 channel signed 16-bit normalized integer - cudaChannelFormatKindSignedNormalized16X2 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X2{{endif}} + cudaChannelFormatKindSignedNormalized16X2 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X2{{endif}} {{if 'cudaChannelFormatKindSignedNormalized16X4' in found_values}} #: 4 channel signed 16-bit normalized integer - cudaChannelFormatKindSignedNormalized16X4 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X4{{endif}} + cudaChannelFormatKindSignedNormalized16X4 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X4{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed1' in found_values}} #: 4 channel unsigned normalized block-compressed (BC1 compression) #: format - cudaChannelFormatKindUnsignedBlockCompressed1 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed1 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed1SRGB' in found_values}} #: 4 channel unsigned normalized block-compressed (BC1 compression) #: format with sRGB encoding - cudaChannelFormatKindUnsignedBlockCompressed1SRGB = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB{{endif}} + 
cudaChannelFormatKindUnsignedBlockCompressed1SRGB = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed2' in found_values}} #: 4 channel unsigned normalized block-compressed (BC2 compression) #: format - cudaChannelFormatKindUnsignedBlockCompressed2 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed2 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed2SRGB' in found_values}} #: 4 channel unsigned normalized block-compressed (BC2 compression) #: format with sRGB encoding - cudaChannelFormatKindUnsignedBlockCompressed2SRGB = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed2SRGB = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed3' in found_values}} #: 4 channel unsigned normalized block-compressed (BC3 compression) #: format - cudaChannelFormatKindUnsignedBlockCompressed3 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed3 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed3SRGB' in found_values}} #: 4 channel unsigned normalized block-compressed (BC3 compression) #: format with sRGB encoding - cudaChannelFormatKindUnsignedBlockCompressed3SRGB = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed3SRGB = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed4' in found_values}} #: 1 channel unsigned normalized block-compressed (BC4 compression) #: format - cudaChannelFormatKindUnsignedBlockCompressed4 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed4 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4{{endif}} {{if 'cudaChannelFormatKindSignedBlockCompressed4' in found_values}} #: 1 channel signed normalized block-compressed (BC4 compression) #: format - cudaChannelFormatKindSignedBlockCompressed4 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4{{endif}} + cudaChannelFormatKindSignedBlockCompressed4 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed5' in found_values}} #: 2 channel unsigned normalized block-compressed (BC5 compression) #: format - cudaChannelFormatKindUnsignedBlockCompressed5 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed5 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5{{endif}} {{if 'cudaChannelFormatKindSignedBlockCompressed5' in found_values}} #: 2 channel signed normalized block-compressed (BC5 compression) #: format - cudaChannelFormatKindSignedBlockCompressed5 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5{{endif}} + cudaChannelFormatKindSignedBlockCompressed5 = 
cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed6H' in found_values}} #: 3 channel unsigned half-float block-compressed (BC6H compression) #: format - cudaChannelFormatKindUnsignedBlockCompressed6H = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed6H = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H{{endif}} {{if 'cudaChannelFormatKindSignedBlockCompressed6H' in found_values}} #: 3 channel signed half-float block-compressed (BC6H compression) #: format - cudaChannelFormatKindSignedBlockCompressed6H = ccudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H{{endif}} + cudaChannelFormatKindSignedBlockCompressed6H = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed7' in found_values}} #: 4 channel unsigned normalized block-compressed (BC7 compression) #: format - cudaChannelFormatKindUnsignedBlockCompressed7 = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed7 = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7{{endif}} {{if 'cudaChannelFormatKindUnsignedBlockCompressed7SRGB' in found_values}} #: 4 channel unsigned normalized block-compressed (BC7 compression) #: format with sRGB encoding - cudaChannelFormatKindUnsignedBlockCompressed7SRGB = ccudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB{{endif}} + cudaChannelFormatKindUnsignedBlockCompressed7SRGB = cyruntime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB{{endif}} {{endif}} {{if 'cudaMemoryType' in found_types}} @@ -2195,19 +2195,19 @@ class cudaMemoryType(IntEnum): {{if 'cudaMemoryTypeUnregistered' in found_values}} #: Unregistered memory - cudaMemoryTypeUnregistered = ccudart.cudaMemoryType.cudaMemoryTypeUnregistered{{endif}} + cudaMemoryTypeUnregistered = cyruntime.cudaMemoryType.cudaMemoryTypeUnregistered{{endif}} {{if 'cudaMemoryTypeHost' in found_values}} #: Host memory - cudaMemoryTypeHost = ccudart.cudaMemoryType.cudaMemoryTypeHost{{endif}} + cudaMemoryTypeHost = cyruntime.cudaMemoryType.cudaMemoryTypeHost{{endif}} {{if 'cudaMemoryTypeDevice' in found_values}} #: Device memory - cudaMemoryTypeDevice = ccudart.cudaMemoryType.cudaMemoryTypeDevice{{endif}} + cudaMemoryTypeDevice = cyruntime.cudaMemoryType.cudaMemoryTypeDevice{{endif}} {{if 'cudaMemoryTypeManaged' in found_values}} #: Managed memory - cudaMemoryTypeManaged = ccudart.cudaMemoryType.cudaMemoryTypeManaged{{endif}} + cudaMemoryTypeManaged = cyruntime.cudaMemoryType.cudaMemoryTypeManaged{{endif}} {{endif}} {{if 'cudaMemcpyKind' in found_types}} @@ -2218,24 +2218,24 @@ class cudaMemcpyKind(IntEnum): {{if 'cudaMemcpyHostToHost' in found_values}} #: Host -> Host - cudaMemcpyHostToHost = ccudart.cudaMemcpyKind.cudaMemcpyHostToHost{{endif}} + cudaMemcpyHostToHost = cyruntime.cudaMemcpyKind.cudaMemcpyHostToHost{{endif}} {{if 'cudaMemcpyHostToDevice' in found_values}} #: Host -> Device - cudaMemcpyHostToDevice = ccudart.cudaMemcpyKind.cudaMemcpyHostToDevice{{endif}} + cudaMemcpyHostToDevice = cyruntime.cudaMemcpyKind.cudaMemcpyHostToDevice{{endif}} {{if 'cudaMemcpyDeviceToHost' in found_values}} #: Device -> Host - cudaMemcpyDeviceToHost = ccudart.cudaMemcpyKind.cudaMemcpyDeviceToHost{{endif}} + cudaMemcpyDeviceToHost 
= cyruntime.cudaMemcpyKind.cudaMemcpyDeviceToHost{{endif}} {{if 'cudaMemcpyDeviceToDevice' in found_values}} #: Device -> Device - cudaMemcpyDeviceToDevice = ccudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice{{endif}} + cudaMemcpyDeviceToDevice = cyruntime.cudaMemcpyKind.cudaMemcpyDeviceToDevice{{endif}} {{if 'cudaMemcpyDefault' in found_values}} #: Direction of the transfer is inferred from the pointer values. #: Requires unified virtual addressing - cudaMemcpyDefault = ccudart.cudaMemcpyKind.cudaMemcpyDefault{{endif}} + cudaMemcpyDefault = cyruntime.cudaMemcpyKind.cudaMemcpyDefault{{endif}} {{endif}} {{if 'cudaAccessProperty' in found_types}} @@ -2247,15 +2247,15 @@ class cudaAccessProperty(IntEnum): {{if 'cudaAccessPropertyNormal' in found_values}} #: Normal cache persistence. - cudaAccessPropertyNormal = ccudart.cudaAccessProperty.cudaAccessPropertyNormal{{endif}} + cudaAccessPropertyNormal = cyruntime.cudaAccessProperty.cudaAccessPropertyNormal{{endif}} {{if 'cudaAccessPropertyStreaming' in found_values}} #: Streaming access is less likely to persist in cache. - cudaAccessPropertyStreaming = ccudart.cudaAccessProperty.cudaAccessPropertyStreaming{{endif}} + cudaAccessPropertyStreaming = cyruntime.cudaAccessProperty.cudaAccessPropertyStreaming{{endif}} {{if 'cudaAccessPropertyPersisting' in found_values}} #: Persisting access is more likely to persist in cache. - cudaAccessPropertyPersisting = ccudart.cudaAccessProperty.cudaAccessPropertyPersisting{{endif}} + cudaAccessPropertyPersisting = cyruntime.cudaAccessProperty.cudaAccessPropertyPersisting{{endif}} {{endif}} {{if 'cudaStreamCaptureStatus' in found_types}} @@ -2267,16 +2267,16 @@ class cudaStreamCaptureStatus(IntEnum): {{if 'cudaStreamCaptureStatusNone' in found_values}} #: Stream is not capturing - cudaStreamCaptureStatusNone = ccudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone{{endif}} + cudaStreamCaptureStatusNone = cyruntime.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone{{endif}} {{if 'cudaStreamCaptureStatusActive' in found_values}} #: Stream is actively capturing - cudaStreamCaptureStatusActive = ccudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive{{endif}} + cudaStreamCaptureStatusActive = cyruntime.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive{{endif}} {{if 'cudaStreamCaptureStatusInvalidated' in found_values}} #: Stream is part of a capture sequence that has been invalidated, but #: not terminated - cudaStreamCaptureStatusInvalidated = ccudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusInvalidated{{endif}} + cudaStreamCaptureStatusInvalidated = cyruntime.cudaStreamCaptureStatus.cudaStreamCaptureStatusInvalidated{{endif}} {{endif}} {{if 'cudaStreamCaptureMode' in found_types}} @@ -2287,11 +2287,11 @@ class cudaStreamCaptureMode(IntEnum): :py:obj:`~.cudaThreadExchangeStreamCaptureMode` """ {{if 'cudaStreamCaptureModeGlobal' in found_values}} - cudaStreamCaptureModeGlobal = ccudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal{{endif}} + cudaStreamCaptureModeGlobal = cyruntime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal{{endif}} {{if 'cudaStreamCaptureModeThreadLocal' in found_values}} - cudaStreamCaptureModeThreadLocal = ccudart.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal{{endif}} + cudaStreamCaptureModeThreadLocal = cyruntime.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal{{endif}} {{if 'cudaStreamCaptureModeRelaxed' in found_values}} - cudaStreamCaptureModeRelaxed = ccudart.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed{{endif}} +
cudaStreamCaptureModeRelaxed = cyruntime.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed{{endif}} {{endif}} {{if 'cudaSynchronizationPolicy' in found_types}} @@ -2300,13 +2300,13 @@ class cudaSynchronizationPolicy(IntEnum): """ {{if 'cudaSyncPolicyAuto' in found_values}} - cudaSyncPolicyAuto = ccudart.cudaSynchronizationPolicy.cudaSyncPolicyAuto{{endif}} + cudaSyncPolicyAuto = cyruntime.cudaSynchronizationPolicy.cudaSyncPolicyAuto{{endif}} {{if 'cudaSyncPolicySpin' in found_values}} - cudaSyncPolicySpin = ccudart.cudaSynchronizationPolicy.cudaSyncPolicySpin{{endif}} + cudaSyncPolicySpin = cyruntime.cudaSynchronizationPolicy.cudaSyncPolicySpin{{endif}} {{if 'cudaSyncPolicyYield' in found_values}} - cudaSyncPolicyYield = ccudart.cudaSynchronizationPolicy.cudaSyncPolicyYield{{endif}} + cudaSyncPolicyYield = cyruntime.cudaSynchronizationPolicy.cudaSyncPolicyYield{{endif}} {{if 'cudaSyncPolicyBlockingSync' in found_values}} - cudaSyncPolicyBlockingSync = ccudart.cudaSynchronizationPolicy.cudaSyncPolicyBlockingSync{{endif}} + cudaSyncPolicyBlockingSync = cyruntime.cudaSynchronizationPolicy.cudaSyncPolicyBlockingSync{{endif}} {{endif}} {{if 'cudaClusterSchedulingPolicy' in found_types}} @@ -2318,16 +2318,16 @@ class cudaClusterSchedulingPolicy(IntEnum): {{if 'cudaClusterSchedulingPolicyDefault' in found_values}} #: the default policy - cudaClusterSchedulingPolicyDefault = ccudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyDefault{{endif}} + cudaClusterSchedulingPolicyDefault = cyruntime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyDefault{{endif}} {{if 'cudaClusterSchedulingPolicySpread' in found_values}} #: spread the blocks within a cluster to the SMs - cudaClusterSchedulingPolicySpread = ccudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicySpread{{endif}} + cudaClusterSchedulingPolicySpread = cyruntime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicySpread{{endif}} {{if 'cudaClusterSchedulingPolicyLoadBalancing' in found_values}} #: allow the hardware to load-balance the blocks in a cluster to the #: SMs - cudaClusterSchedulingPolicyLoadBalancing = ccudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyLoadBalancing{{endif}} + cudaClusterSchedulingPolicyLoadBalancing = cyruntime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyLoadBalancing{{endif}} {{endif}} {{if 'cudaStreamUpdateCaptureDependenciesFlags' in found_types}} @@ -2338,11 +2338,11 @@ class cudaStreamUpdateCaptureDependenciesFlags(IntEnum): {{if 'cudaStreamAddCaptureDependencies' in found_values}} #: Add new nodes to the dependency set - cudaStreamAddCaptureDependencies = ccudart.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamAddCaptureDependencies{{endif}} + cudaStreamAddCaptureDependencies = cyruntime.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamAddCaptureDependencies{{endif}} {{if 'cudaStreamSetCaptureDependencies' in found_values}} #: Replace the dependency set with the new nodes - cudaStreamSetCaptureDependencies = ccudart.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamSetCaptureDependencies{{endif}} + cudaStreamSetCaptureDependencies = cyruntime.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamSetCaptureDependencies{{endif}} {{endif}} {{if 'cudaUserObjectFlags' in found_types}} @@ -2354,7 +2354,7 @@ class cudaUserObjectFlags(IntEnum): #: Indicates the destructor execution is not synchronized by any CUDA #: handle. 
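The capture enums renamed above (cudaStreamCaptureStatus, cudaStreamCaptureMode) are normally exercised through the stream-capture API. A hedged sketch, assuming the renamed module keeps the same tuple-returning wrappers as today's cuda.cudart bindings; error checking and cleanup are elided:

    # Sketch only: capture an (empty) stream into a CUDA graph.
    from cuda.bindings import runtime

    err, stream = runtime.cudaStreamCreate()
    err, = runtime.cudaStreamBeginCapture(
        stream, runtime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
    err, status = runtime.cudaStreamIsCapturing(stream)
    assert status == runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive
    err, graph = runtime.cudaStreamEndCapture(stream)  # yields a cudaGraph_t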
- cudaUserObjectNoDestructorSync = ccudart.cudaUserObjectFlags.cudaUserObjectNoDestructorSync{{endif}} + cudaUserObjectNoDestructorSync = cyruntime.cudaUserObjectFlags.cudaUserObjectNoDestructorSync{{endif}} {{endif}} {{if 'cudaUserObjectRetainFlags' in found_types}} @@ -2366,7 +2366,7 @@ class cudaUserObjectRetainFlags(IntEnum): #: Transfer references from the caller rather than creating new #: references. - cudaGraphUserObjectMove = ccudart.cudaUserObjectRetainFlags.cudaGraphUserObjectMove{{endif}} + cudaGraphUserObjectMove = cyruntime.cudaUserObjectRetainFlags.cudaGraphUserObjectMove{{endif}} {{endif}} {{if 'cudaGraphicsRegisterFlags' in found_types}} @@ -2377,23 +2377,23 @@ class cudaGraphicsRegisterFlags(IntEnum): {{if 'cudaGraphicsRegisterFlagsNone' in found_values}} #: Default - cudaGraphicsRegisterFlagsNone = ccudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone{{endif}} + cudaGraphicsRegisterFlagsNone = cyruntime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone{{endif}} {{if 'cudaGraphicsRegisterFlagsReadOnly' in found_values}} #: CUDA will not write to this resource - cudaGraphicsRegisterFlagsReadOnly = ccudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly{{endif}} + cudaGraphicsRegisterFlagsReadOnly = cyruntime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly{{endif}} {{if 'cudaGraphicsRegisterFlagsWriteDiscard' in found_values}} #: CUDA will only write to and will not read from this resource - cudaGraphicsRegisterFlagsWriteDiscard = ccudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard{{endif}} + cudaGraphicsRegisterFlagsWriteDiscard = cyruntime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard{{endif}} {{if 'cudaGraphicsRegisterFlagsSurfaceLoadStore' in found_values}} #: CUDA will bind this resource to a surface reference - cudaGraphicsRegisterFlagsSurfaceLoadStore = ccudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsSurfaceLoadStore{{endif}} + cudaGraphicsRegisterFlagsSurfaceLoadStore = cyruntime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsSurfaceLoadStore{{endif}} {{if 'cudaGraphicsRegisterFlagsTextureGather' in found_values}} #: CUDA will perform texture gather operations on this resource - cudaGraphicsRegisterFlagsTextureGather = ccudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsTextureGather{{endif}} + cudaGraphicsRegisterFlagsTextureGather = cyruntime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsTextureGather{{endif}} {{endif}} {{if 'cudaGraphicsMapFlags' in found_types}} @@ -2404,15 +2404,15 @@ class cudaGraphicsMapFlags(IntEnum): {{if 'cudaGraphicsMapFlagsNone' in found_values}} #: Default; Assume resource can be read/written - cudaGraphicsMapFlagsNone = ccudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsNone{{endif}} + cudaGraphicsMapFlagsNone = cyruntime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsNone{{endif}} {{if 'cudaGraphicsMapFlagsReadOnly' in found_values}} #: CUDA will not write to this resource - cudaGraphicsMapFlagsReadOnly = ccudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsReadOnly{{endif}} + cudaGraphicsMapFlagsReadOnly = cyruntime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsReadOnly{{endif}} {{if 'cudaGraphicsMapFlagsWriteDiscard' in found_values}} #: CUDA will only write to and will not read from this resource - cudaGraphicsMapFlagsWriteDiscard = ccudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsWriteDiscard{{endif}} + cudaGraphicsMapFlagsWriteDiscard = cyruntime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsWriteDiscard{{endif}} {{endif}} {{if 
'cudaGraphicsCubeFace' in found_types}} @@ -2423,27 +2423,27 @@ class cudaGraphicsCubeFace(IntEnum): {{if 'cudaGraphicsCubeFacePositiveX' in found_values}} #: Positive X face of cubemap - cudaGraphicsCubeFacePositiveX = ccudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveX{{endif}} + cudaGraphicsCubeFacePositiveX = cyruntime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveX{{endif}} {{if 'cudaGraphicsCubeFaceNegativeX' in found_values}} #: Negative X face of cubemap - cudaGraphicsCubeFaceNegativeX = ccudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeX{{endif}} + cudaGraphicsCubeFaceNegativeX = cyruntime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeX{{endif}} {{if 'cudaGraphicsCubeFacePositiveY' in found_values}} #: Positive Y face of cubemap - cudaGraphicsCubeFacePositiveY = ccudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveY{{endif}} + cudaGraphicsCubeFacePositiveY = cyruntime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveY{{endif}} {{if 'cudaGraphicsCubeFaceNegativeY' in found_values}} #: Negative Y face of cubemap - cudaGraphicsCubeFaceNegativeY = ccudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeY{{endif}} + cudaGraphicsCubeFaceNegativeY = cyruntime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeY{{endif}} {{if 'cudaGraphicsCubeFacePositiveZ' in found_values}} #: Positive Z face of cubemap - cudaGraphicsCubeFacePositiveZ = ccudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveZ{{endif}} + cudaGraphicsCubeFacePositiveZ = cyruntime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveZ{{endif}} {{if 'cudaGraphicsCubeFaceNegativeZ' in found_values}} #: Negative Z face of cubemap - cudaGraphicsCubeFaceNegativeZ = ccudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeZ{{endif}} + cudaGraphicsCubeFaceNegativeZ = cyruntime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeZ{{endif}} {{endif}} {{if 'cudaResourceType' in found_types}} @@ -2454,19 +2454,19 @@ class cudaResourceType(IntEnum): {{if 'cudaResourceTypeArray' in found_values}} #: Array resource - cudaResourceTypeArray = ccudart.cudaResourceType.cudaResourceTypeArray{{endif}} + cudaResourceTypeArray = cyruntime.cudaResourceType.cudaResourceTypeArray{{endif}} {{if 'cudaResourceTypeMipmappedArray' in found_values}} #: Mipmapped array resource - cudaResourceTypeMipmappedArray = ccudart.cudaResourceType.cudaResourceTypeMipmappedArray{{endif}} + cudaResourceTypeMipmappedArray = cyruntime.cudaResourceType.cudaResourceTypeMipmappedArray{{endif}} {{if 'cudaResourceTypeLinear' in found_values}} #: Linear resource - cudaResourceTypeLinear = ccudart.cudaResourceType.cudaResourceTypeLinear{{endif}} + cudaResourceTypeLinear = cyruntime.cudaResourceType.cudaResourceTypeLinear{{endif}} {{if 'cudaResourceTypePitch2D' in found_values}} #: Pitch 2D resource - cudaResourceTypePitch2D = ccudart.cudaResourceType.cudaResourceTypePitch2D{{endif}} + cudaResourceTypePitch2D = cyruntime.cudaResourceType.cudaResourceTypePitch2D{{endif}} {{endif}} {{if 'cudaResourceViewFormat' in found_types}} @@ -2477,143 +2477,143 @@ class cudaResourceViewFormat(IntEnum): {{if 'cudaResViewFormatNone' in found_values}} #: No resource view format (use underlying resource format) - cudaResViewFormatNone = ccudart.cudaResourceViewFormat.cudaResViewFormatNone{{endif}} + cudaResViewFormatNone = cyruntime.cudaResourceViewFormat.cudaResViewFormatNone{{endif}} {{if 'cudaResViewFormatUnsignedChar1' in found_values}} #: 1 channel unsigned 8-bit integers - cudaResViewFormatUnsignedChar1 = 
ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar1{{endif}} + cudaResViewFormatUnsignedChar1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar1{{endif}} {{if 'cudaResViewFormatUnsignedChar2' in found_values}} #: 2 channel unsigned 8-bit integers - cudaResViewFormatUnsignedChar2 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar2{{endif}} + cudaResViewFormatUnsignedChar2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar2{{endif}} {{if 'cudaResViewFormatUnsignedChar4' in found_values}} #: 4 channel unsigned 8-bit integers - cudaResViewFormatUnsignedChar4 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar4{{endif}} + cudaResViewFormatUnsignedChar4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar4{{endif}} {{if 'cudaResViewFormatSignedChar1' in found_values}} #: 1 channel signed 8-bit integers - cudaResViewFormatSignedChar1 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedChar1{{endif}} + cudaResViewFormatSignedChar1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedChar1{{endif}} {{if 'cudaResViewFormatSignedChar2' in found_values}} #: 2 channel signed 8-bit integers - cudaResViewFormatSignedChar2 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedChar2{{endif}} + cudaResViewFormatSignedChar2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedChar2{{endif}} {{if 'cudaResViewFormatSignedChar4' in found_values}} #: 4 channel signed 8-bit integers - cudaResViewFormatSignedChar4 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedChar4{{endif}} + cudaResViewFormatSignedChar4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedChar4{{endif}} {{if 'cudaResViewFormatUnsignedShort1' in found_values}} #: 1 channel unsigned 16-bit integers - cudaResViewFormatUnsignedShort1 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort1{{endif}} + cudaResViewFormatUnsignedShort1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort1{{endif}} {{if 'cudaResViewFormatUnsignedShort2' in found_values}} #: 2 channel unsigned 16-bit integers - cudaResViewFormatUnsignedShort2 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort2{{endif}} + cudaResViewFormatUnsignedShort2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort2{{endif}} {{if 'cudaResViewFormatUnsignedShort4' in found_values}} #: 4 channel unsigned 16-bit integers - cudaResViewFormatUnsignedShort4 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort4{{endif}} + cudaResViewFormatUnsignedShort4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort4{{endif}} {{if 'cudaResViewFormatSignedShort1' in found_values}} #: 1 channel signed 16-bit integers - cudaResViewFormatSignedShort1 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedShort1{{endif}} + cudaResViewFormatSignedShort1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedShort1{{endif}} {{if 'cudaResViewFormatSignedShort2' in found_values}} #: 2 channel signed 16-bit integers - cudaResViewFormatSignedShort2 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedShort2{{endif}} + cudaResViewFormatSignedShort2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedShort2{{endif}} {{if 'cudaResViewFormatSignedShort4' in found_values}} #: 4 channel signed 16-bit integers - cudaResViewFormatSignedShort4 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedShort4{{endif}} + cudaResViewFormatSignedShort4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedShort4{{endif}} {{if 
'cudaResViewFormatUnsignedInt1' in found_values}} #: 1 channel unsigned 32-bit integers - cudaResViewFormatUnsignedInt1 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt1{{endif}} + cudaResViewFormatUnsignedInt1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt1{{endif}} {{if 'cudaResViewFormatUnsignedInt2' in found_values}} #: 2 channel unsigned 32-bit integers - cudaResViewFormatUnsignedInt2 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt2{{endif}} + cudaResViewFormatUnsignedInt2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt2{{endif}} {{if 'cudaResViewFormatUnsignedInt4' in found_values}} #: 4 channel unsigned 32-bit integers - cudaResViewFormatUnsignedInt4 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt4{{endif}} + cudaResViewFormatUnsignedInt4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt4{{endif}} {{if 'cudaResViewFormatSignedInt1' in found_values}} #: 1 channel signed 32-bit integers - cudaResViewFormatSignedInt1 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedInt1{{endif}} + cudaResViewFormatSignedInt1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedInt1{{endif}} {{if 'cudaResViewFormatSignedInt2' in found_values}} #: 2 channel signed 32-bit integers - cudaResViewFormatSignedInt2 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedInt2{{endif}} + cudaResViewFormatSignedInt2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedInt2{{endif}} {{if 'cudaResViewFormatSignedInt4' in found_values}} #: 4 channel signed 32-bit integers - cudaResViewFormatSignedInt4 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedInt4{{endif}} + cudaResViewFormatSignedInt4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedInt4{{endif}} {{if 'cudaResViewFormatHalf1' in found_values}} #: 1 channel 16-bit floating point - cudaResViewFormatHalf1 = ccudart.cudaResourceViewFormat.cudaResViewFormatHalf1{{endif}} + cudaResViewFormatHalf1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatHalf1{{endif}} {{if 'cudaResViewFormatHalf2' in found_values}} #: 2 channel 16-bit floating point - cudaResViewFormatHalf2 = ccudart.cudaResourceViewFormat.cudaResViewFormatHalf2{{endif}} + cudaResViewFormatHalf2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatHalf2{{endif}} {{if 'cudaResViewFormatHalf4' in found_values}} #: 4 channel 16-bit floating point - cudaResViewFormatHalf4 = ccudart.cudaResourceViewFormat.cudaResViewFormatHalf4{{endif}} + cudaResViewFormatHalf4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatHalf4{{endif}} {{if 'cudaResViewFormatFloat1' in found_values}} #: 1 channel 32-bit floating point - cudaResViewFormatFloat1 = ccudart.cudaResourceViewFormat.cudaResViewFormatFloat1{{endif}} + cudaResViewFormatFloat1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatFloat1{{endif}} {{if 'cudaResViewFormatFloat2' in found_values}} #: 2 channel 32-bit floating point - cudaResViewFormatFloat2 = ccudart.cudaResourceViewFormat.cudaResViewFormatFloat2{{endif}} + cudaResViewFormatFloat2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatFloat2{{endif}} {{if 'cudaResViewFormatFloat4' in found_values}} #: 4 channel 32-bit floating point - cudaResViewFormatFloat4 = ccudart.cudaResourceViewFormat.cudaResViewFormatFloat4{{endif}} + cudaResViewFormatFloat4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatFloat4{{endif}} {{if 'cudaResViewFormatUnsignedBlockCompressed1' in found_values}} #: Block compressed 1 - cudaResViewFormatUnsignedBlockCompressed1 = 
ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed1{{endif}} + cudaResViewFormatUnsignedBlockCompressed1 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed1{{endif}} {{if 'cudaResViewFormatUnsignedBlockCompressed2' in found_values}} #: Block compressed 2 - cudaResViewFormatUnsignedBlockCompressed2 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed2{{endif}} + cudaResViewFormatUnsignedBlockCompressed2 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed2{{endif}} {{if 'cudaResViewFormatUnsignedBlockCompressed3' in found_values}} #: Block compressed 3 - cudaResViewFormatUnsignedBlockCompressed3 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed3{{endif}} + cudaResViewFormatUnsignedBlockCompressed3 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed3{{endif}} {{if 'cudaResViewFormatUnsignedBlockCompressed4' in found_values}} #: Block compressed 4 unsigned - cudaResViewFormatUnsignedBlockCompressed4 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed4{{endif}} + cudaResViewFormatUnsignedBlockCompressed4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed4{{endif}} {{if 'cudaResViewFormatSignedBlockCompressed4' in found_values}} #: Block compressed 4 signed - cudaResViewFormatSignedBlockCompressed4 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed4{{endif}} + cudaResViewFormatSignedBlockCompressed4 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed4{{endif}} {{if 'cudaResViewFormatUnsignedBlockCompressed5' in found_values}} #: Block compressed 5 unsigned - cudaResViewFormatUnsignedBlockCompressed5 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed5{{endif}} + cudaResViewFormatUnsignedBlockCompressed5 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed5{{endif}} {{if 'cudaResViewFormatSignedBlockCompressed5' in found_values}} #: Block compressed 5 signed - cudaResViewFormatSignedBlockCompressed5 = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed5{{endif}} + cudaResViewFormatSignedBlockCompressed5 = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed5{{endif}} {{if 'cudaResViewFormatUnsignedBlockCompressed6H' in found_values}} #: Block compressed 6 unsigned half-float - cudaResViewFormatUnsignedBlockCompressed6H = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed6H{{endif}} + cudaResViewFormatUnsignedBlockCompressed6H = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed6H{{endif}} {{if 'cudaResViewFormatSignedBlockCompressed6H' in found_values}} #: Block compressed 6 signed half-float - cudaResViewFormatSignedBlockCompressed6H = ccudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed6H{{endif}} + cudaResViewFormatSignedBlockCompressed6H = cyruntime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed6H{{endif}} {{if 'cudaResViewFormatUnsignedBlockCompressed7' in found_values}} #: Block compressed 7 - cudaResViewFormatUnsignedBlockCompressed7 = ccudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed7{{endif}} + cudaResViewFormatUnsignedBlockCompressed7 = cyruntime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed7{{endif}} {{endif}} {{if 'cudaFuncAttribute' in found_types}} @@ -2625,38 +2625,38 @@ class cudaFuncAttribute(IntEnum): {{if 'cudaFuncAttributeMaxDynamicSharedMemorySize' in 
found_values}} #: Maximum dynamic shared memory size - cudaFuncAttributeMaxDynamicSharedMemorySize = ccudart.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize{{endif}} + cudaFuncAttributeMaxDynamicSharedMemorySize = cyruntime.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize{{endif}} {{if 'cudaFuncAttributePreferredSharedMemoryCarveout' in found_values}} #: Preferred shared memory-L1 cache split - cudaFuncAttributePreferredSharedMemoryCarveout = ccudart.cudaFuncAttribute.cudaFuncAttributePreferredSharedMemoryCarveout{{endif}} + cudaFuncAttributePreferredSharedMemoryCarveout = cyruntime.cudaFuncAttribute.cudaFuncAttributePreferredSharedMemoryCarveout{{endif}} {{if 'cudaFuncAttributeClusterDimMustBeSet' in found_values}} #: Indicator to enforce valid cluster dimension specification on kernel #: launch - cudaFuncAttributeClusterDimMustBeSet = ccudart.cudaFuncAttribute.cudaFuncAttributeClusterDimMustBeSet{{endif}} + cudaFuncAttributeClusterDimMustBeSet = cyruntime.cudaFuncAttribute.cudaFuncAttributeClusterDimMustBeSet{{endif}} {{if 'cudaFuncAttributeRequiredClusterWidth' in found_values}} #: Required cluster width - cudaFuncAttributeRequiredClusterWidth = ccudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterWidth{{endif}} + cudaFuncAttributeRequiredClusterWidth = cyruntime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterWidth{{endif}} {{if 'cudaFuncAttributeRequiredClusterHeight' in found_values}} #: Required cluster height - cudaFuncAttributeRequiredClusterHeight = ccudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterHeight{{endif}} + cudaFuncAttributeRequiredClusterHeight = cyruntime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterHeight{{endif}} {{if 'cudaFuncAttributeRequiredClusterDepth' in found_values}} #: Required cluster depth - cudaFuncAttributeRequiredClusterDepth = ccudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterDepth{{endif}} + cudaFuncAttributeRequiredClusterDepth = cyruntime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterDepth{{endif}} {{if 'cudaFuncAttributeNonPortableClusterSizeAllowed' in found_values}} #: Whether non-portable cluster scheduling policy is supported - cudaFuncAttributeNonPortableClusterSizeAllowed = ccudart.cudaFuncAttribute.cudaFuncAttributeNonPortableClusterSizeAllowed{{endif}} + cudaFuncAttributeNonPortableClusterSizeAllowed = cyruntime.cudaFuncAttribute.cudaFuncAttributeNonPortableClusterSizeAllowed{{endif}} {{if 'cudaFuncAttributeClusterSchedulingPolicyPreference' in found_values}} #: Required cluster scheduling policy preference - cudaFuncAttributeClusterSchedulingPolicyPreference = ccudart.cudaFuncAttribute.cudaFuncAttributeClusterSchedulingPolicyPreference{{endif}} + cudaFuncAttributeClusterSchedulingPolicyPreference = cyruntime.cudaFuncAttribute.cudaFuncAttributeClusterSchedulingPolicyPreference{{endif}} {{if 'cudaFuncAttributeMax' in found_values}} - cudaFuncAttributeMax = ccudart.cudaFuncAttribute.cudaFuncAttributeMax{{endif}} + cudaFuncAttributeMax = cyruntime.cudaFuncAttribute.cudaFuncAttributeMax{{endif}} {{endif}} {{if 'cudaFuncCache' in found_types}} @@ -2667,19 +2667,19 @@ class cudaFuncCache(IntEnum): {{if 'cudaFuncCachePreferNone' in found_values}} #: Default function cache configuration, no preference - cudaFuncCachePreferNone = ccudart.cudaFuncCache.cudaFuncCachePreferNone{{endif}} + cudaFuncCachePreferNone = cyruntime.cudaFuncCache.cudaFuncCachePreferNone{{endif}} {{if 'cudaFuncCachePreferShared' in found_values}} #: Prefer larger shared memory and smaller L1 cache - 
cudaFuncCachePreferShared = ccudart.cudaFuncCache.cudaFuncCachePreferShared{{endif}} + cudaFuncCachePreferShared = cyruntime.cudaFuncCache.cudaFuncCachePreferShared{{endif}} {{if 'cudaFuncCachePreferL1' in found_values}} #: Prefer larger L1 cache and smaller shared memory - cudaFuncCachePreferL1 = ccudart.cudaFuncCache.cudaFuncCachePreferL1{{endif}} + cudaFuncCachePreferL1 = cyruntime.cudaFuncCache.cudaFuncCachePreferL1{{endif}} {{if 'cudaFuncCachePreferEqual' in found_values}} #: Prefer equal size L1 cache and shared memory - cudaFuncCachePreferEqual = ccudart.cudaFuncCache.cudaFuncCachePreferEqual{{endif}} + cudaFuncCachePreferEqual = cyruntime.cudaFuncCache.cudaFuncCachePreferEqual{{endif}} {{endif}} {{if 'cudaSharedMemConfig' in found_types}} @@ -2688,11 +2688,11 @@ class cudaSharedMemConfig(IntEnum): CUDA shared memory configuration [Deprecated] """ {{if 'cudaSharedMemBankSizeDefault' in found_values}} - cudaSharedMemBankSizeDefault = ccudart.cudaSharedMemConfig.cudaSharedMemBankSizeDefault{{endif}} + cudaSharedMemBankSizeDefault = cyruntime.cudaSharedMemConfig.cudaSharedMemBankSizeDefault{{endif}} {{if 'cudaSharedMemBankSizeFourByte' in found_values}} - cudaSharedMemBankSizeFourByte = ccudart.cudaSharedMemConfig.cudaSharedMemBankSizeFourByte{{endif}} + cudaSharedMemBankSizeFourByte = cyruntime.cudaSharedMemConfig.cudaSharedMemBankSizeFourByte{{endif}} {{if 'cudaSharedMemBankSizeEightByte' in found_values}} - cudaSharedMemBankSizeEightByte = ccudart.cudaSharedMemConfig.cudaSharedMemBankSizeEightByte{{endif}} + cudaSharedMemBankSizeEightByte = cyruntime.cudaSharedMemConfig.cudaSharedMemBankSizeEightByte{{endif}} {{endif}} {{if 'cudaSharedCarveout' in found_types}} @@ -2704,15 +2704,15 @@ class cudaSharedCarveout(IntEnum): {{if 'cudaSharedmemCarveoutDefault' in found_values}} #: No preference for shared memory or L1 (default) - cudaSharedmemCarveoutDefault = ccudart.cudaSharedCarveout.cudaSharedmemCarveoutDefault{{endif}} + cudaSharedmemCarveoutDefault = cyruntime.cudaSharedCarveout.cudaSharedmemCarveoutDefault{{endif}} {{if 'cudaSharedmemCarveoutMaxL1' in found_values}} #: Prefer maximum available L1 cache, minimum shared memory - cudaSharedmemCarveoutMaxL1 = ccudart.cudaSharedCarveout.cudaSharedmemCarveoutMaxL1{{endif}} + cudaSharedmemCarveoutMaxL1 = cyruntime.cudaSharedCarveout.cudaSharedmemCarveoutMaxL1{{endif}} {{if 'cudaSharedmemCarveoutMaxShared' in found_values}} #: Prefer maximum available shared memory, minimum L1 cache - cudaSharedmemCarveoutMaxShared = ccudart.cudaSharedCarveout.cudaSharedmemCarveoutMaxShared{{endif}} + cudaSharedmemCarveoutMaxShared = cyruntime.cudaSharedCarveout.cudaSharedmemCarveoutMaxShared{{endif}} {{endif}} {{if 'cudaComputeMode' in found_types}} @@ -2724,22 +2724,22 @@ class cudaComputeMode(IntEnum): #: Default compute mode (Multiple threads can use #: :py:obj:`~.cudaSetDevice()` with this device) - cudaComputeModeDefault = ccudart.cudaComputeMode.cudaComputeModeDefault{{endif}} + cudaComputeModeDefault = cyruntime.cudaComputeMode.cudaComputeModeDefault{{endif}} {{if 'cudaComputeModeExclusive' in found_values}} #: Compute-exclusive-thread mode (Only one thread in one process will #: be able to use :py:obj:`~.cudaSetDevice()` with this device) - cudaComputeModeExclusive = ccudart.cudaComputeMode.cudaComputeModeExclusive{{endif}} + cudaComputeModeExclusive = cyruntime.cudaComputeMode.cudaComputeModeExclusive{{endif}} {{if 'cudaComputeModeProhibited' in found_values}} #: Compute-prohibited mode (No threads can use #: :py:obj:`~.cudaSetDevice()` with this 
device) - cudaComputeModeProhibited = ccudart.cudaComputeMode.cudaComputeModeProhibited{{endif}} + cudaComputeModeProhibited = cyruntime.cudaComputeMode.cudaComputeModeProhibited{{endif}} {{if 'cudaComputeModeExclusiveProcess' in found_values}} #: Compute-exclusive-process mode (Many threads in one process will be #: able to use :py:obj:`~.cudaSetDevice()` with this device) - cudaComputeModeExclusiveProcess = ccudart.cudaComputeMode.cudaComputeModeExclusiveProcess{{endif}} + cudaComputeModeExclusiveProcess = cyruntime.cudaComputeMode.cudaComputeModeExclusiveProcess{{endif}} {{endif}} {{if 'cudaLimit' in found_types}} @@ -2750,32 +2750,32 @@ class cudaLimit(IntEnum): {{if 'cudaLimitStackSize' in found_values}} #: GPU thread stack size - cudaLimitStackSize = ccudart.cudaLimit.cudaLimitStackSize{{endif}} + cudaLimitStackSize = cyruntime.cudaLimit.cudaLimitStackSize{{endif}} {{if 'cudaLimitPrintfFifoSize' in found_values}} #: GPU printf FIFO size - cudaLimitPrintfFifoSize = ccudart.cudaLimit.cudaLimitPrintfFifoSize{{endif}} + cudaLimitPrintfFifoSize = cyruntime.cudaLimit.cudaLimitPrintfFifoSize{{endif}} {{if 'cudaLimitMallocHeapSize' in found_values}} #: GPU malloc heap size - cudaLimitMallocHeapSize = ccudart.cudaLimit.cudaLimitMallocHeapSize{{endif}} + cudaLimitMallocHeapSize = cyruntime.cudaLimit.cudaLimitMallocHeapSize{{endif}} {{if 'cudaLimitDevRuntimeSyncDepth' in found_values}} #: GPU device runtime synchronize depth - cudaLimitDevRuntimeSyncDepth = ccudart.cudaLimit.cudaLimitDevRuntimeSyncDepth{{endif}} + cudaLimitDevRuntimeSyncDepth = cyruntime.cudaLimit.cudaLimitDevRuntimeSyncDepth{{endif}} {{if 'cudaLimitDevRuntimePendingLaunchCount' in found_values}} #: GPU device runtime pending launch count - cudaLimitDevRuntimePendingLaunchCount = ccudart.cudaLimit.cudaLimitDevRuntimePendingLaunchCount{{endif}} + cudaLimitDevRuntimePendingLaunchCount = cyruntime.cudaLimit.cudaLimitDevRuntimePendingLaunchCount{{endif}} {{if 'cudaLimitMaxL2FetchGranularity' in found_values}} #: A value between 0 and 128 that indicates the maximum fetch #: granularity of L2 (in Bytes). 
This is a hint - cudaLimitMaxL2FetchGranularity = ccudart.cudaLimit.cudaLimitMaxL2FetchGranularity{{endif}} + cudaLimitMaxL2FetchGranularity = cyruntime.cudaLimit.cudaLimitMaxL2FetchGranularity{{endif}} {{if 'cudaLimitPersistingL2CacheSize' in found_values}} #: A size in bytes for L2 persisting lines cache size - cudaLimitPersistingL2CacheSize = ccudart.cudaLimit.cudaLimitPersistingL2CacheSize{{endif}} + cudaLimitPersistingL2CacheSize = cyruntime.cudaLimit.cudaLimitPersistingL2CacheSize{{endif}} {{endif}} {{if 'cudaMemoryAdvise' in found_types}} @@ -2786,29 +2786,29 @@ class cudaMemoryAdvise(IntEnum): {{if 'cudaMemAdviseSetReadMostly' in found_values}} #: Data will mostly be read and only occasionally be written to - cudaMemAdviseSetReadMostly = ccudart.cudaMemoryAdvise.cudaMemAdviseSetReadMostly{{endif}} + cudaMemAdviseSetReadMostly = cyruntime.cudaMemoryAdvise.cudaMemAdviseSetReadMostly{{endif}} {{if 'cudaMemAdviseUnsetReadMostly' in found_values}} #: Undo the effect of :py:obj:`~.cudaMemAdviseSetReadMostly` - cudaMemAdviseUnsetReadMostly = ccudart.cudaMemoryAdvise.cudaMemAdviseUnsetReadMostly{{endif}} + cudaMemAdviseUnsetReadMostly = cyruntime.cudaMemoryAdvise.cudaMemAdviseUnsetReadMostly{{endif}} {{if 'cudaMemAdviseSetPreferredLocation' in found_values}} #: Set the preferred location for the data as the specified device - cudaMemAdviseSetPreferredLocation = ccudart.cudaMemoryAdvise.cudaMemAdviseSetPreferredLocation{{endif}} + cudaMemAdviseSetPreferredLocation = cyruntime.cudaMemoryAdvise.cudaMemAdviseSetPreferredLocation{{endif}} {{if 'cudaMemAdviseUnsetPreferredLocation' in found_values}} #: Clear the preferred location for the data - cudaMemAdviseUnsetPreferredLocation = ccudart.cudaMemoryAdvise.cudaMemAdviseUnsetPreferredLocation{{endif}} + cudaMemAdviseUnsetPreferredLocation = cyruntime.cudaMemoryAdvise.cudaMemAdviseUnsetPreferredLocation{{endif}} {{if 'cudaMemAdviseSetAccessedBy' in found_values}} #: Data will be accessed by the specified device, so prevent page #: faults as much as possible - cudaMemAdviseSetAccessedBy = ccudart.cudaMemoryAdvise.cudaMemAdviseSetAccessedBy{{endif}} + cudaMemAdviseSetAccessedBy = cyruntime.cudaMemoryAdvise.cudaMemAdviseSetAccessedBy{{endif}} {{if 'cudaMemAdviseUnsetAccessedBy' in found_values}} #: Let the Unified Memory subsystem decide on the page faulting policy #: for the specified device - cudaMemAdviseUnsetAccessedBy = ccudart.cudaMemoryAdvise.cudaMemAdviseUnsetAccessedBy{{endif}} + cudaMemAdviseUnsetAccessedBy = cyruntime.cudaMemoryAdvise.cudaMemAdviseUnsetAccessedBy{{endif}} {{endif}} {{if 'cudaMemRangeAttribute' in found_types}} @@ -2820,36 +2820,36 @@ class cudaMemRangeAttribute(IntEnum): {{if 'cudaMemRangeAttributeReadMostly' in found_values}} #: Whether the range will mostly be read and only occasionally be #: written to - cudaMemRangeAttributeReadMostly = ccudart.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly{{endif}} + cudaMemRangeAttributeReadMostly = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly{{endif}} {{if 'cudaMemRangeAttributePreferredLocation' in found_values}} #: The preferred location of the range - cudaMemRangeAttributePreferredLocation = ccudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocation{{endif}} + cudaMemRangeAttributePreferredLocation = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocation{{endif}} {{if 'cudaMemRangeAttributeAccessedBy' in found_values}} #: Memory range has :py:obj:`~.cudaMemAdviseSetAccessedBy` set for #: specified device - cudaMemRangeAttributeAccessedBy =
ccudart.cudaMemRangeAttribute.cudaMemRangeAttributeAccessedBy{{endif}} + cudaMemRangeAttributeAccessedBy = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributeAccessedBy{{endif}} {{if 'cudaMemRangeAttributeLastPrefetchLocation' in found_values}} #: The last location to which the range was prefetched - cudaMemRangeAttributeLastPrefetchLocation = ccudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocation{{endif}} + cudaMemRangeAttributeLastPrefetchLocation = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocation{{endif}} {{if 'cudaMemRangeAttributePreferredLocationType' in found_values}} #: The preferred location type of the range - cudaMemRangeAttributePreferredLocationType = ccudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationType{{endif}} + cudaMemRangeAttributePreferredLocationType = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationType{{endif}} {{if 'cudaMemRangeAttributePreferredLocationId' in found_values}} #: The preferred location id of the range - cudaMemRangeAttributePreferredLocationId = ccudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationId{{endif}} + cudaMemRangeAttributePreferredLocationId = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationId{{endif}} {{if 'cudaMemRangeAttributeLastPrefetchLocationType' in found_values}} #: The last location type to which the range was prefetched - cudaMemRangeAttributeLastPrefetchLocationType = ccudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationType{{endif}} + cudaMemRangeAttributeLastPrefetchLocationType = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationType{{endif}} {{if 'cudaMemRangeAttributeLastPrefetchLocationId' in found_values}} #: The last location id to which the range was prefetched - cudaMemRangeAttributeLastPrefetchLocationId = ccudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationId{{endif}} + cudaMemRangeAttributeLastPrefetchLocationId = cyruntime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationId{{endif}} {{endif}} {{if 'cudaFlushGPUDirectRDMAWritesOptions' in found_types}} @@ -2861,13 +2861,13 @@ class cudaFlushGPUDirectRDMAWritesOptions(IntEnum): #: :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` and its CUDA Driver #: API counterpart are supported on the device. - cudaFlushGPUDirectRDMAWritesOptionHost = ccudart.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost{{endif}} + cudaFlushGPUDirectRDMAWritesOptionHost = cyruntime.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost{{endif}} {{if 'cudaFlushGPUDirectRDMAWritesOptionMemOps' in found_values}} #: The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the #: :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported #: on the CUDA device. - cudaFlushGPUDirectRDMAWritesOptionMemOps = ccudart.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionMemOps{{endif}} + cudaFlushGPUDirectRDMAWritesOptionMemOps = cyruntime.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionMemOps{{endif}} {{endif}} {{if 'cudaGPUDirectRDMAWritesOrdering' in found_types}} @@ -2880,17 +2880,17 @@ class cudaGPUDirectRDMAWritesOrdering(IntEnum): #: The device does not natively support ordering of GPUDirect RDMA #: writes. :py:obj:`~.cudaFlushGPUDirectRDMAWrites()` can be leveraged #: if supported. 
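The unified-memory enums above (cudaMemoryAdvise, cudaMemRangeAttribute) pair with cudaMemAdvise and the range-attribute queries. A short sketch under the same assumptions as before; the 1 MiB size and device 0 are arbitrary, and return codes go unchecked for brevity:

    # Sketch only: mark a managed allocation as read-mostly.
    from cuda.bindings import runtime

    nbytes = 1 << 20
    err, ptr = runtime.cudaMallocManaged(nbytes, runtime.cudaMemAttachGlobal)
    err, = runtime.cudaMemAdvise(
        ptr, nbytes, runtime.cudaMemoryAdvise.cudaMemAdviseSetReadMostly, 0)
    err, = runtime.cudaFree(ptr)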
- cudaGPUDirectRDMAWritesOrderingNone = ccudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingNone{{endif}} + cudaGPUDirectRDMAWritesOrderingNone = cyruntime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingNone{{endif}} {{if 'cudaGPUDirectRDMAWritesOrderingOwner' in found_values}} #: Natively, the device can consistently consume GPUDirect RDMA writes, #: although other CUDA devices may not. - cudaGPUDirectRDMAWritesOrderingOwner = ccudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingOwner{{endif}} + cudaGPUDirectRDMAWritesOrderingOwner = cyruntime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingOwner{{endif}} {{if 'cudaGPUDirectRDMAWritesOrderingAllDevices' in found_values}} #: Any CUDA device in the system can consistently consume GPUDirect #: RDMA writes to this device. - cudaGPUDirectRDMAWritesOrderingAllDevices = ccudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingAllDevices{{endif}} + cudaGPUDirectRDMAWritesOrderingAllDevices = cyruntime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingAllDevices{{endif}} {{endif}} {{if 'cudaFlushGPUDirectRDMAWritesScope' in found_types}} @@ -2902,11 +2902,11 @@ class cudaFlushGPUDirectRDMAWritesScope(IntEnum): #: Blocks until remote writes are visible to the CUDA device context #: owning the data. - cudaFlushGPUDirectRDMAWritesToOwner = ccudart.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner{{endif}} + cudaFlushGPUDirectRDMAWritesToOwner = cyruntime.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner{{endif}} {{if 'cudaFlushGPUDirectRDMAWritesToAllDevices' in found_values}} #: Blocks until remote writes are visible to all CUDA device contexts. - cudaFlushGPUDirectRDMAWritesToAllDevices = ccudart.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToAllDevices{{endif}} + cudaFlushGPUDirectRDMAWritesToAllDevices = cyruntime.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToAllDevices{{endif}} {{endif}} {{if 'cudaFlushGPUDirectRDMAWritesTarget' in found_types}} @@ -2918,7 +2918,7 @@ class cudaFlushGPUDirectRDMAWritesTarget(IntEnum): #: Sets the target for :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` #: to the currently active CUDA device context. 
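The block of hunks that follows applies the same rename to cudaDeviceAttr, which callers typically consume through cudaDeviceGetAttribute. One last sketch with the same caveats (device 0 arbitrary, error code unchecked):

    # Sketch only: query one of the renamed device attributes.
    from cuda.bindings import runtime

    err, value = runtime.cudaDeviceGetAttribute(
        runtime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock, 0)
    print("max threads per block:", value)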
- cudaFlushGPUDirectRDMAWritesTargetCurrentDevice = ccudart.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice{{endif}} + cudaFlushGPUDirectRDMAWritesTargetCurrentDevice = cyruntime.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice{{endif}} {{endif}} {{if 'cudaDeviceAttr' in found_types}} @@ -2929,524 +2929,524 @@ class cudaDeviceAttr(IntEnum): {{if 'cudaDevAttrMaxThreadsPerBlock' in found_values}} #: Maximum number of threads per block - cudaDevAttrMaxThreadsPerBlock = ccudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock{{endif}} + cudaDevAttrMaxThreadsPerBlock = cyruntime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock{{endif}} {{if 'cudaDevAttrMaxBlockDimX' in found_values}} #: Maximum block dimension X - cudaDevAttrMaxBlockDimX = ccudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimX{{endif}} + cudaDevAttrMaxBlockDimX = cyruntime.cudaDeviceAttr.cudaDevAttrMaxBlockDimX{{endif}} {{if 'cudaDevAttrMaxBlockDimY' in found_values}} #: Maximum block dimension Y - cudaDevAttrMaxBlockDimY = ccudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimY{{endif}} + cudaDevAttrMaxBlockDimY = cyruntime.cudaDeviceAttr.cudaDevAttrMaxBlockDimY{{endif}} {{if 'cudaDevAttrMaxBlockDimZ' in found_values}} #: Maximum block dimension Z - cudaDevAttrMaxBlockDimZ = ccudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimZ{{endif}} + cudaDevAttrMaxBlockDimZ = cyruntime.cudaDeviceAttr.cudaDevAttrMaxBlockDimZ{{endif}} {{if 'cudaDevAttrMaxGridDimX' in found_values}} #: Maximum grid dimension X - cudaDevAttrMaxGridDimX = ccudart.cudaDeviceAttr.cudaDevAttrMaxGridDimX{{endif}} + cudaDevAttrMaxGridDimX = cyruntime.cudaDeviceAttr.cudaDevAttrMaxGridDimX{{endif}} {{if 'cudaDevAttrMaxGridDimY' in found_values}} #: Maximum grid dimension Y - cudaDevAttrMaxGridDimY = ccudart.cudaDeviceAttr.cudaDevAttrMaxGridDimY{{endif}} + cudaDevAttrMaxGridDimY = cyruntime.cudaDeviceAttr.cudaDevAttrMaxGridDimY{{endif}} {{if 'cudaDevAttrMaxGridDimZ' in found_values}} #: Maximum grid dimension Z - cudaDevAttrMaxGridDimZ = ccudart.cudaDeviceAttr.cudaDevAttrMaxGridDimZ{{endif}} + cudaDevAttrMaxGridDimZ = cyruntime.cudaDeviceAttr.cudaDevAttrMaxGridDimZ{{endif}} {{if 'cudaDevAttrMaxSharedMemoryPerBlock' in found_values}} #: Maximum shared memory available per block in bytes - cudaDevAttrMaxSharedMemoryPerBlock = ccudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock{{endif}} + cudaDevAttrMaxSharedMemoryPerBlock = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock{{endif}} {{if 'cudaDevAttrTotalConstantMemory' in found_values}} #: Memory available on device for constant variables in a CUDA C kernel #: in bytes - cudaDevAttrTotalConstantMemory = ccudart.cudaDeviceAttr.cudaDevAttrTotalConstantMemory{{endif}} + cudaDevAttrTotalConstantMemory = cyruntime.cudaDeviceAttr.cudaDevAttrTotalConstantMemory{{endif}} {{if 'cudaDevAttrWarpSize' in found_values}} #: Warp size in threads - cudaDevAttrWarpSize = ccudart.cudaDeviceAttr.cudaDevAttrWarpSize{{endif}} + cudaDevAttrWarpSize = cyruntime.cudaDeviceAttr.cudaDevAttrWarpSize{{endif}} {{if 'cudaDevAttrMaxPitch' in found_values}} #: Maximum pitch in bytes allowed by memory copies - cudaDevAttrMaxPitch = ccudart.cudaDeviceAttr.cudaDevAttrMaxPitch{{endif}} + cudaDevAttrMaxPitch = cyruntime.cudaDeviceAttr.cudaDevAttrMaxPitch{{endif}} {{if 'cudaDevAttrMaxRegistersPerBlock' in found_values}} #: Maximum number of 32-bit registers available per block - cudaDevAttrMaxRegistersPerBlock = ccudart.cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock{{endif}} + 
cudaDevAttrMaxRegistersPerBlock = cyruntime.cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock{{endif}} {{if 'cudaDevAttrClockRate' in found_values}} #: Peak clock frequency in kilohertz - cudaDevAttrClockRate = ccudart.cudaDeviceAttr.cudaDevAttrClockRate{{endif}} + cudaDevAttrClockRate = cyruntime.cudaDeviceAttr.cudaDevAttrClockRate{{endif}} {{if 'cudaDevAttrTextureAlignment' in found_values}} #: Alignment requirement for textures - cudaDevAttrTextureAlignment = ccudart.cudaDeviceAttr.cudaDevAttrTextureAlignment{{endif}} + cudaDevAttrTextureAlignment = cyruntime.cudaDeviceAttr.cudaDevAttrTextureAlignment{{endif}} {{if 'cudaDevAttrGpuOverlap' in found_values}} #: Device can possibly copy memory and execute a kernel concurrently - cudaDevAttrGpuOverlap = ccudart.cudaDeviceAttr.cudaDevAttrGpuOverlap{{endif}} + cudaDevAttrGpuOverlap = cyruntime.cudaDeviceAttr.cudaDevAttrGpuOverlap{{endif}} {{if 'cudaDevAttrMultiProcessorCount' in found_values}} #: Number of multiprocessors on device - cudaDevAttrMultiProcessorCount = ccudart.cudaDeviceAttr.cudaDevAttrMultiProcessorCount{{endif}} + cudaDevAttrMultiProcessorCount = cyruntime.cudaDeviceAttr.cudaDevAttrMultiProcessorCount{{endif}} {{if 'cudaDevAttrKernelExecTimeout' in found_values}} #: Specifies whether there is a run time limit on kernels - cudaDevAttrKernelExecTimeout = ccudart.cudaDeviceAttr.cudaDevAttrKernelExecTimeout{{endif}} + cudaDevAttrKernelExecTimeout = cyruntime.cudaDeviceAttr.cudaDevAttrKernelExecTimeout{{endif}} {{if 'cudaDevAttrIntegrated' in found_values}} #: Device is integrated with host memory - cudaDevAttrIntegrated = ccudart.cudaDeviceAttr.cudaDevAttrIntegrated{{endif}} + cudaDevAttrIntegrated = cyruntime.cudaDeviceAttr.cudaDevAttrIntegrated{{endif}} {{if 'cudaDevAttrCanMapHostMemory' in found_values}} #: Device can map host memory into CUDA address space - cudaDevAttrCanMapHostMemory = ccudart.cudaDeviceAttr.cudaDevAttrCanMapHostMemory{{endif}} + cudaDevAttrCanMapHostMemory = cyruntime.cudaDeviceAttr.cudaDevAttrCanMapHostMemory{{endif}} {{if 'cudaDevAttrComputeMode' in found_values}} #: Compute mode (See :py:obj:`~.cudaComputeMode` for details) - cudaDevAttrComputeMode = ccudart.cudaDeviceAttr.cudaDevAttrComputeMode{{endif}} + cudaDevAttrComputeMode = cyruntime.cudaDeviceAttr.cudaDevAttrComputeMode{{endif}} {{if 'cudaDevAttrMaxTexture1DWidth' in found_values}} #: Maximum 1D texture width - cudaDevAttrMaxTexture1DWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth{{endif}} + cudaDevAttrMaxTexture1DWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth{{endif}} {{if 'cudaDevAttrMaxTexture2DWidth' in found_values}} #: Maximum 2D texture width - cudaDevAttrMaxTexture2DWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth{{endif}} + cudaDevAttrMaxTexture2DWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth{{endif}} {{if 'cudaDevAttrMaxTexture2DHeight' in found_values}} #: Maximum 2D texture height - cudaDevAttrMaxTexture2DHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight{{endif}} + cudaDevAttrMaxTexture2DHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight{{endif}} {{if 'cudaDevAttrMaxTexture3DWidth' in found_values}} #: Maximum 3D texture width - cudaDevAttrMaxTexture3DWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth{{endif}} + cudaDevAttrMaxTexture3DWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth{{endif}} {{if 'cudaDevAttrMaxTexture3DHeight' in found_values}} #: Maximum 3D texture height - cudaDevAttrMaxTexture3DHeight = 
ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight{{endif}} + cudaDevAttrMaxTexture3DHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight{{endif}} {{if 'cudaDevAttrMaxTexture3DDepth' in found_values}} #: Maximum 3D texture depth - cudaDevAttrMaxTexture3DDepth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth{{endif}} + cudaDevAttrMaxTexture3DDepth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth{{endif}} {{if 'cudaDevAttrMaxTexture2DLayeredWidth' in found_values}} #: Maximum 2D layered texture width - cudaDevAttrMaxTexture2DLayeredWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth{{endif}} + cudaDevAttrMaxTexture2DLayeredWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth{{endif}} {{if 'cudaDevAttrMaxTexture2DLayeredHeight' in found_values}} #: Maximum 2D layered texture height - cudaDevAttrMaxTexture2DLayeredHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight{{endif}} + cudaDevAttrMaxTexture2DLayeredHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight{{endif}} {{if 'cudaDevAttrMaxTexture2DLayeredLayers' in found_values}} #: Maximum layers in a 2D layered texture - cudaDevAttrMaxTexture2DLayeredLayers = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers{{endif}} + cudaDevAttrMaxTexture2DLayeredLayers = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers{{endif}} {{if 'cudaDevAttrSurfaceAlignment' in found_values}} #: Alignment requirement for surfaces - cudaDevAttrSurfaceAlignment = ccudart.cudaDeviceAttr.cudaDevAttrSurfaceAlignment{{endif}} + cudaDevAttrSurfaceAlignment = cyruntime.cudaDeviceAttr.cudaDevAttrSurfaceAlignment{{endif}} {{if 'cudaDevAttrConcurrentKernels' in found_values}} #: Device can possibly execute multiple kernels concurrently - cudaDevAttrConcurrentKernels = ccudart.cudaDeviceAttr.cudaDevAttrConcurrentKernels{{endif}} + cudaDevAttrConcurrentKernels = cyruntime.cudaDeviceAttr.cudaDevAttrConcurrentKernels{{endif}} {{if 'cudaDevAttrEccEnabled' in found_values}} #: Device has ECC support enabled - cudaDevAttrEccEnabled = ccudart.cudaDeviceAttr.cudaDevAttrEccEnabled{{endif}} + cudaDevAttrEccEnabled = cyruntime.cudaDeviceAttr.cudaDevAttrEccEnabled{{endif}} {{if 'cudaDevAttrPciBusId' in found_values}} #: PCI bus ID of the device - cudaDevAttrPciBusId = ccudart.cudaDeviceAttr.cudaDevAttrPciBusId{{endif}} + cudaDevAttrPciBusId = cyruntime.cudaDeviceAttr.cudaDevAttrPciBusId{{endif}} {{if 'cudaDevAttrPciDeviceId' in found_values}} #: PCI device ID of the device - cudaDevAttrPciDeviceId = ccudart.cudaDeviceAttr.cudaDevAttrPciDeviceId{{endif}} + cudaDevAttrPciDeviceId = cyruntime.cudaDeviceAttr.cudaDevAttrPciDeviceId{{endif}} {{if 'cudaDevAttrTccDriver' in found_values}} #: Device is using TCC driver model - cudaDevAttrTccDriver = ccudart.cudaDeviceAttr.cudaDevAttrTccDriver{{endif}} + cudaDevAttrTccDriver = cyruntime.cudaDeviceAttr.cudaDevAttrTccDriver{{endif}} {{if 'cudaDevAttrMemoryClockRate' in found_values}} #: Peak memory clock frequency in kilohertz - cudaDevAttrMemoryClockRate = ccudart.cudaDeviceAttr.cudaDevAttrMemoryClockRate{{endif}} + cudaDevAttrMemoryClockRate = cyruntime.cudaDeviceAttr.cudaDevAttrMemoryClockRate{{endif}} {{if 'cudaDevAttrGlobalMemoryBusWidth' in found_values}} #: Global memory bus width in bits - cudaDevAttrGlobalMemoryBusWidth = ccudart.cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth{{endif}} + cudaDevAttrGlobalMemoryBusWidth = cyruntime.cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth{{endif}} {{if 'cudaDevAttrL2CacheSize' in found_values}} #: 
Size of L2 cache in bytes - cudaDevAttrL2CacheSize = ccudart.cudaDeviceAttr.cudaDevAttrL2CacheSize{{endif}} + cudaDevAttrL2CacheSize = cyruntime.cudaDeviceAttr.cudaDevAttrL2CacheSize{{endif}} {{if 'cudaDevAttrMaxThreadsPerMultiProcessor' in found_values}} #: Maximum resident threads per multiprocessor - cudaDevAttrMaxThreadsPerMultiProcessor = ccudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor{{endif}} + cudaDevAttrMaxThreadsPerMultiProcessor = cyruntime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor{{endif}} {{if 'cudaDevAttrAsyncEngineCount' in found_values}} #: Number of asynchronous engines - cudaDevAttrAsyncEngineCount = ccudart.cudaDeviceAttr.cudaDevAttrAsyncEngineCount{{endif}} + cudaDevAttrAsyncEngineCount = cyruntime.cudaDeviceAttr.cudaDevAttrAsyncEngineCount{{endif}} {{if 'cudaDevAttrUnifiedAddressing' in found_values}} #: Device shares a unified address space with the host - cudaDevAttrUnifiedAddressing = ccudart.cudaDeviceAttr.cudaDevAttrUnifiedAddressing{{endif}} + cudaDevAttrUnifiedAddressing = cyruntime.cudaDeviceAttr.cudaDevAttrUnifiedAddressing{{endif}} {{if 'cudaDevAttrMaxTexture1DLayeredWidth' in found_values}} #: Maximum 1D layered texture width - cudaDevAttrMaxTexture1DLayeredWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth{{endif}} + cudaDevAttrMaxTexture1DLayeredWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth{{endif}} {{if 'cudaDevAttrMaxTexture1DLayeredLayers' in found_values}} #: Maximum layers in a 1D layered texture - cudaDevAttrMaxTexture1DLayeredLayers = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers{{endif}} + cudaDevAttrMaxTexture1DLayeredLayers = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers{{endif}} {{if 'cudaDevAttrMaxTexture2DGatherWidth' in found_values}} #: Maximum 2D texture width if cudaArrayTextureGather is set - cudaDevAttrMaxTexture2DGatherWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth{{endif}} + cudaDevAttrMaxTexture2DGatherWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth{{endif}} {{if 'cudaDevAttrMaxTexture2DGatherHeight' in found_values}} #: Maximum 2D texture height if cudaArrayTextureGather is set - cudaDevAttrMaxTexture2DGatherHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight{{endif}} + cudaDevAttrMaxTexture2DGatherHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight{{endif}} {{if 'cudaDevAttrMaxTexture3DWidthAlt' in found_values}} #: Alternate maximum 3D texture width - cudaDevAttrMaxTexture3DWidthAlt = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt{{endif}} + cudaDevAttrMaxTexture3DWidthAlt = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt{{endif}} {{if 'cudaDevAttrMaxTexture3DHeightAlt' in found_values}} #: Alternate maximum 3D texture height - cudaDevAttrMaxTexture3DHeightAlt = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt{{endif}} + cudaDevAttrMaxTexture3DHeightAlt = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt{{endif}} {{if 'cudaDevAttrMaxTexture3DDepthAlt' in found_values}} #: Alternate maximum 3D texture depth - cudaDevAttrMaxTexture3DDepthAlt = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt{{endif}} + cudaDevAttrMaxTexture3DDepthAlt = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt{{endif}} {{if 'cudaDevAttrPciDomainId' in found_values}} #: PCI domain ID of the device - cudaDevAttrPciDomainId = ccudart.cudaDeviceAttr.cudaDevAttrPciDomainId{{endif}} + cudaDevAttrPciDomainId = 
cyruntime.cudaDeviceAttr.cudaDevAttrPciDomainId{{endif}} {{if 'cudaDevAttrTexturePitchAlignment' in found_values}} #: Pitch alignment requirement for textures - cudaDevAttrTexturePitchAlignment = ccudart.cudaDeviceAttr.cudaDevAttrTexturePitchAlignment{{endif}} + cudaDevAttrTexturePitchAlignment = cyruntime.cudaDeviceAttr.cudaDevAttrTexturePitchAlignment{{endif}} {{if 'cudaDevAttrMaxTextureCubemapWidth' in found_values}} #: Maximum cubemap texture width/height - cudaDevAttrMaxTextureCubemapWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth{{endif}} + cudaDevAttrMaxTextureCubemapWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth{{endif}} {{if 'cudaDevAttrMaxTextureCubemapLayeredWidth' in found_values}} #: Maximum cubemap layered texture width/height - cudaDevAttrMaxTextureCubemapLayeredWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth{{endif}} + cudaDevAttrMaxTextureCubemapLayeredWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth{{endif}} {{if 'cudaDevAttrMaxTextureCubemapLayeredLayers' in found_values}} #: Maximum layers in a cubemap layered texture - cudaDevAttrMaxTextureCubemapLayeredLayers = ccudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers{{endif}} + cudaDevAttrMaxTextureCubemapLayeredLayers = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers{{endif}} {{if 'cudaDevAttrMaxSurface1DWidth' in found_values}} #: Maximum 1D surface width - cudaDevAttrMaxSurface1DWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth{{endif}} + cudaDevAttrMaxSurface1DWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth{{endif}} {{if 'cudaDevAttrMaxSurface2DWidth' in found_values}} #: Maximum 2D surface width - cudaDevAttrMaxSurface2DWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth{{endif}} + cudaDevAttrMaxSurface2DWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth{{endif}} {{if 'cudaDevAttrMaxSurface2DHeight' in found_values}} #: Maximum 2D surface height - cudaDevAttrMaxSurface2DHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight{{endif}} + cudaDevAttrMaxSurface2DHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight{{endif}} {{if 'cudaDevAttrMaxSurface3DWidth' in found_values}} #: Maximum 3D surface width - cudaDevAttrMaxSurface3DWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth{{endif}} + cudaDevAttrMaxSurface3DWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth{{endif}} {{if 'cudaDevAttrMaxSurface3DHeight' in found_values}} #: Maximum 3D surface height - cudaDevAttrMaxSurface3DHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight{{endif}} + cudaDevAttrMaxSurface3DHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight{{endif}} {{if 'cudaDevAttrMaxSurface3DDepth' in found_values}} #: Maximum 3D surface depth - cudaDevAttrMaxSurface3DDepth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth{{endif}} + cudaDevAttrMaxSurface3DDepth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth{{endif}} {{if 'cudaDevAttrMaxSurface1DLayeredWidth' in found_values}} #: Maximum 1D layered surface width - cudaDevAttrMaxSurface1DLayeredWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth{{endif}} + cudaDevAttrMaxSurface1DLayeredWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth{{endif}} {{if 'cudaDevAttrMaxSurface1DLayeredLayers' in found_values}} #: Maximum layers in a 1D layered surface - cudaDevAttrMaxSurface1DLayeredLayers = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers{{endif}} 
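# Illustrative sketch (not part of the patch): the cudaDeviceAttr values in
# this enum are queried through cudaDeviceGetAttribute, which in these
# bindings returns a (cudaError_t, value) tuple instead of raising.
from cuda import cudart

def device_attr(attr, device=0):
    err, value = cudart.cudaDeviceGetAttribute(attr, device)
    assert err == cudart.cudaError_t.cudaSuccess, err
    return value

max_threads = device_attr(cudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock)
sm_count = device_attr(cudart.cudaDeviceAttr.cudaDevAttrMultiProcessorCount)
print(f"max threads/block: {max_threads}, multiprocessors: {sm_count}")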
+ cudaDevAttrMaxSurface1DLayeredLayers = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers{{endif}} {{if 'cudaDevAttrMaxSurface2DLayeredWidth' in found_values}} #: Maximum 2D layered surface width - cudaDevAttrMaxSurface2DLayeredWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth{{endif}} + cudaDevAttrMaxSurface2DLayeredWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth{{endif}} {{if 'cudaDevAttrMaxSurface2DLayeredHeight' in found_values}} #: Maximum 2D layered surface height - cudaDevAttrMaxSurface2DLayeredHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight{{endif}} + cudaDevAttrMaxSurface2DLayeredHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight{{endif}} {{if 'cudaDevAttrMaxSurface2DLayeredLayers' in found_values}} #: Maximum layers in a 2D layered surface - cudaDevAttrMaxSurface2DLayeredLayers = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers{{endif}} + cudaDevAttrMaxSurface2DLayeredLayers = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers{{endif}} {{if 'cudaDevAttrMaxSurfaceCubemapWidth' in found_values}} #: Maximum cubemap surface width - cudaDevAttrMaxSurfaceCubemapWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth{{endif}} + cudaDevAttrMaxSurfaceCubemapWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth{{endif}} {{if 'cudaDevAttrMaxSurfaceCubemapLayeredWidth' in found_values}} #: Maximum cubemap layered surface width - cudaDevAttrMaxSurfaceCubemapLayeredWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth{{endif}} + cudaDevAttrMaxSurfaceCubemapLayeredWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth{{endif}} {{if 'cudaDevAttrMaxSurfaceCubemapLayeredLayers' in found_values}} #: Maximum layers in a cubemap layered surface - cudaDevAttrMaxSurfaceCubemapLayeredLayers = ccudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers{{endif}} + cudaDevAttrMaxSurfaceCubemapLayeredLayers = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers{{endif}} {{if 'cudaDevAttrMaxTexture1DLinearWidth' in found_values}} #: Maximum 1D linear texture width - cudaDevAttrMaxTexture1DLinearWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth{{endif}} + cudaDevAttrMaxTexture1DLinearWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth{{endif}} {{if 'cudaDevAttrMaxTexture2DLinearWidth' in found_values}} #: Maximum 2D linear texture width - cudaDevAttrMaxTexture2DLinearWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth{{endif}} + cudaDevAttrMaxTexture2DLinearWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth{{endif}} {{if 'cudaDevAttrMaxTexture2DLinearHeight' in found_values}} #: Maximum 2D linear texture height - cudaDevAttrMaxTexture2DLinearHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight{{endif}} + cudaDevAttrMaxTexture2DLinearHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight{{endif}} {{if 'cudaDevAttrMaxTexture2DLinearPitch' in found_values}} #: Maximum 2D linear texture pitch in bytes - cudaDevAttrMaxTexture2DLinearPitch = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch{{endif}} + cudaDevAttrMaxTexture2DLinearPitch = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch{{endif}} {{if 'cudaDevAttrMaxTexture2DMipmappedWidth' in found_values}} #: Maximum mipmapped 2D texture width - cudaDevAttrMaxTexture2DMipmappedWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth{{endif}} + 
cudaDevAttrMaxTexture2DMipmappedWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth{{endif}} {{if 'cudaDevAttrMaxTexture2DMipmappedHeight' in found_values}} #: Maximum mipmapped 2D texture height - cudaDevAttrMaxTexture2DMipmappedHeight = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight{{endif}} + cudaDevAttrMaxTexture2DMipmappedHeight = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight{{endif}} {{if 'cudaDevAttrComputeCapabilityMajor' in found_values}} #: Major compute capability version number - cudaDevAttrComputeCapabilityMajor = ccudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor{{endif}} + cudaDevAttrComputeCapabilityMajor = cyruntime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor{{endif}} {{if 'cudaDevAttrComputeCapabilityMinor' in found_values}} #: Minor compute capability version number - cudaDevAttrComputeCapabilityMinor = ccudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor{{endif}} + cudaDevAttrComputeCapabilityMinor = cyruntime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor{{endif}} {{if 'cudaDevAttrMaxTexture1DMipmappedWidth' in found_values}} #: Maximum mipmapped 1D texture width - cudaDevAttrMaxTexture1DMipmappedWidth = ccudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth{{endif}} + cudaDevAttrMaxTexture1DMipmappedWidth = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth{{endif}} {{if 'cudaDevAttrStreamPrioritiesSupported' in found_values}} #: Device supports stream priorities - cudaDevAttrStreamPrioritiesSupported = ccudart.cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported{{endif}} + cudaDevAttrStreamPrioritiesSupported = cyruntime.cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported{{endif}} {{if 'cudaDevAttrGlobalL1CacheSupported' in found_values}} #: Device supports caching globals in L1 - cudaDevAttrGlobalL1CacheSupported = ccudart.cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported{{endif}} + cudaDevAttrGlobalL1CacheSupported = cyruntime.cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported{{endif}} {{if 'cudaDevAttrLocalL1CacheSupported' in found_values}} #: Device supports caching locals in L1 - cudaDevAttrLocalL1CacheSupported = ccudart.cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported{{endif}} + cudaDevAttrLocalL1CacheSupported = cyruntime.cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported{{endif}} {{if 'cudaDevAttrMaxSharedMemoryPerMultiprocessor' in found_values}} #: Maximum shared memory available per multiprocessor in bytes - cudaDevAttrMaxSharedMemoryPerMultiprocessor = ccudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor{{endif}} + cudaDevAttrMaxSharedMemoryPerMultiprocessor = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor{{endif}} {{if 'cudaDevAttrMaxRegistersPerMultiprocessor' in found_values}} #: Maximum number of 32-bit registers available per multiprocessor - cudaDevAttrMaxRegistersPerMultiprocessor = ccudart.cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor{{endif}} + cudaDevAttrMaxRegistersPerMultiprocessor = cyruntime.cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor{{endif}} {{if 'cudaDevAttrManagedMemory' in found_values}} #: Device can allocate managed memory on this system - cudaDevAttrManagedMemory = ccudart.cudaDeviceAttr.cudaDevAttrManagedMemory{{endif}} + cudaDevAttrManagedMemory = cyruntime.cudaDeviceAttr.cudaDevAttrManagedMemory{{endif}} {{if 'cudaDevAttrIsMultiGpuBoard' in found_values}} #: Device is on a multi-GPU board - cudaDevAttrIsMultiGpuBoard = ccudart.cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard{{endif}} + 
cudaDevAttrIsMultiGpuBoard = cyruntime.cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard{{endif}} {{if 'cudaDevAttrMultiGpuBoardGroupID' in found_values}} #: Unique identifier for a group of devices on the same multi-GPU board - cudaDevAttrMultiGpuBoardGroupID = ccudart.cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID{{endif}} + cudaDevAttrMultiGpuBoardGroupID = cyruntime.cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID{{endif}} {{if 'cudaDevAttrHostNativeAtomicSupported' in found_values}} #: Link between the device and the host supports native atomic #: operations - cudaDevAttrHostNativeAtomicSupported = ccudart.cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported{{endif}} + cudaDevAttrHostNativeAtomicSupported = cyruntime.cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported{{endif}} {{if 'cudaDevAttrSingleToDoublePrecisionPerfRatio' in found_values}} #: Ratio of single precision performance (in floating-point operations #: per second) to double precision performance - cudaDevAttrSingleToDoublePrecisionPerfRatio = ccudart.cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio{{endif}} + cudaDevAttrSingleToDoublePrecisionPerfRatio = cyruntime.cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio{{endif}} {{if 'cudaDevAttrPageableMemoryAccess' in found_values}} #: Device supports coherently accessing pageable memory without calling #: cudaHostRegister on it - cudaDevAttrPageableMemoryAccess = ccudart.cudaDeviceAttr.cudaDevAttrPageableMemoryAccess{{endif}} + cudaDevAttrPageableMemoryAccess = cyruntime.cudaDeviceAttr.cudaDevAttrPageableMemoryAccess{{endif}} {{if 'cudaDevAttrConcurrentManagedAccess' in found_values}} #: Device can coherently access managed memory concurrently with the #: CPU - cudaDevAttrConcurrentManagedAccess = ccudart.cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess{{endif}} + cudaDevAttrConcurrentManagedAccess = cyruntime.cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess{{endif}} {{if 'cudaDevAttrComputePreemptionSupported' in found_values}} #: Device supports Compute Preemption - cudaDevAttrComputePreemptionSupported = ccudart.cudaDeviceAttr.cudaDevAttrComputePreemptionSupported{{endif}} + cudaDevAttrComputePreemptionSupported = cyruntime.cudaDeviceAttr.cudaDevAttrComputePreemptionSupported{{endif}} {{if 'cudaDevAttrCanUseHostPointerForRegisteredMem' in found_values}} #: Device can access host registered memory at the same virtual address #: as the CPU - cudaDevAttrCanUseHostPointerForRegisteredMem = ccudart.cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem{{endif}} + cudaDevAttrCanUseHostPointerForRegisteredMem = cyruntime.cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem{{endif}} {{if 'cudaDevAttrReserved92' in found_values}} - cudaDevAttrReserved92 = ccudart.cudaDeviceAttr.cudaDevAttrReserved92{{endif}} + cudaDevAttrReserved92 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved92{{endif}} {{if 'cudaDevAttrReserved93' in found_values}} - cudaDevAttrReserved93 = ccudart.cudaDeviceAttr.cudaDevAttrReserved93{{endif}} + cudaDevAttrReserved93 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved93{{endif}} {{if 'cudaDevAttrReserved94' in found_values}} - cudaDevAttrReserved94 = ccudart.cudaDeviceAttr.cudaDevAttrReserved94{{endif}} + cudaDevAttrReserved94 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved94{{endif}} {{if 'cudaDevAttrCooperativeLaunch' in found_values}} #: Device supports launching cooperative kernels via #: :py:obj:`~.cudaLaunchCooperativeKernel` - cudaDevAttrCooperativeLaunch = ccudart.cudaDeviceAttr.cudaDevAttrCooperativeLaunch{{endif}} + 
cudaDevAttrCooperativeLaunch = cyruntime.cudaDeviceAttr.cudaDevAttrCooperativeLaunch{{endif}} {{if 'cudaDevAttrCooperativeMultiDeviceLaunch' in found_values}} #: Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated. - cudaDevAttrCooperativeMultiDeviceLaunch = ccudart.cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch{{endif}} + cudaDevAttrCooperativeMultiDeviceLaunch = cyruntime.cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch{{endif}} {{if 'cudaDevAttrMaxSharedMemoryPerBlockOptin' in found_values}} #: The maximum optin shared memory per block. This value may vary by #: chip. See :py:obj:`~.cudaFuncSetAttribute` - cudaDevAttrMaxSharedMemoryPerBlockOptin = ccudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin{{endif}} + cudaDevAttrMaxSharedMemoryPerBlockOptin = cyruntime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin{{endif}} {{if 'cudaDevAttrCanFlushRemoteWrites' in found_values}} #: Device supports flushing of outstanding remote writes. - cudaDevAttrCanFlushRemoteWrites = ccudart.cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites{{endif}} + cudaDevAttrCanFlushRemoteWrites = cyruntime.cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites{{endif}} {{if 'cudaDevAttrHostRegisterSupported' in found_values}} #: Device supports host memory registration via #: :py:obj:`~.cudaHostRegister`. - cudaDevAttrHostRegisterSupported = ccudart.cudaDeviceAttr.cudaDevAttrHostRegisterSupported{{endif}} + cudaDevAttrHostRegisterSupported = cyruntime.cudaDeviceAttr.cudaDevAttrHostRegisterSupported{{endif}} {{if 'cudaDevAttrPageableMemoryAccessUsesHostPageTables' in found_values}} #: Device accesses pageable memory via the host's page tables. - cudaDevAttrPageableMemoryAccessUsesHostPageTables = ccudart.cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables{{endif}} + cudaDevAttrPageableMemoryAccessUsesHostPageTables = cyruntime.cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables{{endif}} {{if 'cudaDevAttrDirectManagedMemAccessFromHost' in found_values}} #: Host can directly access managed memory on the device without #: migration. - cudaDevAttrDirectManagedMemAccessFromHost = ccudart.cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost{{endif}} + cudaDevAttrDirectManagedMemAccessFromHost = cyruntime.cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost{{endif}} {{if 'cudaDevAttrMaxBlocksPerMultiprocessor' in found_values}} #: Maximum number of blocks per multiprocessor - cudaDevAttrMaxBlocksPerMultiprocessor = ccudart.cudaDeviceAttr.cudaDevAttrMaxBlocksPerMultiprocessor{{endif}} + cudaDevAttrMaxBlocksPerMultiprocessor = cyruntime.cudaDeviceAttr.cudaDevAttrMaxBlocksPerMultiprocessor{{endif}} {{if 'cudaDevAttrMaxPersistingL2CacheSize' in found_values}} #: Maximum L2 persisting lines capacity setting in bytes. - cudaDevAttrMaxPersistingL2CacheSize = ccudart.cudaDeviceAttr.cudaDevAttrMaxPersistingL2CacheSize{{endif}} + cudaDevAttrMaxPersistingL2CacheSize = cyruntime.cudaDeviceAttr.cudaDevAttrMaxPersistingL2CacheSize{{endif}} {{if 'cudaDevAttrMaxAccessPolicyWindowSize' in found_values}} #: Maximum value of :py:obj:`~.cudaAccessPolicyWindow.num_bytes`. 
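# Illustrative sketch (not part of the patch): gating a managed allocation on
# the cudaDevAttrManagedMemory attribute above. Size and flags are arbitrary
# choices for the example.
from cuda import cudart

err, supported = cudart.cudaDeviceGetAttribute(
    cudart.cudaDeviceAttr.cudaDevAttrManagedMemory, 0)
if err == cudart.cudaError_t.cudaSuccess and supported:
    # cudaMemAttachGlobal: allocation is accessible from any stream or device.
    err, ptr = cudart.cudaMallocManaged(1 << 20, cudart.cudaMemAttachGlobal)
    if err == cudart.cudaError_t.cudaSuccess:
        (err,) = cudart.cudaFree(ptr)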
- cudaDevAttrMaxAccessPolicyWindowSize = ccudart.cudaDeviceAttr.cudaDevAttrMaxAccessPolicyWindowSize{{endif}} + cudaDevAttrMaxAccessPolicyWindowSize = cyruntime.cudaDeviceAttr.cudaDevAttrMaxAccessPolicyWindowSize{{endif}} {{if 'cudaDevAttrReservedSharedMemoryPerBlock' in found_values}} #: Shared memory reserved by CUDA driver per block in bytes - cudaDevAttrReservedSharedMemoryPerBlock = ccudart.cudaDeviceAttr.cudaDevAttrReservedSharedMemoryPerBlock{{endif}} + cudaDevAttrReservedSharedMemoryPerBlock = cyruntime.cudaDeviceAttr.cudaDevAttrReservedSharedMemoryPerBlock{{endif}} {{if 'cudaDevAttrSparseCudaArraySupported' in found_values}} #: Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays - cudaDevAttrSparseCudaArraySupported = ccudart.cudaDeviceAttr.cudaDevAttrSparseCudaArraySupported{{endif}} + cudaDevAttrSparseCudaArraySupported = cyruntime.cudaDeviceAttr.cudaDevAttrSparseCudaArraySupported{{endif}} {{if 'cudaDevAttrHostRegisterReadOnlySupported' in found_values}} #: Device supports using the :py:obj:`~.cudaHostRegister` flag #: cudaHostRegisterReadOnly to register memory that must be mapped as #: read-only to the GPU - cudaDevAttrHostRegisterReadOnlySupported = ccudart.cudaDeviceAttr.cudaDevAttrHostRegisterReadOnlySupported{{endif}} + cudaDevAttrHostRegisterReadOnlySupported = cyruntime.cudaDeviceAttr.cudaDevAttrHostRegisterReadOnlySupported{{endif}} {{if 'cudaDevAttrTimelineSemaphoreInteropSupported' in found_values}} #: External timeline semaphore interop is supported on the device - cudaDevAttrTimelineSemaphoreInteropSupported = ccudart.cudaDeviceAttr.cudaDevAttrTimelineSemaphoreInteropSupported{{endif}} + cudaDevAttrTimelineSemaphoreInteropSupported = cyruntime.cudaDeviceAttr.cudaDevAttrTimelineSemaphoreInteropSupported{{endif}} {{if 'cudaDevAttrMaxTimelineSemaphoreInteropSupported' in found_values}} #: Deprecated, External timeline semaphore interop is supported on the #: device - cudaDevAttrMaxTimelineSemaphoreInteropSupported = ccudart.cudaDeviceAttr.cudaDevAttrMaxTimelineSemaphoreInteropSupported{{endif}} + cudaDevAttrMaxTimelineSemaphoreInteropSupported = cyruntime.cudaDeviceAttr.cudaDevAttrMaxTimelineSemaphoreInteropSupported{{endif}} {{if 'cudaDevAttrMemoryPoolsSupported' in found_values}} #: Device supports using the :py:obj:`~.cudaMallocAsync` and #: :py:obj:`~.cudaMemPool` family of APIs - cudaDevAttrMemoryPoolsSupported = ccudart.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported{{endif}} + cudaDevAttrMemoryPoolsSupported = cyruntime.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported{{endif}} {{if 'cudaDevAttrGPUDirectRDMASupported' in found_values}} #: Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see #: https://docs.nvidia.com/cuda/gpudirect-rdma for more information) - cudaDevAttrGPUDirectRDMASupported = ccudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMASupported{{endif}} + cudaDevAttrGPUDirectRDMASupported = cyruntime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMASupported{{endif}} {{if 'cudaDevAttrGPUDirectRDMAFlushWritesOptions' in found_values}} #: The returned attribute shall be interpreted as a bitmask, where the #: individual bits are listed in the #: :py:obj:`~.cudaFlushGPUDirectRDMAWritesOptions` enum - cudaDevAttrGPUDirectRDMAFlushWritesOptions = ccudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions{{endif}} + cudaDevAttrGPUDirectRDMAFlushWritesOptions = cyruntime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions{{endif}} {{if 'cudaDevAttrGPUDirectRDMAWritesOrdering' in found_values}} #: GPUDirect RDMA writes to the device 
do not need to be flushed for #: consumers within the scope indicated by the returned attribute. See #: :py:obj:`~.cudaGPUDirectRDMAWritesOrdering` for the numerical values #: returned here. - cudaDevAttrGPUDirectRDMAWritesOrdering = ccudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAWritesOrdering{{endif}} + cudaDevAttrGPUDirectRDMAWritesOrdering = cyruntime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAWritesOrdering{{endif}} {{if 'cudaDevAttrMemoryPoolSupportedHandleTypes' in found_values}} #: Handle types supported with mempool based IPC - cudaDevAttrMemoryPoolSupportedHandleTypes = ccudart.cudaDeviceAttr.cudaDevAttrMemoryPoolSupportedHandleTypes{{endif}} + cudaDevAttrMemoryPoolSupportedHandleTypes = cyruntime.cudaDeviceAttr.cudaDevAttrMemoryPoolSupportedHandleTypes{{endif}} {{if 'cudaDevAttrClusterLaunch' in found_values}} #: Indicates device supports cluster launch - cudaDevAttrClusterLaunch = ccudart.cudaDeviceAttr.cudaDevAttrClusterLaunch{{endif}} + cudaDevAttrClusterLaunch = cyruntime.cudaDeviceAttr.cudaDevAttrClusterLaunch{{endif}} {{if 'cudaDevAttrDeferredMappingCudaArraySupported' in found_values}} #: Device supports deferred mapping CUDA arrays and CUDA mipmapped #: arrays - cudaDevAttrDeferredMappingCudaArraySupported = ccudart.cudaDeviceAttr.cudaDevAttrDeferredMappingCudaArraySupported{{endif}} + cudaDevAttrDeferredMappingCudaArraySupported = cyruntime.cudaDeviceAttr.cudaDevAttrDeferredMappingCudaArraySupported{{endif}} {{if 'cudaDevAttrReserved122' in found_values}} - cudaDevAttrReserved122 = ccudart.cudaDeviceAttr.cudaDevAttrReserved122{{endif}} + cudaDevAttrReserved122 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved122{{endif}} {{if 'cudaDevAttrReserved123' in found_values}} - cudaDevAttrReserved123 = ccudart.cudaDeviceAttr.cudaDevAttrReserved123{{endif}} + cudaDevAttrReserved123 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved123{{endif}} {{if 'cudaDevAttrReserved124' in found_values}} - cudaDevAttrReserved124 = ccudart.cudaDeviceAttr.cudaDevAttrReserved124{{endif}} + cudaDevAttrReserved124 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved124{{endif}} {{if 'cudaDevAttrIpcEventSupport' in found_values}} #: Device supports IPC Events. - cudaDevAttrIpcEventSupport = ccudart.cudaDeviceAttr.cudaDevAttrIpcEventSupport{{endif}} + cudaDevAttrIpcEventSupport = cyruntime.cudaDeviceAttr.cudaDevAttrIpcEventSupport{{endif}} {{if 'cudaDevAttrMemSyncDomainCount' in found_values}} #: Number of memory synchronization domains the device supports. 
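# Illustrative sketch (not part of the patch): combining the GPUDirect RDMA
# attributes above with the flush scope/target enums earlier in this module.
# That cudaDeviceFlushGPUDirectRDMAWrites is wrapped with this calling shape
# is an assumption about these bindings.
from cuda import cudart

err, opts = cudart.cudaDeviceGetAttribute(
    cudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions, 0)
host_flush = cudart.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost
if err == cudart.cudaError_t.cudaSuccess and (opts & host_flush):
    (err,) = cudart.cudaDeviceFlushGPUDirectRDMAWrites(
        cudart.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice,
        cudart.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner)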
- cudaDevAttrMemSyncDomainCount = ccudart.cudaDeviceAttr.cudaDevAttrMemSyncDomainCount{{endif}} + cudaDevAttrMemSyncDomainCount = cyruntime.cudaDeviceAttr.cudaDevAttrMemSyncDomainCount{{endif}} {{if 'cudaDevAttrReserved127' in found_values}} - cudaDevAttrReserved127 = ccudart.cudaDeviceAttr.cudaDevAttrReserved127{{endif}} + cudaDevAttrReserved127 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved127{{endif}} {{if 'cudaDevAttrReserved128' in found_values}} - cudaDevAttrReserved128 = ccudart.cudaDeviceAttr.cudaDevAttrReserved128{{endif}} + cudaDevAttrReserved128 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved128{{endif}} {{if 'cudaDevAttrReserved129' in found_values}} - cudaDevAttrReserved129 = ccudart.cudaDeviceAttr.cudaDevAttrReserved129{{endif}} + cudaDevAttrReserved129 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved129{{endif}} {{if 'cudaDevAttrNumaConfig' in found_values}} #: NUMA configuration of a device: value is of type #: :py:obj:`~.cudaDeviceNumaConfig` enum - cudaDevAttrNumaConfig = ccudart.cudaDeviceAttr.cudaDevAttrNumaConfig{{endif}} + cudaDevAttrNumaConfig = cyruntime.cudaDeviceAttr.cudaDevAttrNumaConfig{{endif}} {{if 'cudaDevAttrNumaId' in found_values}} #: NUMA node ID of the GPU memory - cudaDevAttrNumaId = ccudart.cudaDeviceAttr.cudaDevAttrNumaId{{endif}} + cudaDevAttrNumaId = cyruntime.cudaDeviceAttr.cudaDevAttrNumaId{{endif}} {{if 'cudaDevAttrReserved132' in found_values}} - cudaDevAttrReserved132 = ccudart.cudaDeviceAttr.cudaDevAttrReserved132{{endif}} + cudaDevAttrReserved132 = cyruntime.cudaDeviceAttr.cudaDevAttrReserved132{{endif}} {{if 'cudaDevAttrMpsEnabled' in found_values}} #: Contexts created on this device will be shared via MPS - cudaDevAttrMpsEnabled = ccudart.cudaDeviceAttr.cudaDevAttrMpsEnabled{{endif}} + cudaDevAttrMpsEnabled = cyruntime.cudaDeviceAttr.cudaDevAttrMpsEnabled{{endif}} {{if 'cudaDevAttrHostNumaId' in found_values}} #: NUMA ID of the host node closest to the device. Returns -1 when #: system does not support NUMA. - cudaDevAttrHostNumaId = ccudart.cudaDeviceAttr.cudaDevAttrHostNumaId{{endif}} + cudaDevAttrHostNumaId = cyruntime.cudaDeviceAttr.cudaDevAttrHostNumaId{{endif}} {{if 'cudaDevAttrD3D12CigSupported' in found_values}} #: Device supports CIG with D3D12. - cudaDevAttrD3D12CigSupported = ccudart.cudaDeviceAttr.cudaDevAttrD3D12CigSupported{{endif}} + cudaDevAttrD3D12CigSupported = cyruntime.cudaDeviceAttr.cudaDevAttrD3D12CigSupported{{endif}} {{if 'cudaDevAttrMax' in found_values}} - cudaDevAttrMax = ccudart.cudaDeviceAttr.cudaDevAttrMax{{endif}} + cudaDevAttrMax = cyruntime.cudaDeviceAttr.cudaDevAttrMax{{endif}} {{endif}} {{if 'cudaMemPoolAttr' in found_types}} @@ -3461,18 +3461,18 @@ class cudaMemPoolAttr(IntEnum): #: dependency of the allocating stream on the free action exists. Cuda #: events and null stream interactions can create the required stream #: ordered dependencies. (default enabled) - cudaMemPoolReuseFollowEventDependencies = ccudart.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies{{endif}} + cudaMemPoolReuseFollowEventDependencies = cyruntime.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies{{endif}} {{if 'cudaMemPoolReuseAllowOpportunistic' in found_values}} #: (value type = int) Allow reuse of already completed frees when there #: is no dependency between the free and allocation. 
(default enabled) - cudaMemPoolReuseAllowOpportunistic = ccudart.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic{{endif}} + cudaMemPoolReuseAllowOpportunistic = cyruntime.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic{{endif}} {{if 'cudaMemPoolReuseAllowInternalDependencies' in found_values}} #: (value type = int) Allow cuMemAllocAsync to insert new stream #: dependencies in order to establish the stream ordering required to #: reuse a piece of memory released by cuFreeAsync (default enabled). - cudaMemPoolReuseAllowInternalDependencies = ccudart.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies{{endif}} + cudaMemPoolReuseAllowInternalDependencies = cyruntime.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies{{endif}} {{if 'cudaMemPoolAttrReleaseThreshold' in found_values}} #: (value type = cuuint64_t) Amount of reserved memory in bytes to hold @@ -3480,29 +3480,29 @@ class cudaMemPoolAttr(IntEnum): #: the release threshold bytes of memory are held by the memory pool, #: the allocator will try to release memory back to the OS on the next #: call to stream, event or context synchronize. (default 0) - cudaMemPoolAttrReleaseThreshold = ccudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold{{endif}} + cudaMemPoolAttrReleaseThreshold = cyruntime.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold{{endif}} {{if 'cudaMemPoolAttrReservedMemCurrent' in found_values}} #: (value type = cuuint64_t) Amount of backing memory currently #: allocated for the mempool. - cudaMemPoolAttrReservedMemCurrent = ccudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent{{endif}} + cudaMemPoolAttrReservedMemCurrent = cyruntime.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent{{endif}} {{if 'cudaMemPoolAttrReservedMemHigh' in found_values}} #: (value type = cuuint64_t) High watermark of backing memory allocated #: for the mempool since the last time it was reset. High watermark can #: only be reset to zero. - cudaMemPoolAttrReservedMemHigh = ccudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemHigh{{endif}} + cudaMemPoolAttrReservedMemHigh = cyruntime.cudaMemPoolAttr.cudaMemPoolAttrReservedMemHigh{{endif}} {{if 'cudaMemPoolAttrUsedMemCurrent' in found_values}} #: (value type = cuuint64_t) Amount of memory from the pool that is #: currently in use by the application. - cudaMemPoolAttrUsedMemCurrent = ccudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent{{endif}} + cudaMemPoolAttrUsedMemCurrent = cyruntime.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent{{endif}} {{if 'cudaMemPoolAttrUsedMemHigh' in found_values}} #: (value type = cuuint64_t) High watermark of the amount of memory #: from the pool that was in use by the application since the last time #: it was reset. High watermark can only be reset to zero. 
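# Illustrative sketch (not part of the patch): reading the pool watermarks
# documented above from the device's default memory pool. The cuuint64_t
# value type is taken from the attribute comments; the exact Python return
# type is assumed here.
from cuda import cudart

err, pool = cudart.cudaDeviceGetDefaultMemPool(0)
assert err == cudart.cudaError_t.cudaSuccess, err
err, reserved = cudart.cudaMemPoolGetAttribute(
    pool, cudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent)
err, used = cudart.cudaMemPoolGetAttribute(
    pool, cudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent)
print("pool reserved:", reserved, "in use:", used)  # cuuint64_t values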
- cudaMemPoolAttrUsedMemHigh = ccudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemHigh{{endif}} + cudaMemPoolAttrUsedMemHigh = cyruntime.cudaMemPoolAttr.cudaMemPoolAttrUsedMemHigh{{endif}} {{endif}} {{if 'cudaMemLocationType' in found_types}} @@ -3511,24 +3511,24 @@ class cudaMemLocationType(IntEnum): Specifies the type of location """ {{if 'cudaMemLocationTypeInvalid' in found_values}} - cudaMemLocationTypeInvalid = ccudart.cudaMemLocationType.cudaMemLocationTypeInvalid{{endif}} + cudaMemLocationTypeInvalid = cyruntime.cudaMemLocationType.cudaMemLocationTypeInvalid{{endif}} {{if 'cudaMemLocationTypeDevice' in found_values}} #: Location is a device location, thus id is a device ordinal - cudaMemLocationTypeDevice = ccudart.cudaMemLocationType.cudaMemLocationTypeDevice{{endif}} + cudaMemLocationTypeDevice = cyruntime.cudaMemLocationType.cudaMemLocationTypeDevice{{endif}} {{if 'cudaMemLocationTypeHost' in found_values}} #: Location is host, id is ignored - cudaMemLocationTypeHost = ccudart.cudaMemLocationType.cudaMemLocationTypeHost{{endif}} + cudaMemLocationTypeHost = cyruntime.cudaMemLocationType.cudaMemLocationTypeHost{{endif}} {{if 'cudaMemLocationTypeHostNuma' in found_values}} #: Location is a host NUMA node, thus id is a host NUMA node id - cudaMemLocationTypeHostNuma = ccudart.cudaMemLocationType.cudaMemLocationTypeHostNuma{{endif}} + cudaMemLocationTypeHostNuma = cyruntime.cudaMemLocationType.cudaMemLocationTypeHostNuma{{endif}} {{if 'cudaMemLocationTypeHostNumaCurrent' in found_values}} #: Location is the host NUMA node closest to the current thread's CPU, #: id is ignored - cudaMemLocationTypeHostNumaCurrent = ccudart.cudaMemLocationType.cudaMemLocationTypeHostNumaCurrent{{endif}} + cudaMemLocationTypeHostNumaCurrent = cyruntime.cudaMemLocationType.cudaMemLocationTypeHostNumaCurrent{{endif}} {{endif}} {{if 'cudaMemAccessFlags' in found_types}} @@ -3539,15 +3539,15 @@ class cudaMemAccessFlags(IntEnum): {{if 'cudaMemAccessFlagsProtNone' in found_values}} #: Default, make the address range not accessible - cudaMemAccessFlagsProtNone = ccudart.cudaMemAccessFlags.cudaMemAccessFlagsProtNone{{endif}} + cudaMemAccessFlagsProtNone = cyruntime.cudaMemAccessFlags.cudaMemAccessFlagsProtNone{{endif}} {{if 'cudaMemAccessFlagsProtRead' in found_values}} #: Make the address range read accessible - cudaMemAccessFlagsProtRead = ccudart.cudaMemAccessFlags.cudaMemAccessFlagsProtRead{{endif}} + cudaMemAccessFlagsProtRead = cyruntime.cudaMemAccessFlags.cudaMemAccessFlagsProtRead{{endif}} {{if 'cudaMemAccessFlagsProtReadWrite' in found_values}} #: Make the address range read-write accessible - cudaMemAccessFlagsProtReadWrite = ccudart.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite{{endif}} + cudaMemAccessFlagsProtReadWrite = cyruntime.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite{{endif}} {{endif}} {{if 'cudaMemAllocationType' in found_types}} @@ -3556,14 +3556,14 @@ class cudaMemAllocationType(IntEnum): Defines the allocation types available """ {{if 'cudaMemAllocationTypeInvalid' in found_values}} - cudaMemAllocationTypeInvalid = ccudart.cudaMemAllocationType.cudaMemAllocationTypeInvalid{{endif}} + cudaMemAllocationTypeInvalid = cyruntime.cudaMemAllocationType.cudaMemAllocationTypeInvalid{{endif}} {{if 'cudaMemAllocationTypePinned' in found_values}} #: This allocation type is 'pinned', i.e. 
cannot migrate from its #: current location while the application is actively using it - cudaMemAllocationTypePinned = ccudart.cudaMemAllocationType.cudaMemAllocationTypePinned{{endif}} + cudaMemAllocationTypePinned = cyruntime.cudaMemAllocationType.cudaMemAllocationTypePinned{{endif}} {{if 'cudaMemAllocationTypeMax' in found_values}} - cudaMemAllocationTypeMax = ccudart.cudaMemAllocationType.cudaMemAllocationTypeMax{{endif}} + cudaMemAllocationTypeMax = cyruntime.cudaMemAllocationType.cudaMemAllocationTypeMax{{endif}} {{endif}} {{if 'cudaMemAllocationHandleType' in found_types}} @@ -3574,25 +3574,25 @@ class cudaMemAllocationHandleType(IntEnum): {{if 'cudaMemHandleTypeNone' in found_values}} #: Does not allow any export mechanism. > - cudaMemHandleTypeNone = ccudart.cudaMemAllocationHandleType.cudaMemHandleTypeNone{{endif}} + cudaMemHandleTypeNone = cyruntime.cudaMemAllocationHandleType.cudaMemHandleTypeNone{{endif}} {{if 'cudaMemHandleTypePosixFileDescriptor' in found_values}} #: Allows a file descriptor to be used for exporting. Permitted only on #: POSIX systems. (int) - cudaMemHandleTypePosixFileDescriptor = ccudart.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor{{endif}} + cudaMemHandleTypePosixFileDescriptor = cyruntime.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor{{endif}} {{if 'cudaMemHandleTypeWin32' in found_values}} #: Allows a Win32 NT handle to be used for exporting. (HANDLE) - cudaMemHandleTypeWin32 = ccudart.cudaMemAllocationHandleType.cudaMemHandleTypeWin32{{endif}} + cudaMemHandleTypeWin32 = cyruntime.cudaMemAllocationHandleType.cudaMemHandleTypeWin32{{endif}} {{if 'cudaMemHandleTypeWin32Kmt' in found_values}} #: Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) - cudaMemHandleTypeWin32Kmt = ccudart.cudaMemAllocationHandleType.cudaMemHandleTypeWin32Kmt{{endif}} + cudaMemHandleTypeWin32Kmt = cyruntime.cudaMemAllocationHandleType.cudaMemHandleTypeWin32Kmt{{endif}} {{if 'cudaMemHandleTypeFabric' in found_values}} #: Allows a fabric handle to be used for exporting. #: (cudaMemFabricHandle_t) - cudaMemHandleTypeFabric = ccudart.cudaMemAllocationHandleType.cudaMemHandleTypeFabric{{endif}} + cudaMemHandleTypeFabric = cyruntime.cudaMemAllocationHandleType.cudaMemHandleTypeFabric{{endif}} {{endif}} {{if 'cudaGraphMemAttributeType' in found_types}} @@ -3604,24 +3604,24 @@ class cudaGraphMemAttributeType(IntEnum): #: (value type = cuuint64_t) Amount of memory, in bytes, currently #: associated with graphs. - cudaGraphMemAttrUsedMemCurrent = ccudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemCurrent{{endif}} + cudaGraphMemAttrUsedMemCurrent = cyruntime.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemCurrent{{endif}} {{if 'cudaGraphMemAttrUsedMemHigh' in found_values}} #: (value type = cuuint64_t) High watermark of memory, in bytes, #: associated with graphs since the last time it was reset. High #: watermark can only be reset to zero. - cudaGraphMemAttrUsedMemHigh = ccudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh{{endif}} + cudaGraphMemAttrUsedMemHigh = cyruntime.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh{{endif}} {{if 'cudaGraphMemAttrReservedMemCurrent' in found_values}} #: (value type = cuuint64_t) Amount of memory, in bytes, currently #: allocated for use by the CUDA graphs asynchronous allocator. 
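# Illustrative sketch (not part of the patch): reading the graph-memory
# counters described above; the (err, value) return shape of
# cudaDeviceGetGraphMemAttribute in these bindings is an assumption.
from cuda import cudart

err, high = cudart.cudaDeviceGetGraphMemAttribute(
    0, cudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh)
if err == cudart.cudaError_t.cudaSuccess:
    print("graph memory high watermark (bytes):", high)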
- cudaGraphMemAttrReservedMemCurrent = ccudart.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemCurrent{{endif}} + cudaGraphMemAttrReservedMemCurrent = cyruntime.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemCurrent{{endif}} {{if 'cudaGraphMemAttrReservedMemHigh' in found_values}} #: (value type = cuuint64_t) High watermark of memory, in bytes, #: currently allocated for use by the CUDA graphs asynchronous #: allocator. - cudaGraphMemAttrReservedMemHigh = ccudart.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemHigh{{endif}} + cudaGraphMemAttrReservedMemHigh = cyruntime.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemHigh{{endif}} {{endif}} {{if 'cudaDeviceP2PAttr' in found_types}} @@ -3633,19 +3633,19 @@ class cudaDeviceP2PAttr(IntEnum): #: A relative value indicating the performance of the link between two #: devices - cudaDevP2PAttrPerformanceRank = ccudart.cudaDeviceP2PAttr.cudaDevP2PAttrPerformanceRank{{endif}} + cudaDevP2PAttrPerformanceRank = cyruntime.cudaDeviceP2PAttr.cudaDevP2PAttrPerformanceRank{{endif}} {{if 'cudaDevP2PAttrAccessSupported' in found_values}} #: Peer access is enabled - cudaDevP2PAttrAccessSupported = ccudart.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported{{endif}} + cudaDevP2PAttrAccessSupported = cyruntime.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported{{endif}} {{if 'cudaDevP2PAttrNativeAtomicSupported' in found_values}} #: Native atomic operation over the link supported - cudaDevP2PAttrNativeAtomicSupported = ccudart.cudaDeviceP2PAttr.cudaDevP2PAttrNativeAtomicSupported{{endif}} + cudaDevP2PAttrNativeAtomicSupported = cyruntime.cudaDeviceP2PAttr.cudaDevP2PAttrNativeAtomicSupported{{endif}} {{if 'cudaDevP2PAttrCudaArrayAccessSupported' in found_values}} #: Accessing CUDA arrays over the link supported - cudaDevP2PAttrCudaArrayAccessSupported = ccudart.cudaDeviceP2PAttr.cudaDevP2PAttrCudaArrayAccessSupported{{endif}} + cudaDevP2PAttrCudaArrayAccessSupported = cyruntime.cudaDeviceP2PAttr.cudaDevP2PAttrCudaArrayAccessSupported{{endif}} {{endif}} {{if 'cudaExternalMemoryHandleType' in found_types}} @@ -3656,35 +3656,35 @@ class cudaExternalMemoryHandleType(IntEnum): {{if 'cudaExternalMemoryHandleTypeOpaqueFd' in found_values}} #: Handle is an opaque file descriptor - cudaExternalMemoryHandleTypeOpaqueFd = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd{{endif}} + cudaExternalMemoryHandleTypeOpaqueFd = cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd{{endif}} {{if 'cudaExternalMemoryHandleTypeOpaqueWin32' in found_values}} #: Handle is an opaque shared NT handle - cudaExternalMemoryHandleTypeOpaqueWin32 = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32{{endif}} + cudaExternalMemoryHandleTypeOpaqueWin32 = cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32{{endif}} {{if 'cudaExternalMemoryHandleTypeOpaqueWin32Kmt' in found_values}} #: Handle is an opaque, globally shared handle - cudaExternalMemoryHandleTypeOpaqueWin32Kmt = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32Kmt{{endif}} + cudaExternalMemoryHandleTypeOpaqueWin32Kmt = cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32Kmt{{endif}} {{if 'cudaExternalMemoryHandleTypeD3D12Heap' in found_values}} #: Handle is a D3D12 heap object - cudaExternalMemoryHandleTypeD3D12Heap = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Heap{{endif}} + cudaExternalMemoryHandleTypeD3D12Heap = 
cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Heap{{endif}} {{if 'cudaExternalMemoryHandleTypeD3D12Resource' in found_values}} #: Handle is a D3D12 committed resource - cudaExternalMemoryHandleTypeD3D12Resource = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Resource{{endif}} + cudaExternalMemoryHandleTypeD3D12Resource = cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Resource{{endif}} {{if 'cudaExternalMemoryHandleTypeD3D11Resource' in found_values}} #: Handle is a shared NT handle to a D3D11 resource - cudaExternalMemoryHandleTypeD3D11Resource = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11Resource{{endif}} + cudaExternalMemoryHandleTypeD3D11Resource = cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11Resource{{endif}} {{if 'cudaExternalMemoryHandleTypeD3D11ResourceKmt' in found_values}} #: Handle is a globally shared handle to a D3D11 resource - cudaExternalMemoryHandleTypeD3D11ResourceKmt = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11ResourceKmt{{endif}} + cudaExternalMemoryHandleTypeD3D11ResourceKmt = cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11ResourceKmt{{endif}} {{if 'cudaExternalMemoryHandleTypeNvSciBuf' in found_values}} #: Handle is an NvSciBuf object - cudaExternalMemoryHandleTypeNvSciBuf = ccudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeNvSciBuf{{endif}} + cudaExternalMemoryHandleTypeNvSciBuf = cyruntime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeNvSciBuf{{endif}} {{endif}} {{if 'cudaExternalSemaphoreHandleType' in found_types}} @@ -3695,45 +3695,45 @@ class cudaExternalSemaphoreHandleType(IntEnum): {{if 'cudaExternalSemaphoreHandleTypeOpaqueFd' in found_values}} #: Handle is an opaque file descriptor - cudaExternalSemaphoreHandleTypeOpaqueFd = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd{{endif}} + cudaExternalSemaphoreHandleTypeOpaqueFd = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd{{endif}} {{if 'cudaExternalSemaphoreHandleTypeOpaqueWin32' in found_values}} #: Handle is an opaque shared NT handle - cudaExternalSemaphoreHandleTypeOpaqueWin32 = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32{{endif}} + cudaExternalSemaphoreHandleTypeOpaqueWin32 = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32{{endif}} {{if 'cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt' in found_values}} #: Handle is an opaque, globally shared handle - cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt{{endif}} + cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt{{endif}} {{if 'cudaExternalSemaphoreHandleTypeD3D12Fence' in found_values}} #: Handle is a shared NT handle referencing a D3D12 fence object - cudaExternalSemaphoreHandleTypeD3D12Fence = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D12Fence{{endif}} + cudaExternalSemaphoreHandleTypeD3D12Fence = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D12Fence{{endif}} {{if 'cudaExternalSemaphoreHandleTypeD3D11Fence' in found_values}} #: Handle is a shared NT handle referencing a D3D11 fence object - cudaExternalSemaphoreHandleTypeD3D11Fence = 
ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D11Fence{{endif}} + cudaExternalSemaphoreHandleTypeD3D11Fence = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D11Fence{{endif}} {{if 'cudaExternalSemaphoreHandleTypeNvSciSync' in found_values}} #: Opaque handle to NvSciSync Object - cudaExternalSemaphoreHandleTypeNvSciSync = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeNvSciSync{{endif}} + cudaExternalSemaphoreHandleTypeNvSciSync = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeNvSciSync{{endif}} {{if 'cudaExternalSemaphoreHandleTypeKeyedMutex' in found_values}} #: Handle is a shared NT handle referencing a D3D11 keyed mutex object - cudaExternalSemaphoreHandleTypeKeyedMutex = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutex{{endif}} + cudaExternalSemaphoreHandleTypeKeyedMutex = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutex{{endif}} {{if 'cudaExternalSemaphoreHandleTypeKeyedMutexKmt' in found_values}} #: Handle is a shared KMT handle referencing a D3D11 keyed mutex object - cudaExternalSemaphoreHandleTypeKeyedMutexKmt = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutexKmt{{endif}} + cudaExternalSemaphoreHandleTypeKeyedMutexKmt = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutexKmt{{endif}} {{if 'cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd' in found_values}} #: Handle is an opaque handle file descriptor referencing a timeline #: semaphore - cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd{{endif}} + cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd{{endif}} {{if 'cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32' in found_values}} #: Handle is an opaque handle file descriptor referencing a timeline #: semaphore - cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = ccudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32{{endif}} + cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = cyruntime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32{{endif}} {{endif}} {{if 'cudaCGScope' in found_types}} @@ -3744,15 +3744,15 @@ class cudaCGScope(IntEnum): {{if 'cudaCGScopeInvalid' in found_values}} #: Invalid cooperative group scope - cudaCGScopeInvalid = ccudart.cudaCGScope.cudaCGScopeInvalid{{endif}} + cudaCGScopeInvalid = cyruntime.cudaCGScope.cudaCGScopeInvalid{{endif}} {{if 'cudaCGScopeGrid' in found_values}} #: Scope represented by a grid_group - cudaCGScopeGrid = ccudart.cudaCGScope.cudaCGScopeGrid{{endif}} + cudaCGScopeGrid = cyruntime.cudaCGScope.cudaCGScopeGrid{{endif}} {{if 'cudaCGScopeMultiGrid' in found_values}} #: Scope represented by a multi_grid_group - cudaCGScopeMultiGrid = ccudart.cudaCGScope.cudaCGScopeMultiGrid{{endif}} + cudaCGScopeMultiGrid = cyruntime.cudaCGScope.cudaCGScopeMultiGrid{{endif}} {{endif}} {{if 'cudaGraphConditionalHandleFlags' in found_types}} @@ -3763,7 +3763,7 @@ class cudaGraphConditionalHandleFlags(IntEnum): {{if 'cudaGraphCondAssignDefault' in found_values}} #: Apply default handle value when graph is launched. 
- cudaGraphCondAssignDefault = ccudart.cudaGraphConditionalHandleFlags.cudaGraphCondAssignDefault{{endif}} + cudaGraphCondAssignDefault = cyruntime.cudaGraphConditionalHandleFlags.cudaGraphCondAssignDefault{{endif}} {{endif}} {{if 'cudaGraphConditionalNodeType' in found_types}} @@ -3775,12 +3775,12 @@ class cudaGraphConditionalNodeType(IntEnum): #: Conditional 'if' Node. Body executed once if condition value is non- #: zero. - cudaGraphCondTypeIf = ccudart.cudaGraphConditionalNodeType.cudaGraphCondTypeIf{{endif}} + cudaGraphCondTypeIf = cyruntime.cudaGraphConditionalNodeType.cudaGraphCondTypeIf{{endif}} {{if 'cudaGraphCondTypeWhile' in found_values}} #: Conditional 'while' Node. Body executed repeatedly while condition #: value is non-zero. - cudaGraphCondTypeWhile = ccudart.cudaGraphConditionalNodeType.cudaGraphCondTypeWhile{{endif}} + cudaGraphCondTypeWhile = cyruntime.cudaGraphConditionalNodeType.cudaGraphCondTypeWhile{{endif}} {{endif}} {{if 'cudaGraphNodeType' in found_types}} @@ -3791,51 +3791,51 @@ class cudaGraphNodeType(IntEnum): {{if 'cudaGraphNodeTypeKernel' in found_values}} #: GPU kernel node - cudaGraphNodeTypeKernel = ccudart.cudaGraphNodeType.cudaGraphNodeTypeKernel{{endif}} + cudaGraphNodeTypeKernel = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeKernel{{endif}} {{if 'cudaGraphNodeTypeMemcpy' in found_values}} #: Memcpy node - cudaGraphNodeTypeMemcpy = ccudart.cudaGraphNodeType.cudaGraphNodeTypeMemcpy{{endif}} + cudaGraphNodeTypeMemcpy = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeMemcpy{{endif}} {{if 'cudaGraphNodeTypeMemset' in found_values}} #: Memset node - cudaGraphNodeTypeMemset = ccudart.cudaGraphNodeType.cudaGraphNodeTypeMemset{{endif}} + cudaGraphNodeTypeMemset = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeMemset{{endif}} {{if 'cudaGraphNodeTypeHost' in found_values}} #: Host (executable) node - cudaGraphNodeTypeHost = ccudart.cudaGraphNodeType.cudaGraphNodeTypeHost{{endif}} + cudaGraphNodeTypeHost = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeHost{{endif}} {{if 'cudaGraphNodeTypeGraph' in found_values}} #: Node which executes an embedded graph - cudaGraphNodeTypeGraph = ccudart.cudaGraphNodeType.cudaGraphNodeTypeGraph{{endif}} + cudaGraphNodeTypeGraph = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeGraph{{endif}} {{if 'cudaGraphNodeTypeEmpty' in found_values}} #: Empty (no-op) node - cudaGraphNodeTypeEmpty = ccudart.cudaGraphNodeType.cudaGraphNodeTypeEmpty{{endif}} + cudaGraphNodeTypeEmpty = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeEmpty{{endif}} {{if 'cudaGraphNodeTypeWaitEvent' in found_values}} #: External event wait node - cudaGraphNodeTypeWaitEvent = ccudart.cudaGraphNodeType.cudaGraphNodeTypeWaitEvent{{endif}} + cudaGraphNodeTypeWaitEvent = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeWaitEvent{{endif}} {{if 'cudaGraphNodeTypeEventRecord' in found_values}} #: External event record node - cudaGraphNodeTypeEventRecord = ccudart.cudaGraphNodeType.cudaGraphNodeTypeEventRecord{{endif}} + cudaGraphNodeTypeEventRecord = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeEventRecord{{endif}} {{if 'cudaGraphNodeTypeExtSemaphoreSignal' in found_values}} #: External semaphore signal node - cudaGraphNodeTypeExtSemaphoreSignal = ccudart.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreSignal{{endif}} + cudaGraphNodeTypeExtSemaphoreSignal = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreSignal{{endif}} {{if 'cudaGraphNodeTypeExtSemaphoreWait' in found_values}} #: External semaphore wait node - cudaGraphNodeTypeExtSemaphoreWait = 
ccudart.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreWait{{endif}} + cudaGraphNodeTypeExtSemaphoreWait = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreWait{{endif}} {{if 'cudaGraphNodeTypeMemAlloc' in found_values}} #: Memory allocation node - cudaGraphNodeTypeMemAlloc = ccudart.cudaGraphNodeType.cudaGraphNodeTypeMemAlloc{{endif}} + cudaGraphNodeTypeMemAlloc = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeMemAlloc{{endif}} {{if 'cudaGraphNodeTypeMemFree' in found_values}} #: Memory free node - cudaGraphNodeTypeMemFree = ccudart.cudaGraphNodeType.cudaGraphNodeTypeMemFree{{endif}} + cudaGraphNodeTypeMemFree = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeMemFree{{endif}} {{if 'cudaGraphNodeTypeConditional' in found_values}} #: Conditional node May be used to @@ -3863,9 +3863,9 @@ class cudaGraphNodeType(IntEnum): #: a default value when creating the handle and/or #: call #: :py:obj:`~.cudaGraphSetConditional` from device code. - cudaGraphNodeTypeConditional = ccudart.cudaGraphNodeType.cudaGraphNodeTypeConditional{{endif}} + cudaGraphNodeTypeConditional = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeConditional{{endif}} {{if 'cudaGraphNodeTypeCount' in found_values}} - cudaGraphNodeTypeCount = ccudart.cudaGraphNodeType.cudaGraphNodeTypeCount{{endif}} + cudaGraphNodeTypeCount = cyruntime.cudaGraphNodeType.cudaGraphNodeTypeCount{{endif}} {{endif}} {{if 'cudaGraphExecUpdateResult' in found_types}} @@ -3876,44 +3876,44 @@ class cudaGraphExecUpdateResult(IntEnum): {{if 'cudaGraphExecUpdateSuccess' in found_values}} #: The update succeeded - cudaGraphExecUpdateSuccess = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess{{endif}} + cudaGraphExecUpdateSuccess = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess{{endif}} {{if 'cudaGraphExecUpdateError' in found_values}} #: The update failed for an unexpected reason which is described in the #: return value of the function - cudaGraphExecUpdateError = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateError{{endif}} + cudaGraphExecUpdateError = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateError{{endif}} {{if 'cudaGraphExecUpdateErrorTopologyChanged' in found_values}} #: The update failed because the topology changed - cudaGraphExecUpdateErrorTopologyChanged = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorTopologyChanged{{endif}} + cudaGraphExecUpdateErrorTopologyChanged = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorTopologyChanged{{endif}} {{if 'cudaGraphExecUpdateErrorNodeTypeChanged' in found_values}} #: The update failed because a node type changed - cudaGraphExecUpdateErrorNodeTypeChanged = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNodeTypeChanged{{endif}} + cudaGraphExecUpdateErrorNodeTypeChanged = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNodeTypeChanged{{endif}} {{if 'cudaGraphExecUpdateErrorFunctionChanged' in found_values}} #: The update failed because the function of a kernel node changed #: (CUDA driver < 11.2) - cudaGraphExecUpdateErrorFunctionChanged = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorFunctionChanged{{endif}} + cudaGraphExecUpdateErrorFunctionChanged = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorFunctionChanged{{endif}} {{if 'cudaGraphExecUpdateErrorParametersChanged' in found_values}} #: The update failed because the parameters changed in a way that is #: not supported - cudaGraphExecUpdateErrorParametersChanged = 
ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorParametersChanged{{endif}} + cudaGraphExecUpdateErrorParametersChanged = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorParametersChanged{{endif}} {{if 'cudaGraphExecUpdateErrorNotSupported' in found_values}} #: The update failed because something about the node is not supported - cudaGraphExecUpdateErrorNotSupported = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNotSupported{{endif}} + cudaGraphExecUpdateErrorNotSupported = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNotSupported{{endif}} {{if 'cudaGraphExecUpdateErrorUnsupportedFunctionChange' in found_values}} #: The update failed because the function of a kernel node changed in #: an unsupported way - cudaGraphExecUpdateErrorUnsupportedFunctionChange = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorUnsupportedFunctionChange{{endif}} + cudaGraphExecUpdateErrorUnsupportedFunctionChange = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorUnsupportedFunctionChange{{endif}} {{if 'cudaGraphExecUpdateErrorAttributesChanged' in found_values}} #: The update failed because the node attributes changed in a way that #: is not supported - cudaGraphExecUpdateErrorAttributesChanged = ccudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorAttributesChanged{{endif}} + cudaGraphExecUpdateErrorAttributesChanged = cyruntime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorAttributesChanged{{endif}} {{endif}} {{if 'cudaGraphKernelNodeField' in found_types}} @@ -3925,19 +3925,19 @@ class cudaGraphKernelNodeField(IntEnum): {{if 'cudaGraphKernelNodeFieldInvalid' in found_values}} #: Invalid field - cudaGraphKernelNodeFieldInvalid = ccudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldInvalid{{endif}} + cudaGraphKernelNodeFieldInvalid = cyruntime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldInvalid{{endif}} {{if 'cudaGraphKernelNodeFieldGridDim' in found_values}} #: Grid dimension update - cudaGraphKernelNodeFieldGridDim = ccudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldGridDim{{endif}} + cudaGraphKernelNodeFieldGridDim = cyruntime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldGridDim{{endif}} {{if 'cudaGraphKernelNodeFieldParam' in found_values}} #: Kernel parameter update - cudaGraphKernelNodeFieldParam = ccudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldParam{{endif}} + cudaGraphKernelNodeFieldParam = cyruntime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldParam{{endif}} {{if 'cudaGraphKernelNodeFieldEnabled' in found_values}} #: Node enable/disable - cudaGraphKernelNodeFieldEnabled = ccudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldEnabled{{endif}} + cudaGraphKernelNodeFieldEnabled = cyruntime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldEnabled{{endif}} {{endif}} {{if 'cudaGetDriverEntryPointFlags' in found_types}} @@ -3950,15 +3950,15 @@ class cudaGetDriverEntryPointFlags(IntEnum): {{if 'cudaEnableDefault' in found_values}} #: Default search mode for driver symbols. - cudaEnableDefault = ccudart.cudaGetDriverEntryPointFlags.cudaEnableDefault{{endif}} + cudaEnableDefault = cyruntime.cudaGetDriverEntryPointFlags.cudaEnableDefault{{endif}} {{if 'cudaEnableLegacyStream' in found_values}} #: Search for legacy versions of driver symbols. 
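# Every `-`/`+` pair in this region is the same one-token substitution: the
# low-level backing module is referenced as `cyruntime` rather than
# `ccudart`, matching the new `cuda.bindings` layout. A hedged sketch of the
# corresponding cimport change (the exact cimport spelling in the generated
# sources is an assumption, not quoted from this patch):
#
#   before: cimport cuda.ccudart as ccudart
#   after:  cimport cuda.bindings.cyruntime as cyruntime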
- cudaEnableLegacyStream = ccudart.cudaGetDriverEntryPointFlags.cudaEnableLegacyStream{{endif}} + cudaEnableLegacyStream = cyruntime.cudaGetDriverEntryPointFlags.cudaEnableLegacyStream{{endif}} {{if 'cudaEnablePerThreadDefaultStream' in found_values}} #: Search for per-thread versions of driver symbols. - cudaEnablePerThreadDefaultStream = ccudart.cudaGetDriverEntryPointFlags.cudaEnablePerThreadDefaultStream{{endif}} + cudaEnablePerThreadDefaultStream = cyruntime.cudaGetDriverEntryPointFlags.cudaEnablePerThreadDefaultStream{{endif}} {{endif}} {{if 'cudaDriverEntryPointQueryResult' in found_types}} @@ -3970,15 +3970,15 @@ class cudaDriverEntryPointQueryResult(IntEnum): {{if 'cudaDriverEntryPointSuccess' in found_values}} #: Search for symbol found a match - cudaDriverEntryPointSuccess = ccudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess{{endif}} + cudaDriverEntryPointSuccess = cyruntime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess{{endif}} {{if 'cudaDriverEntryPointSymbolNotFound' in found_values}} #: Search for symbol was not found - cudaDriverEntryPointSymbolNotFound = ccudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSymbolNotFound{{endif}} + cudaDriverEntryPointSymbolNotFound = cyruntime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSymbolNotFound{{endif}} {{if 'cudaDriverEntryPointVersionNotSufficent' in found_values}} #: Search for symbol was found but version wasn't great enough - cudaDriverEntryPointVersionNotSufficent = ccudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointVersionNotSufficent{{endif}} + cudaDriverEntryPointVersionNotSufficent = cyruntime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointVersionNotSufficent{{endif}} {{endif}} {{if 'cudaGraphDebugDotFlags' in found_types}} @@ -3989,48 +3989,48 @@ class cudaGraphDebugDotFlags(IntEnum): {{if 'cudaGraphDebugDotFlagsVerbose' in found_values}} #: Output all debug data as if every debug flag is enabled - cudaGraphDebugDotFlagsVerbose = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose{{endif}} + cudaGraphDebugDotFlagsVerbose = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose{{endif}} {{if 'cudaGraphDebugDotFlagsKernelNodeParams' in found_values}} #: Adds :py:obj:`~.cudaKernelNodeParams` to output - cudaGraphDebugDotFlagsKernelNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams{{endif}} + cudaGraphDebugDotFlagsKernelNodeParams = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams{{endif}} {{if 'cudaGraphDebugDotFlagsMemcpyNodeParams' in found_values}} #: Adds :py:obj:`~.cudaMemcpy3DParms` to output - cudaGraphDebugDotFlagsMemcpyNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemcpyNodeParams{{endif}} + cudaGraphDebugDotFlagsMemcpyNodeParams = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemcpyNodeParams{{endif}} {{if 'cudaGraphDebugDotFlagsMemsetNodeParams' in found_values}} #: Adds :py:obj:`~.cudaMemsetParams` to output - cudaGraphDebugDotFlagsMemsetNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemsetNodeParams{{endif}} + cudaGraphDebugDotFlagsMemsetNodeParams = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemsetNodeParams{{endif}} {{if 'cudaGraphDebugDotFlagsHostNodeParams' in found_values}} #: Adds :py:obj:`~.cudaHostNodeParams` to output - cudaGraphDebugDotFlagsHostNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHostNodeParams{{endif}} + cudaGraphDebugDotFlagsHostNodeParams = 
cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHostNodeParams{{endif}} {{if 'cudaGraphDebugDotFlagsEventNodeParams' in found_values}} #: Adds cudaEvent_t handle from record and wait nodes to output - cudaGraphDebugDotFlagsEventNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsEventNodeParams{{endif}} + cudaGraphDebugDotFlagsEventNodeParams = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsEventNodeParams{{endif}} {{if 'cudaGraphDebugDotFlagsExtSemasSignalNodeParams' in found_values}} #: Adds :py:obj:`~.cudaExternalSemaphoreSignalNodeParams` values to #: output - cudaGraphDebugDotFlagsExtSemasSignalNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasSignalNodeParams{{endif}} + cudaGraphDebugDotFlagsExtSemasSignalNodeParams = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasSignalNodeParams{{endif}} {{if 'cudaGraphDebugDotFlagsExtSemasWaitNodeParams' in found_values}} #: Adds :py:obj:`~.cudaExternalSemaphoreWaitNodeParams` to output - cudaGraphDebugDotFlagsExtSemasWaitNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasWaitNodeParams{{endif}} + cudaGraphDebugDotFlagsExtSemasWaitNodeParams = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasWaitNodeParams{{endif}} {{if 'cudaGraphDebugDotFlagsKernelNodeAttributes' in found_values}} #: Adds cudaKernelNodeAttrID values to output - cudaGraphDebugDotFlagsKernelNodeAttributes = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeAttributes{{endif}} + cudaGraphDebugDotFlagsKernelNodeAttributes = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeAttributes{{endif}} {{if 'cudaGraphDebugDotFlagsHandles' in found_values}} #: Adds node handles and every kernel function handle to output - cudaGraphDebugDotFlagsHandles = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHandles{{endif}} + cudaGraphDebugDotFlagsHandles = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHandles{{endif}} {{if 'cudaGraphDebugDotFlagsConditionalNodeParams' in found_values}} #: Adds :py:obj:`~.cudaConditionalNodeParams` to output - cudaGraphDebugDotFlagsConditionalNodeParams = ccudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsConditionalNodeParams{{endif}} + cudaGraphDebugDotFlagsConditionalNodeParams = cyruntime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsConditionalNodeParams{{endif}} {{endif}} {{if 'cudaGraphInstantiateFlags' in found_types}} @@ -4041,7 +4041,7 @@ class cudaGraphInstantiateFlags(IntEnum): {{if 'cudaGraphInstantiateFlagAutoFreeOnLaunch' in found_values}} #: Automatically free memory allocated in a graph before relaunching. - cudaGraphInstantiateFlagAutoFreeOnLaunch = ccudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch{{endif}} + cudaGraphInstantiateFlagAutoFreeOnLaunch = cyruntime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch{{endif}} {{if 'cudaGraphInstantiateFlagUpload' in found_values}} #: Automatically upload the graph after instantiation. Only supported @@ -4049,7 +4049,7 @@ class cudaGraphInstantiateFlags(IntEnum): #: :py:obj:`~.cudaGraphInstantiateWithParams`. The upload will be #: performed using the #: stream provided in `instantiateParams`. 
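# The instantiate flags in this class are ordinary IntEnum members, so they
# combine with `|` like any integer flags. A hedged usage sketch: the
# `cuda.bindings.runtime` import path follows this patch's new module
# layout, and the OR-combination is plain Python mechanics rather than
# anything this diff itself shows:
from cuda.bindings import runtime

flags = (runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload
         | runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority)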
- cudaGraphInstantiateFlagUpload = ccudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload{{endif}} + cudaGraphInstantiateFlagUpload = cyruntime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload{{endif}} {{if 'cudaGraphInstantiateFlagDeviceLaunch' in found_values}} #: Instantiate the graph to be launchable from the device. This flag @@ -4057,12 +4057,12 @@ class cudaGraphInstantiateFlags(IntEnum): #: be used on platforms which support unified addressing. This flag #: cannot be #: used in conjunction with cudaGraphInstantiateFlagAutoFreeOnLaunch. - cudaGraphInstantiateFlagDeviceLaunch = ccudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagDeviceLaunch{{endif}} + cudaGraphInstantiateFlagDeviceLaunch = cyruntime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagDeviceLaunch{{endif}} {{if 'cudaGraphInstantiateFlagUseNodePriority' in found_values}} #: Run the graph using the per-node priority attributes rather than the #: priority of the stream it is launched into. - cudaGraphInstantiateFlagUseNodePriority = ccudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority{{endif}} + cudaGraphInstantiateFlagUseNodePriority = cyruntime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority{{endif}} {{endif}} {{if 'cudaDeviceNumaConfig' in found_types}} @@ -4073,11 +4073,11 @@ class cudaDeviceNumaConfig(IntEnum): {{if 'cudaDeviceNumaConfigNone' in found_values}} #: The GPU is not a NUMA node - cudaDeviceNumaConfigNone = ccudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNone{{endif}} + cudaDeviceNumaConfigNone = cyruntime.cudaDeviceNumaConfig.cudaDeviceNumaConfigNone{{endif}} {{if 'cudaDeviceNumaConfigNumaNode' in found_values}} #: The GPU is a NUMA node, cudaDevAttrNumaId contains its NUMA ID - cudaDeviceNumaConfigNumaNode = ccudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode{{endif}} + cudaDeviceNumaConfigNumaNode = cyruntime.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode{{endif}} {{endif}} {{if 'cudaSurfaceBoundaryMode' in found_types}} @@ -4088,15 +4088,15 @@ class cudaSurfaceBoundaryMode(IntEnum): {{if 'cudaBoundaryModeZero' in found_values}} #: Zero boundary mode - cudaBoundaryModeZero = ccudart.cudaSurfaceBoundaryMode.cudaBoundaryModeZero{{endif}} + cudaBoundaryModeZero = cyruntime.cudaSurfaceBoundaryMode.cudaBoundaryModeZero{{endif}} {{if 'cudaBoundaryModeClamp' in found_values}} #: Clamp boundary mode - cudaBoundaryModeClamp = ccudart.cudaSurfaceBoundaryMode.cudaBoundaryModeClamp{{endif}} + cudaBoundaryModeClamp = cyruntime.cudaSurfaceBoundaryMode.cudaBoundaryModeClamp{{endif}} {{if 'cudaBoundaryModeTrap' in found_values}} #: Trap boundary mode - cudaBoundaryModeTrap = ccudart.cudaSurfaceBoundaryMode.cudaBoundaryModeTrap{{endif}} + cudaBoundaryModeTrap = cyruntime.cudaSurfaceBoundaryMode.cudaBoundaryModeTrap{{endif}} {{endif}} {{if 'cudaSurfaceFormatMode' in found_types}} @@ -4107,11 +4107,11 @@ class cudaSurfaceFormatMode(IntEnum): {{if 'cudaFormatModeForced' in found_values}} #: Forced format mode - cudaFormatModeForced = ccudart.cudaSurfaceFormatMode.cudaFormatModeForced{{endif}} + cudaFormatModeForced = cyruntime.cudaSurfaceFormatMode.cudaFormatModeForced{{endif}} {{if 'cudaFormatModeAuto' in found_values}} #: Auto format mode - cudaFormatModeAuto = ccudart.cudaSurfaceFormatMode.cudaFormatModeAuto{{endif}} + cudaFormatModeAuto = cyruntime.cudaSurfaceFormatMode.cudaFormatModeAuto{{endif}} {{endif}} {{if 'cudaTextureAddressMode' in found_types}} @@ -4122,19 +4122,19 @@ class cudaTextureAddressMode(IntEnum): {{if 'cudaAddressModeWrap' in 
found_values}} #: Wrapping address mode - cudaAddressModeWrap = ccudart.cudaTextureAddressMode.cudaAddressModeWrap{{endif}} + cudaAddressModeWrap = cyruntime.cudaTextureAddressMode.cudaAddressModeWrap{{endif}} {{if 'cudaAddressModeClamp' in found_values}} #: Clamp to edge address mode - cudaAddressModeClamp = ccudart.cudaTextureAddressMode.cudaAddressModeClamp{{endif}} + cudaAddressModeClamp = cyruntime.cudaTextureAddressMode.cudaAddressModeClamp{{endif}} {{if 'cudaAddressModeMirror' in found_values}} #: Mirror address mode - cudaAddressModeMirror = ccudart.cudaTextureAddressMode.cudaAddressModeMirror{{endif}} + cudaAddressModeMirror = cyruntime.cudaTextureAddressMode.cudaAddressModeMirror{{endif}} {{if 'cudaAddressModeBorder' in found_values}} #: Border address mode - cudaAddressModeBorder = ccudart.cudaTextureAddressMode.cudaAddressModeBorder{{endif}} + cudaAddressModeBorder = cyruntime.cudaTextureAddressMode.cudaAddressModeBorder{{endif}} {{endif}} {{if 'cudaTextureFilterMode' in found_types}} @@ -4145,11 +4145,11 @@ class cudaTextureFilterMode(IntEnum): {{if 'cudaFilterModePoint' in found_values}} #: Point filter mode - cudaFilterModePoint = ccudart.cudaTextureFilterMode.cudaFilterModePoint{{endif}} + cudaFilterModePoint = cyruntime.cudaTextureFilterMode.cudaFilterModePoint{{endif}} {{if 'cudaFilterModeLinear' in found_values}} #: Linear filter mode - cudaFilterModeLinear = ccudart.cudaTextureFilterMode.cudaFilterModeLinear{{endif}} + cudaFilterModeLinear = cyruntime.cudaTextureFilterMode.cudaFilterModeLinear{{endif}} {{endif}} {{if 'cudaTextureReadMode' in found_types}} @@ -4160,24 +4160,24 @@ class cudaTextureReadMode(IntEnum): {{if 'cudaReadModeElementType' in found_values}} #: Read texture as specified element type - cudaReadModeElementType = ccudart.cudaTextureReadMode.cudaReadModeElementType{{endif}} + cudaReadModeElementType = cyruntime.cudaTextureReadMode.cudaReadModeElementType{{endif}} {{if 'cudaReadModeNormalizedFloat' in found_values}} #: Read texture as normalized float - cudaReadModeNormalizedFloat = ccudart.cudaTextureReadMode.cudaReadModeNormalizedFloat{{endif}} + cudaReadModeNormalizedFloat = cyruntime.cudaTextureReadMode.cudaReadModeNormalizedFloat{{endif}} {{endif}} {{if 'cudaRoundMode' in found_types}} class cudaRoundMode(IntEnum): """""" {{if 'cudaRoundNearest' in found_values}} - cudaRoundNearest = ccudart.cudaRoundMode.cudaRoundNearest{{endif}} + cudaRoundNearest = cyruntime.cudaRoundMode.cudaRoundNearest{{endif}} {{if 'cudaRoundZero' in found_values}} - cudaRoundZero = ccudart.cudaRoundMode.cudaRoundZero{{endif}} + cudaRoundZero = cyruntime.cudaRoundMode.cudaRoundZero{{endif}} {{if 'cudaRoundPosInf' in found_values}} - cudaRoundPosInf = ccudart.cudaRoundMode.cudaRoundPosInf{{endif}} + cudaRoundPosInf = cyruntime.cudaRoundMode.cudaRoundPosInf{{endif}} {{if 'cudaRoundMinInf' in found_values}} - cudaRoundMinInf = ccudart.cudaRoundMode.cudaRoundMinInf{{endif}} + cudaRoundMinInf = cyruntime.cudaRoundMode.cudaRoundMinInf{{endif}} {{endif}} {{if True}} @@ -4188,17 +4188,17 @@ class cudaGLDeviceList(IntEnum): {{if True}} #: The CUDA devices for all GPUs used by the current OpenGL context - cudaGLDeviceListAll = ccudart.cudaGLDeviceList.cudaGLDeviceListAll{{endif}} + cudaGLDeviceListAll = cyruntime.cudaGLDeviceList.cudaGLDeviceListAll{{endif}} {{if True}} #: The CUDA devices for the GPUs used by the current OpenGL context in #: its currently rendering frame - cudaGLDeviceListCurrentFrame = ccudart.cudaGLDeviceList.cudaGLDeviceListCurrentFrame{{endif}} + 
cudaGLDeviceListCurrentFrame = cyruntime.cudaGLDeviceList.cudaGLDeviceListCurrentFrame{{endif}} {{if True}} #: The CUDA devices for the GPUs to be used by the current OpenGL #: context in the next frame - cudaGLDeviceListNextFrame = ccudart.cudaGLDeviceList.cudaGLDeviceListNextFrame{{endif}} + cudaGLDeviceListNextFrame = cyruntime.cudaGLDeviceList.cudaGLDeviceListNextFrame{{endif}} {{endif}} {{if True}} @@ -4209,15 +4209,15 @@ class cudaGLMapFlags(IntEnum): {{if True}} #: Default; Assume resource can be read/written - cudaGLMapFlagsNone = ccudart.cudaGLMapFlags.cudaGLMapFlagsNone{{endif}} + cudaGLMapFlagsNone = cyruntime.cudaGLMapFlags.cudaGLMapFlagsNone{{endif}} {{if True}} #: CUDA kernels will not write to this resource - cudaGLMapFlagsReadOnly = ccudart.cudaGLMapFlags.cudaGLMapFlagsReadOnly{{endif}} + cudaGLMapFlagsReadOnly = cyruntime.cudaGLMapFlags.cudaGLMapFlagsReadOnly{{endif}} {{if True}} #: CUDA kernels will only write to and will not read from this resource - cudaGLMapFlagsWriteDiscard = ccudart.cudaGLMapFlags.cudaGLMapFlagsWriteDiscard{{endif}} + cudaGLMapFlagsWriteDiscard = cyruntime.cudaGLMapFlags.cudaGLMapFlagsWriteDiscard{{endif}} {{endif}} {{if 'cudaLaunchAttributeID' in found_types}} @@ -4229,32 +4229,32 @@ class cudaStreamAttrID(IntEnum): {{if 'cudaLaunchAttributeIgnore' in found_values}} #: Ignored entry, for convenient composition - cudaLaunchAttributeIgnore = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeIgnore{{endif}} + cudaLaunchAttributeIgnore = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeIgnore{{endif}} {{if 'cudaLaunchAttributeAccessPolicyWindow' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.accessPolicyWindow`. - cudaLaunchAttributeAccessPolicyWindow = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow{{endif}} + cudaLaunchAttributeAccessPolicyWindow = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow{{endif}} {{if 'cudaLaunchAttributeCooperative' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.cooperative`. - cudaLaunchAttributeCooperative = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeCooperative{{endif}} + cudaLaunchAttributeCooperative = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeCooperative{{endif}} {{if 'cudaLaunchAttributeSynchronizationPolicy' in found_values}} #: Valid for streams. See #: :py:obj:`~.cudaLaunchAttributeValue.syncPolicy`. - cudaLaunchAttributeSynchronizationPolicy = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy{{endif}} + cudaLaunchAttributeSynchronizationPolicy = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy{{endif}} {{if 'cudaLaunchAttributeClusterDimension' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.clusterDim`. - cudaLaunchAttributeClusterDimension = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension{{endif}} + cudaLaunchAttributeClusterDimension = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension{{endif}} {{if 'cudaLaunchAttributeClusterSchedulingPolicyPreference' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference`. 
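# Note the enclosing class names in these hunks: the same
# cudaLaunchAttribute* members are generated once under `cudaStreamAttrID`
# (above) and once more under `cudaKernelNodeAttrID` (below), both gated on
# 'cudaLaunchAttributeID' in found_types -- stream and kernel-node attribute
# IDs share the launch-attribute value space. A hedged sketch; IntEnum
# members of different classes compare equal by integer value:
from cuda.bindings import runtime

assert (runtime.cudaStreamAttrID.cudaLaunchAttributePriority
        == runtime.cudaKernelNodeAttrID.cudaLaunchAttributePriority)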
- cudaLaunchAttributeClusterSchedulingPolicyPreference = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference{{endif}} + cudaLaunchAttributeClusterSchedulingPolicyPreference = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference{{endif}} {{if 'cudaLaunchAttributeProgrammaticStreamSerialization' in found_values}} #: Valid for launches. Setting @@ -4266,7 +4266,7 @@ class cudaStreamAttrID(IntEnum): #: The dependent launches can choose to wait on the dependency using #: the programmatic sync (cudaGridDependencySynchronize() or equivalent #: PTX instructions). - cudaLaunchAttributeProgrammaticStreamSerialization = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization{{endif}} + cudaLaunchAttributeProgrammaticStreamSerialization = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization{{endif}} {{if 'cudaLaunchAttributeProgrammaticEvent' in found_values}} #: Valid for launches. Set @@ -4289,22 +4289,22 @@ class cudaStreamAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.cudaEventDisableTiming` flag set). - cudaLaunchAttributeProgrammaticEvent = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent{{endif}} + cudaLaunchAttributeProgrammaticEvent = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent{{endif}} {{if 'cudaLaunchAttributePriority' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.priority`. - cudaLaunchAttributePriority = ccudart.cudaLaunchAttributeID.cudaLaunchAttributePriority{{endif}} + cudaLaunchAttributePriority = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributePriority{{endif}} {{if 'cudaLaunchAttributeMemSyncDomainMap' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.memSyncDomainMap`. - cudaLaunchAttributeMemSyncDomainMap = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap{{endif}} + cudaLaunchAttributeMemSyncDomainMap = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap{{endif}} {{if 'cudaLaunchAttributeMemSyncDomain' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.memSyncDomain`. - cudaLaunchAttributeMemSyncDomain = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain{{endif}} + cudaLaunchAttributeMemSyncDomain = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain{{endif}} {{if 'cudaLaunchAttributeLaunchCompletionEvent' in found_values}} #: Valid for launches. Set @@ -4325,7 +4325,7 @@ class cudaStreamAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.cudaEventDisableTiming` flag set). - cudaLaunchAttributeLaunchCompletionEvent = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent{{endif}} + cudaLaunchAttributeLaunchCompletionEvent = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent{{endif}} {{if 'cudaLaunchAttributeDeviceUpdatableKernelNode' in found_values}} #: Valid for graph nodes, launches. This attribute is graphs-only, and @@ -4359,7 +4359,7 @@ class cudaStreamAttrID(IntEnum): #: graph, if host-side executable graph updates are made to the device- #: updatable nodes, the graph must be uploaded before it is launched #: again. 
- cudaLaunchAttributeDeviceUpdatableKernelNode = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode{{endif}} + cudaLaunchAttributeDeviceUpdatableKernelNode = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode{{endif}} {{if 'cudaLaunchAttributePreferredSharedMemoryCarveout' in found_values}} #: Valid for launches. On devices where the L1 cache and shared memory @@ -4371,7 +4371,7 @@ class cudaStreamAttrID(IntEnum): #: :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout`. This is #: only a hint, and the driver can choose a different configuration if #: required for the launch. - cudaLaunchAttributePreferredSharedMemoryCarveout = ccudart.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout{{endif}} + cudaLaunchAttributePreferredSharedMemoryCarveout = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout{{endif}} {{endif}} {{if 'cudaLaunchAttributeID' in found_types}} @@ -4383,32 +4383,32 @@ class cudaKernelNodeAttrID(IntEnum): {{if 'cudaLaunchAttributeIgnore' in found_values}} #: Ignored entry, for convenient composition - cudaLaunchAttributeIgnore = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeIgnore{{endif}} + cudaLaunchAttributeIgnore = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeIgnore{{endif}} {{if 'cudaLaunchAttributeAccessPolicyWindow' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.accessPolicyWindow`. - cudaLaunchAttributeAccessPolicyWindow = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow{{endif}} + cudaLaunchAttributeAccessPolicyWindow = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow{{endif}} {{if 'cudaLaunchAttributeCooperative' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.cooperative`. - cudaLaunchAttributeCooperative = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeCooperative{{endif}} + cudaLaunchAttributeCooperative = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeCooperative{{endif}} {{if 'cudaLaunchAttributeSynchronizationPolicy' in found_values}} #: Valid for streams. See #: :py:obj:`~.cudaLaunchAttributeValue.syncPolicy`. - cudaLaunchAttributeSynchronizationPolicy = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy{{endif}} + cudaLaunchAttributeSynchronizationPolicy = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy{{endif}} {{if 'cudaLaunchAttributeClusterDimension' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.clusterDim`. - cudaLaunchAttributeClusterDimension = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension{{endif}} + cudaLaunchAttributeClusterDimension = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension{{endif}} {{if 'cudaLaunchAttributeClusterSchedulingPolicyPreference' in found_values}} #: Valid for graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference`. - cudaLaunchAttributeClusterSchedulingPolicyPreference = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference{{endif}} + cudaLaunchAttributeClusterSchedulingPolicyPreference = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference{{endif}} {{if 'cudaLaunchAttributeProgrammaticStreamSerialization' in found_values}} #: Valid for launches. 
Setting @@ -4420,7 +4420,7 @@ class cudaKernelNodeAttrID(IntEnum): #: The dependent launches can choose to wait on the dependency using #: the programmatic sync (cudaGridDependencySynchronize() or equivalent #: PTX instructions). - cudaLaunchAttributeProgrammaticStreamSerialization = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization{{endif}} + cudaLaunchAttributeProgrammaticStreamSerialization = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization{{endif}} {{if 'cudaLaunchAttributeProgrammaticEvent' in found_values}} #: Valid for launches. Set @@ -4443,22 +4443,22 @@ class cudaKernelNodeAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.cudaEventDisableTiming` flag set). - cudaLaunchAttributeProgrammaticEvent = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent{{endif}} + cudaLaunchAttributeProgrammaticEvent = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent{{endif}} {{if 'cudaLaunchAttributePriority' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.priority`. - cudaLaunchAttributePriority = ccudart.cudaLaunchAttributeID.cudaLaunchAttributePriority{{endif}} + cudaLaunchAttributePriority = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributePriority{{endif}} {{if 'cudaLaunchAttributeMemSyncDomainMap' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.memSyncDomainMap`. - cudaLaunchAttributeMemSyncDomainMap = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap{{endif}} + cudaLaunchAttributeMemSyncDomainMap = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap{{endif}} {{if 'cudaLaunchAttributeMemSyncDomain' in found_values}} #: Valid for streams, graph nodes, launches. See #: :py:obj:`~.cudaLaunchAttributeValue.memSyncDomain`. - cudaLaunchAttributeMemSyncDomain = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain{{endif}} + cudaLaunchAttributeMemSyncDomain = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain{{endif}} {{if 'cudaLaunchAttributeLaunchCompletionEvent' in found_values}} #: Valid for launches. Set @@ -4479,7 +4479,7 @@ class cudaKernelNodeAttrID(IntEnum): #: The event supplied must not be an interprocess or interop event. #: The event must disable timing (i.e. must be created with the #: :py:obj:`~.cudaEventDisableTiming` flag set). - cudaLaunchAttributeLaunchCompletionEvent = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent{{endif}} + cudaLaunchAttributeLaunchCompletionEvent = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent{{endif}} {{if 'cudaLaunchAttributeDeviceUpdatableKernelNode' in found_values}} #: Valid for graph nodes, launches. This attribute is graphs-only, and @@ -4513,7 +4513,7 @@ class cudaKernelNodeAttrID(IntEnum): #: graph, if host-side executable graph updates are made to the device- #: updatable nodes, the graph must be uploaded before it is launched #: again. - cudaLaunchAttributeDeviceUpdatableKernelNode = ccudart.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode{{endif}} + cudaLaunchAttributeDeviceUpdatableKernelNode = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode{{endif}} {{if 'cudaLaunchAttributePreferredSharedMemoryCarveout' in found_values}} #: Valid for launches. 
On devices where the L1 cache and shared memory @@ -4525,7 +4525,7 @@ class cudaKernelNodeAttrID(IntEnum): #: :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout`. This is #: only a hint, and the driver can choose a different configuration if #: required for the launch. - cudaLaunchAttributePreferredSharedMemoryCarveout = ccudart.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout{{endif}} + cudaLaunchAttributePreferredSharedMemoryCarveout = cyruntime.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout{{endif}} {{endif}} {{if 'cudaArray_t' in found_types}} @@ -4543,9 +4543,9 @@ cdef class cudaArray_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4574,9 +4574,9 @@ cdef class cudaArray_const_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4605,9 +4605,9 @@ cdef class cudaMipmappedArray_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4636,9 +4636,9 @@ cdef class cudaMipmappedArray_const_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4667,9 +4667,9 @@ cdef class cudaGraphicsResource_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4698,9 +4698,9 @@ cdef class cudaExternalMemory_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4729,9 +4729,9 @@ cdef class cudaExternalSemaphore_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4760,9 +4760,9 @@ cdef class cudaKernel_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4791,9 +4791,9 @@ cdef class cudaGraphDeviceNode_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4822,9 +4822,9 @@ cdef class cudaAsyncCallbackHandle_t: def 
__cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4851,9 +4851,9 @@ cdef class EGLImageKHR: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4880,9 +4880,9 @@ cdef class EGLStreamKHR: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4909,9 +4909,9 @@ cdef class EGLSyncKHR: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4938,9 +4938,9 @@ cdef class cudaHostFn_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4967,9 +4967,9 @@ cdef class cudaAsyncCallback: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -4996,9 +4996,9 @@ cdef class cudaStreamCallback_t: def __cinit__(self, void_ptr init_value = 0, void_ptr _ptr = 0): if _ptr == 0: self._ptr = &self.__val - self._ptr[0] = init_value + self._ptr[0] = init_value else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, *args, **kwargs): pass def __repr__(self): @@ -5034,7 +5034,7 @@ cdef class dim3: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -5107,7 +5107,7 @@ cdef class cudaChannelFormatDesc: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -5191,7 +5191,7 @@ cdef class anon_struct0: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -5265,7 +5265,7 @@ cdef class cudaArraySparseProperties: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._tileExtent = anon_struct0(_ptr=self._ptr) def __dealloc__(self): @@ -5303,7 +5303,7 @@ cdef class cudaArraySparseProperties: return self._tileExtent @tileExtent.setter def tileExtent(self, tileExtent not None : anon_struct0): - string.memcpy(&self._ptr[0].tileExtent, tileExtent.getPtr(), sizeof(self._ptr[0].tileExtent)) + string.memcpy(&self._ptr[0].tileExtent, tileExtent.getPtr(), sizeof(self._ptr[0].tileExtent)) @property def miptailFirstLevel(self): return self._ptr[0].miptailFirstLevel @@ -5354,7 +5354,7 @@ cdef class cudaArrayMemoryRequirements: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr 
_ptr = 0): pass def __dealloc__(self): @@ -5425,7 +5425,7 @@ cdef class cudaPitchedPtr: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -5459,8 +5459,8 @@ cdef class cudaPitchedPtr: return self._ptr[0].ptr @ptr.setter def ptr(self, ptr): - _cptr = utils.HelperInputVoidPtr(ptr) - self._ptr[0].ptr = _cptr.cptr + _cyptr = utils.HelperInputVoidPtr(ptr) + self._ptr[0].ptr = _cyptr.cptr @property def pitch(self): return self._ptr[0].pitch @@ -5506,7 +5506,7 @@ cdef class cudaExtent: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -5575,7 +5575,7 @@ cdef class cudaPos: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -5654,7 +5654,7 @@ cdef class cudaMemcpy3DParms: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._srcArray = cudaArray_t(_ptr=&self._ptr[0].srcArray) self._srcPos = cudaPos(_ptr=&self._ptr[0].srcPos) @@ -5710,61 +5710,61 @@ cdef class cudaMemcpy3DParms: return self._srcArray @srcArray.setter def srcArray(self, srcArray): - cdef ccudart.cudaArray_t csrcArray + cdef cyruntime.cudaArray_t cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (cudaArray_t,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(cudaArray_t(srcArray)) - csrcArray = psrcArray - self._srcArray._ptr[0] = csrcArray + cysrcArray = psrcArray + self._srcArray._ptr[0] = cysrcArray @property def srcPos(self): return self._srcPos @srcPos.setter def srcPos(self, srcPos not None : cudaPos): - string.memcpy(&self._ptr[0].srcPos, srcPos.getPtr(), sizeof(self._ptr[0].srcPos)) + string.memcpy(&self._ptr[0].srcPos, srcPos.getPtr(), sizeof(self._ptr[0].srcPos)) @property def srcPtr(self): return self._srcPtr @srcPtr.setter def srcPtr(self, srcPtr not None : cudaPitchedPtr): - string.memcpy(&self._ptr[0].srcPtr, srcPtr.getPtr(), sizeof(self._ptr[0].srcPtr)) + string.memcpy(&self._ptr[0].srcPtr, srcPtr.getPtr(), sizeof(self._ptr[0].srcPtr)) @property def dstArray(self): return self._dstArray @dstArray.setter def dstArray(self, dstArray): - cdef ccudart.cudaArray_t cdstArray + cdef cyruntime.cudaArray_t cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (cudaArray_t,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(cudaArray_t(dstArray)) - cdstArray = pdstArray - self._dstArray._ptr[0] = cdstArray + cydstArray = pdstArray + self._dstArray._ptr[0] = cydstArray @property def dstPos(self): return self._dstPos @dstPos.setter def dstPos(self, dstPos not None : cudaPos): - string.memcpy(&self._ptr[0].dstPos, dstPos.getPtr(), sizeof(self._ptr[0].dstPos)) + string.memcpy(&self._ptr[0].dstPos, dstPos.getPtr(), sizeof(self._ptr[0].dstPos)) @property def dstPtr(self): return self._dstPtr @dstPtr.setter def dstPtr(self, dstPtr not None : cudaPitchedPtr): - string.memcpy(&self._ptr[0].dstPtr, dstPtr.getPtr(), sizeof(self._ptr[0].dstPtr)) + string.memcpy(&self._ptr[0].dstPtr, dstPtr.getPtr(), sizeof(self._ptr[0].dstPtr)) @property def extent(self): return self._extent @extent.setter def extent(self, extent not None : cudaExtent): - string.memcpy(&self._ptr[0].extent, extent.getPtr(), 
sizeof(self._ptr[0].extent)) + string.memcpy(&self._ptr[0].extent, extent.getPtr(), sizeof(self._ptr[0].extent)) @property def kind(self): return cudaMemcpyKind(self._ptr[0].kind) @@ -5797,7 +5797,7 @@ cdef class cudaMemcpyNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._copyParams = cudaMemcpy3DParms(_ptr=&self._ptr[0].copyParams) def __dealloc__(self): @@ -5839,7 +5839,7 @@ cdef class cudaMemcpyNodeParams: return self._copyParams @copyParams.setter def copyParams(self, copyParams not None : cudaMemcpy3DParms): - string.memcpy(&self._ptr[0].copyParams, copyParams.getPtr(), sizeof(self._ptr[0].copyParams)) + string.memcpy(&self._ptr[0].copyParams, copyParams.getPtr(), sizeof(self._ptr[0].copyParams)) {{endif}} {{if 'struct cudaMemcpy3DPeerParms' in found_types}} @@ -5878,7 +5878,7 @@ cdef class cudaMemcpy3DPeerParms: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._srcArray = cudaArray_t(_ptr=&self._ptr[0].srcArray) self._srcPos = cudaPos(_ptr=&self._ptr[0].srcPos) @@ -5938,28 +5938,28 @@ cdef class cudaMemcpy3DPeerParms: return self._srcArray @srcArray.setter def srcArray(self, srcArray): - cdef ccudart.cudaArray_t csrcArray + cdef cyruntime.cudaArray_t cysrcArray if srcArray is None: - csrcArray = 0 + cysrcArray = 0 elif isinstance(srcArray, (cudaArray_t,)): psrcArray = int(srcArray) - csrcArray = psrcArray + cysrcArray = psrcArray else: psrcArray = int(cudaArray_t(srcArray)) - csrcArray = psrcArray - self._srcArray._ptr[0] = csrcArray + cysrcArray = psrcArray + self._srcArray._ptr[0] = cysrcArray @property def srcPos(self): return self._srcPos @srcPos.setter def srcPos(self, srcPos not None : cudaPos): - string.memcpy(&self._ptr[0].srcPos, srcPos.getPtr(), sizeof(self._ptr[0].srcPos)) + string.memcpy(&self._ptr[0].srcPos, srcPos.getPtr(), sizeof(self._ptr[0].srcPos)) @property def srcPtr(self): return self._srcPtr @srcPtr.setter def srcPtr(self, srcPtr not None : cudaPitchedPtr): - string.memcpy(&self._ptr[0].srcPtr, srcPtr.getPtr(), sizeof(self._ptr[0].srcPtr)) + string.memcpy(&self._ptr[0].srcPtr, srcPtr.getPtr(), sizeof(self._ptr[0].srcPtr)) @property def srcDevice(self): return self._ptr[0].srcDevice @@ -5971,28 +5971,28 @@ cdef class cudaMemcpy3DPeerParms: return self._dstArray @dstArray.setter def dstArray(self, dstArray): - cdef ccudart.cudaArray_t cdstArray + cdef cyruntime.cudaArray_t cydstArray if dstArray is None: - cdstArray = 0 + cydstArray = 0 elif isinstance(dstArray, (cudaArray_t,)): pdstArray = int(dstArray) - cdstArray = pdstArray + cydstArray = pdstArray else: pdstArray = int(cudaArray_t(dstArray)) - cdstArray = pdstArray - self._dstArray._ptr[0] = cdstArray + cydstArray = pdstArray + self._dstArray._ptr[0] = cydstArray @property def dstPos(self): return self._dstPos @dstPos.setter def dstPos(self, dstPos not None : cudaPos): - string.memcpy(&self._ptr[0].dstPos, dstPos.getPtr(), sizeof(self._ptr[0].dstPos)) + string.memcpy(&self._ptr[0].dstPos, dstPos.getPtr(), sizeof(self._ptr[0].dstPos)) @property def dstPtr(self): return self._dstPtr @dstPtr.setter def dstPtr(self, dstPtr not None : cudaPitchedPtr): - string.memcpy(&self._ptr[0].dstPtr, dstPtr.getPtr(), sizeof(self._ptr[0].dstPtr)) + string.memcpy(&self._ptr[0].dstPtr, dstPtr.getPtr(), sizeof(self._ptr[0].dstPtr)) @property def dstDevice(self): return self._ptr[0].dstDevice @@ -6004,7 +6004,7 @@ cdef class cudaMemcpy3DPeerParms: return self._extent 
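# The cdef wrappers in these hunks all share one storage pattern: when
# constructed standalone (`_ptr == 0`) they point `self._ptr` at their own
# `self.__val`; when nested they borrow the parent struct's field address,
# and every setter copies by value (string.memcpy for struct fields,
# utils.HelperInputVoidPtr for raw void pointers). A runnable pure-Python
# analogy of the owned-vs-borrowed idea using ctypes; `Inner`/`Outer` are
# hypothetical stand-ins, not types from this patch:
import ctypes

class Inner(ctypes.Structure):
    _fields_ = [("x", ctypes.c_int)]

class Outer(ctypes.Structure):
    _fields_ = [("inner", Inner)]

outer = Outer()
# Borrow a pointer into the parent, like `self._ptr = &parent._ptr[0].field`:
borrowed = ctypes.cast(ctypes.addressof(outer) + Outer.inner.offset,
                       ctypes.POINTER(Inner))
borrowed.contents.x = 7
assert outer.inner.x == 7  # writes land in the parent; nothing was copied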
@extent.setter def extent(self, extent not None : cudaExtent): - string.memcpy(&self._ptr[0].extent, extent.getPtr(), sizeof(self._ptr[0].extent)) + string.memcpy(&self._ptr[0].extent, extent.getPtr(), sizeof(self._ptr[0].extent)) {{endif}} {{if 'struct cudaMemsetParams' in found_types}} @@ -6037,7 +6037,7 @@ cdef class cudaMemsetParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -6079,8 +6079,8 @@ cdef class cudaMemsetParams: return self._ptr[0].dst @dst.setter def dst(self, dst): - _cdst = utils.HelperInputVoidPtr(dst) - self._ptr[0].dst = _cdst.cptr + _cydst = utils.HelperInputVoidPtr(dst) + self._ptr[0].dst = _cydst.cptr @property def pitch(self): return self._ptr[0].pitch @@ -6143,7 +6143,7 @@ cdef class cudaMemsetParamsV2: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -6185,8 +6185,8 @@ cdef class cudaMemsetParamsV2: return self._ptr[0].dst @dst.setter def dst(self, dst): - _cdst = utils.HelperInputVoidPtr(dst) - self._ptr[0].dst = _cdst.cptr + _cydst = utils.HelperInputVoidPtr(dst) + self._ptr[0].dst = _cydst.cptr @property def pitch(self): return self._ptr[0].pitch @@ -6258,7 +6258,7 @@ cdef class cudaAccessPolicyWindow: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -6296,8 +6296,8 @@ cdef class cudaAccessPolicyWindow: return self._ptr[0].base_ptr @base_ptr.setter def base_ptr(self, base_ptr): - _cbase_ptr = utils.HelperInputVoidPtr(base_ptr) - self._ptr[0].base_ptr = _cbase_ptr.cptr + _cybase_ptr = utils.HelperInputVoidPtr(base_ptr) + self._ptr[0].base_ptr = _cybase_ptr.cptr @property def num_bytes(self): return self._ptr[0].num_bytes @@ -6346,7 +6346,7 @@ cdef class cudaHostNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._fn = cudaHostFn_t(_ptr=&self._ptr[0].fn) def __dealloc__(self): @@ -6372,23 +6372,23 @@ cdef class cudaHostNodeParams: return self._fn @fn.setter def fn(self, fn): - cdef ccudart.cudaHostFn_t cfn + cdef cyruntime.cudaHostFn_t cyfn if fn is None: - cfn = 0 + cyfn = 0 elif isinstance(fn, (cudaHostFn_t)): pfn = int(fn) - cfn = pfn + cyfn = pfn else: pfn = int(cudaHostFn_t(fn)) - cfn = pfn - self._fn._ptr[0] = cfn + cyfn = pfn + self._fn._ptr[0] = cyfn @property def userData(self): return self._ptr[0].userData @userData.setter def userData(self, userData): - _cuserData = utils.HelperInputVoidPtr(userData) - self._ptr[0].userData = _cuserData.cptr + _cyuserData = utils.HelperInputVoidPtr(userData) + self._ptr[0].userData = _cyuserData.cptr {{endif}} {{if 'struct cudaHostNodeParamsV2' in found_types}} @@ -6413,7 +6413,7 @@ cdef class cudaHostNodeParamsV2: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._fn = cudaHostFn_t(_ptr=&self._ptr[0].fn) def __dealloc__(self): @@ -6439,23 +6439,23 @@ cdef class cudaHostNodeParamsV2: return self._fn @fn.setter def fn(self, fn): - cdef ccudart.cudaHostFn_t cfn + cdef cyruntime.cudaHostFn_t cyfn if fn is None: - cfn = 0 + cyfn = 0 elif isinstance(fn, (cudaHostFn_t)): pfn = int(fn) - cfn = pfn + cyfn = pfn else: pfn = int(cudaHostFn_t(fn)) - cfn = pfn - self._fn._ptr[0] = cfn + cyfn = pfn + self._fn._ptr[0] = cyfn @property def userData(self): return 
self._ptr[0].userData @userData.setter def userData(self, userData): - _cuserData = utils.HelperInputVoidPtr(userData) - self._ptr[0].userData = _cuserData.cptr + _cyuserData = utils.HelperInputVoidPtr(userData) + self._ptr[0].userData = _cyuserData.cptr {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -6473,7 +6473,7 @@ cdef class anon_struct1: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._array = cudaArray_t(_ptr=&self._ptr[0].res.array.array) @@ -6496,16 +6496,16 @@ cdef class anon_struct1: return self._array @array.setter def array(self, array): - cdef ccudart.cudaArray_t carray + cdef cyruntime.cudaArray_t cyarray if array is None: - carray = 0 + cyarray = 0 elif isinstance(array, (cudaArray_t,)): parray = int(array) - carray = parray + cyarray = parray else: parray = int(cudaArray_t(array)) - carray = parray - self._array._ptr[0] = carray + cyarray = parray + self._array._ptr[0] = cyarray {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -6523,7 +6523,7 @@ cdef class anon_struct2: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._mipmap = cudaMipmappedArray_t(_ptr=&self._ptr[0].res.mipmap.mipmap) @@ -6546,16 +6546,16 @@ cdef class anon_struct2: return self._mipmap @mipmap.setter def mipmap(self, mipmap): - cdef ccudart.cudaMipmappedArray_t cmipmap + cdef cyruntime.cudaMipmappedArray_t cymipmap if mipmap is None: - cmipmap = 0 + cymipmap = 0 elif isinstance(mipmap, (cudaMipmappedArray_t,)): pmipmap = int(mipmap) - cmipmap = pmipmap + cymipmap = pmipmap else: pmipmap = int(cudaMipmappedArray_t(mipmap)) - cmipmap = pmipmap - self._mipmap._ptr[0] = cmipmap + cymipmap = pmipmap + self._mipmap._ptr[0] = cymipmap {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -6577,7 +6577,7 @@ cdef class anon_struct3: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._desc = cudaChannelFormatDesc(_ptr=&self._ptr[0].res.linear.desc) @@ -6608,14 +6608,14 @@ cdef class anon_struct3: return self._ptr[0].res.linear.devPtr @devPtr.setter def devPtr(self, devPtr): - _cdevPtr = utils.HelperInputVoidPtr(devPtr) - self._ptr[0].res.linear.devPtr = _cdevPtr.cptr + _cydevPtr = utils.HelperInputVoidPtr(devPtr) + self._ptr[0].res.linear.devPtr = _cydevPtr.cptr @property def desc(self): return self._desc @desc.setter def desc(self, desc not None : cudaChannelFormatDesc): - string.memcpy(&self._ptr[0].res.linear.desc, desc.getPtr(), sizeof(self._ptr[0].res.linear.desc)) + string.memcpy(&self._ptr[0].res.linear.desc, desc.getPtr(), sizeof(self._ptr[0].res.linear.desc)) @property def sizeInBytes(self): return self._ptr[0].res.linear.sizeInBytes @@ -6647,7 +6647,7 @@ cdef class anon_struct4: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._desc = cudaChannelFormatDesc(_ptr=&self._ptr[0].res.pitch2D.desc) @@ -6686,14 +6686,14 @@ cdef class anon_struct4: return self._ptr[0].res.pitch2D.devPtr @devPtr.setter def devPtr(self, devPtr): - _cdevPtr = utils.HelperInputVoidPtr(devPtr) - self._ptr[0].res.pitch2D.devPtr = _cdevPtr.cptr + _cydevPtr = utils.HelperInputVoidPtr(devPtr) + self._ptr[0].res.pitch2D.devPtr = _cydevPtr.cptr @property def desc(self): return self._desc @desc.setter def desc(self, desc not None : cudaChannelFormatDesc): - string.memcpy(&self._ptr[0].res.pitch2D.desc, desc.getPtr(), 
sizeof(self._ptr[0].res.pitch2D.desc)) + string.memcpy(&self._ptr[0].res.pitch2D.desc, desc.getPtr(), sizeof(self._ptr[0].res.pitch2D.desc)) @property def width(self): return self._ptr[0].res.pitch2D.width @@ -6735,7 +6735,7 @@ cdef class anon_union0: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._array = anon_struct1(_ptr=self._ptr) @@ -6773,25 +6773,25 @@ cdef class anon_union0: return self._array @array.setter def array(self, array not None : anon_struct1): - string.memcpy(&self._ptr[0].res.array, array.getPtr(), sizeof(self._ptr[0].res.array)) + string.memcpy(&self._ptr[0].res.array, array.getPtr(), sizeof(self._ptr[0].res.array)) @property def mipmap(self): return self._mipmap @mipmap.setter def mipmap(self, mipmap not None : anon_struct2): - string.memcpy(&self._ptr[0].res.mipmap, mipmap.getPtr(), sizeof(self._ptr[0].res.mipmap)) + string.memcpy(&self._ptr[0].res.mipmap, mipmap.getPtr(), sizeof(self._ptr[0].res.mipmap)) @property def linear(self): return self._linear @linear.setter def linear(self, linear not None : anon_struct3): - string.memcpy(&self._ptr[0].res.linear, linear.getPtr(), sizeof(self._ptr[0].res.linear)) + string.memcpy(&self._ptr[0].res.linear, linear.getPtr(), sizeof(self._ptr[0].res.linear)) @property def pitch2D(self): return self._pitch2D @pitch2D.setter def pitch2D(self, pitch2D not None : anon_struct4): - string.memcpy(&self._ptr[0].res.pitch2D, pitch2D.getPtr(), sizeof(self._ptr[0].res.pitch2D)) + string.memcpy(&self._ptr[0].res.pitch2D, pitch2D.getPtr(), sizeof(self._ptr[0].res.pitch2D)) {{endif}} {{if 'struct cudaResourceDesc' in found_types}} @@ -6814,10 +6814,10 @@ cdef class cudaResourceDesc: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccudart.cudaResourceDesc)) + self._val_ptr = calloc(1, sizeof(cyruntime.cudaResourceDesc)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._res = anon_union0(_ptr=self._ptr) def __dealloc__(self): @@ -6850,7 +6850,7 @@ cdef class cudaResourceDesc: return self._res @res.setter def res(self, res not None : anon_union0): - string.memcpy(&self._ptr[0].res, res.getPtr(), sizeof(self._ptr[0].res)) + string.memcpy(&self._ptr[0].res, res.getPtr(), sizeof(self._ptr[0].res)) {{endif}} {{if 'struct cudaResourceViewDesc' in found_types}} @@ -6887,7 +6887,7 @@ cdef class cudaResourceViewDesc: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -7019,7 +7019,7 @@ cdef class cudaPointerAttributes: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -7065,15 +7065,15 @@ cdef class cudaPointerAttributes: return self._ptr[0].devicePointer @devicePointer.setter def devicePointer(self, devicePointer): - _cdevicePointer = utils.HelperInputVoidPtr(devicePointer) - self._ptr[0].devicePointer = _cdevicePointer.cptr + _cydevicePointer = utils.HelperInputVoidPtr(devicePointer) + self._ptr[0].devicePointer = _cydevicePointer.cptr @property def hostPointer(self): return self._ptr[0].hostPointer @hostPointer.setter def hostPointer(self, hostPointer): - _chostPointer = utils.HelperInputVoidPtr(hostPointer) - self._ptr[0].hostPointer = _chostPointer.cptr + _cyhostPointer = utils.HelperInputVoidPtr(hostPointer) + self._ptr[0].hostPointer = _cyhostPointer.cptr {{endif}} {{if 
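Across these hunks the change is mechanical: Cython-level temporaries gain a `cy` prefix (`_cdevicePointer` becomes `_cydevicePointer`) and the low-level module reference moves from `ccudart` to `cyruntime`, while the Python-facing setters behave as before. As a minimal usage sketch of the `HelperInputVoidPtr`-backed setters shown here (the `cuda.bindings.runtime` import path and the address literal are assumptions for illustration, not part of this diff):

    from cuda.bindings import runtime

    # void*-typed struct fields route through utils.HelperInputVoidPtr,
    # so a plain integer address can be assigned directly.
    attrs = runtime.cudaPointerAttributes()
    attrs.devicePointer = 0xDEADBEEF  # hypothetical raw device address
    attrs.hostPointer = 0             # NULL
    print(hex(attrs.devicePointer))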
'struct cudaFuncAttributes' in found_types}} @@ -7165,7 +7165,7 @@ cdef class cudaFuncAttributes: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -7375,7 +7375,7 @@ cdef class cudaMemLocation: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -7432,7 +7432,7 @@ cdef class cudaMemAccessDesc: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._location = cudaMemLocation(_ptr=&self._ptr[0].location) def __dealloc__(self): @@ -7458,7 +7458,7 @@ cdef class cudaMemAccessDesc: return self._location @location.setter def location(self, location not None : cudaMemLocation): - string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) + string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) @property def flags(self): return cudaMemAccessFlags(self._ptr[0].flags) @@ -7505,7 +7505,7 @@ cdef class cudaMemPoolProps: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._location = cudaMemLocation(_ptr=&self._ptr[0].location) def __dealloc__(self): @@ -7563,14 +7563,14 @@ cdef class cudaMemPoolProps: return self._location @location.setter def location(self, location not None : cudaMemLocation): - string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) + string.memcpy(&self._ptr[0].location, location.getPtr(), sizeof(self._ptr[0].location)) @property def win32SecurityAttributes(self): return self._ptr[0].win32SecurityAttributes @win32SecurityAttributes.setter def win32SecurityAttributes(self, win32SecurityAttributes): - _cwin32SecurityAttributes = utils.HelperInputVoidPtr(win32SecurityAttributes) - self._ptr[0].win32SecurityAttributes = _cwin32SecurityAttributes.cptr + _cywin32SecurityAttributes = utils.HelperInputVoidPtr(win32SecurityAttributes) + self._ptr[0].win32SecurityAttributes = _cywin32SecurityAttributes.cptr @property def maxSize(self): return self._ptr[0].maxSize @@ -7614,7 +7614,7 @@ cdef class cudaMemPoolPtrExportData: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -7674,7 +7674,7 @@ cdef class cudaMemAllocNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._poolProps = cudaMemPoolProps(_ptr=&self._ptr[0].poolProps) def __dealloc__(self): @@ -7713,10 +7713,10 @@ cdef class cudaMemAllocNodeParams: return self._poolProps @poolProps.setter def poolProps(self, poolProps not None : cudaMemPoolProps): - string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) + string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) @property def accessDescs(self): - arrs = [self._ptr[0].accessDescs + x*sizeof(ccudart.cudaMemAccessDesc) for x in range(self._accessDescs_length)] + arrs = [self._ptr[0].accessDescs + x*sizeof(cyruntime.cudaMemAccessDesc) for x in range(self._accessDescs_length)] return [cudaMemAccessDesc(_ptr=arr) for arr in arrs] @accessDescs.setter def accessDescs(self, val): @@ -7727,13 +7727,13 @@ cdef class cudaMemAllocNodeParams: else: if self._accessDescs_length != len(val): free(self._accessDescs) - 
self._accessDescs = calloc(len(val), sizeof(ccudart.cudaMemAccessDesc)) + self._accessDescs = calloc(len(val), sizeof(cyruntime.cudaMemAccessDesc)) if self._accessDescs is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaMemAccessDesc))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) self._accessDescs_length = len(val) self._ptr[0].accessDescs = self._accessDescs for idx in range(len(val)): - string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(ccudart.cudaMemAccessDesc)) + string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(cyruntime.cudaMemAccessDesc)) @property def accessDescCount(self): @@ -7752,8 +7752,8 @@ cdef class cudaMemAllocNodeParams: return self._ptr[0].dptr @dptr.setter def dptr(self, dptr): - _cdptr = utils.HelperInputVoidPtr(dptr) - self._ptr[0].dptr = _cdptr.cptr + _cydptr = utils.HelperInputVoidPtr(dptr) + self._ptr[0].dptr = _cydptr.cptr {{endif}} {{if 'struct cudaMemAllocNodeParamsV2' in found_types}} @@ -7788,7 +7788,7 @@ cdef class cudaMemAllocNodeParamsV2: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._poolProps = cudaMemPoolProps(_ptr=&self._ptr[0].poolProps) def __dealloc__(self): @@ -7827,10 +7827,10 @@ cdef class cudaMemAllocNodeParamsV2: return self._poolProps @poolProps.setter def poolProps(self, poolProps not None : cudaMemPoolProps): - string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) + string.memcpy(&self._ptr[0].poolProps, poolProps.getPtr(), sizeof(self._ptr[0].poolProps)) @property def accessDescs(self): - arrs = [self._ptr[0].accessDescs + x*sizeof(ccudart.cudaMemAccessDesc) for x in range(self._accessDescs_length)] + arrs = [self._ptr[0].accessDescs + x*sizeof(cyruntime.cudaMemAccessDesc) for x in range(self._accessDescs_length)] return [cudaMemAccessDesc(_ptr=arr) for arr in arrs] @accessDescs.setter def accessDescs(self, val): @@ -7841,13 +7841,13 @@ cdef class cudaMemAllocNodeParamsV2: else: if self._accessDescs_length != len(val): free(self._accessDescs) - self._accessDescs = calloc(len(val), sizeof(ccudart.cudaMemAccessDesc)) + self._accessDescs = calloc(len(val), sizeof(cyruntime.cudaMemAccessDesc)) if self._accessDescs is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaMemAccessDesc))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) self._accessDescs_length = len(val) self._ptr[0].accessDescs = self._accessDescs for idx in range(len(val)): - string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(ccudart.cudaMemAccessDesc)) + string.memcpy(&self._accessDescs[idx], (val[idx])._ptr, sizeof(cyruntime.cudaMemAccessDesc)) @property def accessDescCount(self): @@ -7866,8 +7866,8 @@ cdef class cudaMemAllocNodeParamsV2: return self._ptr[0].dptr @dptr.setter def dptr(self, dptr): - _cdptr = utils.HelperInputVoidPtr(dptr) - self._ptr[0].dptr = _cdptr.cptr + _cydptr = utils.HelperInputVoidPtr(dptr) + self._ptr[0].dptr = _cydptr.cptr {{endif}} {{if 'struct cudaMemFreeNodeParams' in found_types}} @@ -7890,7 +7890,7 @@ cdef class cudaMemFreeNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -7912,8 +7912,8 
+7912,8
@@ cdef class cudaMemFreeNodeParams: return self._ptr[0].dptr @dptr.setter def dptr(self, dptr): - _cdptr = utils.HelperInputVoidPtr(dptr) - self._ptr[0].dptr = _cdptr.cptr + _cydptr = utils.HelperInputVoidPtr(dptr) + self._ptr[0].dptr = _cydptr.cptr {{endif}} {{if 'struct CUuuid_st' in found_types}} @@ -7934,7 +7934,7 @@ cdef class CUuuid_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -8189,7 +8189,7 @@ cdef class cudaDeviceProp: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._uuid = cudaUUID_t(_ptr=&self._ptr[0].uuid) def __dealloc__(self): @@ -8598,7 +8598,7 @@ cdef class cudaDeviceProp: return self._uuid @uuid.setter def uuid(self, uuid not None : cudaUUID_t): - string.memcpy(&self._ptr[0].uuid, uuid.getPtr(), sizeof(self._ptr[0].uuid)) + string.memcpy(&self._ptr[0].uuid, uuid.getPtr(), sizeof(self._ptr[0].uuid)) @property def luid(self): return PyBytes_FromStringAndSize(self._ptr[0].luid, 8) @@ -9196,7 +9196,7 @@ cdef class cudaIpcEventHandle_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -9252,7 +9252,7 @@ cdef class cudaIpcMemHandle_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -9306,7 +9306,7 @@ cdef class cudaMemFabricHandle_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -9359,7 +9359,7 @@ cdef class anon_struct5: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -9386,15 +9386,15 @@ cdef class anon_struct5: return self._ptr[0].handle.win32.handle @handle.setter def handle(self, handle): - _chandle = utils.HelperInputVoidPtr(handle) - self._ptr[0].handle.win32.handle = _chandle.cptr + _cyhandle = utils.HelperInputVoidPtr(handle) + self._ptr[0].handle.win32.handle = _cyhandle.cptr @property def name(self): return self._ptr[0].handle.win32.name @name.setter def name(self, name): - _cname = utils.HelperInputVoidPtr(name) - self._ptr[0].handle.win32.name = _cname.cptr + _cyname = utils.HelperInputVoidPtr(name) + self._ptr[0].handle.win32.name = _cyname.cptr {{endif}} {{if 'struct cudaExternalMemoryHandleDesc' in found_types}} @@ -9416,7 +9416,7 @@ cdef class anon_union1: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._win32 = anon_struct5(_ptr=self._ptr) @@ -9453,14 +9453,14 @@ cdef class anon_union1: return self._win32 @win32.setter def win32(self, win32 not None : anon_struct5): - string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) + string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) @property def nvSciBufObject(self): return self._ptr[0].handle.nvSciBufObject @nvSciBufObject.setter def nvSciBufObject(self, nvSciBufObject): - _cnvSciBufObject = utils.HelperInputVoidPtr(nvSciBufObject) - self._ptr[0].handle.nvSciBufObject = _cnvSciBufObject.cptr + _cynvSciBufObject = utils.HelperInputVoidPtr(nvSciBufObject) + self._ptr[0].handle.nvSciBufObject = _cynvSciBufObject.cptr {{endif}} {{if 'struct cudaExternalMemoryHandleDesc' in found_types}} @@ -9487,10 +9487,10 
@@ cdef class cudaExternalMemoryHandleDesc: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccudart.cudaExternalMemoryHandleDesc)) + self._val_ptr = calloc(1, sizeof(cyruntime.cudaExternalMemoryHandleDesc)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._handle = anon_union1(_ptr=self._ptr) def __dealloc__(self): @@ -9531,7 +9531,7 @@ cdef class cudaExternalMemoryHandleDesc: return self._handle @handle.setter def handle(self, handle not None : anon_union1): - string.memcpy(&self._ptr[0].handle, handle.getPtr(), sizeof(self._ptr[0].handle)) + string.memcpy(&self._ptr[0].handle, handle.getPtr(), sizeof(self._ptr[0].handle)) @property def size(self): return self._ptr[0].size @@ -9570,7 +9570,7 @@ cdef class cudaExternalMemoryBufferDesc: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -9645,7 +9645,7 @@ cdef class cudaExternalMemoryMipmappedArrayDesc: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._formatDesc = cudaChannelFormatDesc(_ptr=&self._ptr[0].formatDesc) self._extent = cudaExtent(_ptr=&self._ptr[0].extent) @@ -9690,13 +9690,13 @@ cdef class cudaExternalMemoryMipmappedArrayDesc: return self._formatDesc @formatDesc.setter def formatDesc(self, formatDesc not None : cudaChannelFormatDesc): - string.memcpy(&self._ptr[0].formatDesc, formatDesc.getPtr(), sizeof(self._ptr[0].formatDesc)) + string.memcpy(&self._ptr[0].formatDesc, formatDesc.getPtr(), sizeof(self._ptr[0].formatDesc)) @property def extent(self): return self._extent @extent.setter def extent(self, extent not None : cudaExtent): - string.memcpy(&self._ptr[0].extent, extent.getPtr(), sizeof(self._ptr[0].extent)) + string.memcpy(&self._ptr[0].extent, extent.getPtr(), sizeof(self._ptr[0].extent)) @property def flags(self): return self._ptr[0].flags @@ -9728,7 +9728,7 @@ cdef class anon_struct6: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -9755,15 +9755,15 @@ cdef class anon_struct6: return self._ptr[0].handle.win32.handle @handle.setter def handle(self, handle): - _chandle = utils.HelperInputVoidPtr(handle) - self._ptr[0].handle.win32.handle = _chandle.cptr + _cyhandle = utils.HelperInputVoidPtr(handle) + self._ptr[0].handle.win32.handle = _cyhandle.cptr @property def name(self): return self._ptr[0].handle.win32.name @name.setter def name(self, name): - _cname = utils.HelperInputVoidPtr(name) - self._ptr[0].handle.win32.name = _cname.cptr + _cyname = utils.HelperInputVoidPtr(name) + self._ptr[0].handle.win32.name = _cyname.cptr {{endif}} {{if 'struct cudaExternalSemaphoreHandleDesc' in found_types}} @@ -9785,7 +9785,7 @@ cdef class anon_union2: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._win32 = anon_struct6(_ptr=self._ptr) @@ -9822,14 +9822,14 @@ cdef class anon_union2: return self._win32 @win32.setter def win32(self, win32 not None : anon_struct6): - string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) + string.memcpy(&self._ptr[0].handle.win32, win32.getPtr(), sizeof(self._ptr[0].handle.win32)) @property def nvSciSyncObj(self): return self._ptr[0].handle.nvSciSyncObj @nvSciSyncObj.setter def nvSciSyncObj(self, nvSciSyncObj): - _cnvSciSyncObj = 
utils.HelperInputVoidPtr(nvSciSyncObj) - self._ptr[0].handle.nvSciSyncObj = _cnvSciSyncObj.cptr + _cynvSciSyncObj = utils.HelperInputVoidPtr(nvSciSyncObj) + self._ptr[0].handle.nvSciSyncObj = _cynvSciSyncObj.cptr {{endif}} {{if 'struct cudaExternalSemaphoreHandleDesc' in found_types}} @@ -9854,10 +9854,10 @@ cdef class cudaExternalSemaphoreHandleDesc: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccudart.cudaExternalSemaphoreHandleDesc)) + self._val_ptr = calloc(1, sizeof(cyruntime.cudaExternalSemaphoreHandleDesc)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._handle = anon_union2(_ptr=self._ptr) def __dealloc__(self): @@ -9894,7 +9894,7 @@ cdef class cudaExternalSemaphoreHandleDesc: return self._handle @handle.setter def handle(self, handle not None : anon_union2): - string.memcpy(&self._ptr[0].handle, handle.getPtr(), sizeof(self._ptr[0].handle)) + string.memcpy(&self._ptr[0].handle, handle.getPtr(), sizeof(self._ptr[0].handle)) @property def flags(self): return self._ptr[0].flags @@ -9918,7 +9918,7 @@ cdef class anon_struct13: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -9961,7 +9961,7 @@ cdef class anon_union5: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -9988,8 +9988,8 @@ cdef class anon_union5: return self._ptr[0].params.nvSciSync.fence @fence.setter def fence(self, fence): - _cfence = utils.HelperInputVoidPtr(fence) - self._ptr[0].params.nvSciSync.fence = _cfence.cptr + _cyfence = utils.HelperInputVoidPtr(fence) + self._ptr[0].params.nvSciSync.fence = _cyfence.cptr @property def reserved(self): return self._ptr[0].params.nvSciSync.reserved @@ -10013,7 +10013,7 @@ cdef class anon_struct14: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -10060,7 +10060,7 @@ cdef class anon_struct15: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._fence = anon_struct13(_ptr=self._ptr) @@ -10097,19 +10097,19 @@ cdef class anon_struct15: return self._fence @fence.setter def fence(self, fence not None : anon_struct13): - string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) + string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) @property def nvSciSync(self): return self._nvSciSync @nvSciSync.setter def nvSciSync(self, nvSciSync not None : anon_union5): - string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) + string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) @property def keyedMutex(self): return self._keyedMutex @keyedMutex.setter def keyedMutex(self, keyedMutex not None : anon_struct14): - string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) + string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) @property def reserved(self): return self._ptr[0].params.reserved @@ -10149,7 +10149,7 @@ cdef class cudaExternalSemaphoreSignalParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._params = anon_struct15(_ptr=self._ptr) def 
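The `anon_struct*`/`anon_union*` helpers above wrap nested members in place rather than copying them, so deeply nested fields stay reachable through plain attribute chains. A sketch under the same assumed import path, using NULL placeholders rather than real OS handles:

    from cuda.bindings import runtime

    desc = runtime.cudaExternalSemaphoreHandleDesc()
    # anon_union2 exposes the nested handle union in place:
    desc.handle.win32.handle = 0  # void* slot; a real Win32 HANDLE would go here
    desc.handle.win32.name = 0    # likewise a void* (address of a wide-char name)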
__dealloc__(self): @@ -10179,7 +10179,7 @@ cdef class cudaExternalSemaphoreSignalParams: return self._params @params.setter def params(self, params not None : anon_struct15): - string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) + string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) @property def flags(self): return self._ptr[0].flags @@ -10209,7 +10209,7 @@ cdef class anon_struct16: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -10252,7 +10252,7 @@ cdef class anon_union6: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -10279,8 +10279,8 @@ cdef class anon_union6: return self._ptr[0].params.nvSciSync.fence @fence.setter def fence(self, fence): - _cfence = utils.HelperInputVoidPtr(fence) - self._ptr[0].params.nvSciSync.fence = _cfence.cptr + _cyfence = utils.HelperInputVoidPtr(fence) + self._ptr[0].params.nvSciSync.fence = _cyfence.cptr @property def reserved(self): return self._ptr[0].params.nvSciSync.reserved @@ -10306,7 +10306,7 @@ cdef class anon_struct17: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -10363,7 +10363,7 @@ cdef class anon_struct18: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._fence = anon_struct16(_ptr=self._ptr) @@ -10400,19 +10400,19 @@ cdef class anon_struct18: return self._fence @fence.setter def fence(self, fence not None : anon_struct16): - string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) + string.memcpy(&self._ptr[0].params.fence, fence.getPtr(), sizeof(self._ptr[0].params.fence)) @property def nvSciSync(self): return self._nvSciSync @nvSciSync.setter def nvSciSync(self, nvSciSync not None : anon_union6): - string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) + string.memcpy(&self._ptr[0].params.nvSciSync, nvSciSync.getPtr(), sizeof(self._ptr[0].params.nvSciSync)) @property def keyedMutex(self): return self._keyedMutex @keyedMutex.setter def keyedMutex(self, keyedMutex not None : anon_struct17): - string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) + string.memcpy(&self._ptr[0].params.keyedMutex, keyedMutex.getPtr(), sizeof(self._ptr[0].params.keyedMutex)) @property def reserved(self): return self._ptr[0].params.reserved @@ -10452,7 +10452,7 @@ cdef class cudaExternalSemaphoreWaitParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._params = anon_struct18(_ptr=self._ptr) def __dealloc__(self): @@ -10482,7 +10482,7 @@ cdef class cudaExternalSemaphoreWaitParams: return self._params @params.setter def params(self, params not None : anon_struct18): - string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) + string.memcpy(&self._ptr[0].params, params.getPtr(), sizeof(self._ptr[0].params)) @property def flags(self): return self._ptr[0].flags @@ -10527,7 +10527,7 @@ cdef class cudaKernelNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._gridDim = dim3(_ptr=&self._ptr[0].gridDim) self._blockDim = dim3(_ptr=&self._ptr[0].blockDim) @@ -10570,20 +10570,20 @@ cdef class 
cudaKernelNodeParams: return self._ptr[0].func @func.setter def func(self, func): - _cfunc = utils.HelperInputVoidPtr(func) - self._ptr[0].func = _cfunc.cptr + _cyfunc = utils.HelperInputVoidPtr(func) + self._ptr[0].func = _cyfunc.cptr @property def gridDim(self): return self._gridDim @gridDim.setter def gridDim(self, gridDim not None : dim3): - string.memcpy(&self._ptr[0].gridDim, gridDim.getPtr(), sizeof(self._ptr[0].gridDim)) + string.memcpy(&self._ptr[0].gridDim, gridDim.getPtr(), sizeof(self._ptr[0].gridDim)) @property def blockDim(self): return self._blockDim @blockDim.setter def blockDim(self, blockDim not None : dim3): - string.memcpy(&self._ptr[0].blockDim, blockDim.getPtr(), sizeof(self._ptr[0].blockDim)) + string.memcpy(&self._ptr[0].blockDim, blockDim.getPtr(), sizeof(self._ptr[0].blockDim)) @property def sharedMemBytes(self): return self._ptr[0].sharedMemBytes @@ -10595,8 +10595,8 @@ cdef class cudaKernelNodeParams: return self._ptr[0].kernelParams @kernelParams.setter def kernelParams(self, kernelParams): - self._ckernelParams = utils.HelperKernelParams(kernelParams) - self._ptr[0].kernelParams = self._ckernelParams.ckernelParams + self._cykernelParams = utils.HelperKernelParams(kernelParams) + self._ptr[0].kernelParams = self._cykernelParams.ckernelParams @property def extra(self): return self._ptr[0].extra @@ -10635,7 +10635,7 @@ cdef class cudaKernelNodeParamsV2: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._gridDim = dim3(_ptr=&self._ptr[0].gridDim) self._blockDim = dim3(_ptr=&self._ptr[0].blockDim) @@ -10678,20 +10678,20 @@ cdef class cudaKernelNodeParamsV2: return self._ptr[0].func @func.setter def func(self, func): - _cfunc = utils.HelperInputVoidPtr(func) - self._ptr[0].func = _cfunc.cptr + _cyfunc = utils.HelperInputVoidPtr(func) + self._ptr[0].func = _cyfunc.cptr @property def gridDim(self): return self._gridDim @gridDim.setter def gridDim(self, gridDim not None : dim3): - string.memcpy(&self._ptr[0].gridDim, gridDim.getPtr(), sizeof(self._ptr[0].gridDim)) + string.memcpy(&self._ptr[0].gridDim, gridDim.getPtr(), sizeof(self._ptr[0].gridDim)) @property def blockDim(self): return self._blockDim @blockDim.setter def blockDim(self, blockDim not None : dim3): - string.memcpy(&self._ptr[0].blockDim, blockDim.getPtr(), sizeof(self._ptr[0].blockDim)) + string.memcpy(&self._ptr[0].blockDim, blockDim.getPtr(), sizeof(self._ptr[0].blockDim)) @property def sharedMemBytes(self): return self._ptr[0].sharedMemBytes @@ -10703,8 +10703,8 @@ cdef class cudaKernelNodeParamsV2: return self._ptr[0].kernelParams @kernelParams.setter def kernelParams(self, kernelParams): - self._ckernelParams = utils.HelperKernelParams(kernelParams) - self._ptr[0].kernelParams = self._ckernelParams.ckernelParams + self._cykernelParams = utils.HelperKernelParams(kernelParams) + self._ptr[0].kernelParams = self._cykernelParams.ckernelParams @property def extra(self): return self._ptr[0].extra @@ -10738,7 +10738,7 @@ cdef class cudaExternalSemaphoreSignalNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -10768,7 +10768,7 @@ cdef class cudaExternalSemaphoreSignalNodeParams: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccudart.cudaExternalSemaphore_t) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cyruntime.cudaExternalSemaphore_t) for 
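Struct-typed members such as `gridDim` are copied by value through `string.memcpy`, not aliased, so the source object can be reused or mutated afterwards without touching the stored parameters. A minimal sketch, assuming the same import path as above:

    from cuda.bindings import runtime

    params = runtime.cudaKernelNodeParams()
    grid = runtime.dim3()
    grid.x, grid.y, grid.z = 32, 1, 1
    params.gridDim = grid    # snapshot taken via string.memcpy
    grid.x = 999             # does not affect the stored gridDim
    print(params.gridDim.x)  # -> 32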
x in range(self._extSemArray_length)] return [cudaExternalSemaphore_t(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -10779,9 +10779,9 @@ cdef class cudaExternalSemaphoreSignalNodeParams: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphore_t)) + self._extSemArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphore_t)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphore_t))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -10789,7 +10789,7 @@ cdef class cudaExternalSemaphoreSignalNodeParams: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccudart.cudaExternalSemaphoreSignalParams) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cyruntime.cudaExternalSemaphoreSignalParams) for x in range(self._paramsArray_length)] return [cudaExternalSemaphoreSignalParams(_ptr=arr) for arr in arrs] @paramsArray.setter def paramsArray(self, val): @@ -10800,13 +10800,13 @@ cdef class cudaExternalSemaphoreSignalNodeParams: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphoreSignalParams)) + self._paramsArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphoreSignalParams))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccudart.cudaExternalSemaphoreSignalParams)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) @property def numExtSems(self): @@ -10841,7 +10841,7 @@ cdef class cudaExternalSemaphoreSignalNodeParamsV2: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -10871,7 +10871,7 @@ cdef class cudaExternalSemaphoreSignalNodeParamsV2: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccudart.cudaExternalSemaphore_t) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cyruntime.cudaExternalSemaphore_t) for x in range(self._extSemArray_length)] return [cudaExternalSemaphore_t(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -10882,9 +10882,9 @@ cdef class cudaExternalSemaphoreSignalNodeParamsV2: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphore_t)) + self._extSemArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphore_t)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphore_t))) + raise MemoryError('Failed 
to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -10892,7 +10892,7 @@ cdef class cudaExternalSemaphoreSignalNodeParamsV2: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccudart.cudaExternalSemaphoreSignalParams) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cyruntime.cudaExternalSemaphoreSignalParams) for x in range(self._paramsArray_length)] return [cudaExternalSemaphoreSignalParams(_ptr=arr) for arr in arrs] @paramsArray.setter def paramsArray(self, val): @@ -10903,13 +10903,13 @@ cdef class cudaExternalSemaphoreSignalNodeParamsV2: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphoreSignalParams)) + self._paramsArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphoreSignalParams))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccudart.cudaExternalSemaphoreSignalParams)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) @property def numExtSems(self): @@ -10944,7 +10944,7 @@ cdef class cudaExternalSemaphoreWaitNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -10974,7 +10974,7 @@ cdef class cudaExternalSemaphoreWaitNodeParams: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccudart.cudaExternalSemaphore_t) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cyruntime.cudaExternalSemaphore_t) for x in range(self._extSemArray_length)] return [cudaExternalSemaphore_t(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -10985,9 +10985,9 @@ cdef class cudaExternalSemaphoreWaitNodeParams: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphore_t)) + self._extSemArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphore_t)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphore_t))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -10995,7 +10995,7 @@ cdef class cudaExternalSemaphoreWaitNodeParams: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccudart.cudaExternalSemaphoreWaitParams) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cyruntime.cudaExternalSemaphoreWaitParams) for x in range(self._paramsArray_length)] return [cudaExternalSemaphoreWaitParams(_ptr=arr) for arr in arrs] 
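The array-valued properties (`extSemArray`, `paramsArray`) own a `calloc`ed C buffer: assignment frees any previous buffer, allocates `len(val)` slots (raising `MemoryError` on failure), and copies each element in; the getter rebuilds Python wrappers over that storage. A sketch with default-constructed (NULL) semaphore handles, purely illustrative:

    from cuda.bindings import runtime

    node = runtime.cudaExternalSemaphoreWaitNodeParams()
    sems = [runtime.cudaExternalSemaphore_t(), runtime.cudaExternalSemaphore_t()]
    node.extSemArray = sems       # calloc + per-element copy into owned storage
    node.numExtSems = len(sems)   # the count is tracked separately by the struct
    print(len(node.extSemArray))  # -> 2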
@paramsArray.setter def paramsArray(self, val): @@ -11006,13 +11006,13 @@ cdef class cudaExternalSemaphoreWaitNodeParams: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphoreWaitParams)) + self._paramsArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphoreWaitParams))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccudart.cudaExternalSemaphoreWaitParams)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) @property def numExtSems(self): @@ -11047,7 +11047,7 @@ cdef class cudaExternalSemaphoreWaitNodeParamsV2: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -11077,7 +11077,7 @@ cdef class cudaExternalSemaphoreWaitNodeParamsV2: return '' @property def extSemArray(self): - arrs = [self._ptr[0].extSemArray + x*sizeof(ccudart.cudaExternalSemaphore_t) for x in range(self._extSemArray_length)] + arrs = [self._ptr[0].extSemArray + x*sizeof(cyruntime.cudaExternalSemaphore_t) for x in range(self._extSemArray_length)] return [cudaExternalSemaphore_t(_ptr=arr) for arr in arrs] @extSemArray.setter def extSemArray(self, val): @@ -11088,9 +11088,9 @@ cdef class cudaExternalSemaphoreWaitNodeParamsV2: else: if self._extSemArray_length != len(val): free(self._extSemArray) - self._extSemArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphore_t)) + self._extSemArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphore_t)) if self._extSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphore_t))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) self._extSemArray_length = len(val) self._ptr[0].extSemArray = self._extSemArray for idx in range(len(val)): @@ -11098,7 +11098,7 @@ cdef class cudaExternalSemaphoreWaitNodeParamsV2: @property def paramsArray(self): - arrs = [self._ptr[0].paramsArray + x*sizeof(ccudart.cudaExternalSemaphoreWaitParams) for x in range(self._paramsArray_length)] + arrs = [self._ptr[0].paramsArray + x*sizeof(cyruntime.cudaExternalSemaphoreWaitParams) for x in range(self._paramsArray_length)] return [cudaExternalSemaphoreWaitParams(_ptr=arr) for arr in arrs] @paramsArray.setter def paramsArray(self, val): @@ -11109,13 +11109,13 @@ cdef class cudaExternalSemaphoreWaitNodeParamsV2: else: if self._paramsArray_length != len(val): free(self._paramsArray) - self._paramsArray = calloc(len(val), sizeof(ccudart.cudaExternalSemaphoreWaitParams)) + self._paramsArray = calloc(len(val), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) if self._paramsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphoreWaitParams))) + raise MemoryError('Failed to allocate length x size memory: ' + str(len(val)) + 'x' + 
str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) self._paramsArray_length = len(val) self._ptr[0].paramsArray = self._paramsArray for idx in range(len(val)): - string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(ccudart.cudaExternalSemaphoreWaitParams)) + string.memcpy(&self._paramsArray[idx], (val[idx])._ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) @property def numExtSems(self): @@ -11161,7 +11161,7 @@ cdef class cudaConditionalNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._handle = cudaGraphConditionalHandle(_ptr=&self._ptr[0].handle) def __dealloc__(self): @@ -11195,16 +11195,16 @@ cdef class cudaConditionalNodeParams: return self._handle @handle.setter def handle(self, handle): - cdef ccudart.cudaGraphConditionalHandle chandle + cdef cyruntime.cudaGraphConditionalHandle cyhandle if handle is None: - chandle = 0 + cyhandle = 0 elif isinstance(handle, (cudaGraphConditionalHandle)): phandle = int(handle) - chandle = phandle + cyhandle = phandle else: phandle = int(cudaGraphConditionalHandle(handle)) - chandle = phandle - self._handle._ptr[0] = chandle + cyhandle = phandle + self._handle._ptr[0] = cyhandle @property def type(self): @@ -11220,7 +11220,7 @@ cdef class cudaConditionalNodeParams: self._ptr[0].size = size @property def phGraph_out(self): - arrs = [self._ptr[0].phGraph_out + x*sizeof(ccudart.cudaGraph_t) for x in range(self.size)] + arrs = [self._ptr[0].phGraph_out + x*sizeof(cyruntime.cudaGraph_t) for x in range(self.size)] return [cudaGraph_t(_ptr=arr) for arr in arrs] {{endif}} {{if 'struct cudaChildGraphNodeParams' in found_types}} @@ -11245,7 +11245,7 @@ cdef class cudaChildGraphNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._graph = cudaGraph_t(_ptr=&self._ptr[0].graph) def __dealloc__(self): @@ -11267,16 +11267,16 @@ cdef class cudaChildGraphNodeParams: return self._graph @graph.setter def graph(self, graph): - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - self._graph._ptr[0] = cgraph + cygraph = pgraph + self._graph._ptr[0] = cygraph {{endif}} {{if 'struct cudaEventRecordNodeParams' in found_types}} @@ -11299,7 +11299,7 @@ cdef class cudaEventRecordNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._event = cudaEvent_t(_ptr=&self._ptr[0].event) def __dealloc__(self): @@ -11321,16 +11321,16 @@ cdef class cudaEventRecordNodeParams: return self._event @event.setter def event(self, event): - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent {{endif}} {{if 'struct cudaEventWaitNodeParams' in found_types}} @@ -11353,7 +11353,7 @@ cdef class cudaEventWaitNodeParams: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + 
self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._event = cudaEvent_t(_ptr=&self._ptr[0].event) def __dealloc__(self): @@ -11375,16 +11375,16 @@ cdef class cudaEventWaitNodeParams: return self._event @event.setter def event(self, event): - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent {{endif}} {{if 'struct cudaGraphNodeParams' in found_types}} @@ -11435,10 +11435,10 @@ cdef class cudaGraphNodeParams: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccudart.cudaGraphNodeParams)) + self._val_ptr = calloc(1, sizeof(cyruntime.cudaGraphNodeParams)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._kernel = cudaKernelNodeParamsV2(_ptr=&self._ptr[0].kernel) self._memcpy = cudaMemcpyNodeParams(_ptr=&self._ptr[0].memcpy) @@ -11550,73 +11550,73 @@ cdef class cudaGraphNodeParams: return self._kernel @kernel.setter def kernel(self, kernel not None : cudaKernelNodeParamsV2): - string.memcpy(&self._ptr[0].kernel, kernel.getPtr(), sizeof(self._ptr[0].kernel)) + string.memcpy(&self._ptr[0].kernel, kernel.getPtr(), sizeof(self._ptr[0].kernel)) @property def memcpy(self): return self._memcpy @memcpy.setter def memcpy(self, memcpy not None : cudaMemcpyNodeParams): - string.memcpy(&self._ptr[0].memcpy, memcpy.getPtr(), sizeof(self._ptr[0].memcpy)) + string.memcpy(&self._ptr[0].memcpy, memcpy.getPtr(), sizeof(self._ptr[0].memcpy)) @property def memset(self): return self._memset @memset.setter def memset(self, memset not None : cudaMemsetParamsV2): - string.memcpy(&self._ptr[0].memset, memset.getPtr(), sizeof(self._ptr[0].memset)) + string.memcpy(&self._ptr[0].memset, memset.getPtr(), sizeof(self._ptr[0].memset)) @property def host(self): return self._host @host.setter def host(self, host not None : cudaHostNodeParamsV2): - string.memcpy(&self._ptr[0].host, host.getPtr(), sizeof(self._ptr[0].host)) + string.memcpy(&self._ptr[0].host, host.getPtr(), sizeof(self._ptr[0].host)) @property def graph(self): return self._graph @graph.setter def graph(self, graph not None : cudaChildGraphNodeParams): - string.memcpy(&self._ptr[0].graph, graph.getPtr(), sizeof(self._ptr[0].graph)) + string.memcpy(&self._ptr[0].graph, graph.getPtr(), sizeof(self._ptr[0].graph)) @property def eventWait(self): return self._eventWait @eventWait.setter def eventWait(self, eventWait not None : cudaEventWaitNodeParams): - string.memcpy(&self._ptr[0].eventWait, eventWait.getPtr(), sizeof(self._ptr[0].eventWait)) + string.memcpy(&self._ptr[0].eventWait, eventWait.getPtr(), sizeof(self._ptr[0].eventWait)) @property def eventRecord(self): return self._eventRecord @eventRecord.setter def eventRecord(self, eventRecord not None : cudaEventRecordNodeParams): - string.memcpy(&self._ptr[0].eventRecord, eventRecord.getPtr(), sizeof(self._ptr[0].eventRecord)) + string.memcpy(&self._ptr[0].eventRecord, eventRecord.getPtr(), sizeof(self._ptr[0].eventRecord)) @property def extSemSignal(self): return self._extSemSignal @extSemSignal.setter def extSemSignal(self, extSemSignal not None : cudaExternalSemaphoreSignalNodeParamsV2): - string.memcpy(&self._ptr[0].extSemSignal, 
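These setters all follow one coercion recipe: `None` becomes a NULL handle, wrapper types (now including the `driver`-level equivalents that replace the old `cuda.`-qualified checks) pass straight through `int()`, and anything else is first wrapped in the runtime handle type. A sketch; the raw handle value is purely illustrative:

    from cuda.bindings import runtime, driver

    params = runtime.cudaEventRecordNodeParams()
    params.event = None              # stored as a NULL cudaEvent_t
    params.event = driver.CUevent()  # driver handles now pass the isinstance check
    params.event = 0x1234            # hypothetical raw handle, coerced via cudaEvent_t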
extSemSignal.getPtr(), sizeof(self._ptr[0].extSemSignal)) + string.memcpy(&self._ptr[0].extSemSignal, extSemSignal.getPtr(), sizeof(self._ptr[0].extSemSignal)) @property def extSemWait(self): return self._extSemWait @extSemWait.setter def extSemWait(self, extSemWait not None : cudaExternalSemaphoreWaitNodeParamsV2): - string.memcpy(&self._ptr[0].extSemWait, extSemWait.getPtr(), sizeof(self._ptr[0].extSemWait)) + string.memcpy(&self._ptr[0].extSemWait, extSemWait.getPtr(), sizeof(self._ptr[0].extSemWait)) @property def alloc(self): return self._alloc @alloc.setter def alloc(self, alloc not None : cudaMemAllocNodeParamsV2): - string.memcpy(&self._ptr[0].alloc, alloc.getPtr(), sizeof(self._ptr[0].alloc)) + string.memcpy(&self._ptr[0].alloc, alloc.getPtr(), sizeof(self._ptr[0].alloc)) @property def free(self): return self._free @free.setter def free(self, free not None : cudaMemFreeNodeParams): - string.memcpy(&self._ptr[0].free, free.getPtr(), sizeof(self._ptr[0].free)) + string.memcpy(&self._ptr[0].free, free.getPtr(), sizeof(self._ptr[0].free)) @property def conditional(self): return self._conditional @conditional.setter def conditional(self, conditional not None : cudaConditionalNodeParams): - string.memcpy(&self._ptr[0].conditional, conditional.getPtr(), sizeof(self._ptr[0].conditional)) + string.memcpy(&self._ptr[0].conditional, conditional.getPtr(), sizeof(self._ptr[0].conditional)) @property def reserved2(self): return self._ptr[0].reserved2 @@ -11670,7 +11670,7 @@ cdef class cudaGraphEdgeData_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -11754,7 +11754,7 @@ cdef class cudaGraphInstantiateParams_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._uploadStream = cudaStream_t(_ptr=&self._ptr[0].uploadStream) self._errNode_out = cudaGraphNode_t(_ptr=&self._ptr[0].errNode_out) @@ -11795,31 +11795,31 @@ cdef class cudaGraphInstantiateParams_st: return self._uploadStream @uploadStream.setter def uploadStream(self, uploadStream): - cdef ccudart.cudaStream_t cuploadStream + cdef cyruntime.cudaStream_t cyuploadStream if uploadStream is None: - cuploadStream = 0 - elif isinstance(uploadStream, (cudaStream_t,cuda.CUstream)): + cyuploadStream = 0 + elif isinstance(uploadStream, (cudaStream_t,driver.CUstream)): puploadStream = int(uploadStream) - cuploadStream = puploadStream + cyuploadStream = puploadStream else: puploadStream = int(cudaStream_t(uploadStream)) - cuploadStream = puploadStream - self._uploadStream._ptr[0] = cuploadStream + cyuploadStream = puploadStream + self._uploadStream._ptr[0] = cyuploadStream @property def errNode_out(self): return self._errNode_out @errNode_out.setter def errNode_out(self, errNode_out): - cdef ccudart.cudaGraphNode_t cerrNode_out + cdef cyruntime.cudaGraphNode_t cyerrNode_out if errNode_out is None: - cerrNode_out = 0 - elif isinstance(errNode_out, (cudaGraphNode_t,cuda.CUgraphNode)): + cyerrNode_out = 0 + elif isinstance(errNode_out, (cudaGraphNode_t,driver.CUgraphNode)): perrNode_out = int(errNode_out) - cerrNode_out = perrNode_out + cyerrNode_out = perrNode_out else: perrNode_out = int(cudaGraphNode_t(errNode_out)) - cerrNode_out = perrNode_out - self._errNode_out._ptr[0] = cerrNode_out + cyerrNode_out = perrNode_out + self._errNode_out._ptr[0] = cyerrNode_out @property def result_out(self): return cudaGraphInstantiateResult(self._ptr[0].result_out) @@ -11855,7 +11855,7 
+11855,7
@@ cdef class cudaGraphExecUpdateResultInfo_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._errorNode = cudaGraphNode_t(_ptr=&self._ptr[0].errorNode) self._errorFromNode = cudaGraphNode_t(_ptr=&self._ptr[0].errorFromNode) @@ -11892,31 +11892,31 @@ cdef class cudaGraphExecUpdateResultInfo_st: return self._errorNode @errorNode.setter def errorNode(self, errorNode): - cdef ccudart.cudaGraphNode_t cerrorNode + cdef cyruntime.cudaGraphNode_t cyerrorNode if errorNode is None: - cerrorNode = 0 - elif isinstance(errorNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyerrorNode = 0 + elif isinstance(errorNode, (cudaGraphNode_t,driver.CUgraphNode)): perrorNode = int(errorNode) - cerrorNode = perrorNode + cyerrorNode = perrorNode else: perrorNode = int(cudaGraphNode_t(errorNode)) - cerrorNode = perrorNode - self._errorNode._ptr[0] = cerrorNode + cyerrorNode = perrorNode + self._errorNode._ptr[0] = cyerrorNode @property def errorFromNode(self): return self._errorFromNode @errorFromNode.setter def errorFromNode(self, errorFromNode): - cdef ccudart.cudaGraphNode_t cerrorFromNode + cdef cyruntime.cudaGraphNode_t cyerrorFromNode if errorFromNode is None: - cerrorFromNode = 0 - elif isinstance(errorFromNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyerrorFromNode = 0 + elif isinstance(errorFromNode, (cudaGraphNode_t,driver.CUgraphNode)): perrorFromNode = int(errorFromNode) - cerrorFromNode = perrorFromNode + cyerrorFromNode = perrorFromNode else: perrorFromNode = int(cudaGraphNode_t(errorFromNode)) - cerrorFromNode = perrorFromNode - self._errorFromNode._ptr[0] = cerrorFromNode + cyerrorFromNode = perrorFromNode + self._errorFromNode._ptr[0] = cyerrorFromNode {{endif}} {{if 'struct cudaGraphKernelNodeUpdate' in found_types}} @@ -11938,7 +11938,7 @@ cdef class anon_struct19: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -11969,8 +11969,8 @@ cdef class anon_struct19: return self._ptr[0].updateData.param.pValue @pValue.setter def pValue(self, pValue): - _cpValue = utils.HelperInputVoidPtr(pValue) - self._ptr[0].updateData.param.pValue = _cpValue.cptr + _cypValue = utils.HelperInputVoidPtr(pValue) + self._ptr[0].updateData.param.pValue = _cypValue.cptr @property def offset(self): return self._ptr[0].updateData.param.offset @@ -12004,7 +12004,7 @@ cdef class anon_union8: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._gridDim = dim3(_ptr=&self._ptr[0].updateData.gridDim) @@ -12036,13 +12036,13 @@ cdef class anon_union8: return self._gridDim @gridDim.setter def gridDim(self, gridDim not None : dim3): - string.memcpy(&self._ptr[0].updateData.gridDim, gridDim.getPtr(), sizeof(self._ptr[0].updateData.gridDim)) + string.memcpy(&self._ptr[0].updateData.gridDim, gridDim.getPtr(), sizeof(self._ptr[0].updateData.gridDim)) @property def param(self): return self._param @param.setter def param(self, param not None : anon_struct19): - string.memcpy(&self._ptr[0].updateData.param, param.getPtr(), sizeof(self._ptr[0].updateData.param)) + string.memcpy(&self._ptr[0].updateData.param, param.getPtr(), sizeof(self._ptr[0].updateData.param)) @property def isEnabled(self): return self._ptr[0].updateData.isEnabled @@ -12075,10 +12075,10 @@ cdef class cudaGraphKernelNodeUpdate: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccudart.cudaGraphKernelNodeUpdate)) + 
self._val_ptr = calloc(1, sizeof(cyruntime.cudaGraphKernelNodeUpdate)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._node = cudaGraphDeviceNode_t(_ptr=&self._ptr[0].node) self._updateData = anon_union8(_ptr=self._ptr) @@ -12110,16 +12110,16 @@ cdef class cudaGraphKernelNodeUpdate: return self._node @node.setter def node(self, node): - cdef ccudart.cudaGraphDeviceNode_t cnode + cdef cyruntime.cudaGraphDeviceNode_t cynode if node is None: - cnode = 0 + cynode = 0 elif isinstance(node, (cudaGraphDeviceNode_t,)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphDeviceNode_t(node)) - cnode = pnode - self._node._ptr[0] = cnode + cynode = pnode + self._node._ptr[0] = cynode @property def field(self): return cudaGraphKernelNodeField(self._ptr[0].field) @@ -12131,7 +12131,7 @@ cdef class cudaGraphKernelNodeUpdate: return self._updateData @updateData.setter def updateData(self, updateData not None : anon_union8): - string.memcpy(&self._ptr[0].updateData, updateData.getPtr(), sizeof(self._ptr[0].updateData)) + string.memcpy(&self._ptr[0].updateData, updateData.getPtr(), sizeof(self._ptr[0].updateData)) {{endif}} {{if 'struct cudaLaunchMemSyncDomainMap_st' in found_types}} @@ -12162,7 +12162,7 @@ cdef class cudaLaunchMemSyncDomainMap_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -12216,7 +12216,7 @@ cdef class anon_struct20: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -12281,7 +12281,7 @@ cdef class anon_struct21: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._event = cudaEvent_t(_ptr=&self._ptr[0].programmaticEvent.event) @@ -12312,16 +12312,16 @@ cdef class anon_struct21: return self._event @event.setter def event(self, event): - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent @property def flags(self): return self._ptr[0].programmaticEvent.flags @@ -12353,7 +12353,7 @@ cdef class anon_struct22: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._event = cudaEvent_t(_ptr=&self._ptr[0].launchCompletionEvent.event) @@ -12380,16 +12380,16 @@ cdef class anon_struct22: return self._event @event.setter def event(self, event): - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - self._event._ptr[0] = cevent + cyevent = pevent + self._event._ptr[0] = cyevent @property def flags(self): return self._ptr[0].launchCompletionEvent.flags @@ -12415,7 +12415,7 @@ cdef class anon_struct23: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._devNode = 
cudaGraphDeviceNode_t(_ptr=&self._ptr[0].deviceUpdatableKernelNode.devNode) @@ -12448,16 +12448,16 @@ cdef class anon_struct23: return self._devNode @devNode.setter def devNode(self, devNode): - cdef ccudart.cudaGraphDeviceNode_t cdevNode + cdef cyruntime.cudaGraphDeviceNode_t cydevNode if devNode is None: - cdevNode = 0 + cydevNode = 0 elif isinstance(devNode, (cudaGraphDeviceNode_t,)): pdevNode = int(devNode) - cdevNode = pdevNode + cydevNode = pdevNode else: pdevNode = int(cudaGraphDeviceNode_t(devNode)) - cdevNode = pdevNode - self._devNode._ptr[0] = cdevNode + cydevNode = pdevNode + self._devNode._ptr[0] = cydevNode {{endif}} {{if 'union cudaLaunchAttributeValue' in found_types}} @@ -12536,7 +12536,7 @@ cdef class cudaLaunchAttributeValue: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._accessPolicyWindow = cudaAccessPolicyWindow(_ptr=&self._ptr[0].accessPolicyWindow) self._clusterDim = anon_struct20(_ptr=self._ptr) @@ -12632,7 +12632,7 @@ cdef class cudaLaunchAttributeValue: return self._accessPolicyWindow @accessPolicyWindow.setter def accessPolicyWindow(self, accessPolicyWindow not None : cudaAccessPolicyWindow): - string.memcpy(&self._ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._ptr[0].accessPolicyWindow)) + string.memcpy(&self._ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._ptr[0].accessPolicyWindow)) @property def cooperative(self): return self._ptr[0].cooperative @@ -12650,7 +12650,7 @@ cdef class cudaLaunchAttributeValue: return self._clusterDim @clusterDim.setter def clusterDim(self, clusterDim not None : anon_struct20): - string.memcpy(&self._ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._ptr[0].clusterDim)) + string.memcpy(&self._ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._ptr[0].clusterDim)) @property def clusterSchedulingPolicyPreference(self): return cudaClusterSchedulingPolicy(self._ptr[0].clusterSchedulingPolicyPreference) @@ -12668,7 +12668,7 @@ cdef class cudaLaunchAttributeValue: return self._programmaticEvent @programmaticEvent.setter def programmaticEvent(self, programmaticEvent not None : anon_struct21): - string.memcpy(&self._ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._ptr[0].programmaticEvent)) + string.memcpy(&self._ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._ptr[0].programmaticEvent)) @property def priority(self): return self._ptr[0].priority @@ -12680,7 +12680,7 @@ cdef class cudaLaunchAttributeValue: return self._memSyncDomainMap @memSyncDomainMap.setter def memSyncDomainMap(self, memSyncDomainMap not None : cudaLaunchMemSyncDomainMap): - string.memcpy(&self._ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._ptr[0].memSyncDomainMap)) + string.memcpy(&self._ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._ptr[0].memSyncDomainMap)) @property def memSyncDomain(self): return cudaLaunchMemSyncDomain(self._ptr[0].memSyncDomain) @@ -12692,13 +12692,13 @@ cdef class cudaLaunchAttributeValue: return self._launchCompletionEvent @launchCompletionEvent.setter def launchCompletionEvent(self, launchCompletionEvent not None : anon_struct22): - string.memcpy(&self._ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._ptr[0].launchCompletionEvent)) + string.memcpy(&self._ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._ptr[0].launchCompletionEvent)) @property def deviceUpdatableKernelNode(self): return 
self._deviceUpdatableKernelNode @deviceUpdatableKernelNode.setter def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct23): - string.memcpy(&self._ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._ptr[0].deviceUpdatableKernelNode)) + string.memcpy(&self._ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._ptr[0].deviceUpdatableKernelNode)) @property def sharedMemCarveout(self): return self._ptr[0].sharedMemCarveout @@ -12729,7 +12729,7 @@ cdef class cudaLaunchAttribute_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._val = cudaLaunchAttributeValue(_ptr=&self._ptr[0].val) def __dealloc__(self): @@ -12761,7 +12761,7 @@ cdef class cudaLaunchAttribute_st: return self._val @val.setter def val(self, val not None : cudaLaunchAttributeValue): - string.memcpy(&self._ptr[0].val, val.getPtr(), sizeof(self._ptr[0].val)) + string.memcpy(&self._ptr[0].val, val.getPtr(), sizeof(self._ptr[0].val)) {{endif}} {{if 'struct cudaAsyncNotificationInfo' in found_types}} @@ -12779,7 +12779,7 @@ cdef class anon_struct24: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -12820,7 +12820,7 @@ cdef class anon_union9: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): self._overBudget = anon_struct24(_ptr=self._ptr) @@ -12843,7 +12843,7 @@ cdef class anon_union9: return self._overBudget @overBudget.setter def overBudget(self, overBudget not None : anon_struct24): - string.memcpy(&self._ptr[0].info.overBudget, overBudget.getPtr(), sizeof(self._ptr[0].info.overBudget)) + string.memcpy(&self._ptr[0].info.overBudget, overBudget.getPtr(), sizeof(self._ptr[0].info.overBudget)) {{endif}} {{if 'struct cudaAsyncNotificationInfo' in found_types}} @@ -12866,10 +12866,10 @@ cdef class cudaAsyncNotificationInfo: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccudart.cudaAsyncNotificationInfo)) + self._val_ptr = calloc(1, sizeof(cyruntime.cudaAsyncNotificationInfo)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._info = anon_union9(_ptr=self._ptr) def __dealloc__(self): @@ -12902,7 +12902,7 @@ cdef class cudaAsyncNotificationInfo: return self._info @info.setter def info(self, info not None : anon_union9): - string.memcpy(&self._ptr[0].info, info.getPtr(), sizeof(self._ptr[0].info)) + string.memcpy(&self._ptr[0].info, info.getPtr(), sizeof(self._ptr[0].info)) {{endif}} {{if 'struct cudaTextureDesc' in found_types}} @@ -12949,7 +12949,7 @@ cdef class cudaTextureDesc: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): pass def __dealloc__(self): @@ -13127,7 +13127,7 @@ cdef class cudaEglPlaneDesc_st: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._channelDesc = cudaChannelFormatDesc(_ptr=&self._ptr[0].channelDesc) def __dealloc__(self): @@ -13203,7 +13203,7 @@ cdef class cudaEglPlaneDesc_st: return self._channelDesc @channelDesc.setter def channelDesc(self, channelDesc not None : cudaChannelFormatDesc): - string.memcpy(&self._ptr[0].channelDesc, channelDesc.getPtr(), sizeof(self._ptr[0].channelDesc)) + string.memcpy(&self._ptr[0].channelDesc, channelDesc.getPtr(), 
sizeof(self._ptr[0].channelDesc)) @property def reserved(self): return self._ptr[0].reserved @@ -13229,7 +13229,7 @@ cdef class anon_union10: """ def __cinit__(self, void_ptr _ptr): - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr): pass @@ -13260,20 +13260,20 @@ cdef class anon_union10: raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) pArray = [int(_pArray) for _pArray in pArray] for _idx, _pArray in enumerate(pArray): - self._ptr[0].frame.pArray[_idx] = _pArray + self._ptr[0].frame.pArray[_idx] = _pArray @property def pPitch(self): out_pPitch = [cudaPitchedPtr() for _pPitch in self._ptr[0].frame.pPitch] for _idx in range(len(out_pPitch)): - string.memcpy(out_pPitch[_idx].getPtr(), &self._ptr[0].frame.pPitch[_idx], sizeof(ccudart.cudaPitchedPtr)) + string.memcpy(out_pPitch[_idx].getPtr(), &self._ptr[0].frame.pPitch[_idx], sizeof(cyruntime.cudaPitchedPtr)) return out_pPitch @pPitch.setter def pPitch(self, pPitch : List[cudaPitchedPtr]): if len(pPitch) != 3: raise IndexError('not enough values found during array assignment, expected 3, got', len(pPitch)) for _idx in range(len(pPitch)): - string.memcpy(&self._ptr[0].frame.pPitch[_idx], pPitch[_idx].getPtr(), sizeof(ccudart.cudaPitchedPtr)) + string.memcpy(&self._ptr[0].frame.pPitch[_idx], pPitch[_idx].getPtr(), sizeof(cyruntime.cudaPitchedPtr)) {{endif}} {{if True}} @@ -13310,10 +13310,10 @@ cdef class cudaEglFrame_st: """ def __cinit__(self, void_ptr _ptr = 0): if _ptr == 0: - self._val_ptr = calloc(1, sizeof(ccudart.cudaEglFrame_st)) + self._val_ptr = calloc(1, sizeof(cyruntime.cudaEglFrame_st)) self._ptr = self._val_ptr else: - self._ptr = _ptr + self._ptr = _ptr def __init__(self, void_ptr _ptr = 0): self._frame = anon_union10(_ptr=self._ptr) def __dealloc__(self): @@ -13352,19 +13352,19 @@ cdef class cudaEglFrame_st: return self._frame @frame.setter def frame(self, frame not None : anon_union10): - string.memcpy(&self._ptr[0].frame, frame.getPtr(), sizeof(self._ptr[0].frame)) + string.memcpy(&self._ptr[0].frame, frame.getPtr(), sizeof(self._ptr[0].frame)) @property def planeDesc(self): out_planeDesc = [cudaEglPlaneDesc() for _planeDesc in self._ptr[0].planeDesc] for _idx in range(len(out_planeDesc)): - string.memcpy(out_planeDesc[_idx].getPtr(), &self._ptr[0].planeDesc[_idx], sizeof(ccudart.cudaEglPlaneDesc)) + string.memcpy(out_planeDesc[_idx].getPtr(), &self._ptr[0].planeDesc[_idx], sizeof(cyruntime.cudaEglPlaneDesc)) return out_planeDesc @planeDesc.setter def planeDesc(self, planeDesc : List[cudaEglPlaneDesc]): if len(planeDesc) != 3: raise IndexError('not enough values found during array assignment, expected 3, got', len(planeDesc)) for _idx in range(len(planeDesc)): - string.memcpy(&self._ptr[0].planeDesc[_idx], planeDesc[_idx].getPtr(), sizeof(ccudart.cudaEglPlaneDesc)) + string.memcpy(&self._ptr[0].planeDesc[_idx], planeDesc[_idx].getPtr(), sizeof(cyruntime.cudaEglPlaneDesc)) @property def planeCount(self): @@ -13402,7 +13402,7 @@ cdef class cudaGraphConditionalHandle: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13432,7 +13432,7 @@ cdef class cudaSurfaceObject_t: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13462,7 +13462,7 @@ cdef class cudaTextureObject_t: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: 
self._ptr[0] = init_value def __dealloc__(self): @@ -13490,7 +13490,7 @@ cdef class GLenum: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13518,7 +13518,7 @@ cdef class GLuint: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13546,7 +13546,7 @@ cdef class EGLint: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13574,7 +13574,7 @@ cdef class VdpDevice: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13602,7 +13602,7 @@ cdef class VdpGetProcAddress: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13630,7 +13630,7 @@ cdef class VdpVideoSurface: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13658,7 +13658,7 @@ cdef class VdpOutputSurface: if _ptr == 0: self._ptr = &self.__val else: - self._ptr = _ptr + self._ptr = _ptr if init_value: self._ptr[0] = init_value def __dealloc__(self): @@ -13713,7 +13713,7 @@ def cudaDeviceReset(): If a non-primary :py:obj:`~.CUcontext` is current to the thread, :py:obj:`~.cudaDeviceReset()` will destroy only the internal CUDA RT state for that :py:obj:`~.CUcontext`. """ - err = ccudart.cudaDeviceReset() + err = cyruntime.cudaDeviceReset() return (cudaError_t(err),) {{endif}} @@ -13740,7 +13740,7 @@ def cudaDeviceSynchronize(): :py:obj:`~.cudaDeviceReset`, :py:obj:`~.cuCtxSynchronize` """ with nogil: - err = ccudart.cudaDeviceSynchronize() + err = cyruntime.cudaDeviceSynchronize() return (cudaError_t(err),) {{endif}} @@ -13842,8 +13842,8 @@ def cudaDeviceSetLimit(limit not None : cudaLimit, size_t value): -------- :py:obj:`~.cudaDeviceGetLimit`, :py:obj:`~.cuCtxSetLimit` """ - cdef ccudart.cudaLimit climit = limit.value - err = ccudart.cudaDeviceSetLimit(climit, value) + cdef cyruntime.cudaLimit cylimit = limit.value + err = cyruntime.cudaDeviceSetLimit(cylimit, value) return (cudaError_t(err),) {{endif}} @@ -13899,8 +13899,8 @@ def cudaDeviceGetLimit(limit not None : cudaLimit): :py:obj:`~.cudaDeviceSetLimit`, :py:obj:`~.cuCtxGetLimit` """ cdef size_t pValue = 0 - cdef ccudart.cudaLimit climit = limit.value - err = ccudart.cudaDeviceGetLimit(&pValue, climit) + cdef cyruntime.cudaLimit cylimit = limit.value + err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) return (cudaError_t(err), pValue) {{endif}} @@ -13934,8 +13934,8 @@ def cudaDeviceGetTexture1DLinearMaxWidth(fmtDesc : Optional[cudaChannelFormatDes :py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth` """ cdef size_t maxWidthInElements = 0 - cdef ccudart.cudaChannelFormatDesc* cfmtDesc_ptr = fmtDesc._ptr if fmtDesc != None else NULL - err = ccudart.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cfmtDesc_ptr, device) + cdef cyruntime.cudaChannelFormatDesc* cyfmtDesc_ptr = fmtDesc._ptr if fmtDesc != None else NULL + err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) return (cudaError_t(err), maxWidthInElements) {{endif}} @@ -13981,8 +13981,8 @@ def cudaDeviceGetCacheConfig(): -------- :py:obj:`~.cudaDeviceSetCacheConfig`, 
:py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncSetCacheConfig (C++ API), :py:obj:`~.cuCtxGetCacheConfig` """ - cdef ccudart.cudaFuncCache pCacheConfig - err = ccudart.cudaDeviceGetCacheConfig(&pCacheConfig) + cdef cyruntime.cudaFuncCache pCacheConfig + err = cyruntime.cudaDeviceGetCacheConfig(&pCacheConfig) return (cudaError_t(err), cudaFuncCache(pCacheConfig)) {{endif}} @@ -14026,7 +14026,7 @@ def cudaDeviceGetStreamPriorityRange(): """ cdef int leastPriority = 0 cdef int greatestPriority = 0 - err = ccudart.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) + err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) return (cudaError_t(err), leastPriority, greatestPriority) {{endif}} @@ -14082,8 +14082,8 @@ def cudaDeviceSetCacheConfig(cacheConfig not None : cudaFuncCache): -------- :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncSetCacheConfig (C++ API), :py:obj:`~.cuCtxSetCacheConfig` """ - cdef ccudart.cudaFuncCache ccacheConfig = cacheConfig.value - err = ccudart.cudaDeviceSetCacheConfig(ccacheConfig) + cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value + err = cyruntime.cudaDeviceSetCacheConfig(cycacheConfig) return (cudaError_t(err),) {{endif}} @@ -14115,7 +14115,7 @@ def cudaDeviceGetByPCIBusId(char* pciBusId): :py:obj:`~.cudaDeviceGetPCIBusId`, :py:obj:`~.cuDeviceGetByPCIBusId` """ cdef int device = 0 - err = ccudart.cudaDeviceGetByPCIBusId(&device, pciBusId) + err = cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) return (cudaError_t(err), device) {{endif}} @@ -14153,7 +14153,7 @@ def cudaDeviceGetPCIBusId(int length, int device): """ pypciBusId = b" " * length cdef char* pciBusId = pypciBusId - err = ccudart.cudaDeviceGetPCIBusId(pciBusId, length, device) + err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) return (cudaError_t(err), pypciBusId) {{endif}} @@ -14202,17 +14202,17 @@ def cudaIpcGetEventHandle(event): -------- :py:obj:`~.cudaEventCreate`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcGetEventHandle` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent + cyevent = pevent cdef cudaIpcEventHandle_t handle = cudaIpcEventHandle_t() - err = ccudart.cudaIpcGetEventHandle(handle._ptr, cevent) + err = cyruntime.cudaIpcGetEventHandle(handle._ptr, cyevent) return (cudaError_t(err), handle) {{endif}} @@ -14256,7 +14256,7 @@ def cudaIpcOpenEventHandle(handle not None : cudaIpcEventHandle_t): :py:obj:`~.cudaEventCreate`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcOpenEventHandle` """ cdef cudaEvent_t event = cudaEvent_t() - err = ccudart.cudaIpcOpenEventHandle(event._ptr, handle._ptr[0]) + err = cyruntime.cudaIpcOpenEventHandle(event._ptr, handle._ptr[0]) return (cudaError_t(err), event) {{endif}} @@ -14301,9 +14301,9 
@@ def cudaIpcGetMemHandle(devPtr): :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcGetMemHandle` """ cdef cudaIpcMemHandle_t handle = cudaIpcMemHandle_t() - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - err = ccudart.cudaIpcGetMemHandle(handle._ptr, cdevPtr_ptr) + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + err = cyruntime.cudaIpcGetMemHandle(handle._ptr, cydevPtr_ptr) return (cudaError_t(err), handle) {{endif}} @@ -14373,7 +14373,7 @@ def cudaIpcOpenMemHandle(handle not None : cudaIpcMemHandle_t, unsigned int flag In particular, multiple processes may not receive the same address for the same `handle`. """ cdef void_ptr devPtr = 0 - err = ccudart.cudaIpcOpenMemHandle(&devPtr, handle._ptr[0], flags) + err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._ptr[0], flags) return (cudaError_t(err), devPtr) {{endif}} @@ -14413,9 +14413,9 @@ def cudaIpcCloseMemHandle(devPtr): -------- :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cuIpcCloseMemHandle` """ - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - err = ccudart.cudaIpcCloseMemHandle(cdevPtr_ptr) + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + err = cyruntime.cudaIpcCloseMemHandle(cydevPtr_ptr) return (cudaError_t(err),) {{endif}} @@ -14455,9 +14455,9 @@ def cudaDeviceFlushGPUDirectRDMAWrites(target not None : cudaFlushGPUDirectRDMAW -------- :py:obj:`~.cuFlushGPUDirectRDMAWrites` """ - cdef ccudart.cudaFlushGPUDirectRDMAWritesTarget ctarget = target.value - cdef ccudart.cudaFlushGPUDirectRDMAWritesScope cscope = scope.value - err = ccudart.cudaDeviceFlushGPUDirectRDMAWrites(ctarget, cscope) + cdef cyruntime.cudaFlushGPUDirectRDMAWritesTarget cytarget = target.value + cdef cyruntime.cudaFlushGPUDirectRDMAWritesScope cyscope = scope.value + err = cyruntime.cudaDeviceFlushGPUDirectRDMAWrites(cytarget, cyscope) return (cudaError_t(err),) {{endif}} @@ -14506,19 +14506,19 @@ def cudaDeviceRegisterAsyncNotification(int device, callbackFunc, userData): -------- :py:obj:`~.cudaDeviceUnregisterAsyncNotification` """ - cdef ccudart.cudaAsyncCallback ccallbackFunc + cdef cyruntime.cudaAsyncCallback cycallbackFunc if callbackFunc is None: - ccallbackFunc = 0 + cycallbackFunc = 0 elif isinstance(callbackFunc, (cudaAsyncCallback,)): pcallbackFunc = int(callbackFunc) - ccallbackFunc = pcallbackFunc + cycallbackFunc = pcallbackFunc else: pcallbackFunc = int(cudaAsyncCallback(callbackFunc)) - ccallbackFunc = pcallbackFunc - cuserData = utils.HelperInputVoidPtr(userData) - cdef void* cuserData_ptr = cuserData.cptr + cycallbackFunc = pcallbackFunc + cyuserData = utils.HelperInputVoidPtr(userData) + cdef void* cyuserData_ptr = cyuserData.cptr cdef cudaAsyncCallbackHandle_t callback = cudaAsyncCallbackHandle_t() - err = ccudart.cudaDeviceRegisterAsyncNotification(device, ccallbackFunc, cuserData_ptr, callback._ptr) + err = cyruntime.cudaDeviceRegisterAsyncNotification(device, cycallbackFunc, cyuserData_ptr, callback._ptr) return (cudaError_t(err), callback) {{endif}} @@ -14548,16 +14548,16 @@ def cudaDeviceUnregisterAsyncNotification(int device, callback): -------- 
:py:obj:`~.cudaDeviceRegisterAsyncNotification` """ - cdef ccudart.cudaAsyncCallbackHandle_t ccallback + cdef cyruntime.cudaAsyncCallbackHandle_t cycallback if callback is None: - ccallback = 0 + cycallback = 0 elif isinstance(callback, (cudaAsyncCallbackHandle_t,)): pcallback = int(callback) - ccallback = pcallback + cycallback = pcallback else: pcallback = int(cudaAsyncCallbackHandle_t(callback)) - ccallback = pcallback - err = ccudart.cudaDeviceUnregisterAsyncNotification(device, ccallback) + cycallback = pcallback + err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) return (cudaError_t(err),) {{endif}} @@ -14597,8 +14597,8 @@ def cudaDeviceGetSharedMemConfig(): -------- :py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaDeviceSetSharedMemConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuCtxGetSharedMemConfig` """ - cdef ccudart.cudaSharedMemConfig pConfig - err = ccudart.cudaDeviceGetSharedMemConfig(&pConfig) + cdef cyruntime.cudaSharedMemConfig pConfig + err = cyruntime.cudaDeviceGetSharedMemConfig(&pConfig) return (cudaError_t(err), cudaSharedMemConfig(pConfig)) {{endif}} @@ -14653,8 +14653,8 @@ def cudaDeviceSetSharedMemConfig(config not None : cudaSharedMemConfig): -------- :py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaDeviceGetSharedMemConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuCtxSetSharedMemConfig` """ - cdef ccudart.cudaSharedMemConfig cconfig = config.value - err = ccudart.cudaDeviceSetSharedMemConfig(cconfig) + cdef cyruntime.cudaSharedMemConfig cyconfig = config.value + err = cyruntime.cudaDeviceSetSharedMemConfig(cyconfig) return (cudaError_t(err),) {{endif}} @@ -14681,7 +14681,7 @@ def cudaGetLastError(): -------- :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError` """ - err = ccudart.cudaGetLastError() + err = cyruntime.cudaGetLastError() return (cudaError_t(err),) {{endif}} @@ -14709,7 +14709,7 @@ def cudaPeekAtLastError(): -------- :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError` """ - err = ccudart.cudaPeekAtLastError() + err = cyruntime.cudaPeekAtLastError() return (cudaError_t(err),) {{endif}} @@ -14739,8 +14739,8 @@ def cudaGetErrorName(error not None : cudaError_t): -------- :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaError`, :py:obj:`~.cuGetErrorName` """ - cdef ccudart.cudaError_t cerror = error.value - err = ccudart.cudaGetErrorName(cerror) + cdef cyruntime.cudaError_t cyerror = error.value + err = cyruntime.cudaGetErrorName(cyerror) return (cudaError_t.cudaSuccess, err) {{endif}} @@ -14769,8 +14769,8 @@ def cudaGetErrorString(error not None : cudaError_t): -------- :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaError`, :py:obj:`~.cuGetErrorString` """ - cdef ccudart.cudaError_t cerror = error.value - err = ccudart.cudaGetErrorString(cerror) + cdef cyruntime.cudaError_t cyerror = error.value + err = cyruntime.cudaGetErrorString(cyerror) return (cudaError_t.cudaSuccess, err) {{endif}} @@ -14796,7 +14796,7 @@ def cudaGetDeviceCount(): :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuDeviceGetCount` """ cdef int count = 0 - err = ccudart.cudaGetDeviceCount(&count) + err = 
cyruntime.cudaGetDeviceCount(&count) return (cudaError_t(err), count) {{endif}} @@ -15126,7 +15126,7 @@ def cudaGetDeviceProperties(int device): :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaDeviceGetAttribute`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuDeviceGetAttribute`, :py:obj:`~.cuDeviceGetName` """ cdef cudaDeviceProp prop = cudaDeviceProp() - err = ccudart.cudaGetDeviceProperties(prop._ptr, device) + err = cyruntime.cudaGetDeviceProperties(prop._ptr, device) return (cudaError_t(err), prop) {{endif}} @@ -15495,8 +15495,8 @@ def cudaDeviceGetAttribute(attr not None : cudaDeviceAttr, int device): :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuDeviceGetAttribute` """ cdef int value = 0 - cdef ccudart.cudaDeviceAttr cattr = attr.value - err = ccudart.cudaDeviceGetAttribute(&value, cattr, device) + cdef cyruntime.cudaDeviceAttr cyattr = attr.value + err = cyruntime.cudaDeviceGetAttribute(&value, cyattr, device) return (cudaError_t(err), value) {{endif}} @@ -15527,7 +15527,7 @@ def cudaDeviceGetDefaultMemPool(int device): """ cdef cudaMemPool_t memPool = cudaMemPool_t() with nogil: - err = ccudart.cudaDeviceGetDefaultMemPool(memPool._ptr, device) + err = cyruntime.cudaDeviceGetDefaultMemPool(memPool._ptr, device) return (cudaError_t(err), memPool) {{endif}} @@ -15564,17 +15564,17 @@ def cudaDeviceSetMemPool(int device, memPool): ----- Use :py:obj:`~.cudaMallocFromPoolAsync` to specify asynchronous allocations from a device different than the one the stream runs on. """ - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool + cymemPool = pmemPool with nogil: - err = ccudart.cudaDeviceSetMemPool(device, cmemPool) + err = cyruntime.cudaDeviceSetMemPool(device, cymemPool) return (cudaError_t(err),) {{endif}} @@ -15610,7 +15610,7 @@ def cudaDeviceGetMemPool(int device): """ cdef cudaMemPool_t memPool = cudaMemPool_t() with nogil: - err = ccudart.cudaDeviceGetMemPool(memPool._ptr, device) + err = cyruntime.cudaDeviceGetMemPool(memPool._ptr, device) return (cudaError_t(err), memPool) {{endif}} @@ -15694,9 +15694,9 @@ def cudaDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, int device, int flags): -------- :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` """ - cnvSciSyncAttrList = utils.HelperInputVoidPtr(nvSciSyncAttrList) - cdef void* cnvSciSyncAttrList_ptr = cnvSciSyncAttrList.cptr - err = ccudart.cudaDeviceGetNvSciSyncAttributes(cnvSciSyncAttrList_ptr, device, flags) + cynvSciSyncAttrList = utils.HelperInputVoidPtr(nvSciSyncAttrList) + cdef void* cynvSciSyncAttrList_ptr = cynvSciSyncAttrList.cptr + err = cyruntime.cudaDeviceGetNvSciSyncAttributes(cynvSciSyncAttrList_ptr, device, flags) return (cudaError_t(err),) {{endif}} @@ -15752,8 +15752,8 @@ def cudaDeviceGetP2PAttribute(attr not None : cudaDeviceP2PAttr, int srcDevice, :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cudaDeviceDisablePeerAccess`, 
:py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cuDeviceGetP2PAttribute` """ cdef int value = 0 - cdef ccudart.cudaDeviceP2PAttr cattr = attr.value - err = ccudart.cudaDeviceGetP2PAttribute(&value, cattr, srcDevice, dstDevice) + cdef cyruntime.cudaDeviceP2PAttr cyattr = attr.value + err = cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) return (cudaError_t(err), value) {{endif}} @@ -15783,8 +15783,8 @@ def cudaChooseDevice(prop : Optional[cudaDeviceProp]): :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaInitDevice` """ cdef int device = 0 - cdef ccudart.cudaDeviceProp* cprop_ptr = prop._ptr if prop != None else NULL - err = ccudart.cudaChooseDevice(&device, cprop_ptr) + cdef cyruntime.cudaDeviceProp* cyprop_ptr = prop._ptr if prop != None else NULL + err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) return (cudaError_t(err), device) {{endif}} @@ -15826,7 +15826,7 @@ def cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags): -------- :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaSetDevice` :py:obj:`~.cuCtxSetCurrent` """ - err = ccudart.cudaInitDevice(device, deviceFlags, flags) + err = cyruntime.cudaInitDevice(device, deviceFlags, flags) return (cudaError_t(err),) {{endif}} @@ -15881,7 +15881,7 @@ def cudaSetDevice(int device): -------- :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuCtxSetCurrent` """ - err = ccudart.cudaSetDevice(device) + err = cyruntime.cudaSetDevice(device) return (cudaError_t(err),) {{endif}} @@ -15906,7 +15906,7 @@ def cudaGetDevice(): :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cuCtxGetCurrent` """ cdef int device = 0 - err = ccudart.cudaGetDevice(&device) + err = cyruntime.cudaGetDevice(&device) return (cudaError_t(err), device) {{endif}} @@ -15992,7 +15992,7 @@ def cudaSetDeviceFlags(unsigned int flags): -------- :py:obj:`~.cudaGetDeviceFlags`, :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaSetValidDevices`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cuDevicePrimaryCtxSetFlags` """ - err = ccudart.cudaSetDeviceFlags(flags) + err = cyruntime.cudaSetDeviceFlags(flags) return (cudaError_t(err),) {{endif}} @@ -16039,7 +16039,7 @@ def cudaGetDeviceFlags(): :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuDevicePrimaryCtxGetState` """ cdef unsigned int flags = 0 - err = ccudart.cudaGetDeviceFlags(&flags) + err = cyruntime.cudaGetDeviceFlags(&flags) return (cudaError_t(err), flags) {{endif}} @@ -16066,7 +16066,7 @@ def cudaStreamCreate(): :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamCreate` """ cdef cudaStream_t pStream = cudaStream_t() - err = 
ccudart.cudaStreamCreate(pStream._ptr) + err = cyruntime.cudaStreamCreate(pStream._ptr) return (cudaError_t(err), pStream) {{endif}} @@ -16107,7 +16107,7 @@ def cudaStreamCreateWithFlags(unsigned int flags): :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamCreate` """ cdef cudaStream_t pStream = cudaStream_t() - err = ccudart.cudaStreamCreateWithFlags(pStream._ptr, flags) + err = cyruntime.cudaStreamCreateWithFlags(pStream._ptr, flags) return (cudaError_t(err), pStream) {{endif}} @@ -16165,7 +16165,7 @@ def cudaStreamCreateWithPriority(unsigned int flags, int priority): In the current implementation, only compute kernels launched in priority streams are affected by the stream's priority. Stream priorities have no effect on host-to-device and device-to-host memory operations. """ cdef cudaStream_t pStream = cudaStream_t() - err = ccudart.cudaStreamCreateWithPriority(pStream._ptr, flags, priority) + err = cyruntime.cudaStreamCreateWithPriority(pStream._ptr, flags, priority) return (cudaError_t(err), pStream) {{endif}} @@ -16199,17 +16199,17 @@ def cudaStreamGetPriority(hStream): -------- :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaDeviceGetStreamPriorityRange`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cuStreamGetPriority` """ - cdef ccudart.cudaStream_t chStream + cdef cyruntime.cudaStream_t cyhStream if hStream is None: - chStream = 0 - elif isinstance(hStream, (cudaStream_t,cuda.CUstream)): + cyhStream = 0 + elif isinstance(hStream, (cudaStream_t,driver.CUstream)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(cudaStream_t(hStream)) - chStream = phStream + cyhStream = phStream cdef int priority = 0 - err = ccudart.cudaStreamGetPriority(chStream, &priority) + err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) return (cudaError_t(err), priority) {{endif}} @@ -16239,17 +16239,17 @@ def cudaStreamGetFlags(hStream): -------- :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cuStreamGetFlags` """ - cdef ccudart.cudaStream_t chStream + cdef cyruntime.cudaStream_t cyhStream if hStream is None: - chStream = 0 - elif isinstance(hStream, (cudaStream_t,cuda.CUstream)): + cyhStream = 0 + elif isinstance(hStream, (cudaStream_t,driver.CUstream)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(cudaStream_t(hStream)) - chStream = phStream + cyhStream = phStream cdef unsigned int flags = 0 - err = ccudart.cudaStreamGetFlags(chStream, &flags) + err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) return (cudaError_t(err), flags) {{endif}} @@ -16293,17 +16293,17 @@ def cudaStreamGetId(hStream): -------- :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cuStreamGetId` """ - cdef ccudart.cudaStream_t chStream + cdef cyruntime.cudaStream_t cyhStream if hStream is None: - chStream = 0 - elif isinstance(hStream, (cudaStream_t,cuda.CUstream)): + cyhStream = 0 + elif isinstance(hStream, (cudaStream_t,driver.CUstream)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(cudaStream_t(hStream)) - chStream = phStream + cyhStream = 
phStream cdef unsigned long long streamId = 0 - err = ccudart.cudaStreamGetId(chStream, &streamId) + err = cyruntime.cudaStreamGetId(cyhStream, &streamId) return (cudaError_t(err), streamId) {{endif}} @@ -16325,7 +16325,7 @@ def cudaCtxResetPersistingL2Cache(): -------- :py:obj:`~.cudaAccessPolicyWindow` """ - err = ccudart.cudaCtxResetPersistingL2Cache() + err = cyruntime.cudaCtxResetPersistingL2Cache() return (cudaError_t(err),) {{endif}} @@ -16354,25 +16354,25 @@ def cudaStreamCopyAttributes(dst, src): -------- :py:obj:`~.cudaAccessPolicyWindow` """ - cdef ccudart.cudaStream_t csrc + cdef cyruntime.cudaStream_t cysrc if src is None: - csrc = 0 - elif isinstance(src, (cudaStream_t,cuda.CUstream)): + cysrc = 0 + elif isinstance(src, (cudaStream_t,driver.CUstream)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(cudaStream_t(src)) - csrc = psrc - cdef ccudart.cudaStream_t cdst + cysrc = psrc + cdef cyruntime.cudaStream_t cydst if dst is None: - cdst = 0 - elif isinstance(dst, (cudaStream_t,cuda.CUstream)): + cydst = 0 + elif isinstance(dst, (cudaStream_t,driver.CUstream)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(cudaStream_t(dst)) - cdst = pdst - err = ccudart.cudaStreamCopyAttributes(cdst, csrc) + cydst = pdst + err = cyruntime.cudaStreamCopyAttributes(cydst, cysrc) return (cudaError_t(err),) {{endif}} @@ -16403,18 +16403,18 @@ def cudaStreamGetAttribute(hStream, attr not None : cudaStreamAttrID): -------- :py:obj:`~.cudaAccessPolicyWindow` """ - cdef ccudart.cudaStream_t chStream + cdef cyruntime.cudaStream_t cyhStream if hStream is None: - chStream = 0 - elif isinstance(hStream, (cudaStream_t,cuda.CUstream)): + cyhStream = 0 + elif isinstance(hStream, (cudaStream_t,driver.CUstream)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(cudaStream_t(hStream)) - chStream = phStream - cdef ccudart.cudaStreamAttrID cattr = attr.value + cyhStream = phStream + cdef cyruntime.cudaStreamAttrID cyattr = attr.value cdef cudaStreamAttrValue value_out = cudaStreamAttrValue() - err = ccudart.cudaStreamGetAttribute(chStream, cattr, value_out._ptr) + err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._ptr) return (cudaError_t(err), value_out) {{endif}} @@ -16446,18 +16446,18 @@ def cudaStreamSetAttribute(hStream, attr not None : cudaStreamAttrID, value : Op -------- :py:obj:`~.cudaAccessPolicyWindow` """ - cdef ccudart.cudaStream_t chStream + cdef cyruntime.cudaStream_t cyhStream if hStream is None: - chStream = 0 - elif isinstance(hStream, (cudaStream_t,cuda.CUstream)): + cyhStream = 0 + elif isinstance(hStream, (cudaStream_t,driver.CUstream)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(cudaStream_t(hStream)) - chStream = phStream - cdef ccudart.cudaStreamAttrID cattr = attr.value - cdef ccudart.cudaStreamAttrValue* cvalue_ptr = value._ptr if value != None else NULL - err = ccudart.cudaStreamSetAttribute(chStream, cattr, cvalue_ptr) + cyhStream = phStream + cdef cyruntime.cudaStreamAttrID cyattr = attr.value + cdef cyruntime.cudaStreamAttrValue* cyvalue_ptr = value._ptr if value != None else NULL + err = cyruntime.cudaStreamSetAttribute(cyhStream, cyattr, cyvalue_ptr) return (cudaError_t(err),) {{endif}} @@ -16488,16 +16488,16 @@ def cudaStreamDestroy(stream): -------- :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamSynchronize`, 
:py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cuStreamDestroy` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - err = ccudart.cudaStreamDestroy(cstream) + cystream = pstream + err = cyruntime.cudaStreamDestroy(cystream) return (cudaError_t(err),) {{endif}} @@ -16538,26 +16538,26 @@ def cudaStreamWaitEvent(stream, event, unsigned int flags): -------- :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamWaitEvent` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - cdef ccudart.cudaStream_t cstream + cyevent = pevent + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream + cystream = pstream with nogil: - err = ccudart.cudaStreamWaitEvent(cstream, cevent, flags) + err = cyruntime.cudaStreamWaitEvent(cystream, cyevent, flags) return (cudaError_t(err),) {{endif}} @@ -16633,28 +16633,28 @@ def cudaStreamAddCallback(stream, callback, userData, unsigned int flags): ----- This function is slated for eventual deprecation and removal. If you do not require the callback to execute in case of a device error, consider using :py:obj:`~.cudaLaunchHostFunc`. Additionally, this function is not supported with :py:obj:`~.cudaStreamBeginCapture` and :py:obj:`~.cudaStreamEndCapture`, unlike :py:obj:`~.cudaLaunchHostFunc`. 
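For context on the hunks above and below: every wrapper in this file returns a tuple whose first element is a cudaError_t, and stream/event arguments are coerced from None, the typed wrapper, the matching driver handle, or a plain integer, exactly as the generated coercion blocks show. A minimal usage sketch of the renamed API, assuming the new layout exposes this module as cuda.bindings.runtime (an import path inferred from the cyruntime cimport name, not spelled out in this hunk):

    from cuda.bindings import runtime

    err, stream = runtime.cudaStreamCreate()
    assert err == runtime.cudaError_t.cudaSuccess
    # handle coercion: the wrapper, int(stream), or None (the default stream) are all accepted
    err, = runtime.cudaStreamSynchronize(stream)
    err, = runtime.cudaStreamDestroy(stream)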
""" - cdef ccudart.cudaStreamCallback_t ccallback + cdef cyruntime.cudaStreamCallback_t cycallback if callback is None: - ccallback = 0 + cycallback = 0 elif isinstance(callback, (cudaStreamCallback_t,)): pcallback = int(callback) - ccallback = pcallback + cycallback = pcallback else: pcallback = int(cudaStreamCallback_t(callback)) - ccallback = pcallback - cdef ccudart.cudaStream_t cstream + cycallback = pcallback + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cuserData = utils.HelperInputVoidPtr(userData) - cdef void* cuserData_ptr = cuserData.cptr + cystream = pstream + cyuserData = utils.HelperInputVoidPtr(userData) + cdef void* cyuserData_ptr = cyuserData.cptr with nogil: - err = ccudart.cudaStreamAddCallback(cstream, ccallback, cuserData_ptr, flags) + err = cyruntime.cudaStreamAddCallback(cystream, cycallback, cyuserData_ptr, flags) return (cudaError_t(err),) {{endif}} @@ -16684,17 +16684,17 @@ def cudaStreamSynchronize(stream): -------- :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamSynchronize` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream + cystream = pstream with nogil: - err = ccudart.cudaStreamSynchronize(cstream) + err = cyruntime.cudaStreamSynchronize(cystream) return (cudaError_t(err),) {{endif}} @@ -16726,16 +16726,16 @@ def cudaStreamQuery(stream): -------- :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamQuery` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - err = ccudart.cudaStreamQuery(cstream) + cystream = pstream + err = cyruntime.cudaStreamQuery(cystream) return (cudaError_t(err),) {{endif}} @@ -16835,18 +16835,18 @@ def cudaStreamAttachMemAsync(stream, devPtr, size_t length, unsigned int flags): -------- :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cuStreamAttachMemAsync` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = 
pstream - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - err = ccudart.cudaStreamAttachMemAsync(cstream, cdevPtr_ptr, length, flags) + cystream = pstream + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + err = cyruntime.cudaStreamAttachMemAsync(cystream, cydevPtr_ptr, length, flags) return (cudaError_t(err),) {{endif}} @@ -16893,17 +16893,17 @@ def cudaStreamBeginCapture(stream, mode not None : cudaStreamCaptureMode): ----- Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects. """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaStreamCaptureMode cmode = mode.value - err = ccudart.cudaStreamBeginCapture(cstream, cmode) + cystream = pstream + cdef cyruntime.cudaStreamCaptureMode cymode = mode.value + err = cyruntime.cudaStreamBeginCapture(cystream, cymode) return (cudaError_t(err),) {{endif}} @@ -16962,50 +16962,50 @@ def cudaStreamBeginCaptureToGraph(stream, graph, dependencies : Optional[Tuple[c """ dependencyData = [] if dependencyData is None else dependencyData if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): - raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[ccudart.cudaGraphEdgeData,] or List[ccudart.cudaGraphEdgeData,]") + raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[cyruntime.cudaGraphEdgeData,] or List[cyruntime.cudaGraphEdgeData,]") dependencies = [] if dependencies is None else dependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaStream_t cstream + cygraph = pgraph + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaGraphNode_t* cdependencies = NULL + cystream = pstream + cdef cyruntime.cudaGraphNode_t* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccudart.cudaGraphNode_t)) - if cdependencies is NULL: - raise 
MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] - cdef ccudart.cudaGraphEdgeData* cdependencyData = NULL + cydependencies[idx] = (dependencies[idx])._ptr[0] + cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL if len(dependencyData) > 0: - cdependencyData = calloc(len(dependencyData), sizeof(ccudart.cudaGraphEdgeData)) - if cdependencyData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData))) + cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) + if cydependencyData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) for idx in range(len(dependencyData)): - string.memcpy(&cdependencyData[idx], (dependencyData[idx])._ptr, sizeof(ccudart.cudaGraphEdgeData)) + string.memcpy(&cydependencyData[idx], (dependencyData[idx])._ptr, sizeof(cyruntime.cudaGraphEdgeData)) if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaStreamCaptureMode cmode = mode.value - err = ccudart.cudaStreamBeginCaptureToGraph(cstream, cgraph, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cdependencyData, numDependencies, cmode) - if cdependencies is not NULL: - free(cdependencies) - if cdependencyData is not NULL: - free(cdependencyData) + cdef cyruntime.cudaStreamCaptureMode cymode = mode.value + err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cydependencyData, numDependencies, cymode) + if cydependencies is not NULL: + free(cydependencies) + if cydependencyData is not NULL: + free(cydependencyData) return (cudaError_t(err),) {{endif}} @@ -17075,9 +17075,9 @@ def cudaThreadExchangeStreamCaptureMode(mode not None : cudaStreamCaptureMode): -------- :py:obj:`~.cudaStreamBeginCapture` """ - cdef ccudart.cudaStreamCaptureMode cmode = mode.value - err = ccudart.cudaThreadExchangeStreamCaptureMode(&cmode) - return (cudaError_t(err), cudaStreamCaptureMode(cmode)) + cdef cyruntime.cudaStreamCaptureMode cymode = mode.value + err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) + return (cudaError_t(err), cudaStreamCaptureMode(cymode)) {{endif}} {{if 'cudaStreamEndCapture' in found_functions}} @@ -17112,17 +17112,17 @@ def cudaStreamEndCapture(stream): -------- :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaGraphDestroy` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream + cystream = 
pstream cdef cudaGraph_t pGraph = cudaGraph_t() - err = ccudart.cudaStreamEndCapture(cstream, pGraph._ptr) + err = cyruntime.cudaStreamEndCapture(cystream, pGraph._ptr) return (cudaError_t(err), pGraph) {{endif}} @@ -17172,17 +17172,17 @@ def cudaStreamIsCapturing(stream): -------- :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamEndCapture` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaStreamCaptureStatus pCaptureStatus - err = ccudart.cudaStreamIsCapturing(cstream, &pCaptureStatus) + cystream = pstream + cdef cyruntime.cudaStreamCaptureStatus pCaptureStatus + err = cyruntime.cudaStreamIsCapturing(cystream, &pCaptureStatus) return (cudaError_t(err), cudaStreamCaptureStatus(pCaptureStatus)) {{endif}} @@ -17247,24 +17247,24 @@ def cudaStreamGetCaptureInfo(stream): -------- :py:obj:`~.cudaStreamGetCaptureInfo_v3`, :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaStreamUpdateCaptureDependencies` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaStreamCaptureStatus captureStatus_out + cystream = pstream + cdef cyruntime.cudaStreamCaptureStatus captureStatus_out cdef unsigned long long id_out = 0 cdef cudaGraph_t graph_out = cudaGraph_t() - cdef const ccudart.cudaGraphNode_t* cdependencies_out = NULL + cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL pydependencies_out = [] cdef size_t numDependencies_out = 0 - err = ccudart.cudaStreamGetCaptureInfo(cstream, &captureStatus_out, &id_out, graph_out._ptr, &cdependencies_out, &numDependencies_out) + err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._ptr, &cydependencies_out, &numDependencies_out) if cudaError_t(err) == cudaError_t(0): - pydependencies_out = [cudaGraphNode_t(init_value=cdependencies_out[idx]) for idx in range(numDependencies_out)] + pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] return (cudaError_t(err), cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) {{endif}} @@ -17341,28 +17341,28 @@ def cudaStreamGetCaptureInfo_v3(stream): -------- :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaStreamUpdateCaptureDependencies` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaStreamCaptureStatus captureStatus_out + cystream = pstream + cdef cyruntime.cudaStreamCaptureStatus captureStatus_out cdef unsigned long long id_out = 0 cdef cudaGraph_t graph_out = 
cudaGraph_t() - cdef const ccudart.cudaGraphNode_t* cdependencies_out = NULL + cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL pydependencies_out = [] - cdef const ccudart.cudaGraphEdgeData* cedgeData_out = NULL + cdef const cyruntime.cudaGraphEdgeData* cyedgeData_out = NULL pyedgeData_out = [] cdef size_t numDependencies_out = 0 - err = ccudart.cudaStreamGetCaptureInfo_v3(cstream, &captureStatus_out, &id_out, graph_out._ptr, &cdependencies_out, &cedgeData_out, &numDependencies_out) + err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) if cudaError_t(err) == cudaError_t(0): - pydependencies_out = [cudaGraphNode_t(init_value=cdependencies_out[idx]) for idx in range(numDependencies_out)] + pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] if cudaError_t(err) == cudaError_t(0): - pyedgeData_out = [cudaGraphEdgeData(_ptr=&cedgeData_out[idx]) for idx in range(numDependencies_out)] + pyedgeData_out = [cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] return (cudaError_t(err), cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) {{endif}} @@ -17414,29 +17414,29 @@ def cudaStreamUpdateCaptureDependencies(stream, dependencies : Optional[Tuple[cu :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamGetCaptureInfo`, """ dependencies = [] if dependencies is None else dependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in dependencies): - raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaStream_t cstream + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): + raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaGraphNode_t* cdependencies = NULL + cystream = pstream + cdef cyruntime.cudaGraphNode_t* cydependencies = NULL if len(dependencies) > 0: - cdependencies = calloc(len(dependencies), sizeof(ccudart.cudaGraphNode_t)) - if cdependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cydependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(dependencies)): - cdependencies[idx] = (dependencies[idx])._ptr[0] + cydependencies[idx] = (dependencies[idx])._ptr[0] if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) - err = ccudart.cudaStreamUpdateCaptureDependencies(cstream, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, 
numDependencies, flags)
-    if cdependencies is not NULL:
-        free(cdependencies)
+    err = cyruntime.cudaStreamUpdateCaptureDependencies(cystream, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, numDependencies, flags)
+    if cydependencies is not NULL:
+        free(cydependencies)
     return (cudaError_t(err),)
 {{endif}}
@@ -17487,39 +17487,39 @@ def cudaStreamUpdateCaptureDependencies_v2(stream, dependencies : Optional[Tuple
     """
     dependencyData = [] if dependencyData is None else dependencyData
     if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData):
-        raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[ccudart.cudaGraphEdgeData,] or List[ccudart.cudaGraphEdgeData,]")
+        raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[cyruntime.cudaGraphEdgeData,] or List[cyruntime.cudaGraphEdgeData,]")
     dependencies = [] if dependencies is None else dependencies
-    if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in dependencies):
-        raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]")
-    cdef ccudart.cudaStream_t cstream
+    if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies):
+        raise TypeError("Argument 'dependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]")
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cdef ccudart.cudaGraphNode_t* cdependencies = NULL
+        cystream = pstream
+    cdef cyruntime.cudaGraphNode_t* cydependencies = NULL
     if len(dependencies) > 0:
-        cdependencies = calloc(len(dependencies), sizeof(ccudart.cudaGraphNode_t))
-        if cdependencies is NULL:
-            raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t)))
+        cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t))
+        if cydependencies is NULL:
+            raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t)))
         else:
             for idx in range(len(dependencies)):
-                cdependencies[idx] = (dependencies[idx])._ptr[0]
-    cdef ccudart.cudaGraphEdgeData* cdependencyData = NULL
+                cydependencies[idx] = (dependencies[idx])._ptr[0]
+    cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL
     if len(dependencyData) > 0:
-        cdependencyData = calloc(len(dependencyData), sizeof(ccudart.cudaGraphEdgeData))
-        if cdependencyData is NULL:
-            raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData)))
+        cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData))
+        if cydependencyData is NULL:
+            raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData)))
         for idx in range(len(dependencyData)):
-            string.memcpy(&cdependencyData[idx], (dependencyData[idx])._ptr, sizeof(ccudart.cudaGraphEdgeData))
-    err = ccudart.cudaStreamUpdateCaptureDependencies_v2(cstream, (dependencies[0])._ptr if len(dependencies) == 1 else cdependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cdependencyData, numDependencies, flags)
-    if cdependencies is not NULL:
-        free(cdependencies)
-    if cdependencyData is not NULL:
-        free(cdependencyData)
+            string.memcpy(&cydependencyData[idx], (dependencyData[idx])._ptr, sizeof(cyruntime.cudaGraphEdgeData))
+    err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, (dependencies[0])._ptr if len(dependencies) == 1 else cydependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cydependencyData, numDependencies, flags)
+    if cydependencies is not NULL:
+        free(cydependencies)
+    if cydependencyData is not NULL:
+        free(cydependencyData)
     return (cudaError_t(err),)
 {{endif}}
@@ -17544,7 +17544,7 @@ def cudaEventCreate():
     cudaEventCreate (C++ API), :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cuEventCreate`
     """
     cdef cudaEvent_t event = cudaEvent_t()
-    err = ccudart.cudaEventCreate(event._ptr)
+    err = cyruntime.cudaEventCreate(event._ptr)
     return (cudaError_t(err), event)
 {{endif}}
@@ -17593,7 +17593,7 @@ def cudaEventCreateWithFlags(unsigned int flags):
     :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cuEventCreate`
     """
     cdef cudaEvent_t event = cudaEvent_t()
-    err = ccudart.cudaEventCreateWithFlags(event._ptr, flags)
+    err = cyruntime.cudaEventCreateWithFlags(event._ptr, flags)
     return (cudaError_t(err), event)
 {{endif}}
@@ -17635,25 +17635,25 @@ def cudaEventRecord(event, stream):
     --------
     :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cuEventRecord`
     """
-    cdef ccudart.cudaStream_t cstream
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cdef ccudart.cudaEvent_t cevent
+        cystream = pstream
+    cdef cyruntime.cudaEvent_t cyevent
     if event is None:
-        cevent = 0
-    elif isinstance(event, (cudaEvent_t,cuda.CUevent)):
+        cyevent = 0
+    elif isinstance(event, (cudaEvent_t,driver.CUevent)):
         pevent = int(event)
-        cevent = pevent
+        cyevent = pevent
     else:
         pevent = int(cudaEvent_t(event))
-        cevent = pevent
-    err = ccudart.cudaEventRecord(cevent, cstream)
+        cyevent = pevent
+    err = cyruntime.cudaEventRecord(cyevent, cystream)
     return (cudaError_t(err),)
 {{endif}}
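[Usage note, not part of the patch.] The wrappers above all share one calling convention: every binding returns a tuple whose first element is a `cudaError_t`, and handle arguments accept `None`, the typed wrapper, a `driver` handle, or anything `int`-convertible. A minimal sketch; the `cuda.bindings.runtime` import path is assumed from the module layout this patch introduces, with the legacy `cuda.cudart` spelling kept as an alias:

    from cuda.bindings import runtime

    err, event = runtime.cudaEventCreate()
    assert err == runtime.cudaError_t.cudaSuccess
    # stream may be None (default stream), a cudaStream_t, a driver.CUstream,
    # or a plain integer handle; the wrapper coerces all of them the same way.
    (err,) = runtime.cudaEventRecord(event, None)
    (err,) = runtime.cudaEventDestroy(event)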
@@ -17704,25 +17704,25 @@ def cudaEventRecordWithFlags(event, stream, unsigned int flags):
     --------
     :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cuEventRecord`,
     """
-    cdef ccudart.cudaStream_t cstream
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cdef ccudart.cudaEvent_t cevent
+        cystream = pstream
+    cdef cyruntime.cudaEvent_t cyevent
     if event is None:
-        cevent = 0
-    elif isinstance(event, (cudaEvent_t,cuda.CUevent)):
+        cyevent = 0
+    elif isinstance(event, (cudaEvent_t,driver.CUevent)):
         pevent = int(event)
-        cevent = pevent
+        cyevent = pevent
     else:
         pevent = int(cudaEvent_t(event))
-        cevent = pevent
-    err = ccudart.cudaEventRecordWithFlags(cevent, cstream, flags)
+        cyevent = pevent
+    err = cyruntime.cudaEventRecordWithFlags(cyevent, cystream, flags)
     return (cudaError_t(err),)
 {{endif}}
@@ -17758,16 +17758,16 @@ def cudaEventQuery(event):
     --------
     :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cuEventQuery`
     """
-    cdef ccudart.cudaEvent_t cevent
+    cdef cyruntime.cudaEvent_t cyevent
     if event is None:
-        cevent = 0
-    elif isinstance(event, (cudaEvent_t,cuda.CUevent)):
+        cyevent = 0
+    elif isinstance(event, (cudaEvent_t,driver.CUevent)):
         pevent = int(event)
-        cevent = pevent
+        cyevent = pevent
     else:
         pevent = int(cudaEvent_t(event))
-        cevent = pevent
-    err = ccudart.cudaEventQuery(cevent)
+        cyevent = pevent
+    err = cyruntime.cudaEventQuery(cyevent)
     return (cudaError_t(err),)
 {{endif}}
@@ -17802,16 +17802,16 @@ def cudaEventSynchronize(event):
     --------
     :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cuEventSynchronize`
     """
-    cdef ccudart.cudaEvent_t cevent
+    cdef cyruntime.cudaEvent_t cyevent
     if event is None:
-        cevent = 0
-    elif isinstance(event, (cudaEvent_t,cuda.CUevent)):
+        cyevent = 0
+    elif isinstance(event, (cudaEvent_t,driver.CUevent)):
         pevent = int(event)
-        cevent = pevent
+        cyevent = pevent
     else:
         pevent = int(cudaEvent_t(event))
-        cevent = pevent
-    err = ccudart.cudaEventSynchronize(cevent)
+        cyevent = pevent
+    err = cyruntime.cudaEventSynchronize(cyevent)
     return (cudaError_t(err),)
 {{endif}}
@@ -17843,16 +17843,16 @@ def cudaEventDestroy(event):
     --------
     :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cuEventDestroy`
     """
-    cdef ccudart.cudaEvent_t cevent
+    cdef cyruntime.cudaEvent_t cyevent
     if event is None:
-        cevent = 0
-    elif isinstance(event, (cudaEvent_t,cuda.CUevent)):
+        cyevent = 0
+    elif isinstance(event, (cudaEvent_t,driver.CUevent)):
         pevent = int(event)
-        cevent = pevent
+        cyevent = pevent
     else:
         pevent = int(cudaEvent_t(event))
-        cevent = pevent
-    err = ccudart.cudaEventDestroy(cevent)
+        cyevent = pevent
+    err = cyruntime.cudaEventDestroy(cyevent)
     return (cudaError_t(err),)
 {{endif}}
@@ -17901,26 +17901,26 @@ def cudaEventElapsedTime(start, end):
     --------
     :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cuEventElapsedTime`
     """
-    cdef ccudart.cudaEvent_t cend
+    cdef cyruntime.cudaEvent_t cyend
     if end is None:
-        cend = 0
-    elif isinstance(end, (cudaEvent_t,cuda.CUevent)):
+        cyend = 0
+    elif isinstance(end, (cudaEvent_t,driver.CUevent)):
         pend = int(end)
-        cend = pend
+        cyend = pend
     else:
         pend = int(cudaEvent_t(end))
-        cend = pend
-    cdef ccudart.cudaEvent_t cstart
+        cyend = pend
+    cdef cyruntime.cudaEvent_t cystart
     if start is None:
-        cstart = 0
-    elif isinstance(start, (cudaEvent_t,cuda.CUevent)):
+        cystart = 0
+    elif isinstance(start, (cudaEvent_t,driver.CUevent)):
         pstart = int(start)
-        cstart = pstart
+        cystart = pstart
     else:
         pstart = int(cudaEvent_t(start))
-        cstart = pstart
+        cystart = pstart
     cdef float ms = 0
-    err = ccudart.cudaEventElapsedTime(&ms, cstart, cend)
+    err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend)
     return (cudaError_t(err), ms)
 {{endif}}
@@ -18070,8 +18070,8 @@ def cudaImportExternalMemory(memHandleDesc : Optional[cudaExternalMemoryHandleDe
     and Cache Control" chapter from Vulkan specification.
     """
     cdef cudaExternalMemory_t extMem_out = cudaExternalMemory_t()
-    cdef ccudart.cudaExternalMemoryHandleDesc* cmemHandleDesc_ptr = memHandleDesc._ptr if memHandleDesc != None else NULL
-    err = ccudart.cudaImportExternalMemory(extMem_out._ptr, cmemHandleDesc_ptr)
+    cdef cyruntime.cudaExternalMemoryHandleDesc* cymemHandleDesc_ptr = memHandleDesc._ptr if memHandleDesc != None else NULL
+    err = cyruntime.cudaImportExternalMemory(extMem_out._ptr, cymemHandleDesc_ptr)
     return (cudaError_t(err), extMem_out)
 {{endif}}
@@ -18126,18 +18126,18 @@ def cudaExternalMemoryGetMappedBuffer(extMem, bufferDesc : Optional[cudaExternal
     --------
     :py:obj:`~.cudaImportExternalMemory`, :py:obj:`~.cudaDestroyExternalMemory`, :py:obj:`~.cudaExternalMemoryGetMappedMipmappedArray`
     """
-    cdef ccudart.cudaExternalMemory_t cextMem
+    cdef cyruntime.cudaExternalMemory_t cyextMem
     if extMem is None:
-        cextMem = 0
+        cyextMem = 0
     elif isinstance(extMem, (cudaExternalMemory_t,)):
         pextMem = int(extMem)
-        cextMem = pextMem
+        cyextMem = pextMem
     else:
         pextMem = int(cudaExternalMemory_t(extMem))
-        cextMem = pextMem
+        cyextMem = pextMem
     cdef void_ptr devPtr = 0
-    cdef ccudart.cudaExternalMemoryBufferDesc* cbufferDesc_ptr = bufferDesc._ptr if bufferDesc != None else NULL
-    err = ccudart.cudaExternalMemoryGetMappedBuffer(&devPtr, cextMem, cbufferDesc_ptr)
+    cdef cyruntime.cudaExternalMemoryBufferDesc* cybufferDesc_ptr = bufferDesc._ptr if bufferDesc != None else NULL
+    err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr)
     return (cudaError_t(err), devPtr)
 {{endif}}
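[Usage note, not part of the patch.] The event hunks above support the usual GPU timing idiom end to end; a short sketch under the same assumed import path:

    from cuda.bindings import runtime

    err, start = runtime.cudaEventCreate()
    err, stop = runtime.cudaEventCreate()
    (err,) = runtime.cudaEventRecord(start, None)
    # ... enqueue work on the default stream here ...
    (err,) = runtime.cudaEventRecord(stop, None)
    (err,) = runtime.cudaEventSynchronize(stop)
    err, ms = runtime.cudaEventElapsedTime(start, stop)  # milliseconds as a float
    print(f"elapsed: {ms:.3f} ms")
    runtime.cudaEventDestroy(start)
    runtime.cudaEventDestroy(stop)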
""" - cdef ccudart.cudaExternalMemory_t cextMem + cdef cyruntime.cudaExternalMemory_t cyextMem if extMem is None: - cextMem = 0 + cyextMem = 0 elif isinstance(extMem, (cudaExternalMemory_t,)): pextMem = int(extMem) - cextMem = pextMem + cyextMem = pextMem else: pextMem = int(cudaExternalMemory_t(extMem)) - cextMem = pextMem + cyextMem = pextMem cdef cudaMipmappedArray_t mipmap = cudaMipmappedArray_t() - cdef ccudart.cudaExternalMemoryMipmappedArrayDesc* cmipmapDesc_ptr = mipmapDesc._ptr if mipmapDesc != None else NULL - err = ccudart.cudaExternalMemoryGetMappedMipmappedArray(mipmap._ptr, cextMem, cmipmapDesc_ptr) + cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* cymipmapDesc_ptr = mipmapDesc._ptr if mipmapDesc != None else NULL + err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._ptr, cyextMem, cymipmapDesc_ptr) return (cudaError_t(err), mipmap) {{endif}} @@ -18236,16 +18236,16 @@ def cudaDestroyExternalMemory(extMem): -------- :py:obj:`~.cudaImportExternalMemory`, :py:obj:`~.cudaExternalMemoryGetMappedBuffer`, :py:obj:`~.cudaExternalMemoryGetMappedMipmappedArray` """ - cdef ccudart.cudaExternalMemory_t cextMem + cdef cyruntime.cudaExternalMemory_t cyextMem if extMem is None: - cextMem = 0 + cyextMem = 0 elif isinstance(extMem, (cudaExternalMemory_t,)): pextMem = int(extMem) - cextMem = pextMem + cyextMem = pextMem else: pextMem = int(cudaExternalMemory_t(extMem)) - cextMem = pextMem - err = ccudart.cudaDestroyExternalMemory(cextMem) + cyextMem = pextMem + err = cyruntime.cudaDestroyExternalMemory(cyextMem) return (cudaError_t(err),) {{endif}} @@ -18390,8 +18390,8 @@ def cudaImportExternalSemaphore(semHandleDesc : Optional[cudaExternalSemaphoreHa :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` """ cdef cudaExternalSemaphore_t extSem_out = cudaExternalSemaphore_t() - cdef ccudart.cudaExternalSemaphoreHandleDesc* csemHandleDesc_ptr = semHandleDesc._ptr if semHandleDesc != None else NULL - err = ccudart.cudaImportExternalSemaphore(extSem_out._ptr, csemHandleDesc_ptr) + cdef cyruntime.cudaExternalSemaphoreHandleDesc* cysemHandleDesc_ptr = semHandleDesc._ptr if semHandleDesc != None else NULL + err = cyruntime.cudaImportExternalSemaphore(extSem_out._ptr, cysemHandleDesc_ptr) return (cudaError_t(err), extSem_out) {{endif}} @@ -18491,43 +18491,43 @@ def cudaSignalExternalSemaphoresAsync(extSemArray : Optional[Tuple[cudaExternalS -------- :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream + cystream = pstream paramsArray = [] if paramsArray is None else paramsArray if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray): - raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[ccudart.cudaExternalSemaphoreSignalParams,] or List[ccudart.cudaExternalSemaphoreSignalParams,]") + raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or List[cyruntime.cudaExternalSemaphoreSignalParams,]") extSemArray = [] if extSemArray is None else 
extSemArray if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): - raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[ccudart.cudaExternalSemaphore_t,] or List[ccudart.cudaExternalSemaphore_t,]") - cdef ccudart.cudaExternalSemaphore_t* cextSemArray = NULL + raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[cyruntime.cudaExternalSemaphore_t,] or List[cyruntime.cudaExternalSemaphore_t,]") + cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL if len(extSemArray) > 0: - cextSemArray = calloc(len(extSemArray), sizeof(ccudart.cudaExternalSemaphore_t)) - if cextSemArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphore_t))) + cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) + if cyextSemArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) else: for idx in range(len(extSemArray)): - cextSemArray[idx] = (extSemArray[idx])._ptr[0] - cdef ccudart.cudaExternalSemaphoreSignalParams* cparamsArray = NULL + cyextSemArray[idx] = (extSemArray[idx])._ptr[0] + cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL if len(paramsArray) > 0: - cparamsArray = calloc(len(paramsArray), sizeof(ccudart.cudaExternalSemaphoreSignalParams)) - if cparamsArray is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphoreSignalParams))) + cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) + if cyparamsArray is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) for idx in range(len(paramsArray)): - string.memcpy(&cparamsArray[idx], (paramsArray[idx])._ptr, sizeof(ccudart.cudaExternalSemaphoreSignalParams)) + string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) - err = ccudart.cudaSignalExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cparamsArray, numExtSems, cstream) - if cextSemArray is not NULL: - free(cextSemArray) - if cparamsArray is not NULL: - free(cparamsArray) + err = cyruntime.cudaSignalExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cyextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cyparamsArray, numExtSems, cystream) + if cyextSemArray is not NULL: + free(cyextSemArray) + if cyparamsArray is not NULL: + free(cyparamsArray) return (cudaError_t(err),) {{endif}} @@ -18616,43 +18616,43 @@ def cudaWaitExternalSemaphoresAsync(extSemArray : Optional[Tuple[cudaExternalSem -------- :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif 
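[Usage note, not part of the patch.] The external-memory wrappers above forward a descriptor object's `_ptr` to the C call. An illustrative sketch only (it cannot run without a real exported allocation, and the descriptor field names are assumed to mirror the C structs):

    from cuda.bindings import runtime

    desc = runtime.cudaExternalMemoryHandleDesc()
    desc.type = runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd
    desc.handle.fd = exported_fd   # hypothetical FD exported by Vulkan etc.
    desc.size = nbytes             # hypothetical size of that allocation

    err, ext_mem = runtime.cudaImportExternalMemory(desc)
    buf = runtime.cudaExternalMemoryBufferDesc()
    buf.offset, buf.size, buf.flags = 0, nbytes, 0
    err, dptr = runtime.cudaExternalMemoryGetMappedBuffer(ext_mem, buf)
    # ... use dptr ...
    (err,) = runtime.cudaDestroyExternalMemory(ext_mem)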
@@ -18616,43 +18616,43 @@ def cudaWaitExternalSemaphoresAsync(extSemArray : Optional[Tuple[cudaExternalSem
     --------
     :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`
     """
-    cdef ccudart.cudaStream_t cstream
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
+        cystream = pstream
     paramsArray = [] if paramsArray is None else paramsArray
     if not all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray):
-        raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[ccudart.cudaExternalSemaphoreWaitParams,] or List[ccudart.cudaExternalSemaphoreWaitParams,]")
+        raise TypeError("Argument 'paramsArray' is not instance of type (expected Tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or List[cyruntime.cudaExternalSemaphoreWaitParams,]")
     extSemArray = [] if extSemArray is None else extSemArray
     if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray):
-        raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[ccudart.cudaExternalSemaphore_t,] or List[ccudart.cudaExternalSemaphore_t,]")
-    cdef ccudart.cudaExternalSemaphore_t* cextSemArray = NULL
+        raise TypeError("Argument 'extSemArray' is not instance of type (expected Tuple[cyruntime.cudaExternalSemaphore_t,] or List[cyruntime.cudaExternalSemaphore_t,]")
+    cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL
     if len(extSemArray) > 0:
-        cextSemArray = calloc(len(extSemArray), sizeof(ccudart.cudaExternalSemaphore_t))
-        if cextSemArray is NULL:
-            raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphore_t)))
+        cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t))
+        if cyextSemArray is NULL:
+            raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t)))
         else:
            for idx in range(len(extSemArray)):
-                cextSemArray[idx] = (extSemArray[idx])._ptr[0]
-    cdef ccudart.cudaExternalSemaphoreWaitParams* cparamsArray = NULL
+                cyextSemArray[idx] = (extSemArray[idx])._ptr[0]
+    cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL
     if len(paramsArray) > 0:
-        cparamsArray = calloc(len(paramsArray), sizeof(ccudart.cudaExternalSemaphoreWaitParams))
-        if cparamsArray is NULL:
-            raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(ccudart.cudaExternalSemaphoreWaitParams)))
+        cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams))
+        if cyparamsArray is NULL:
+            raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams)))
         for idx in range(len(paramsArray)):
-            string.memcpy(&cparamsArray[idx], (paramsArray[idx])._ptr, sizeof(ccudart.cudaExternalSemaphoreWaitParams))
+            string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams))
     if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems))
     if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems))
-    err = ccudart.cudaWaitExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cparamsArray, numExtSems, cstream)
-    if cextSemArray is not NULL:
-        free(cextSemArray)
-    if cparamsArray is not NULL:
-        free(cparamsArray)
+    err = cyruntime.cudaWaitExternalSemaphoresAsync((extSemArray[0])._ptr if len(extSemArray) == 1 else cyextSemArray, (paramsArray[0])._ptr if len(paramsArray) == 1 else cyparamsArray, numExtSems, cystream)
+    if cyextSemArray is not NULL:
+        free(cyextSemArray)
+    if cyparamsArray is not NULL:
+        free(cyparamsArray)
     return (cudaError_t(err),)
 {{endif}}
@@ -18680,16 +18680,16 @@ def cudaDestroyExternalSemaphore(extSem):
     --------
     :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`
     """
-    cdef ccudart.cudaExternalSemaphore_t cextSem
+    cdef cyruntime.cudaExternalSemaphore_t cyextSem
     if extSem is None:
-        cextSem = 0
+        cyextSem = 0
     elif isinstance(extSem, (cudaExternalSemaphore_t,)):
         pextSem = int(extSem)
-        cextSem = pextSem
+        cyextSem = pextSem
     else:
         pextSem = int(cudaExternalSemaphore_t(extSem))
-        cextSem = pextSem
-    err = ccudart.cudaDestroyExternalSemaphore(cextSem)
+        cyextSem = pextSem
+    err = cyruntime.cudaDestroyExternalSemaphore(cyextSem)
     return (cudaError_t(err),)
 {{endif}}
@@ -18748,10 +18748,10 @@ def cudaFuncSetCacheConfig(func, cacheConfig not None : cudaFuncCache):
     --------
     cudaFuncSetCacheConfig (C++ API), :py:obj:`~.cudaFuncGetAttributes (C API)`, :py:obj:`~.cudaLaunchKernel (C API)`, :py:obj:`~.cuFuncSetCacheConfig`
     """
-    cfunc = utils.HelperInputVoidPtr(func)
-    cdef void* cfunc_ptr = cfunc.cptr
-    cdef ccudart.cudaFuncCache ccacheConfig = cacheConfig.value
-    err = ccudart.cudaFuncSetCacheConfig(cfunc_ptr, ccacheConfig)
+    cyfunc = utils.HelperInputVoidPtr(func)
+    cdef void* cyfunc_ptr = cyfunc.cptr
+    cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value
+    err = cyruntime.cudaFuncSetCacheConfig(cyfunc_ptr, cycacheConfig)
     return (cudaError_t(err),)
 {{endif}}
@@ -18790,9 +18790,9 @@ def cudaFuncGetAttributes(func):
     :py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncGetAttributes (C++ API), :py:obj:`~.cudaLaunchKernel (C API)`, :py:obj:`~.cuFuncGetAttribute`
     """
     cdef cudaFuncAttributes attr = cudaFuncAttributes()
-    cfunc = utils.HelperInputVoidPtr(func)
-    cdef void* cfunc_ptr = cfunc.cptr
-    err = ccudart.cudaFuncGetAttributes(attr._ptr, cfunc_ptr)
+    cyfunc = utils.HelperInputVoidPtr(func)
+    cdef void* cyfunc_ptr = cyfunc.cptr
+    err = cyruntime.cudaFuncGetAttributes(attr._ptr, cyfunc_ptr)
     return (cudaError_t(err), attr)
 {{endif}}
@@ -18874,10 +18874,10 @@ def cudaFuncSetAttribute(func, attr not None : cudaFuncAttribute, int value):
     cudaError_t
         :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidValue`
     """
-    cfunc = utils.HelperInputVoidPtr(func)
-    cdef void* cfunc_ptr = cfunc.cptr
-    cdef ccudart.cudaFuncAttribute cattr = attr.value
-    err = ccudart.cudaFuncSetAttribute(cfunc_ptr, cattr, value)
+    cyfunc = utils.HelperInputVoidPtr(func)
+    cdef void* cyfunc_ptr = cyfunc.cptr
+    cdef cyruntime.cudaFuncAttribute cyattr = attr.value
+    err = cyruntime.cudaFuncSetAttribute(cyfunc_ptr, cyattr, value)
    return (cudaError_t(err),)
 {{endif}}
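[Usage note, not part of the patch.] Note the list-marshalling pattern in the semaphore wrappers above: `extSemArray` and `paramsArray` may be plain Python lists or tuples, which the wrapper copies into a temporary `calloc` buffer and frees afterwards (single-element sequences skip the copy and pass `._ptr` directly). A sketch, with the nested parameter field names assumed to mirror the C structs:

    from cuda.bindings import runtime

    params = runtime.cudaExternalSemaphoreSignalParams()
    params.params.fence.value = 1   # assumed field layout
    (err,) = runtime.cudaSignalExternalSemaphoresAsync([ext_sem], [params], 1, None)

    wait = runtime.cudaExternalSemaphoreWaitParams()
    wait.params.fence.value = 1
    (err,) = runtime.cudaWaitExternalSemaphoresAsync([ext_sem], [wait], 1, None)

(`ext_sem` stands for a handle previously returned by cudaImportExternalSemaphore.)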
@@ -18945,28 +18945,28 @@ def cudaLaunchHostFunc(stream, fn, userData):
     --------
     :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cudaStreamAttachMemAsync`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cuLaunchHostFunc`
     """
-    cdef ccudart.cudaHostFn_t cfn
+    cdef cyruntime.cudaHostFn_t cyfn
     if fn is None:
-        cfn = 0
+        cyfn = 0
     elif isinstance(fn, (cudaHostFn_t,)):
         pfn = int(fn)
-        cfn = pfn
+        cyfn = pfn
     else:
         pfn = int(cudaHostFn_t(fn))
-        cfn = pfn
-    cdef ccudart.cudaStream_t cstream
+        cyfn = pfn
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cuserData = utils.HelperInputVoidPtr(userData)
-    cdef void* cuserData_ptr = cuserData.cptr
+        cystream = pstream
+    cyuserData = utils.HelperInputVoidPtr(userData)
+    cdef void* cyuserData_ptr = cyuserData.cptr
     with nogil:
-        err = ccudart.cudaLaunchHostFunc(cstream, cfn, cuserData_ptr)
+        err = cyruntime.cudaLaunchHostFunc(cystream, cyfn, cyuserData_ptr)
     return (cudaError_t(err),)
 {{endif}}
@@ -19031,10 +19031,10 @@ def cudaFuncSetSharedMemConfig(func, config not None : cudaSharedMemConfig):
     --------
     :py:obj:`~.cudaDeviceSetSharedMemConfig`, :py:obj:`~.cudaDeviceGetSharedMemConfig`, :py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuFuncSetSharedMemConfig`
     """
-    cfunc = utils.HelperInputVoidPtr(func)
-    cdef void* cfunc_ptr = cfunc.cptr
-    cdef ccudart.cudaSharedMemConfig cconfig = config.value
-    err = ccudart.cudaFuncSetSharedMemConfig(cfunc_ptr, cconfig)
+    cyfunc = utils.HelperInputVoidPtr(func)
+    cdef void* cyfunc_ptr = cyfunc.cptr
+    cdef cyruntime.cudaSharedMemConfig cyconfig = config.value
+    err = cyruntime.cudaFuncSetSharedMemConfig(cyfunc_ptr, cyconfig)
     return (cudaError_t(err),)
 {{endif}}
@@ -19068,9 +19068,9 @@ def cudaOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dy
     :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`, cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), cudaOccupancyAvailableDynamicSMemPerBlock (C++ API), :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessor`
     """
     cdef int numBlocks = 0
-    cfunc = utils.HelperInputVoidPtr(func)
-    cdef void* cfunc_ptr = cfunc.cptr
-    err = ccudart.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cfunc_ptr, blockSize, dynamicSMemSize)
+    cyfunc = utils.HelperInputVoidPtr(func)
+    cdef void* cyfunc_ptr = cyfunc.cptr
+    err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize)
     return (cudaError_t(err), numBlocks)
 {{endif}}
@@ -19104,9 +19104,9 @@ def cudaOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize
     :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`, cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), :py:obj:`~.cudaOccupancyAvailableDynamicSMemPerBlock`
     """
     cdef size_t dynamicSmemSize = 0
-    cfunc = utils.HelperInputVoidPtr(func)
-    cdef void* cfunc_ptr = cfunc.cptr
-    err = ccudart.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cfunc_ptr, numBlocks, blockSize)
+    cyfunc = utils.HelperInputVoidPtr(func)
+    cdef void* cyfunc_ptr = cyfunc.cptr
+    err = cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize)
     return (cudaError_t(err), dynamicSmemSize)
 {{endif}}
@@ -19157,9 +19157,9 @@ def cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize,
     :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor`, cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), cudaOccupancyAvailableDynamicSMemPerBlock (C++ API), :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`
     """
     cdef int numBlocks = 0
-    cfunc = utils.HelperInputVoidPtr(func)
-    cdef void* cfunc_ptr = cfunc.cptr
-    err = ccudart.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(&numBlocks, cfunc_ptr, blockSize, dynamicSMemSize, flags)
+    cyfunc = utils.HelperInputVoidPtr(func)
+    cdef void* cyfunc_ptr = cyfunc.cptr
+    err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize, flags)
     return (cudaError_t(err), numBlocks)
 {{endif}}
@@ -19291,7 +19291,7 @@ def cudaMallocManaged(size_t size, unsigned int flags):
     """
     cdef void_ptr devPtr = 0
     with nogil:
-        err = ccudart.cudaMallocManaged(&devPtr, size, flags)
+        err = cyruntime.cudaMallocManaged(&devPtr, size, flags)
     return (cudaError_t(err), devPtr)
 {{endif}}
@@ -19329,7 +19329,7 @@ def cudaMalloc(size_t size):
     """
     cdef void_ptr devPtr = 0
     with nogil:
-        err = ccudart.cudaMalloc(&devPtr, size)
+        err = cyruntime.cudaMalloc(&devPtr, size)
     return (cudaError_t(err), devPtr)
 {{endif}}
@@ -19375,7 +19375,7 @@ def cudaMallocHost(size_t size):
     :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, cudaMallocHost (C++ API), :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemAllocHost`
     """
     cdef void_ptr ptr = 0
-    err = ccudart.cudaMallocHost(&ptr, size)
+    err = cyruntime.cudaMallocHost(&ptr, size)
     return (cudaError_t(err), ptr)
 {{endif}}
@@ -19428,7 +19428,7 @@ def cudaMallocPitch(size_t width, size_t height):
     """
     cdef void_ptr devPtr = 0
     cdef size_t pitch = 0
-    err = ccudart.cudaMallocPitch(&devPtr, &pitch, width, height)
+    err = cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height)
     return (cudaError_t(err), devPtr, pitch)
 {{endif}}
@@ -19501,9 +19501,9 @@ def cudaMallocArray(desc : Optional[cudaChannelFormatDesc], size_t width, size_t
     :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuArrayCreate`
     """
     cdef cudaArray_t array = cudaArray_t()
-    cdef ccudart.cudaChannelFormatDesc* cdesc_ptr = desc._ptr if desc != None else NULL
+    cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._ptr if desc != None else NULL
     with nogil:
-        err = ccudart.cudaMallocArray(array._ptr, cdesc_ptr, width, height, flags)
+        err = cyruntime.cudaMallocArray(array._ptr, cydesc_ptr, width, height, flags)
     return (cudaError_t(err), array)
 {{endif}}
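[Usage note, not part of the patch.] The allocation wrappers above return the new pointer (and pitch, where applicable) alongside the error code instead of writing through out-parameters; a sketch (`cudaMemAttachGlobal` assumed to be exposed as a module-level constant, as in earlier releases):

    from cuda.bindings import runtime

    err, dptr = runtime.cudaMalloc(1 << 20)                 # 1 MiB device buffer
    err, hptr = runtime.cudaMallocHost(1 << 20)             # pinned host memory
    err, pitched, pitch = runtime.cudaMallocPitch(512, 256) # pitch chosen by the runtime
    err, managed = runtime.cudaMallocManaged(1 << 20, runtime.cudaMemAttachGlobal)

    for p in (dptr, pitched, managed):
        runtime.cudaFree(p)
    runtime.cudaFreeHost(hptr)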
@@ -19551,10 +19551,10 @@ def cudaFree(devPtr):
     --------
     :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaMallocFromPoolAsync` :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaFreeAsync` :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemFree`
     """
-    cdevPtr = utils.HelperInputVoidPtr(devPtr)
-    cdef void* cdevPtr_ptr = cdevPtr.cptr
+    cydevPtr = utils.HelperInputVoidPtr(devPtr)
+    cdef void* cydevPtr_ptr = cydevPtr.cptr
     with nogil:
-        err = ccudart.cudaFree(cdevPtr_ptr)
+        err = cyruntime.cudaFree(cydevPtr_ptr)
     return (cudaError_t(err),)
 {{endif}}
@@ -19583,10 +19583,10 @@ def cudaFreeHost(ptr):
     --------
     :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemFreeHost`
     """
-    cptr = utils.HelperInputVoidPtr(ptr)
-    cdef void* cptr_ptr = cptr.cptr
+    cyptr = utils.HelperInputVoidPtr(ptr)
+    cdef void* cyptr_ptr = cyptr.cptr
     with nogil:
-        err = ccudart.cudaFreeHost(cptr_ptr)
+        err = cyruntime.cudaFreeHost(cyptr_ptr)
     return (cudaError_t(err),)
 {{endif}}
@@ -19615,17 +19615,17 @@ def cudaFreeArray(array):
     --------
     :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuArrayDestroy`
     """
-    cdef ccudart.cudaArray_t carray
+    cdef cyruntime.cudaArray_t cyarray
     if array is None:
-        carray = 0
+        cyarray = 0
     elif isinstance(array, (cudaArray_t,)):
         parray = int(array)
-        carray = parray
+        cyarray = parray
     else:
         parray = int(cudaArray_t(array))
-        carray = parray
+        cyarray = parray
     with nogil:
-        err = ccudart.cudaFreeArray(carray)
+        err = cyruntime.cudaFreeArray(cyarray)
     return (cudaError_t(err),)
 {{endif}}
@@ -19654,16 +19654,16 @@ def cudaFreeMipmappedArray(mipmappedArray):
     --------
     :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMipmappedArrayDestroy`
     """
-    cdef ccudart.cudaMipmappedArray_t cmipmappedArray
+    cdef cyruntime.cudaMipmappedArray_t cymipmappedArray
     if mipmappedArray is None:
-        cmipmappedArray = 0
+        cymipmappedArray = 0
     elif isinstance(mipmappedArray, (cudaMipmappedArray_t,)):
         pmipmappedArray = int(mipmappedArray)
-        cmipmappedArray = pmipmappedArray
+        cymipmappedArray = pmipmappedArray
     else:
         pmipmappedArray = int(cudaMipmappedArray_t(mipmappedArray))
-        cmipmappedArray = pmipmappedArray
-    err = ccudart.cudaFreeMipmappedArray(cmipmappedArray)
+        cymipmappedArray = pmipmappedArray
+    err = cyruntime.cudaFreeMipmappedArray(cymipmappedArray)
     return (cudaError_t(err),)
 {{endif}}
@@ -19746,7 +19746,7 @@ def cudaHostAlloc(size_t size, unsigned int flags):
     """
     cdef void_ptr pHost = 0
     with nogil:
-        err = ccudart.cudaHostAlloc(&pHost, size, flags)
+        err = cyruntime.cudaHostAlloc(&pHost, size, flags)
     return (cudaError_t(err), pHost)
 {{endif}}
@@ -19860,10 +19860,10 @@ def cudaHostRegister(ptr, size_t size, unsigned int flags):
     --------
     :py:obj:`~.cudaHostUnregister`, :py:obj:`~.cudaHostGetFlags`, :py:obj:`~.cudaHostGetDevicePointer`, :py:obj:`~.cuMemHostRegister`
     """
-    cptr = utils.HelperInputVoidPtr(ptr)
-    cdef void* cptr_ptr = cptr.cptr
+    cyptr = utils.HelperInputVoidPtr(ptr)
+    cdef void* cyptr_ptr = cyptr.cptr
     with nogil:
-        err = ccudart.cudaHostRegister(cptr_ptr, size, flags)
+        err = cyruntime.cudaHostRegister(cyptr_ptr, size, flags)
     return (cudaError_t(err),)
 {{endif}}
@@ -19894,10 +19894,10 @@ def cudaHostUnregister(ptr):
     --------
     :py:obj:`~.cudaHostUnregister`, :py:obj:`~.cuMemHostUnregister`
     """
-    cptr = utils.HelperInputVoidPtr(ptr)
-    cdef void* cptr_ptr = cptr.cptr
+    cyptr = utils.HelperInputVoidPtr(ptr)
+    cdef void* cyptr_ptr = cyptr.cptr
     with nogil:
-        err = ccudart.cudaHostUnregister(cptr_ptr)
+        err = cyruntime.cudaHostUnregister(cyptr_ptr)
     return (cudaError_t(err),)
 {{endif}}
@@ -19955,9 +19955,9 @@ def cudaHostGetDevicePointer(pHost, unsigned int flags):
     :py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`
     """
     cdef void_ptr pDevice = 0
-    cpHost = utils.HelperInputVoidPtr(pHost)
-    cdef void* cpHost_ptr = cpHost.cptr
-    err = ccudart.cudaHostGetDevicePointer(&pDevice, cpHost_ptr, flags)
+    cypHost = utils.HelperInputVoidPtr(pHost)
+    cdef void* cypHost_ptr = cypHost.cptr
+    err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags)
     return (cudaError_t(err), pDevice)
 {{endif}}
@@ -19987,9 +19987,9 @@ def cudaHostGetFlags(pHost):
     :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemHostGetFlags`
     """
     cdef unsigned int pFlags = 0
-    cpHost = utils.HelperInputVoidPtr(pHost)
-    cdef void* cpHost_ptr = cpHost.cptr
-    err = ccudart.cudaHostGetFlags(&pFlags, cpHost_ptr)
+    cypHost = utils.HelperInputVoidPtr(pHost)
+    cdef void* cypHost_ptr = cypHost.cptr
+    err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr)
     return (cudaError_t(err), pFlags)
 {{endif}}
@@ -20035,7 +20035,7 @@ def cudaMalloc3D(extent not None : cudaExtent):
     :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMemAllocPitch`
     """
     cdef cudaPitchedPtr pitchedDevPtr = cudaPitchedPtr()
-    err = ccudart.cudaMalloc3D(pitchedDevPtr._ptr, extent._ptr[0])
+    err = cyruntime.cudaMalloc3D(pitchedDevPtr._ptr, extent._ptr[0])
     return (cudaError_t(err), pitchedDevPtr)
 {{endif}}
@@ -20154,9 +20154,9 @@ def cudaMalloc3DArray(desc : Optional[cudaChannelFormatDesc], extent not None :
     :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuArray3DCreate`
     """
     cdef cudaArray_t array = cudaArray_t()
-    cdef ccudart.cudaChannelFormatDesc* cdesc_ptr = desc._ptr if desc != None else NULL
+    cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._ptr if desc != None else NULL
     with nogil:
-        err = ccudart.cudaMalloc3DArray(array._ptr, cdesc_ptr, extent._ptr[0], flags)
+        err = cyruntime.cudaMalloc3DArray(array._ptr, cydesc_ptr, extent._ptr[0], flags)
     return (cudaError_t(err), array)
 {{endif}}
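[Usage note, not part of the patch.] The host-memory wrappers above accept any object exposable as an integer address (that is what utils.HelperInputVoidPtr does), so a NumPy buffer can be pinned directly; numpy is assumed, flag value 0 stands in for cudaHostRegisterDefault, and the device alias relies on unified addressing:

    import numpy as np
    from cuda.bindings import runtime

    a = np.zeros(1024, dtype=np.float32)
    (err,) = runtime.cudaHostRegister(a.ctypes.data, a.nbytes, 0)
    err, dev_alias = runtime.cudaHostGetDevicePointer(a.ctypes.data, 0)
    err, flags = runtime.cudaHostGetFlags(a.ctypes.data)
    (err,) = runtime.cudaHostUnregister(a.ctypes.data)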
@@ -20279,8 +20279,8 @@ def cudaMallocMipmappedArray(desc : Optional[cudaChannelFormatDesc], extent not
     :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMipmappedArrayCreate`
     """
     cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t()
-    cdef ccudart.cudaChannelFormatDesc* cdesc_ptr = desc._ptr if desc != None else NULL
-    err = ccudart.cudaMallocMipmappedArray(mipmappedArray._ptr, cdesc_ptr, extent._ptr[0], numLevels, flags)
+    cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._ptr if desc != None else NULL
+    err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._ptr, cydesc_ptr, extent._ptr[0], numLevels, flags)
     return (cudaError_t(err), mipmappedArray)
 {{endif}}
@@ -20317,17 +20317,17 @@ def cudaGetMipmappedArrayLevel(mipmappedArray, unsigned int level):
     --------
     :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMipmappedArrayGetLevel`
     """
-    cdef ccudart.cudaMipmappedArray_const_t cmipmappedArray
+    cdef cyruntime.cudaMipmappedArray_const_t cymipmappedArray
     if mipmappedArray is None:
-        cmipmappedArray = 0
+        cymipmappedArray = 0
     elif isinstance(mipmappedArray, (cudaMipmappedArray_const_t,)):
         pmipmappedArray = int(mipmappedArray)
-        cmipmappedArray = pmipmappedArray
+        cymipmappedArray = pmipmappedArray
     else:
         pmipmappedArray = int(cudaMipmappedArray_const_t(mipmappedArray))
-        cmipmappedArray = pmipmappedArray
+        cymipmappedArray = pmipmappedArray
     cdef cudaArray_t levelArray = cudaArray_t()
-    err = ccudart.cudaGetMipmappedArrayLevel(levelArray._ptr, cmipmappedArray, level)
+    err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._ptr, cymipmappedArray, level)
     return (cudaError_t(err), levelArray)
 {{endif}}
@@ -20409,9 +20409,9 @@ def cudaMemcpy3D(p : Optional[cudaMemcpy3DParms]):
     --------
     :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemcpy3DAsync`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.make_cudaPos`, :py:obj:`~.cuMemcpy3D`
     """
-    cdef ccudart.cudaMemcpy3DParms* cp_ptr = p._ptr if p != None else NULL
+    cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = p._ptr if p != None else NULL
     with nogil:
-        err = ccudart.cudaMemcpy3D(cp_ptr)
+        err = cyruntime.cudaMemcpy3D(cyp_ptr)
     return (cudaError_t(err),)
 {{endif}}
@@ -20447,8 +20447,8 @@ def cudaMemcpy3DPeer(p : Optional[cudaMemcpy3DPeerParms]):
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpy3DPeer`
     """
-    cdef ccudart.cudaMemcpy3DPeerParms* cp_ptr = p._ptr if p != None else NULL
-    err = ccudart.cudaMemcpy3DPeer(cp_ptr)
+    cdef cyruntime.cudaMemcpy3DPeerParms* cyp_ptr = p._ptr if p != None else NULL
+    err = cyruntime.cudaMemcpy3DPeer(cyp_ptr)
     return (cudaError_t(err),)
 {{endif}}
@@ -20543,18 +20543,18 @@ def cudaMemcpy3DAsync(p : Optional[cudaMemcpy3DParms], stream):
     --------
     :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, ::::py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.make_cudaPos`, :py:obj:`~.cuMemcpy3DAsync`
     """
-    cdef ccudart.cudaStream_t cstream
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cdef ccudart.cudaMemcpy3DParms* cp_ptr = p._ptr if p != None else NULL
+        cystream = pstream
+    cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = p._ptr if p != None else NULL
     with nogil:
-        err = ccudart.cudaMemcpy3DAsync(cp_ptr, cstream)
+        err = cyruntime.cudaMemcpy3DAsync(cyp_ptr, cystream)
     return (cudaError_t(err),)
 {{endif}}
@@ -20585,17 +20585,17 @@ def cudaMemcpy3DPeerAsync(p : Optional[cudaMemcpy3DPeerParms], stream):
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`
     """
-    cdef ccudart.cudaStream_t cstream
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cdef ccudart.cudaMemcpy3DPeerParms* cp_ptr = p._ptr if p != None else NULL
-    err = ccudart.cudaMemcpy3DPeerAsync(cp_ptr, cstream)
+        cystream = pstream
+    cdef cyruntime.cudaMemcpy3DPeerParms* cyp_ptr = p._ptr if p != None else NULL
+    err = cyruntime.cudaMemcpy3DPeerAsync(cyp_ptr, cystream)
     return (cudaError_t(err),)
 {{endif}}
@@ -20637,7 +20637,7 @@ def cudaMemGetInfo():
     """
     cdef size_t free = 0
     cdef size_t total = 0
-    err = ccudart.cudaMemGetInfo(&free, &total)
+    err = cyruntime.cudaMemGetInfo(&free, &total)
     return (cudaError_t(err), free, total)
 {{endif}}
@@ -20672,19 +20672,19 @@ def cudaArrayGetInfo(array):
     --------
     :py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuArray3DGetDescriptor`
     """
-    cdef ccudart.cudaArray_t carray
+    cdef cyruntime.cudaArray_t cyarray
     if array is None:
-        carray = 0
+        cyarray = 0
     elif isinstance(array, (cudaArray_t,)):
         parray = int(array)
-        carray = parray
+        cyarray = parray
     else:
         parray = int(cudaArray_t(array))
-        carray = parray
+        cyarray = parray
     cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc()
     cdef cudaExtent extent = cudaExtent()
     cdef unsigned int flags = 0
-    err = ccudart.cudaArrayGetInfo(desc._ptr, extent._ptr, &flags, carray)
+    err = cyruntime.cudaArrayGetInfo(desc._ptr, extent._ptr, &flags, cyarray)
     return (cudaError_t(err), desc, extent, flags)
 {{endif}}
@@ -20728,17 +20728,17 @@ def cudaArrayGetPlane(hArray, unsigned int planeIdx):
     --------
     :py:obj:`~.cuArrayGetPlane`
     """
-    cdef ccudart.cudaArray_t chArray
+    cdef cyruntime.cudaArray_t cyhArray
     if hArray is None:
-        chArray = 0
+        cyhArray = 0
     elif isinstance(hArray, (cudaArray_t,)):
         phArray = int(hArray)
-        chArray = phArray
+        cyhArray = phArray
     else:
         phArray = int(cudaArray_t(hArray))
-        chArray = phArray
+        cyhArray = phArray
     cdef cudaArray_t pPlaneArray = cudaArray_t()
-    err = ccudart.cudaArrayGetPlane(pPlaneArray._ptr, chArray, planeIdx)
+    err = cyruntime.cudaArrayGetPlane(pPlaneArray._ptr, cyhArray, planeIdx)
     return (cudaError_t(err), pPlaneArray)
 {{endif}}
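[Usage note, not part of the patch.] The info queries above return plain Python values in the result tuple, for example:

    from cuda.bindings import runtime

    err, free_b, total_b = runtime.cudaMemGetInfo()
    print(f"{free_b / 2**20:.0f} MiB free of {total_b / 2**20:.0f} MiB")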
@@ -20776,17 +20776,17 @@ def cudaArrayGetMemoryRequirements(array, int device):
     --------
     :py:obj:`~.cudaMipmappedArrayGetMemoryRequirements`
     """
-    cdef ccudart.cudaArray_t carray
+    cdef cyruntime.cudaArray_t cyarray
     if array is None:
-        carray = 0
+        cyarray = 0
     elif isinstance(array, (cudaArray_t,)):
         parray = int(array)
-        carray = parray
+        cyarray = parray
     else:
         parray = int(cudaArray_t(array))
-        carray = parray
+        cyarray = parray
     cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements()
-    err = ccudart.cudaArrayGetMemoryRequirements(memoryRequirements._ptr, carray, device)
+    err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._ptr, cyarray, device)
     return (cudaError_t(err), memoryRequirements)
 {{endif}}
@@ -20824,17 +20824,17 @@ def cudaMipmappedArrayGetMemoryRequirements(mipmap, int device):
     --------
     :py:obj:`~.cudaArrayGetMemoryRequirements`
     """
-    cdef ccudart.cudaMipmappedArray_t cmipmap
+    cdef cyruntime.cudaMipmappedArray_t cymipmap
     if mipmap is None:
-        cmipmap = 0
+        cymipmap = 0
     elif isinstance(mipmap, (cudaMipmappedArray_t,)):
         pmipmap = int(mipmap)
-        cmipmap = pmipmap
+        cymipmap = pmipmap
     else:
         pmipmap = int(cudaMipmappedArray_t(mipmap))
-        cmipmap = pmipmap
+        cymipmap = pmipmap
     cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements()
-    err = ccudart.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._ptr, cmipmap, device)
+    err = cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._ptr, cymipmap, device)
     return (cudaError_t(err), memoryRequirements)
 {{endif}}
@@ -20878,17 +20878,17 @@ def cudaArrayGetSparseProperties(array):
     --------
     :py:obj:`~.cudaMipmappedArrayGetSparseProperties`, :py:obj:`~.cuMemMapArrayAsync`
     """
-    cdef ccudart.cudaArray_t carray
+    cdef cyruntime.cudaArray_t cyarray
     if array is None:
-        carray = 0
+        cyarray = 0
     elif isinstance(array, (cudaArray_t,)):
         parray = int(array)
-        carray = parray
+        cyarray = parray
     else:
         parray = int(cudaArray_t(array))
-        carray = parray
+        cyarray = parray
     cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties()
-    err = ccudart.cudaArrayGetSparseProperties(sparseProperties._ptr, carray)
+    err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._ptr, cyarray)
     return (cudaError_t(err), sparseProperties)
 {{endif}}
@@ -20932,17 +20932,17 @@ def cudaMipmappedArrayGetSparseProperties(mipmap):
     --------
     :py:obj:`~.cudaArrayGetSparseProperties`, :py:obj:`~.cuMemMapArrayAsync`
     """
-    cdef ccudart.cudaMipmappedArray_t cmipmap
+    cdef cyruntime.cudaMipmappedArray_t cymipmap
     if mipmap is None:
-        cmipmap = 0
+        cymipmap = 0
     elif isinstance(mipmap, (cudaMipmappedArray_t,)):
         pmipmap = int(mipmap)
-        cmipmap = pmipmap
+        cymipmap = pmipmap
    else:
         pmipmap = int(cudaMipmappedArray_t(mipmap))
-        cmipmap = pmipmap
+        cymipmap = pmipmap
     cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties()
-    err = ccudart.cudaMipmappedArrayGetSparseProperties(sparseProperties._ptr, cmipmap)
+    err = cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._ptr, cymipmap)
     return (cudaError_t(err), sparseProperties)
 {{endif}}
@@ -20986,13 +20986,13 @@ def cudaMemcpy(dst, src, size_t count, kind not None : cudaMemcpyKind):
     --------
     :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpy`
     """
-    cdst = utils.HelperInputVoidPtr(dst)
-    cdef void* cdst_ptr = cdst.cptr
-    csrc = utils.HelperInputVoidPtr(src)
-    cdef void* csrc_ptr = csrc.cptr
-    cdef ccudart.cudaMemcpyKind ckind = kind.value
+    cydst = utils.HelperInputVoidPtr(dst)
+    cdef void* cydst_ptr = cydst.cptr
+    cysrc = utils.HelperInputVoidPtr(src)
+    cdef void* cysrc_ptr = cysrc.cptr
+    cdef cyruntime.cudaMemcpyKind cykind = kind.value
     with nogil:
-        err = ccudart.cudaMemcpy(cdst_ptr, csrc_ptr, count, ckind)
+        err = cyruntime.cudaMemcpy(cydst_ptr, cysrc_ptr, count, cykind)
     return (cudaError_t(err),)
 {{endif}}
@@ -21036,12 +21036,12 @@ def cudaMemcpyPeer(dst, int dstDevice, src, int srcDevice, size_t count):
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpyPeer`
     """
-    cdst = utils.HelperInputVoidPtr(dst)
-    cdef void* cdst_ptr = cdst.cptr
-    csrc = utils.HelperInputVoidPtr(src)
-    cdef void* csrc_ptr = csrc.cptr
+    cydst = utils.HelperInputVoidPtr(dst)
+    cdef void* cydst_ptr = cydst.cptr
+    cysrc = utils.HelperInputVoidPtr(src)
+    cdef void* cysrc_ptr = cysrc.cptr
     with nogil:
-        err = ccudart.cudaMemcpyPeer(cdst_ptr, dstDevice, csrc_ptr, srcDevice, count)
+        err = cyruntime.cudaMemcpyPeer(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count)
     return (cudaError_t(err),)
 {{endif}}
@@ -21096,13 +21096,13 @@ def cudaMemcpy2D(dst, size_t dpitch, src, size_t spitch, size_t width, size_t he
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`
     """
-    cdst = utils.HelperInputVoidPtr(dst)
-    cdef void* cdst_ptr = cdst.cptr
-    csrc = utils.HelperInputVoidPtr(src)
-    cdef void* csrc_ptr = csrc.cptr
-    cdef ccudart.cudaMemcpyKind ckind = kind.value
+    cydst = utils.HelperInputVoidPtr(dst)
+    cdef void* cydst_ptr = cydst.cptr
+    cysrc = utils.HelperInputVoidPtr(src)
+    cdef void* cysrc_ptr = cysrc.cptr
+    cdef cyruntime.cudaMemcpyKind cykind = kind.value
     with nogil:
-        err = ccudart.cudaMemcpy2D(cdst_ptr, dpitch, csrc_ptr, spitch, width, height, ckind)
+        err = cyruntime.cudaMemcpy2D(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind)
     return (cudaError_t(err),)
 {{endif}}
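[Usage note, not part of the patch.] A host-to-device round trip through the cudaMemcpy wrapper above; numpy assumed, and any int-convertible address works for `dst`/`src`:

    import numpy as np
    from cuda.bindings import runtime

    a = np.arange(16, dtype=np.float32)
    err, dptr = runtime.cudaMalloc(a.nbytes)
    (err,) = runtime.cudaMemcpy(dptr, a.ctypes.data, a.nbytes,
                                runtime.cudaMemcpyKind.cudaMemcpyHostToDevice)
    out = np.empty_like(a)
    (err,) = runtime.cudaMemcpy(out.ctypes.data, dptr, out.nbytes,
                                runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    runtime.cudaFree(dptr)
    assert (out == a).all()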
@@ -21158,20 +21158,20 @@ def cudaMemcpy2DToArray(dst, size_t wOffset, size_t hOffset, src, size_t spitch,
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`
     """
-    cdef ccudart.cudaArray_t cdst
+    cdef cyruntime.cudaArray_t cydst
     if dst is None:
-        cdst = 0
+        cydst = 0
     elif isinstance(dst, (cudaArray_t,)):
         pdst = int(dst)
-        cdst = pdst
+        cydst = pdst
     else:
         pdst = int(cudaArray_t(dst))
-        cdst = pdst
-    csrc = utils.HelperInputVoidPtr(src)
-    cdef void* csrc_ptr = csrc.cptr
-    cdef ccudart.cudaMemcpyKind ckind = kind.value
+        cydst = pdst
+    cysrc = utils.HelperInputVoidPtr(src)
+    cdef void* cysrc_ptr = cysrc.cptr
+    cdef cyruntime.cudaMemcpyKind cykind = kind.value
     with nogil:
-        err = ccudart.cudaMemcpy2DToArray(cdst, wOffset, hOffset, csrc_ptr, spitch, width, height, ckind)
+        err = cyruntime.cudaMemcpy2DToArray(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind)
     return (cudaError_t(err),)
 {{endif}}
@@ -21227,20 +21227,20 @@ def cudaMemcpy2DFromArray(dst, size_t dpitch, src, size_t wOffset, size_t hOffse
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`
     """
-    cdef ccudart.cudaArray_const_t csrc
+    cdef cyruntime.cudaArray_const_t cysrc
     if src is None:
-        csrc = 0
+        cysrc = 0
     elif isinstance(src, (cudaArray_const_t,)):
         psrc = int(src)
-        csrc = psrc
+        cysrc = psrc
     else:
         psrc = int(cudaArray_const_t(src))
-        csrc = psrc
-    cdst = utils.HelperInputVoidPtr(dst)
-    cdef void* cdst_ptr = cdst.cptr
-    cdef ccudart.cudaMemcpyKind ckind = kind.value
+        cysrc = psrc
+    cydst = utils.HelperInputVoidPtr(dst)
+    cdef void* cydst_ptr = cydst.cptr
+    cdef cyruntime.cudaMemcpyKind cykind = kind.value
     with nogil:
-        err = ccudart.cudaMemcpy2DFromArray(cdst_ptr, dpitch, csrc, wOffset, hOffset, width, height, ckind)
+        err = cyruntime.cudaMemcpy2DFromArray(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind)
     return (cudaError_t(err),)
 {{endif}}
@@ -21296,26 +21296,26 @@ def cudaMemcpy2DArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, siz
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`
     """
-    cdef ccudart.cudaArray_const_t csrc
+    cdef cyruntime.cudaArray_const_t cysrc
     if src is None:
-        csrc = 0
+        cysrc = 0
     elif isinstance(src, (cudaArray_const_t,)):
         psrc = int(src)
-        csrc = psrc
+        cysrc = psrc
     else:
         psrc = int(cudaArray_const_t(src))
-        csrc = psrc
-    cdef ccudart.cudaArray_t cdst
+        cysrc = psrc
+    cdef cyruntime.cudaArray_t cydst
     if dst is None:
-        cdst = 0
+        cydst = 0
     elif isinstance(dst, (cudaArray_t,)):
         pdst = int(dst)
-        cdst = pdst
+        cydst = pdst
     else:
         pdst = int(cudaArray_t(dst))
-        cdst = pdst
-    cdef ccudart.cudaMemcpyKind ckind = kind.value
-    err = ccudart.cudaMemcpy2DArrayToArray(cdst, wOffsetDst, hOffsetDst, csrc, wOffsetSrc, hOffsetSrc, width, height, ckind)
+        cydst = pdst
+    cdef cyruntime.cudaMemcpyKind cykind = kind.value
+    err = cyruntime.cudaMemcpy2DArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, width, height, cykind)
     return (cudaError_t(err),)
 {{endif}}
@@ -21371,22 +21371,22 @@ def cudaMemcpyAsync(dst, src, size_t count, kind not None : cudaMemcpyKind, stre
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAsync`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemcpyDtoDAsync`
     """
-    cdef ccudart.cudaStream_t cstream
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cdst = utils.HelperInputVoidPtr(dst)
-    cdef void* cdst_ptr = cdst.cptr
-    csrc = utils.HelperInputVoidPtr(src)
-    cdef void* csrc_ptr = csrc.cptr
-    cdef ccudart.cudaMemcpyKind ckind = kind.value
+        cystream = pstream
+    cydst = utils.HelperInputVoidPtr(dst)
+    cdef void* cydst_ptr = cydst.cptr
+    cysrc = utils.HelperInputVoidPtr(src)
+    cdef void* cysrc_ptr = cysrc.cptr
+    cdef cyruntime.cudaMemcpyKind cykind = kind.value
     with nogil:
-        err = ccudart.cudaMemcpyAsync(cdst_ptr, csrc_ptr, count, ckind, cstream)
+        err = cyruntime.cudaMemcpyAsync(cydst_ptr, cysrc_ptr, count, cykind, cystream)
     return (cudaError_t(err),)
 {{endif}}
@@ -21430,21 +21430,21 @@ def cudaMemcpyPeerAsync(dst, int dstDevice, src, int srcDevice, size_t count, st
     --------
     :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpyPeerAsync`
     """
-    cdef ccudart.cudaStream_t cstream
+    cdef cyruntime.cudaStream_t cystream
     if stream is None:
-        cstream = 0
-    elif isinstance(stream, (cudaStream_t,cuda.CUstream)):
+        cystream = 0
+    elif isinstance(stream, (cudaStream_t,driver.CUstream)):
         pstream = int(stream)
-        cstream = pstream
+        cystream = pstream
     else:
         pstream = int(cudaStream_t(stream))
-        cstream = pstream
-    cdst = utils.HelperInputVoidPtr(dst)
-    cdef void* cdst_ptr = cdst.cptr
-    csrc = utils.HelperInputVoidPtr(src)
-    cdef void* csrc_ptr = csrc.cptr
+        cystream = pstream
+    cydst = utils.HelperInputVoidPtr(dst)
+    cdef void* cydst_ptr = cydst.cptr
+    cysrc = utils.HelperInputVoidPtr(src)
+    cdef void* cysrc_ptr = cysrc.cptr
     with nogil:
-        err = ccudart.cudaMemcpyPeerAsync(cdst_ptr, dstDevice, csrc_ptr, srcDevice, count, cstream)
+        err = cyruntime.cudaMemcpyPeerAsync(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count, cystream)
     return (cudaError_t(err),)
 {{endif}}
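[Usage note, not part of the patch.] The async variants above take the stream last, and the wrapper releases the GIL around the underlying call (the `with nogil:` blocks). A sketch with an explicit stream; cudaStreamCreate, cudaStreamSynchronize, and cudaStreamDestroy are wrapped in the same style elsewhere in this file:

    import numpy as np
    from cuda.bindings import runtime

    a = np.ones(4096, dtype=np.uint8)   # pageable here; pin it for real overlap
    err, stream = runtime.cudaStreamCreate()
    err, dptr = runtime.cudaMalloc(a.nbytes)
    (err,) = runtime.cudaMemcpyAsync(dptr, a.ctypes.data, a.nbytes,
                                     runtime.cudaMemcpyKind.cudaMemcpyHostToDevice,
                                     stream)
    (err,) = runtime.cudaStreamSynchronize(stream)
    runtime.cudaFree(dptr)
    runtime.cudaStreamDestroy(stream)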
ckind = kind.value + cystream = pstream + cydst = utils.HelperInputVoidPtr(dst) + cdef void* cydst_ptr = cydst.cptr + cysrc = utils.HelperInputVoidPtr(src) + cdef void* cysrc_ptr = cysrc.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value with nogil: - err = ccudart.cudaMemcpy2DAsync(cdst_ptr, dpitch, csrc_ptr, spitch, width, height, ckind, cstream) + err = cyruntime.cudaMemcpy2DAsync(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind, cystream) return (cudaError_t(err),) {{endif}} @@ -21596,29 +21596,29 @@ def cudaMemcpy2DToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t sp -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaArray_t cdst + cystream = pstream + cdef cyruntime.cudaArray_t cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (cudaArray_t,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(cudaArray_t(dst)) - cdst = pdst - csrc = utils.HelperInputVoidPtr(src) - cdef void* csrc_ptr = csrc.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value + cydst = pdst + cysrc = utils.HelperInputVoidPtr(src) + cdef void* cysrc_ptr = cysrc.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value with nogil: - err = ccudart.cudaMemcpy2DToArrayAsync(cdst, wOffset, hOffset, csrc_ptr, spitch, width, height, ckind, cstream) + err = cyruntime.cudaMemcpy2DToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind, cystream) return (cudaError_t(err),) {{endif}} @@ -21686,29 +21686,29 @@ def cudaMemcpy2DFromArrayAsync(dst, size_t dpitch, src, size_t wOffset, size_t h -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaArray_const_t csrc + cystream = pstream + cdef cyruntime.cudaArray_const_t cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (cudaArray_const_t,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(cudaArray_const_t(src)) - csrc = psrc - cdst = utils.HelperInputVoidPtr(dst) - cdef void* cdst_ptr = cdst.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value + cysrc = psrc + cydst = utils.HelperInputVoidPtr(dst) + cdef void* cydst_ptr = cydst.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value with nogil: - err = ccudart.cudaMemcpy2DFromArrayAsync(cdst_ptr, dpitch, csrc, wOffset, hOffset, width, height, ckind, cstream) 
+ err = cyruntime.cudaMemcpy2DFromArrayAsync(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind, cystream) return (cudaError_t(err),) {{endif}} @@ -21743,10 +21743,10 @@ def cudaMemset(devPtr, int value, size_t count): -------- :py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32` """ - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr with nogil: - err = ccudart.cudaMemset(cdevPtr_ptr, value, count) + err = cyruntime.cudaMemset(cydevPtr_ptr, value, count) return (cudaError_t(err),) {{endif}} @@ -21788,9 +21788,9 @@ def cudaMemset2D(devPtr, size_t pitch, int value, size_t width, size_t height): -------- :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32` """ - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - err = ccudart.cudaMemset2D(cdevPtr_ptr, pitch, value, width, height) + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + err = cyruntime.cudaMemset2D(cydevPtr_ptr, pitch, value, width, height) return (cudaError_t(err),) {{endif}} @@ -21843,7 +21843,7 @@ def cudaMemset3D(pitchedDevPtr not None : cudaPitchedPtr, int value, extent not -------- :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent` """ - err = ccudart.cudaMemset3D(pitchedDevPtr._ptr[0], value, extent._ptr[0]) + err = cyruntime.cudaMemset3D(pitchedDevPtr._ptr[0], value, extent._ptr[0]) return (cudaError_t(err),) {{endif}} @@ -21885,19 +21885,19 @@ def cudaMemsetAsync(devPtr, int value, size_t count, stream): -------- :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32Async` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr + cystream = pstream + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr with nogil: - err = ccudart.cudaMemsetAsync(cdevPtr_ptr, value, count, cstream) + err = cyruntime.cudaMemsetAsync(cydevPtr_ptr, value, count, cystream) return (cudaError_t(err),) {{endif}} @@ -21947,18 +21947,18 @@ def cudaMemset2DAsync(devPtr, size_t pitch, int value, size_t width, size_t heig -------- :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32Async` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = 
int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - err = ccudart.cudaMemset2DAsync(cdevPtr_ptr, pitch, value, width, height, cstream) + cystream = pstream + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + err = cyruntime.cudaMemset2DAsync(cydevPtr_ptr, pitch, value, width, height, cystream) return (cudaError_t(err),) {{endif}} @@ -22019,16 +22019,16 @@ def cudaMemset3DAsync(pitchedDevPtr not None : cudaPitchedPtr, int value, extent -------- :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - err = ccudart.cudaMemset3DAsync(pitchedDevPtr._ptr[0], value, extent._ptr[0], cstream) + cystream = pstream + err = cyruntime.cudaMemset3DAsync(pitchedDevPtr._ptr[0], value, extent._ptr[0], cystream) return (cudaError_t(err),) {{endif}} @@ -22116,19 +22116,19 @@ def cudaMemPrefetchAsync(devPtr, size_t count, int dstDevice, stream): -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cudaMemAdvise_v2` :py:obj:`~.cuMemPrefetchAsync` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr + cystream = pstream + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr with nogil: - err = ccudart.cudaMemPrefetchAsync(cdevPtr_ptr, count, dstDevice, cstream) + err = cyruntime.cudaMemPrefetchAsync(cydevPtr_ptr, count, dstDevice, cystream) return (cudaError_t(err),) {{endif}} @@ -22235,19 +22235,19 @@ def cudaMemPrefetchAsync_v2(devPtr, size_t count, location not None : cudaMemLoc -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cudaMemAdvise_v2` :py:obj:`~.cuMemPrefetchAsync` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr + cystream = pstream + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr with nogil: - err = ccudart.cudaMemPrefetchAsync_v2(cdevPtr_ptr, count, location._ptr[0], flags, cstream) + err = 
cyruntime.cudaMemPrefetchAsync_v2(cydevPtr_ptr, count, location._ptr[0], flags, cystream) return (cudaError_t(err),) {{endif}} @@ -22410,11 +22410,11 @@ def cudaMemAdvise(devPtr, size_t count, advice not None : cudaMemoryAdvise, int -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cuMemAdvise` """ - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - cdef ccudart.cudaMemoryAdvise cadvice = advice.value + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value with nogil: - err = ccudart.cudaMemAdvise(cdevPtr_ptr, count, cadvice, device) + err = cyruntime.cudaMemAdvise(cydevPtr_ptr, count, cyadvice, device) return (cudaError_t(err),) {{endif}} @@ -22608,11 +22608,11 @@ def cudaMemAdvise_v2(devPtr, size_t count, advice not None : cudaMemoryAdvise, l -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cuMemAdvise`, :py:obj:`~.cuMemAdvise_v2` """ - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - cdef ccudart.cudaMemoryAdvise cadvice = advice.value + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value with nogil: - err = ccudart.cudaMemAdvise_v2(cdevPtr_ptr, count, cadvice, location._ptr[0]) + err = cyruntime.cudaMemAdvise_v2(cydevPtr_ptr, count, cyadvice, location._ptr[0]) return (cudaError_t(err),) {{endif}} @@ -22756,13 +22756,13 @@ def cudaMemRangeGetAttribute(size_t dataSize, attribute not None : cudaMemRangeA -------- :py:obj:`~.cudaMemRangeGetAttributes`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cuMemRangeGetAttribute` """ - cdef utils.HelperCUmem_range_attribute cdata = utils.HelperCUmem_range_attribute(attribute, dataSize) - cdef void* cdata_ptr = cdata.cptr - cdef ccudart.cudaMemRangeAttribute cattribute = attribute.value - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - err = ccudart.cudaMemRangeGetAttribute(cdata_ptr, dataSize, cattribute, cdevPtr_ptr, count) - return (cudaError_t(err), cdata.pyObj()) + cdef utils.HelperCUmem_range_attribute cydata = utils.HelperCUmem_range_attribute(attribute, dataSize) + cdef void* cydata_ptr = cydata.cptr + cdef cyruntime.cudaMemRangeAttribute cyattribute = attribute.value + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + err = cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) + return (cudaError_t(err), cydata.pyObj()) {{endif}} {{if 'cudaMemRangeGetAttributes' in found_functions}} @@ -22827,19 +22827,19 @@ def cudaMemRangeGetAttributes(dataSizes : Tuple[int] | List[int], attributes : O """ attributes = [] if attributes is None else attributes if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): - raise TypeError("Argument 'attributes' is not instance of type (expected Tuple[ccudart.cudaMemRangeAttribute] or List[ccudart.cudaMemRangeAttribute]") + raise TypeError("Argument 'attributes' is not instance of type (expected Tuple[cyruntime.cudaMemRangeAttribute] or List[cyruntime.cudaMemRangeAttribute]") if not all(isinstance(_x, (int)) for _x in dataSizes): raise TypeError("Argument 'dataSizes' is not 
instance of type (expected Tuple[int] or List[int]") pylist = [utils.HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] cdef utils.InputVoidPtrPtrHelper voidStarHelperdata = utils.InputVoidPtrPtrHelper(pylist) - cdef void** cvoidStarHelper_ptr = voidStarHelperdata.cptr - cdef vector[size_t] cdataSizes = dataSizes - cdef vector[ccudart.cudaMemRangeAttribute] cattributes = [pyattributes.value for pyattributes in (attributes)] + cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr + cdef vector[size_t] cydataSizes = dataSizes + cdef vector[cyruntime.cudaMemRangeAttribute] cyattributes = [pyattributes.value for pyattributes in (attributes)] if numAttributes > len(dataSizes): raise RuntimeError("List is too small: " + str(len(dataSizes)) + " < " + str(numAttributes)) if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr - err = ccudart.cudaMemRangeGetAttributes(cvoidStarHelper_ptr, cdataSizes.data(), cattributes.data(), numAttributes, cdevPtr_ptr, count) + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr + err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) return (cudaError_t(err), [obj.pyObj() for obj in pylist]) {{endif}} @@ -22886,19 +22886,19 @@ def cudaMemcpyToArray(dst, size_t wOffset, size_t hOffset, src, size_t count, ki -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyDtoA` """ - cdef ccudart.cudaArray_t cdst + cdef cyruntime.cudaArray_t cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (cudaArray_t,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(cudaArray_t(dst)) - cdst = pdst - csrc = utils.HelperInputVoidPtr(src) - cdef void* csrc_ptr = csrc.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaMemcpyToArray(cdst, wOffset, hOffset, csrc_ptr, count, ckind) + cydst = pdst + cysrc = utils.HelperInputVoidPtr(src) + cdef void* cysrc_ptr = cysrc.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaMemcpyToArray(cydst, wOffset, hOffset, cysrc_ptr, count, cykind) return (cudaError_t(err),) {{endif}} @@ -22945,19 +22945,19 @@ def cudaMemcpyFromArray(dst, src, size_t wOffset, size_t hOffset, size_t count, -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, 
:py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoD` """ - cdef ccudart.cudaArray_const_t csrc + cdef cyruntime.cudaArray_const_t cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (cudaArray_const_t,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(cudaArray_const_t(src)) - csrc = psrc - cdst = utils.HelperInputVoidPtr(dst) - cdef void* cdst_ptr = cdst.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaMemcpyFromArray(cdst_ptr, csrc, wOffset, hOffset, count, ckind) + cysrc = psrc + cydst = utils.HelperInputVoidPtr(dst) + cdef void* cydst_ptr = cydst.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaMemcpyFromArray(cydst_ptr, cysrc, wOffset, hOffset, count, cykind) return (cudaError_t(err),) {{endif}} @@ -23009,26 +23009,26 @@ def cudaMemcpyArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_ -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAtoA` """ - cdef ccudart.cudaArray_const_t csrc + cdef cyruntime.cudaArray_const_t cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (cudaArray_const_t,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(cudaArray_const_t(src)) - csrc = psrc - cdef ccudart.cudaArray_t cdst + cysrc = psrc + cdef cyruntime.cudaArray_t cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (cudaArray_t,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(cudaArray_t(dst)) - cdst = pdst - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaMemcpyArrayToArray(cdst, wOffsetDst, hOffsetDst, csrc, wOffsetSrc, hOffsetSrc, count, ckind) + cydst = pdst + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaMemcpyArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, count, cykind) return (cudaError_t(err),) {{endif}} @@ -23084,28 +23084,28 @@ def cudaMemcpyToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t coun -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpy2DAsync` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = 
pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaArray_t cdst + cystream = pstream + cdef cyruntime.cudaArray_t cydst if dst is None: - cdst = 0 + cydst = 0 elif isinstance(dst, (cudaArray_t,)): pdst = int(dst) - cdst = pdst + cydst = pdst else: pdst = int(cudaArray_t(dst)) - cdst = pdst - csrc = utils.HelperInputVoidPtr(src) - cdef void* csrc_ptr = csrc.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaMemcpyToArrayAsync(cdst, wOffset, hOffset, csrc_ptr, count, ckind, cstream) + cydst = pdst + cysrc = utils.HelperInputVoidPtr(src) + cdef void* cysrc_ptr = cysrc.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaMemcpyToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, count, cykind, cystream) return (cudaError_t(err),) {{endif}} @@ -23161,28 +23161,28 @@ def cudaMemcpyFromArrayAsync(dst, src, size_t wOffset, size_t hOffset, size_t co -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpy2DAsync` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaArray_const_t csrc + cystream = pstream + cdef cyruntime.cudaArray_const_t cysrc if src is None: - csrc = 0 + cysrc = 0 elif isinstance(src, (cudaArray_const_t,)): psrc = int(src) - csrc = psrc + cysrc = psrc else: psrc = int(cudaArray_const_t(src)) - csrc = psrc - cdst = utils.HelperInputVoidPtr(dst) - cdef void* cdst_ptr = cdst.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaMemcpyFromArrayAsync(cdst_ptr, csrc, wOffset, hOffset, count, ckind, cstream) + cysrc = psrc + cydst = utils.HelperInputVoidPtr(dst) + cdef void* cydst_ptr = cydst.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaMemcpyFromArrayAsync(cydst_ptr, cysrc, wOffset, hOffset, count, cykind, cystream) return (cudaError_t(err),) {{endif}} @@ -23225,18 +23225,18 @@ def cudaMallocAsync(size_t size, hStream): During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters. 
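As an illustrative aside (not part of the diff): under the new layout the wrapper above is reached as cuda.bindings.runtime, and the stream argument may be a cudaStream_t, a driver.CUstream, or a plain integer handle, per the isinstance coercion shown in these hunks. A minimal stream-ordered allocation sketch, assuming a CUDA-capable device:

    from cuda.bindings import runtime

    err, = runtime.cudaSetDevice(0)
    err, stream = runtime.cudaStreamCreate()
    # Stream-ordered allocation from the device's current mempool.
    err, dev_ptr = runtime.cudaMallocAsync(1 << 20, stream)
    assert err == runtime.cudaError_t.cudaSuccess
    err, = runtime.cudaFreeAsync(dev_ptr, stream)
    err, = runtime.cudaStreamSynchronize(stream)
    err, = runtime.cudaStreamDestroy(stream)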
""" - cdef ccudart.cudaStream_t chStream + cdef cyruntime.cudaStream_t cyhStream if hStream is None: - chStream = 0 - elif isinstance(hStream, (cudaStream_t,cuda.CUstream)): + cyhStream = 0 + elif isinstance(hStream, (cudaStream_t,driver.CUstream)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(cudaStream_t(hStream)) - chStream = phStream + cyhStream = phStream cdef void_ptr devPtr = 0 with nogil: - err = ccudart.cudaMallocAsync(&devPtr, size, chStream) + err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) return (cudaError_t(err), devPtr) {{endif}} @@ -23272,19 +23272,19 @@ def cudaFreeAsync(devPtr, hStream): ----- During stream capture, this function results in the creation of a free node and must therefore be passed the address of a graph allocation. """ - cdef ccudart.cudaStream_t chStream + cdef cyruntime.cudaStream_t cyhStream if hStream is None: - chStream = 0 - elif isinstance(hStream, (cudaStream_t,cuda.CUstream)): + cyhStream = 0 + elif isinstance(hStream, (cudaStream_t,driver.CUstream)): phStream = int(hStream) - chStream = phStream + cyhStream = phStream else: phStream = int(cudaStream_t(hStream)) - chStream = phStream - cdevPtr = utils.HelperInputVoidPtr(devPtr) - cdef void* cdevPtr_ptr = cdevPtr.cptr + cyhStream = phStream + cydevPtr = utils.HelperInputVoidPtr(devPtr) + cdef void* cydevPtr_ptr = cydevPtr.cptr with nogil: - err = ccudart.cudaFreeAsync(cdevPtr_ptr, chStream) + err = cyruntime.cudaFreeAsync(cydevPtr_ptr, cyhStream) return (cudaError_t(err),) {{endif}} @@ -23326,17 +23326,17 @@ def cudaMemPoolTrimTo(memPool, size_t minBytesToKeep): : Allocations that have been asynchronously freed but whose completion has not been observed on the host (eg. by a synchronize) can count as outstanding. 
""" - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool + cymemPool = pmemPool with nogil: - err = ccudart.cudaMemPoolTrimTo(cmemPool, minBytesToKeep) + err = cyruntime.cudaMemPoolTrimTo(cymemPool, minBytesToKeep) return (cudaError_t(err),) {{endif}} @@ -23401,20 +23401,20 @@ def cudaMemPoolSetAttribute(memPool, attr not None : cudaMemPoolAttr, value): -------- :py:obj:`~.cuMemPoolSetAttribute`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaMemPoolCreate` """ - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool - cdef ccudart.cudaMemPoolAttr cattr = attr.value - cdef utils.HelperCUmemPool_attribute cvalue = utils.HelperCUmemPool_attribute(attr, value, is_getter=False) - cdef void* cvalue_ptr = cvalue.cptr + cymemPool = pmemPool + cdef cyruntime.cudaMemPoolAttr cyattr = attr.value + cdef utils.HelperCUmemPool_attribute cyvalue = utils.HelperCUmemPool_attribute(attr, value, is_getter=False) + cdef void* cyvalue_ptr = cyvalue.cptr with nogil: - err = ccudart.cudaMemPoolSetAttribute(cmemPool, cattr, cvalue_ptr) + err = cyruntime.cudaMemPoolSetAttribute(cymemPool, cyattr, cyvalue_ptr) return (cudaError_t(err),) {{endif}} @@ -23485,22 +23485,22 @@ def cudaMemPoolGetAttribute(memPool, attr not None : cudaMemPoolAttr): -------- :py:obj:`~.cuMemPoolGetAttribute`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaMemPoolCreate` """ - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool - cdef ccudart.cudaMemPoolAttr cattr = attr.value - cdef utils.HelperCUmemPool_attribute cvalue = utils.HelperCUmemPool_attribute(attr, 0, is_getter=True) - cdef void* cvalue_ptr = cvalue.cptr + cymemPool = pmemPool + cdef cyruntime.cudaMemPoolAttr cyattr = attr.value + cdef utils.HelperCUmemPool_attribute cyvalue = utils.HelperCUmemPool_attribute(attr, 0, is_getter=True) + cdef void* cyvalue_ptr = cyvalue.cptr with nogil: - err = ccudart.cudaMemPoolGetAttribute(cmemPool, cattr, cvalue_ptr) + err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) - return (cudaError_t(err), cvalue.pyObj()) + return (cudaError_t(err), cyvalue.pyObj()) {{endif}} {{if 'cudaMemPoolSetAccess' in found_functions}} @@ -23530,27 +23530,27 @@ def cudaMemPoolSetAccess(memPool, descList : Optional[Tuple[cudaMemAccessDesc] | """ descList = [] if descList is None else descList if not all(isinstance(_x, (cudaMemAccessDesc,)) for _x in 
descList): - raise TypeError("Argument 'descList' is not instance of type (expected Tuple[ccudart.cudaMemAccessDesc,] or List[ccudart.cudaMemAccessDesc,]") - cdef ccudart.cudaMemPool_t cmemPool + raise TypeError("Argument 'descList' is not instance of type (expected Tuple[cyruntime.cudaMemAccessDesc,] or List[cyruntime.cudaMemAccessDesc,]") + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool - cdef ccudart.cudaMemAccessDesc* cdescList = NULL + cymemPool = pmemPool + cdef cyruntime.cudaMemAccessDesc* cydescList = NULL if len(descList) > 0: - cdescList = calloc(len(descList), sizeof(ccudart.cudaMemAccessDesc)) - if cdescList is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(ccudart.cudaMemAccessDesc))) + cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) + if cydescList is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) for idx in range(len(descList)): - string.memcpy(&cdescList[idx], (descList[idx])._ptr, sizeof(ccudart.cudaMemAccessDesc)) + string.memcpy(&cydescList[idx], (descList[idx])._ptr, sizeof(cyruntime.cudaMemAccessDesc)) if count > len(descList): raise RuntimeError("List is too small: " + str(len(descList)) + " < " + str(count)) - err = ccudart.cudaMemPoolSetAccess(cmemPool, (descList[0])._ptr if len(descList) == 1 else cdescList, count) - if cdescList is not NULL: - free(cdescList) + err = cyruntime.cudaMemPoolSetAccess(cymemPool, (descList[0])._ptr if len(descList) == 1 else cydescList, count) + if cydescList is not NULL: + free(cydescList) return (cudaError_t(err),) {{endif}} @@ -23581,18 +23581,18 @@ def cudaMemPoolGetAccess(memPool, location : Optional[cudaMemLocation]): -------- :py:obj:`~.cuMemPoolGetAccess`, :py:obj:`~.cudaMemPoolSetAccess` """ - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool - cdef ccudart.cudaMemAccessFlags flags - cdef ccudart.cudaMemLocation* clocation_ptr = location._ptr if location != None else NULL - err = ccudart.cudaMemPoolGetAccess(&flags, cmemPool, clocation_ptr) + cymemPool = pmemPool + cdef cyruntime.cudaMemAccessFlags flags + cdef cyruntime.cudaMemLocation* cylocation_ptr = location._ptr if location != None else NULL + err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) return (cudaError_t(err), cudaMemAccessFlags(flags)) {{endif}} @@ -23659,8 +23659,8 @@ def cudaMemPoolCreate(poolProps : Optional[cudaMemPoolProps]): Specifying cudaMemHandleTypeNone creates a memory pool that will not support IPC. 
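A hedged sketch of pool creation through the new module path (illustrative; field and enum names follow the runtime API, and the device ordinal 0 is an assumption):

    from cuda.bindings import runtime

    props = runtime.cudaMemPoolProps()
    props.allocType = runtime.cudaMemAllocationType.cudaMemAllocationTypePinned
    props.location.type = runtime.cudaMemLocationType.cudaMemLocationTypeDevice
    props.location.id = 0
    # handleTypes is left at cudaMemHandleTypeNone, so this pool,
    # per the note above, will not support IPC.
    err, pool = runtime.cudaMemPoolCreate(props)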
""" cdef cudaMemPool_t memPool = cudaMemPool_t() - cdef ccudart.cudaMemPoolProps* cpoolProps_ptr = poolProps._ptr if poolProps != None else NULL - err = ccudart.cudaMemPoolCreate(memPool._ptr, cpoolProps_ptr) + cdef cyruntime.cudaMemPoolProps* cypoolProps_ptr = poolProps._ptr if poolProps != None else NULL + err = cyruntime.cudaMemPoolCreate(memPool._ptr, cypoolProps_ptr) return (cudaError_t(err), memPool) {{endif}} @@ -23697,16 +23697,16 @@ def cudaMemPoolDestroy(memPool): ----- A device's default memory pool cannot be destroyed. """ - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool - err = ccudart.cudaMemPoolDestroy(cmemPool) + cymemPool = pmemPool + err = cyruntime.cudaMemPoolDestroy(cymemPool) return (cudaError_t(err),) {{endif}} @@ -23745,26 +23745,26 @@ def cudaMallocFromPoolAsync(size_t size, memPool, stream): ----- During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters. """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaMemPool_t cmemPool + cystream = pstream + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool + cymemPool = pmemPool cdef void_ptr ptr = 0 - err = ccudart.cudaMallocFromPoolAsync(&ptr, size, cmemPool, cstream) + err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, cystream) return (cudaError_t(err), ptr) {{endif}} @@ -23807,20 +23807,20 @@ def cudaMemPoolExportToShareableHandle(memPool, handleType not None : cudaMemAll ----- : To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than cudaMemHandleTypeNone. 
""" - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool - cdef utils.HelperCUmemAllocationHandleType cshareableHandle = utils.HelperCUmemAllocationHandleType(handleType) - cdef void* cshareableHandle_ptr = cshareableHandle.cptr - cdef ccudart.cudaMemAllocationHandleType chandleType = handleType.value - err = ccudart.cudaMemPoolExportToShareableHandle(cshareableHandle_ptr, cmemPool, chandleType, flags) - return (cudaError_t(err), cshareableHandle.pyObj()) + cymemPool = pmemPool + cdef utils.HelperCUmemAllocationHandleType cyshareableHandle = utils.HelperCUmemAllocationHandleType(handleType) + cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr + cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value + err = cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) + return (cudaError_t(err), cyshareableHandle.pyObj()) {{endif}} {{if 'cudaMemPoolImportFromShareableHandle' in found_functions}} @@ -23857,10 +23857,10 @@ def cudaMemPoolImportFromShareableHandle(shareableHandle, handleType not None : Imported memory pools do not support creating new allocations. As such imported memory pools may not be used in :py:obj:`~.cudaDeviceSetMemPool` or :py:obj:`~.cudaMallocFromPoolAsync` calls. """ cdef cudaMemPool_t memPool = cudaMemPool_t() - cshareableHandle = utils.HelperInputVoidPtr(shareableHandle) - cdef void* cshareableHandle_ptr = cshareableHandle.cptr - cdef ccudart.cudaMemAllocationHandleType chandleType = handleType.value - err = ccudart.cudaMemPoolImportFromShareableHandle(memPool._ptr, cshareableHandle_ptr, chandleType, flags) + cyshareableHandle = utils.HelperInputVoidPtr(shareableHandle) + cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr + cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value + err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._ptr, cyshareableHandle_ptr, cyhandleType, flags) return (cudaError_t(err), memPool) {{endif}} @@ -23892,9 +23892,9 @@ def cudaMemPoolExportPointer(ptr): :py:obj:`~.cuMemPoolExportPointer`, :py:obj:`~.cudaMemPoolExportToShareableHandle`, :py:obj:`~.cudaMemPoolImportFromShareableHandle`, :py:obj:`~.cudaMemPoolImportPointer` """ cdef cudaMemPoolPtrExportData exportData = cudaMemPoolPtrExportData() - cptr = utils.HelperInputVoidPtr(ptr) - cdef void* cptr_ptr = cptr.cptr - err = ccudart.cudaMemPoolExportPointer(exportData._ptr, cptr_ptr) + cyptr = utils.HelperInputVoidPtr(ptr) + cdef void* cyptr_ptr = cyptr.cptr + err = cyruntime.cudaMemPoolExportPointer(exportData._ptr, cyptr_ptr) return (cudaError_t(err), exportData) {{endif}} @@ -23934,18 +23934,18 @@ def cudaMemPoolImportPointer(memPool, exportData : Optional[cudaMemPoolPtrExport ----- The :py:obj:`~.cudaFreeAsync` api may be used in the exporting process before the :py:obj:`~.cudaFreeAsync` operation completes in its stream as long as the :py:obj:`~.cudaFreeAsync` in the exporting process specifies a stream with a stream dependency on the importing process's :py:obj:`~.cudaFreeAsync`. 
""" - cdef ccudart.cudaMemPool_t cmemPool + cdef cyruntime.cudaMemPool_t cymemPool if memPool is None: - cmemPool = 0 - elif isinstance(memPool, (cudaMemPool_t,cuda.CUmemoryPool)): + cymemPool = 0 + elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): pmemPool = int(memPool) - cmemPool = pmemPool + cymemPool = pmemPool else: pmemPool = int(cudaMemPool_t(memPool)) - cmemPool = pmemPool + cymemPool = pmemPool cdef void_ptr ptr = 0 - cdef ccudart.cudaMemPoolPtrExportData* cexportData_ptr = exportData._ptr if exportData != None else NULL - err = ccudart.cudaMemPoolImportPointer(&ptr, cmemPool, cexportData_ptr) + cdef cyruntime.cudaMemPoolPtrExportData* cyexportData_ptr = exportData._ptr if exportData != None else NULL + err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) return (cudaError_t(err), ptr) {{endif}} @@ -24012,9 +24012,9 @@ def cudaPointerGetAttributes(ptr): In CUDA 11.0 forward passing host pointer will return :py:obj:`~.cudaMemoryTypeUnregistered` in :py:obj:`~.cudaPointerAttributes.type` and call will return :py:obj:`~.cudaSuccess`. """ cdef cudaPointerAttributes attributes = cudaPointerAttributes() - cptr = utils.HelperInputVoidPtr(ptr) - cdef void* cptr_ptr = cptr.cptr - err = ccudart.cudaPointerGetAttributes(attributes._ptr, cptr_ptr) + cyptr = utils.HelperInputVoidPtr(ptr) + cdef void* cyptr_ptr = cyptr.cptr + err = cyruntime.cudaPointerGetAttributes(attributes._ptr, cyptr_ptr) return (cudaError_t(err), attributes) {{endif}} @@ -24050,7 +24050,7 @@ def cudaDeviceCanAccessPeer(int device, int peerDevice): :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cuDeviceCanAccessPeer` """ cdef int canAccessPeer = 0 - err = ccudart.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) + err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) return (cudaError_t(err), canAccessPeer) {{endif}} @@ -24100,7 +24100,7 @@ def cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags): -------- :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cuCtxEnablePeerAccess` """ - err = ccudart.cudaDeviceEnablePeerAccess(peerDevice, flags) + err = cyruntime.cudaDeviceEnablePeerAccess(peerDevice, flags) return (cudaError_t(err),) {{endif}} @@ -24128,7 +24128,7 @@ def cudaDeviceDisablePeerAccess(int peerDevice): -------- :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cuCtxDisablePeerAccess` """ - err = ccudart.cudaDeviceDisablePeerAccess(peerDevice) + err = cyruntime.cudaDeviceDisablePeerAccess(peerDevice) return (cudaError_t(err),) {{endif}} @@ -24158,16 +24158,16 @@ def cudaGraphicsUnregisterResource(resource): -------- :py:obj:`~.cudaGraphicsD3D9RegisterResource`, :py:obj:`~.cudaGraphicsD3D10RegisterResource`, :py:obj:`~.cudaGraphicsD3D11RegisterResource`, :py:obj:`~.cudaGraphicsGLRegisterBuffer`, :py:obj:`~.cudaGraphicsGLRegisterImage`, :py:obj:`~.cuGraphicsUnregisterResource` """ - cdef ccudart.cudaGraphicsResource_t cresource + cdef cyruntime.cudaGraphicsResource_t cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (cudaGraphicsResource_t,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(cudaGraphicsResource_t(resource)) - cresource = presource - err = ccudart.cudaGraphicsUnregisterResource(cresource) + cyresource = presource + err = cyruntime.cudaGraphicsUnregisterResource(cyresource) return (cudaError_t(err),) {{endif}} @@ 
-24214,16 +24214,16 @@ def cudaGraphicsResourceSetMapFlags(resource, unsigned int flags): -------- :py:obj:`~.cudaGraphicsMapResources`, :py:obj:`~.cuGraphicsResourceSetMapFlags` """ - cdef ccudart.cudaGraphicsResource_t cresource + cdef cyruntime.cudaGraphicsResource_t cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (cudaGraphicsResource_t,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(cudaGraphicsResource_t(resource)) - cresource = presource - err = ccudart.cudaGraphicsResourceSetMapFlags(cresource, flags) + cyresource = presource + err = cyruntime.cudaGraphicsResourceSetMapFlags(cyresource, flags) return (cudaError_t(err),) {{endif}} @@ -24267,26 +24267,26 @@ def cudaGraphicsMapResources(int count, resources, stream): -------- :py:obj:`~.cudaGraphicsResourceGetMappedPointer`, :py:obj:`~.cudaGraphicsSubResourceGetMappedArray`, :py:obj:`~.cudaGraphicsUnmapResources`, :py:obj:`~.cuGraphicsMapResources` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaGraphicsResource_t *cresources + cystream = pstream + cdef cyruntime.cudaGraphicsResource_t *cyresources if resources is None: - cresources = NULL + cyresources = NULL elif isinstance(resources, (cudaGraphicsResource_t,)): presources = resources.getPtr() - cresources = presources + cyresources = presources elif isinstance(resources, (int)): - cresources = resources + cyresources = resources else: - raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) - err = ccudart.cudaGraphicsMapResources(count, cresources, cstream) + raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) + err = cyruntime.cudaGraphicsMapResources(count, cyresources, cystream) return (cudaError_t(err),) {{endif}} @@ -24328,26 +24328,26 @@ def cudaGraphicsUnmapResources(int count, resources, stream): -------- :py:obj:`~.cudaGraphicsMapResources`, :py:obj:`~.cuGraphicsUnmapResources` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaGraphicsResource_t *cresources + cystream = pstream + cdef cyruntime.cudaGraphicsResource_t *cyresources if resources is None: - cresources = NULL + cyresources = NULL elif isinstance(resources, (cudaGraphicsResource_t,)): presources = resources.getPtr() - cresources = presources + cyresources = presources elif isinstance(resources, (int)): - cresources = resources + cyresources = resources else: - raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) - err = ccudart.cudaGraphicsUnmapResources(count, cresources, cstream) + raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) + err = cyruntime.cudaGraphicsUnmapResources(count, cyresources, cystream) return (cudaError_t(err),) {{endif}} 
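One consequence of the cuda -> driver import rename visible throughout these hunks, sketched for illustration: driver-API handles keep working anywhere the runtime wrappers take a stream, because the isinstance checks now name driver.CUstream. Assuming device 0:

    from cuda.bindings import driver, runtime

    err, = driver.cuInit(0)
    err, = runtime.cudaSetDevice(0)           # makes a context current
    err, cu_stream = driver.cuStreamCreate(0)
    # A driver.CUstream passes the (cudaStream_t, driver.CUstream)
    # isinstance check in the wrappers above, so it can be used directly.
    err, = runtime.cudaStreamSynchronize(cu_stream)
    err, = driver.cuStreamDestroy(cu_stream)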
@@ -24380,18 +24380,18 @@ def cudaGraphicsResourceGetMappedPointer(resource): size : int None """ - cdef ccudart.cudaGraphicsResource_t cresource + cdef cyruntime.cudaGraphicsResource_t cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (cudaGraphicsResource_t,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(cudaGraphicsResource_t(resource)) - cresource = presource + cyresource = presource cdef void_ptr devPtr = 0 cdef size_t size = 0 - err = ccudart.cudaGraphicsResourceGetMappedPointer(&devPtr, &size, cresource) + err = cyruntime.cudaGraphicsResourceGetMappedPointer(&devPtr, &size, cyresource) return (cudaError_t(err), devPtr, size) {{endif}} @@ -24436,17 +24436,17 @@ def cudaGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, uns -------- :py:obj:`~.cudaGraphicsResourceGetMappedPointer`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray` """ - cdef ccudart.cudaGraphicsResource_t cresource + cdef cyruntime.cudaGraphicsResource_t cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (cudaGraphicsResource_t,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(cudaGraphicsResource_t(resource)) - cresource = presource + cyresource = presource cdef cudaArray_t array = cudaArray_t() - err = ccudart.cudaGraphicsSubResourceGetMappedArray(array._ptr, cresource, arrayIndex, mipLevel) + err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._ptr, cyresource, arrayIndex, mipLevel) return (cudaError_t(err), array) {{endif}} @@ -24480,17 +24480,17 @@ def cudaGraphicsResourceGetMappedMipmappedArray(resource): -------- :py:obj:`~.cudaGraphicsResourceGetMappedPointer`, :py:obj:`~.cuGraphicsResourceGetMappedMipmappedArray` """ - cdef ccudart.cudaGraphicsResource_t cresource + cdef cyruntime.cudaGraphicsResource_t cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (cudaGraphicsResource_t,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(cudaGraphicsResource_t(resource)) - cresource = presource + cyresource = presource cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() - err = ccudart.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._ptr, cresource) + err = cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._ptr, cyresource) return (cudaError_t(err), mipmappedArray) {{endif}} @@ -24518,18 +24518,18 @@ def cudaGetChannelDesc(array): -------- :py:obj:`~.cudaCreateChannelDesc (C API)`, :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cudaCreateSurfaceObject` """ - cdef ccudart.cudaArray_const_t carray + cdef cyruntime.cudaArray_const_t cyarray if array is None: - carray = 0 + cyarray = 0 elif isinstance(array, (cudaArray_const_t,)): parray = int(array) - carray = parray + cyarray = parray else: parray = int(cudaArray_const_t(array)) - carray = parray + cyarray = parray cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() with nogil: - err = ccudart.cudaGetChannelDesc(desc._ptr, carray) + err = cyruntime.cudaGetChannelDesc(desc._ptr, cyarray) return (cudaError_t(err), desc) {{endif}} @@ -24575,8 +24575,8 @@ def cudaCreateChannelDesc(int x, int y, int z, int w, f not None : cudaChannelFo -------- cudaCreateChannelDesc (C++ API), :py:obj:`~.cudaGetChannelDesc`, :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cudaCreateSurfaceObject` """ - cdef ccudart.cudaChannelFormatKind cf = f.value 
- err = ccudart.cudaCreateChannelDesc(x, y, z, w, cf) + cdef cyruntime.cudaChannelFormatKind cyf = f.value + err = cyruntime.cudaCreateChannelDesc(x, y, z, w, cyf) cdef cudaChannelFormatDesc wrapper = cudaChannelFormatDesc() wrapper._ptr[0] = err return (cudaError_t.cudaSuccess, wrapper) @@ -24817,10 +24817,10 @@ def cudaCreateTextureObject(pResDesc : Optional[cudaResourceDesc], pTexDesc : Op :py:obj:`~.cudaDestroyTextureObject`, :py:obj:`~.cuTexObjectCreate` """ cdef cudaTextureObject_t pTexObject = cudaTextureObject_t() - cdef ccudart.cudaResourceDesc* cpResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL - cdef ccudart.cudaTextureDesc* cpTexDesc_ptr = pTexDesc._ptr if pTexDesc != None else NULL - cdef ccudart.cudaResourceViewDesc* cpResViewDesc_ptr = pResViewDesc._ptr if pResViewDesc != None else NULL - err = ccudart.cudaCreateTextureObject(pTexObject._ptr, cpResDesc_ptr, cpTexDesc_ptr, cpResViewDesc_ptr) + cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL + cdef cyruntime.cudaTextureDesc* cypTexDesc_ptr = pTexDesc._ptr if pTexDesc != None else NULL + cdef cyruntime.cudaResourceViewDesc* cypResViewDesc_ptr = pResViewDesc._ptr if pResViewDesc != None else NULL + err = cyruntime.cudaCreateTextureObject(pTexObject._ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) return (cudaError_t(err), pTexObject) {{endif}} @@ -24846,17 +24846,17 @@ def cudaDestroyTextureObject(texObject): -------- :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectDestroy` """ - cdef ccudart.cudaTextureObject_t ctexObject + cdef cyruntime.cudaTextureObject_t cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (cudaTextureObject_t,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(cudaTextureObject_t(texObject)) - ctexObject = ptexObject + cytexObject = ptexObject with nogil: - err = ccudart.cudaDestroyTextureObject(ctexObject) + err = cyruntime.cudaDestroyTextureObject(cytexObject) return (cudaError_t(err),) {{endif}} @@ -24886,18 +24886,18 @@ def cudaGetTextureObjectResourceDesc(texObject): -------- :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectGetResourceDesc` """ - cdef ccudart.cudaTextureObject_t ctexObject + cdef cyruntime.cudaTextureObject_t cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (cudaTextureObject_t,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(cudaTextureObject_t(texObject)) - ctexObject = ptexObject + cytexObject = ptexObject cdef cudaResourceDesc pResDesc = cudaResourceDesc() with nogil: - err = ccudart.cudaGetTextureObjectResourceDesc(pResDesc._ptr, ctexObject) + err = cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._ptr, cytexObject) return (cudaError_t(err), pResDesc) {{endif}} @@ -24927,18 +24927,18 @@ def cudaGetTextureObjectTextureDesc(texObject): -------- :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectGetTextureDesc` """ - cdef ccudart.cudaTextureObject_t ctexObject + cdef cyruntime.cudaTextureObject_t cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (cudaTextureObject_t,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(cudaTextureObject_t(texObject)) - ctexObject = ptexObject + cytexObject = ptexObject cdef cudaTextureDesc pTexDesc = cudaTextureDesc() with nogil: - err = 
ccudart.cudaGetTextureObjectTextureDesc(pTexDesc._ptr, ctexObject) + err = cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._ptr, cytexObject) return (cudaError_t(err), pTexDesc) {{endif}} @@ -24969,17 +24969,17 @@ def cudaGetTextureObjectResourceViewDesc(texObject): -------- :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectGetResourceViewDesc` """ - cdef ccudart.cudaTextureObject_t ctexObject + cdef cyruntime.cudaTextureObject_t cytexObject if texObject is None: - ctexObject = 0 + cytexObject = 0 elif isinstance(texObject, (cudaTextureObject_t,)): ptexObject = int(texObject) - ctexObject = ptexObject + cytexObject = ptexObject else: ptexObject = int(cudaTextureObject_t(texObject)) - ctexObject = ptexObject + cytexObject = ptexObject cdef cudaResourceViewDesc pResViewDesc = cudaResourceViewDesc() - err = ccudart.cudaGetTextureObjectResourceViewDesc(pResViewDesc._ptr, ctexObject) + err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._ptr, cytexObject) return (cudaError_t(err), pResViewDesc) {{endif}} @@ -25017,9 +25017,9 @@ def cudaCreateSurfaceObject(pResDesc : Optional[cudaResourceDesc]): :py:obj:`~.cudaDestroySurfaceObject`, :py:obj:`~.cuSurfObjectCreate` """ cdef cudaSurfaceObject_t pSurfObject = cudaSurfaceObject_t() - cdef ccudart.cudaResourceDesc* cpResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL + cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._ptr if pResDesc != None else NULL with nogil: - err = ccudart.cudaCreateSurfaceObject(pSurfObject._ptr, cpResDesc_ptr) + err = cyruntime.cudaCreateSurfaceObject(pSurfObject._ptr, cypResDesc_ptr) return (cudaError_t(err), pSurfObject) {{endif}} @@ -25046,17 +25046,17 @@ def cudaDestroySurfaceObject(surfObject): -------- :py:obj:`~.cudaCreateSurfaceObject`, :py:obj:`~.cuSurfObjectDestroy` """ - cdef ccudart.cudaSurfaceObject_t csurfObject + cdef cyruntime.cudaSurfaceObject_t cysurfObject if surfObject is None: - csurfObject = 0 + cysurfObject = 0 elif isinstance(surfObject, (cudaSurfaceObject_t,)): psurfObject = int(surfObject) - csurfObject = psurfObject + cysurfObject = psurfObject else: psurfObject = int(cudaSurfaceObject_t(surfObject)) - csurfObject = psurfObject + cysurfObject = psurfObject with nogil: - err = ccudart.cudaDestroySurfaceObject(csurfObject) + err = cyruntime.cudaDestroySurfaceObject(cysurfObject) return (cudaError_t(err),) {{endif}} @@ -25083,17 +25083,17 @@ def cudaGetSurfaceObjectResourceDesc(surfObject): -------- :py:obj:`~.cudaCreateSurfaceObject`, :py:obj:`~.cuSurfObjectGetResourceDesc` """ - cdef ccudart.cudaSurfaceObject_t csurfObject + cdef cyruntime.cudaSurfaceObject_t cysurfObject if surfObject is None: - csurfObject = 0 + cysurfObject = 0 elif isinstance(surfObject, (cudaSurfaceObject_t,)): psurfObject = int(surfObject) - csurfObject = psurfObject + cysurfObject = psurfObject else: psurfObject = int(cudaSurfaceObject_t(surfObject)) - csurfObject = psurfObject + cysurfObject = psurfObject cdef cudaResourceDesc pResDesc = cudaResourceDesc() - err = ccudart.cudaGetSurfaceObjectResourceDesc(pResDesc._ptr, csurfObject) + err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._ptr, cysurfObject) return (cudaError_t(err), pResDesc) {{endif}} @@ -25123,7 +25123,7 @@ def cudaDriverGetVersion(): :py:obj:`~.cudaRuntimeGetVersion`, :py:obj:`~.cuDriverGetVersion` """ cdef int driverVersion = 0 - err = ccudart.cudaDriverGetVersion(&driverVersion) + err = cyruntime.cudaDriverGetVersion(&driverVersion) return (cudaError_t(err), driverVersion) {{endif}} @@ -25156,7 +25156,7 @@ def 
cudaRuntimeGetVersion(): :py:obj:`~.cudaDriverGetVersion`, :py:obj:`~.cuDriverGetVersion` """ cdef int runtimeVersion = 0 - err = ccudart.cudaRuntimeGetVersion(&runtimeVersion) + err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) return (cudaError_t(err), runtimeVersion) {{endif}} @@ -25185,7 +25185,7 @@ def cudaGraphCreate(unsigned int flags): :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphDestroy`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphClone` """ cdef cudaGraph_t pGraph = cudaGraph_t() - err = ccudart.cudaGraphCreate(pGraph._ptr, flags) + err = cyruntime.cudaGraphCreate(pGraph._ptr, flags) return (cudaError_t(err), pGraph) {{endif}} @@ -25279,31 +25279,31 @@ def cudaGraphAddKernelNode(graph, pDependencies : Optional[Tuple[cudaGraphNode_t Kernels launched using graphs must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects. """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaKernelNodeParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphAddKernelNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cpNodeParams_ptr) - if cpDependencies is not NULL: - 
free(cpDependencies) + cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphAddKernelNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cypNodeParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -25340,17 +25340,17 @@ def cudaGraphKernelNodeGetParams(node): -------- :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphKernelNodeSetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaKernelNodeParams pNodeParams = cudaKernelNodeParams() - err = ccudart.cudaGraphKernelNodeGetParams(cnode, pNodeParams._ptr) + err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._ptr) return (cudaError_t(err), pNodeParams) {{endif}} @@ -25378,17 +25378,17 @@ def cudaGraphKernelNodeSetParams(node, pNodeParams : Optional[cudaKernelNodePara -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphKernelNodeGetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaKernelNodeParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphKernelNodeSetParams(cnode, cpNodeParams_ptr) + cynode = pnode + cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphKernelNodeSetParams(cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -25418,25 +25418,25 @@ def cudaGraphKernelNodeCopyAttributes(hSrc, hDst): -------- :py:obj:`~.cudaAccessPolicyWindow` """ - cdef ccudart.cudaGraphNode_t chDst + cdef cyruntime.cudaGraphNode_t cyhDst if hDst is None: - chDst = 0 - elif isinstance(hDst, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhDst = 0 + elif isinstance(hDst, (cudaGraphNode_t,driver.CUgraphNode)): phDst = int(hDst) - chDst = phDst + cyhDst = phDst else: phDst = int(cudaGraphNode_t(hDst)) - chDst = phDst - cdef ccudart.cudaGraphNode_t chSrc + cyhDst = phDst + cdef cyruntime.cudaGraphNode_t cyhSrc if hSrc is None: - chSrc = 0 - elif isinstance(hSrc, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhSrc = 0 + elif isinstance(hSrc, (cudaGraphNode_t,driver.CUgraphNode)): phSrc = int(hSrc) - chSrc = phSrc + cyhSrc = phSrc else: phSrc = int(cudaGraphNode_t(hSrc)) - chSrc = phSrc - err = ccudart.cudaGraphKernelNodeCopyAttributes(chSrc, chDst) + cyhSrc = phSrc + err = cyruntime.cudaGraphKernelNodeCopyAttributes(cyhSrc, cyhDst) return (cudaError_t(err),) {{endif}} @@ -25467,18 +25467,18 @@ def cudaGraphKernelNodeGetAttribute(hNode, attr not None : cudaKernelNodeAttrID) -------- :py:obj:`~.cudaAccessPolicyWindow` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif 
isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaKernelNodeAttrID cattr = attr.value + cyhNode = phNode + cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value cdef cudaKernelNodeAttrValue value_out = cudaKernelNodeAttrValue() - err = ccudart.cudaGraphKernelNodeGetAttribute(chNode, cattr, value_out._ptr) + err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._ptr) return (cudaError_t(err), value_out) {{endif}} @@ -25509,18 +25509,18 @@ def cudaGraphKernelNodeSetAttribute(hNode, attr not None : cudaKernelNodeAttrID, -------- :py:obj:`~.cudaAccessPolicyWindow` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaKernelNodeAttrID cattr = attr.value - cdef ccudart.cudaKernelNodeAttrValue* cvalue_ptr = value._ptr if value != None else NULL - err = ccudart.cudaGraphKernelNodeSetAttribute(chNode, cattr, cvalue_ptr) + cyhNode = phNode + cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value + cdef cyruntime.cudaKernelNodeAttrValue* cyvalue_ptr = value._ptr if value != None else NULL + err = cyruntime.cudaGraphKernelNodeSetAttribute(cyhNode, cyattr, cyvalue_ptr) return (cudaError_t(err),) {{endif}} @@ -25568,31 +25568,31 @@ def cudaGraphAddMemcpyNode(graph, pDependencies : Optional[Tuple[cudaGraphNode_t :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaGraphAddMemcpyNodeToSymbol`, :py:obj:`~.cudaGraphAddMemcpyNodeFromSymbol`, :py:obj:`~.cudaGraphAddMemcpyNode1D`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if 
cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaMemcpy3DParms* cpCopyParams_ptr = pCopyParams._ptr if pCopyParams != None else NULL - err = ccudart.cudaGraphAddMemcpyNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cpCopyParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cdef cyruntime.cudaMemcpy3DParms* cypCopyParams_ptr = pCopyParams._ptr if pCopyParams != None else NULL + err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cypCopyParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -25656,34 +25656,34 @@ def cudaGraphAddMemcpyNode1D(graph, pDependencies : Optional[Tuple[cudaGraphNode :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams1D`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + 
str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] - cdst = utils.HelperInputVoidPtr(dst) - cdef void* cdst_ptr = cdst.cptr - csrc = utils.HelperInputVoidPtr(src) - cdef void* csrc_ptr = csrc.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaGraphAddMemcpyNode1D(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cdst_ptr, csrc_ptr, count, ckind) - if cpDependencies is not NULL: - free(cpDependencies) + cypDependencies[idx] = (pDependencies[idx])._ptr[0] + cydst = utils.HelperInputVoidPtr(dst) + cdef void* cydst_ptr = cydst.cptr + cysrc = utils.HelperInputVoidPtr(src) + cdef void* cysrc_ptr = cysrc.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -25711,17 +25711,17 @@ def cudaGraphMemcpyNodeGetParams(node): -------- :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeSetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaMemcpy3DParms pNodeParams = cudaMemcpy3DParms() - err = ccudart.cudaGraphMemcpyNodeGetParams(cnode, pNodeParams._ptr) + err = cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._ptr) return (cudaError_t(err), pNodeParams) {{endif}} @@ -25749,17 +25749,17 @@ def cudaGraphMemcpyNodeSetParams(node, pNodeParams : Optional[cudaMemcpy3DParms] -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaGraphMemcpyNodeSetParamsToSymbol`, :py:obj:`~.cudaGraphMemcpyNodeSetParamsFromSymbol`, :py:obj:`~.cudaGraphMemcpyNodeSetParams1D`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeGetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaMemcpy3DParms* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphMemcpyNodeSetParams(cnode, cpNodeParams_ptr) + cynode = pnode + cdef cyruntime.cudaMemcpy3DParms* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphMemcpyNodeSetParams(cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -25807,21 +25807,21 @@ def cudaGraphMemcpyNodeSetParams1D(node, dst, src, size_t count, kind not None : -------- :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeGetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, 
(cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdst = utils.HelperInputVoidPtr(dst) - cdef void* cdst_ptr = cdst.cptr - csrc = utils.HelperInputVoidPtr(src) - cdef void* csrc_ptr = csrc.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaGraphMemcpyNodeSetParams1D(cnode, cdst_ptr, csrc_ptr, count, ckind) + cynode = pnode + cydst = utils.HelperInputVoidPtr(dst) + cdef void* cydst_ptr = cydst.cptr + cysrc = utils.HelperInputVoidPtr(src) + cdef void* cysrc_ptr = cysrc.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaGraphMemcpyNodeSetParams1D(cynode, cydst_ptr, cysrc_ptr, count, cykind) return (cudaError_t(err),) {{endif}} @@ -25863,31 +25863,31 @@ def cudaGraphAddMemsetNode(graph, pDependencies : Optional[Tuple[cudaGraphNode_t :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaGraphMemsetNodeGetParams`, :py:obj:`~.cudaGraphMemsetNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaMemsetParams* cpMemsetParams_ptr = pMemsetParams._ptr if pMemsetParams != None else NULL - err = ccudart.cudaGraphAddMemsetNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, 
cpMemsetParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cdef cyruntime.cudaMemsetParams* cypMemsetParams_ptr = pMemsetParams._ptr if pMemsetParams != None else NULL + err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cypMemsetParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -25915,17 +25915,17 @@ def cudaGraphMemsetNodeGetParams(node): -------- :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphMemsetNodeSetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaMemsetParams pNodeParams = cudaMemsetParams() - err = ccudart.cudaGraphMemsetNodeGetParams(cnode, pNodeParams._ptr) + err = cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._ptr) return (cudaError_t(err), pNodeParams) {{endif}} @@ -25953,17 +25953,17 @@ def cudaGraphMemsetNodeSetParams(node, pNodeParams : Optional[cudaMemsetParams]) -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphMemsetNodeGetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaMemsetParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphMemsetNodeSetParams(cnode, cpNodeParams_ptr) + cynode = pnode + cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphMemsetNodeSetParams(cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -26006,31 +26006,31 @@ def cudaGraphAddHostNode(graph, pDependencies : Optional[Tuple[cudaGraphNode_t] :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cudaGraphHostNodeGetParams`, :py:obj:`~.cudaGraphHostNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph 
= 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaHostNodeParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphAddHostNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cpNodeParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphAddHostNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cypNodeParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -26058,17 +26058,17 @@ def cudaGraphHostNodeGetParams(node): -------- :py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphHostNodeSetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaHostNodeParams pNodeParams = cudaHostNodeParams() - err = ccudart.cudaGraphHostNodeGetParams(cnode, pNodeParams._ptr) + err = cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._ptr) return (cudaError_t(err), pNodeParams) {{endif}} @@ -26096,17 +26096,17 @@ def cudaGraphHostNodeSetParams(node, pNodeParams : Optional[cudaHostNodeParams]) -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphHostNodeGetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaHostNodeParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphHostNodeSetParams(cnode, cpNodeParams_ptr) + cynode = pnode + cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = 
pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphHostNodeSetParams(cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -26151,40 +26151,40 @@ def cudaGraphAddChildGraphNode(graph, pDependencies : Optional[Tuple[cudaGraphNo -------- :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphChildGraphNodeGetGraph`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphClone` """ - cdef ccudart.cudaGraph_t cchildGraph + cdef cyruntime.cudaGraph_t cychildGraph if childGraph is None: - cchildGraph = 0 - elif isinstance(childGraph, (cudaGraph_t,cuda.CUgraph)): + cychildGraph = 0 + elif isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): pchildGraph = int(childGraph) - cchildGraph = pchildGraph + cychildGraph = pchildGraph else: pchildGraph = int(cudaGraph_t(childGraph)) - cchildGraph = pchildGraph + cychildGraph = pchildGraph pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - err = ccudart.cudaGraphAddChildGraphNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cchildGraph) - if cpDependencies is not NULL: - free(cpDependencies) + err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cychildGraph) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -26217,17 +26217,17 @@ def 
cudaGraphChildGraphNodeGetGraph(node): -------- :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphNodeFindInClone` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaGraph_t pGraph = cudaGraph_t() - err = ccudart.cudaGraphChildGraphNodeGetGraph(cnode, pGraph._ptr) + err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._ptr) return (cudaError_t(err), pGraph) {{endif}} @@ -26271,30 +26271,30 @@ def cudaGraphAddEmptyNode(graph, pDependencies : Optional[Tuple[cudaGraphNode_t] :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - err = ccudart.cudaGraphAddEmptyNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies) - if cpDependencies is not NULL: - free(cpDependencies) + err = cyruntime.cudaGraphAddEmptyNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -26338,40 +26338,40 @@ def 
cudaGraphAddEventRecordNode(graph, pDependencies : Optional[Tuple[cudaGraphN -------- :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent + cyevent = pevent pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - err = ccudart.cudaGraphAddEventRecordNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cevent) - if cpDependencies is not NULL: - free(cpDependencies) + err = cyruntime.cudaGraphAddEventRecordNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cyevent) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -26399,17 +26399,17 @@ def cudaGraphEventRecordNodeGetEvent(node): -------- :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphEventWaitNodeGetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, 
:py:obj:`~.cudaStreamWaitEvent` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaEvent_t event_out = cudaEvent_t() - err = ccudart.cudaGraphEventRecordNodeGetEvent(cnode, event_out._ptr) + err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._ptr) return (cudaError_t(err), event_out) {{endif}} @@ -26437,25 +26437,25 @@ def cudaGraphEventRecordNodeSetEvent(node, event): -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphEventRecordNodeGetEvent`, :py:obj:`~.cudaGraphEventWaitNodeSetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - cdef ccudart.cudaGraphNode_t cnode + cyevent = pevent + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - err = ccudart.cudaGraphEventRecordNodeSetEvent(cnode, cevent) + cynode = pnode + err = cyruntime.cudaGraphEventRecordNodeSetEvent(cynode, cyevent) return (cudaError_t(err),) {{endif}} @@ -26502,40 +26502,40 @@ def cudaGraphAddEventWaitNode(graph, pDependencies : Optional[Tuple[cudaGraphNod -------- :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent + cyevent = pevent pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, 
(cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - err = ccudart.cudaGraphAddEventWaitNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cevent) - if cpDependencies is not NULL: - free(cpDependencies) + err = cyruntime.cudaGraphAddEventWaitNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cyevent) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -26563,17 +26563,17 @@ def cudaGraphEventWaitNodeGetEvent(node): -------- :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaGraphEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphEventRecordNodeGetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaEvent_t event_out = cudaEvent_t() - err = ccudart.cudaGraphEventWaitNodeGetEvent(cnode, event_out._ptr) + err = cyruntime.cudaGraphEventWaitNodeGetEvent(cynode, event_out._ptr) return (cudaError_t(err), event_out) {{endif}} @@ -26601,25 +26601,25 @@ def cudaGraphEventWaitNodeSetEvent(node, event): -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaGraphEventWaitNodeGetEvent`, :py:obj:`~.cudaGraphEventRecordNodeSetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - cdef ccudart.cudaGraphNode_t cnode + cyevent = pevent + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - err 
= ccudart.cudaGraphEventWaitNodeSetEvent(cnode, cevent) + cynode = pnode + err = cyruntime.cudaGraphEventWaitNodeSetEvent(cynode, cyevent) return (cudaError_t(err),) {{endif}} @@ -26663,31 +26663,31 @@ def cudaGraphAddExternalSemaphoresSignalNode(graph, pDependencies : Optional[Tup :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphExternalSemaphoresSignalNodeGetParams`, :py:obj:`~.cudaGraphExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaExternalSemaphoreSignalNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphAddExternalSemaphoresSignalNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cnodeParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cdef cyruntime.cudaExternalSemaphoreSignalNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphAddExternalSemaphoresSignalNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if 
len(pDependencies) == 1 else cypDependencies, numDependencies, cynodeParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -26721,17 +26721,17 @@ def cudaGraphExternalSemaphoresSignalNodeGetParams(hNode): -------- :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cudaGraphExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode + cyhNode = phNode cdef cudaExternalSemaphoreSignalNodeParams params_out = cudaExternalSemaphoreSignalNodeParams() - err = ccudart.cudaGraphExternalSemaphoresSignalNodeGetParams(chNode, params_out._ptr) + err = cyruntime.cudaGraphExternalSemaphoresSignalNodeGetParams(cyhNode, params_out._ptr) return (cudaError_t(err), params_out) {{endif}} @@ -26760,17 +26760,17 @@ def cudaGraphExternalSemaphoresSignalNodeSetParams(hNode, nodeParams : Optional[ -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cudaGraphExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaExternalSemaphoreSignalNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphExternalSemaphoresSignalNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cyruntime.cudaExternalSemaphoreSignalNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphExternalSemaphoresSignalNodeSetParams(cyhNode, cynodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -26814,31 +26814,31 @@ def cudaGraphAddExternalSemaphoresWaitNode(graph, pDependencies : Optional[Tuple :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphExternalSemaphoresWaitNodeGetParams`, :py:obj:`~.cudaGraphExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 
'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaExternalSemaphoreWaitNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphAddExternalSemaphoresWaitNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cnodeParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cdef cyruntime.cudaExternalSemaphoreWaitNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphAddExternalSemaphoresWaitNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cynodeParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -26872,17 +26872,17 @@ def cudaGraphExternalSemaphoresWaitNodeGetParams(hNode): -------- :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaGraphExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode + cyhNode = phNode cdef cudaExternalSemaphoreWaitNodeParams params_out = cudaExternalSemaphoreWaitNodeParams() - err = ccudart.cudaGraphExternalSemaphoresWaitNodeGetParams(chNode, params_out._ptr) + err = 
cyruntime.cudaGraphExternalSemaphoresWaitNodeGetParams(cyhNode, params_out._ptr) return (cudaError_t(err), params_out) {{endif}} @@ -26911,17 +26911,17 @@ def cudaGraphExternalSemaphoresWaitNodeSetParams(hNode, nodeParams : Optional[cu -------- :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaGraphExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaExternalSemaphoreWaitNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphExternalSemaphoresWaitNodeSetParams(chNode, cnodeParams_ptr) + cyhNode = phNode + cdef cyruntime.cudaExternalSemaphoreWaitNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphExternalSemaphoresWaitNodeSetParams(cyhNode, cynodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -27003,31 +27003,31 @@ def cudaGraphAddMemAllocNode(graph, pDependencies : Optional[Tuple[cudaGraphNode :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphAddMemFreeNode`, :py:obj:`~.cudaGraphMemAllocNodeGetParams`, :py:obj:`~.cudaDeviceGraphMemTrim`, :py:obj:`~.cudaDeviceGetGraphMemAttribute`, :py:obj:`~.cudaDeviceSetGraphMemAttribute`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + 
str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaMemAllocNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphAddMemAllocNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cnodeParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cdef cyruntime.cudaMemAllocNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphAddMemAllocNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cynodeParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -27058,17 +27058,17 @@ def cudaGraphMemAllocNodeGetParams(node): -------- :py:obj:`~.cudaGraphAddMemAllocNode`, :py:obj:`~.cudaGraphMemFreeNodeGetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef cudaMemAllocNodeParams params_out = cudaMemAllocNodeParams() - err = ccudart.cudaGraphMemAllocNodeGetParams(cnode, params_out._ptr) + err = cyruntime.cudaGraphMemAllocNodeGetParams(cynode, params_out._ptr) return (cudaError_t(err), params_out) {{endif}} @@ -27128,32 +27128,32 @@ def cudaGraphAddMemFreeNode(graph, pDependencies : Optional[Tuple[cudaGraphNode_ :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphAddMemAllocNode`, :py:obj:`~.cudaGraphMemFreeNodeGetParams`, :py:obj:`~.cudaDeviceGraphMemTrim`, :py:obj:`~.cudaDeviceGetGraphMemAttribute`, :py:obj:`~.cudaDeviceSetGraphMemAttribute`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or 
List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdptr = utils.HelperInputVoidPtr(dptr) - cdef void* cdptr_ptr = cdptr.cptr - err = ccudart.cudaGraphAddMemFreeNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cdptr_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cydptr = utils.HelperInputVoidPtr(dptr) + cdef void* cydptr_ptr = cydptr.cptr + err = cyruntime.cudaGraphAddMemFreeNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cydptr_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -27181,18 +27181,18 @@ def cudaGraphMemFreeNodeGetParams(node): -------- :py:obj:`~.cudaGraphAddMemFreeNode`, :py:obj:`~.cudaGraphMemFreeNodeGetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode + cynode = pnode cdef void_ptr dptr_out = 0 - cdef void* cdptr_out_ptr = &dptr_out - err = ccudart.cudaGraphMemFreeNodeGetParams(cnode, cdptr_out_ptr) + cdef void* cydptr_out_ptr = &dptr_out + err = cyruntime.cudaGraphMemFreeNodeGetParams(cynode, cydptr_out_ptr) return (cudaError_t(err), dptr_out) {{endif}} @@ -27220,7 +27220,7 @@ def cudaDeviceGraphMemTrim(int device): -------- :py:obj:`~.cudaGraphAddMemAllocNode`, :py:obj:`~.cudaGraphAddMemFreeNode`, :py:obj:`~.cudaDeviceGetGraphMemAttribute`, :py:obj:`~.cudaDeviceSetGraphMemAttribute`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync` """ - err = ccudart.cudaDeviceGraphMemTrim(device) + err = cyruntime.cudaDeviceGraphMemTrim(device) return (cudaError_t(err),) {{endif}} @@ -27265,11 +27265,11 @@ def cudaDeviceGetGraphMemAttribute(int device, attr not None : cudaGraphMemAttri -------- :py:obj:`~.cudaDeviceSetGraphMemAttribute`, :py:obj:`~.cudaGraphAddMemAllocNode`, :py:obj:`~.cudaGraphAddMemFreeNode`, :py:obj:`~.cudaDeviceGraphMemTrim`, :py:obj:`~.cudaMallocAsync`, 
:py:obj:`~.cudaFreeAsync` """ - cdef ccudart.cudaGraphMemAttributeType cattr = attr.value - cdef utils.HelperCUgraphMem_attribute cvalue = utils.HelperCUgraphMem_attribute(attr, 0, is_getter=True) - cdef void* cvalue_ptr = cvalue.cptr - err = ccudart.cudaDeviceGetGraphMemAttribute(device, cattr, cvalue_ptr) - return (cudaError_t(err), cvalue.pyObj()) + cdef cyruntime.cudaGraphMemAttributeType cyattr = attr.value + cdef utils.HelperCUgraphMem_attribute cyvalue = utils.HelperCUgraphMem_attribute(attr, 0, is_getter=True) + cdef void* cyvalue_ptr = cyvalue.cptr + err = cyruntime.cudaDeviceGetGraphMemAttribute(device, cyattr, cyvalue_ptr) + return (cudaError_t(err), cyvalue.pyObj()) {{endif}} {{if 'cudaDeviceSetGraphMemAttribute' in found_functions}} @@ -27306,10 +27306,10 @@ def cudaDeviceSetGraphMemAttribute(int device, attr not None : cudaGraphMemAttri -------- :py:obj:`~.cudaDeviceGetGraphMemAttribute`, :py:obj:`~.cudaGraphAddMemAllocNode`, :py:obj:`~.cudaGraphAddMemFreeNode`, :py:obj:`~.cudaDeviceGraphMemTrim`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync` """ - cdef ccudart.cudaGraphMemAttributeType cattr = attr.value - cdef utils.HelperCUgraphMem_attribute cvalue = utils.HelperCUgraphMem_attribute(attr, value, is_getter=False) - cdef void* cvalue_ptr = cvalue.cptr - err = ccudart.cudaDeviceSetGraphMemAttribute(device, cattr, cvalue_ptr) + cdef cyruntime.cudaGraphMemAttributeType cyattr = attr.value + cdef utils.HelperCUgraphMem_attribute cyvalue = utils.HelperCUgraphMem_attribute(attr, value, is_getter=False) + cdef void* cyvalue_ptr = cyvalue.cptr + err = cyruntime.cudaDeviceSetGraphMemAttribute(device, cyattr, cyvalue_ptr) return (cudaError_t(err),) {{endif}} @@ -27343,17 +27343,17 @@ def cudaGraphClone(originalGraph): -------- :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphNodeFindInClone` """ - cdef ccudart.cudaGraph_t coriginalGraph + cdef cyruntime.cudaGraph_t cyoriginalGraph if originalGraph is None: - coriginalGraph = 0 - elif isinstance(originalGraph, (cudaGraph_t,cuda.CUgraph)): + cyoriginalGraph = 0 + elif isinstance(originalGraph, (cudaGraph_t,driver.CUgraph)): poriginalGraph = int(originalGraph) - coriginalGraph = poriginalGraph + cyoriginalGraph = poriginalGraph else: poriginalGraph = int(cudaGraph_t(originalGraph)) - coriginalGraph = poriginalGraph + cyoriginalGraph = poriginalGraph cdef cudaGraph_t pGraphClone = cudaGraph_t() - err = ccudart.cudaGraphClone(pGraphClone._ptr, coriginalGraph) + err = cyruntime.cudaGraphClone(pGraphClone._ptr, cyoriginalGraph) return (cudaError_t(err), pGraphClone) {{endif}} @@ -27390,26 +27390,26 @@ def cudaGraphNodeFindInClone(originalNode, clonedGraph): -------- :py:obj:`~.cudaGraphClone` """ - cdef ccudart.cudaGraph_t cclonedGraph + cdef cyruntime.cudaGraph_t cyclonedGraph if clonedGraph is None: - cclonedGraph = 0 - elif isinstance(clonedGraph, (cudaGraph_t,cuda.CUgraph)): + cyclonedGraph = 0 + elif isinstance(clonedGraph, (cudaGraph_t,driver.CUgraph)): pclonedGraph = int(clonedGraph) - cclonedGraph = pclonedGraph + cyclonedGraph = pclonedGraph else: pclonedGraph = int(cudaGraph_t(clonedGraph)) - cclonedGraph = pclonedGraph - cdef ccudart.cudaGraphNode_t coriginalNode + cyclonedGraph = pclonedGraph + cdef cyruntime.cudaGraphNode_t cyoriginalNode if originalNode is None: - coriginalNode = 0 - elif isinstance(originalNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyoriginalNode = 0 + elif isinstance(originalNode, (cudaGraphNode_t,driver.CUgraphNode)): poriginalNode = int(originalNode) - coriginalNode = poriginalNode + 
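# [Editor's sketch] The hunks above repeat a single coercion idiom for every
# handle parameter: the argument may be None (mapped to a NULL handle), an
# instance of the runtime wrapper or its driver-layer counterpart (e.g.
# cudaGraph_t / driver.CUgraph), or any int-convertible value, which is
# round-tripped through the wrapper for validation. A minimal pure-Python
# sketch of that idiom; coerce_handle is an illustrative name, not part of
# the generated bindings:

def coerce_handle(obj, wrapper_types, make_wrapper):
    """Return the raw integer handle for obj; None maps to 0 (NULL)."""
    if obj is None:
        return 0
    if isinstance(obj, wrapper_types):
        return int(obj)                # known wrapper: unwrap directly
    return int(make_wrapper(obj))      # anything else: validate via the wrapper

# e.g.: cyoriginalGraph = coerce_handle(originalGraph,
#           (cudaGraph_t, driver.CUgraph), cudaGraph_t)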
cyoriginalNode = poriginalNode else: poriginalNode = int(cudaGraphNode_t(originalNode)) - coriginalNode = poriginalNode + cyoriginalNode = poriginalNode cdef cudaGraphNode_t pNode = cudaGraphNode_t() - err = ccudart.cudaGraphNodeFindInClone(pNode._ptr, coriginalNode, cclonedGraph) + err = cyruntime.cudaGraphNodeFindInClone(pNode._ptr, cyoriginalNode, cyclonedGraph) return (cudaError_t(err), pNode) {{endif}} @@ -27437,17 +27437,17 @@ def cudaGraphNodeGetType(node): -------- :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphChildGraphNodeGetGraph`, :py:obj:`~.cudaGraphKernelNodeGetParams`, :py:obj:`~.cudaGraphKernelNodeSetParams`, :py:obj:`~.cudaGraphHostNodeGetParams`, :py:obj:`~.cudaGraphHostNodeSetParams`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphMemsetNodeGetParams`, :py:obj:`~.cudaGraphMemsetNodeSetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphNodeType pType - err = ccudart.cudaGraphNodeGetType(cnode, &pType) + cynode = pnode + cdef cyruntime.cudaGraphNodeType pType + err = cyruntime.cudaGraphNodeGetType(cynode, &pType) return (cudaError_t(err), cudaGraphNodeType(pType)) {{endif}} @@ -27485,26 +27485,26 @@ def cudaGraphGetNodes(graph, size_t numNodes = 0): :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphNodeGetType`, :py:obj:`~.cudaGraphNodeGetDependencies`, :py:obj:`~.cudaGraphNodeGetDependentNodes` """ cdef size_t _graph_length = numNodes - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cnodes = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cynodes = NULL pynodes = [] if _graph_length != 0: - cnodes = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cnodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - err = ccudart.cudaGraphGetNodes(cgraph, cnodes, &numNodes) + cynodes = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cynodes is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + err = cyruntime.cudaGraphGetNodes(cygraph, cynodes, &numNodes) if cudaError_t(err) == cudaError_t(0): - pynodes = [cudaGraphNode_t(init_value=cnodes[idx]) for idx in range(_graph_length)] - if cnodes is not NULL: - free(cnodes) + pynodes = [cudaGraphNode_t(init_value=cynodes[idx]) for idx in range(_graph_length)] + if cynodes is not NULL: + free(cynodes) return (cudaError_t(err), pynodes, numNodes) {{endif}} @@ -27542,26 +27542,26 @@ def cudaGraphGetRootNodes(graph, size_t pNumRootNodes = 0): :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphNodeGetType`, :py:obj:`~.cudaGraphNodeGetDependencies`, 
:py:obj:`~.cudaGraphNodeGetDependentNodes` """ cdef size_t _graph_length = pNumRootNodes - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cpRootNodes = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cypRootNodes = NULL pypRootNodes = [] if _graph_length != 0: - cpRootNodes = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cpRootNodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - err = ccudart.cudaGraphGetRootNodes(cgraph, cpRootNodes, &pNumRootNodes) + cypRootNodes = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cypRootNodes is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + err = cyruntime.cudaGraphGetRootNodes(cygraph, cypRootNodes, &pNumRootNodes) if cudaError_t(err) == cudaError_t(0): - pypRootNodes = [cudaGraphNode_t(init_value=cpRootNodes[idx]) for idx in range(_graph_length)] - if cpRootNodes is not NULL: - free(cpRootNodes) + pypRootNodes = [cudaGraphNode_t(init_value=cypRootNodes[idx]) for idx in range(_graph_length)] + if cypRootNodes is not NULL: + free(cypRootNodes) return (cudaError_t(err), pypRootNodes, pNumRootNodes) {{endif}} @@ -27603,36 +27603,36 @@ def cudaGraphGetEdges(graph, size_t numEdges = 0): :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphAddDependencies`, :py:obj:`~.cudaGraphRemoveDependencies`, :py:obj:`~.cudaGraphNodeGetDependencies`, :py:obj:`~.cudaGraphNodeGetDependentNodes` """ cdef size_t _graph_length = numEdges - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cfrom_ = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cyfrom_ = NULL pyfrom_ = [] if _graph_length != 0: - cfrom_ = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - cdef ccudart.cudaGraphNode_t* cto = NULL + cyfrom_ = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + cdef cyruntime.cudaGraphNode_t* cyto = NULL pyto = [] if _graph_length != 0: - cto = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - err = ccudart.cudaGraphGetEdges(cgraph, cfrom_, cto, &numEdges) + cyto = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + err = cyruntime.cudaGraphGetEdges(cygraph, 
cyfrom_, cyto, &numEdges) if cudaError_t(err) == cudaError_t(0): - pyfrom_ = [cudaGraphNode_t(init_value=cfrom_[idx]) for idx in range(_graph_length)] - if cfrom_ is not NULL: - free(cfrom_) + pyfrom_ = [cudaGraphNode_t(init_value=cyfrom_[idx]) for idx in range(_graph_length)] + if cyfrom_ is not NULL: + free(cyfrom_) if cudaError_t(err) == cudaError_t(0): - pyto = [cudaGraphNode_t(init_value=cto[idx]) for idx in range(_graph_length)] - if cto is not NULL: - free(cto) + pyto = [cudaGraphNode_t(init_value=cyto[idx]) for idx in range(_graph_length)] + if cyto is not NULL: + free(cyto) return (cudaError_t(err), pyfrom_, pyto, numEdges) {{endif}} @@ -27681,46 +27681,46 @@ def cudaGraphGetEdges_v2(graph, size_t numEdges = 0): :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphAddDependencies`, :py:obj:`~.cudaGraphRemoveDependencies`, :py:obj:`~.cudaGraphNodeGetDependencies`, :py:obj:`~.cudaGraphNodeGetDependentNodes` """ cdef size_t _graph_length = numEdges - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cfrom_ = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cyfrom_ = NULL pyfrom_ = [] if _graph_length != 0: - cfrom_ = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - cdef ccudart.cudaGraphNode_t* cto = NULL + cyfrom_ = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + cdef cyruntime.cudaGraphNode_t* cyto = NULL pyto = [] if _graph_length != 0: - cto = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - cdef ccudart.cudaGraphEdgeData* cedgeData = NULL + cyto = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + cdef cyruntime.cudaGraphEdgeData* cyedgeData = NULL pyedgeData = [] if _graph_length != 0: - cedgeData = calloc(_graph_length, sizeof(ccudart.cudaGraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData))) - err = ccudart.cudaGraphGetEdges_v2(cgraph, cfrom_, cto, cedgeData, &numEdges) + cyedgeData = calloc(_graph_length, sizeof(cyruntime.cudaGraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) + err = cyruntime.cudaGraphGetEdges_v2(cygraph, cyfrom_, cyto, cyedgeData, &numEdges) if cudaError_t(err) == cudaError_t(0): - pyfrom_ = [cudaGraphNode_t(init_value=cfrom_[idx]) for idx in range(_graph_length)] - if cfrom_ is not NULL: - free(cfrom_) + pyfrom_ = [cudaGraphNode_t(init_value=cyfrom_[idx]) for idx in range(_graph_length)] + if cyfrom_ is not NULL: + free(cyfrom_) if 
cudaError_t(err) == cudaError_t(0): - pyto = [cudaGraphNode_t(init_value=cto[idx]) for idx in range(_graph_length)] - if cto is not NULL: - free(cto) + pyto = [cudaGraphNode_t(init_value=cyto[idx]) for idx in range(_graph_length)] + if cyto is not NULL: + free(cyto) if cudaError_t(err) == cudaError_t(0): - pyedgeData = [cudaGraphEdgeData(_ptr=&cedgeData[idx]) for idx in range(_graph_length)] - if cedgeData is not NULL: - free(cedgeData) + pyedgeData = [cudaGraphEdgeData(_ptr=&cyedgeData[idx]) for idx in range(_graph_length)] + if cyedgeData is not NULL: + free(cyedgeData) return (cudaError_t(err), pyfrom_, pyto, pyedgeData, numEdges) {{endif}} @@ -27759,26 +27759,26 @@ def cudaGraphNodeGetDependencies(node, size_t pNumDependencies = 0): :py:obj:`~.cudaGraphNodeGetDependentNodes`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphAddDependencies`, :py:obj:`~.cudaGraphRemoveDependencies` """ cdef size_t _graph_length = pNumDependencies - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cynode = pnode + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL pypDependencies = [] if _graph_length != 0: - cpDependencies = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - err = ccudart.cudaGraphNodeGetDependencies(cnode, cpDependencies, &pNumDependencies) + cypDependencies = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + err = cyruntime.cudaGraphNodeGetDependencies(cynode, cypDependencies, &pNumDependencies) if cudaError_t(err) == cudaError_t(0): - pypDependencies = [cudaGraphNode_t(init_value=cpDependencies[idx]) for idx in range(_graph_length)] - if cpDependencies is not NULL: - free(cpDependencies) + pypDependencies = [cudaGraphNode_t(init_value=cypDependencies[idx]) for idx in range(_graph_length)] + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pypDependencies, pNumDependencies) {{endif}} @@ -27824,36 +27824,36 @@ def cudaGraphNodeGetDependencies_v2(node, size_t pNumDependencies = 0): :py:obj:`~.cudaGraphNodeGetDependentNodes`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphAddDependencies`, :py:obj:`~.cudaGraphRemoveDependencies` """ cdef size_t _graph_length = pNumDependencies - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cynode = pnode + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL pypDependencies = [] if _graph_length != 0: - cpDependencies = calloc(_graph_length, 
sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - cdef ccudart.cudaGraphEdgeData* cedgeData = NULL + cypDependencies = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + cdef cyruntime.cudaGraphEdgeData* cyedgeData = NULL pyedgeData = [] if _graph_length != 0: - cedgeData = calloc(_graph_length, sizeof(ccudart.cudaGraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData))) - err = ccudart.cudaGraphNodeGetDependencies_v2(cnode, cpDependencies, cedgeData, &pNumDependencies) + cyedgeData = calloc(_graph_length, sizeof(cyruntime.cudaGraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) + err = cyruntime.cudaGraphNodeGetDependencies_v2(cynode, cypDependencies, cyedgeData, &pNumDependencies) if cudaError_t(err) == cudaError_t(0): - pypDependencies = [cudaGraphNode_t(init_value=cpDependencies[idx]) for idx in range(_graph_length)] - if cpDependencies is not NULL: - free(cpDependencies) + pypDependencies = [cudaGraphNode_t(init_value=cypDependencies[idx]) for idx in range(_graph_length)] + if cypDependencies is not NULL: + free(cypDependencies) if cudaError_t(err) == cudaError_t(0): - pyedgeData = [cudaGraphEdgeData(_ptr=&cedgeData[idx]) for idx in range(_graph_length)] - if cedgeData is not NULL: - free(cedgeData) + pyedgeData = [cudaGraphEdgeData(_ptr=&cyedgeData[idx]) for idx in range(_graph_length)] + if cyedgeData is not NULL: + free(cyedgeData) return (cudaError_t(err), pypDependencies, pyedgeData, pNumDependencies) {{endif}} @@ -27892,26 +27892,26 @@ def cudaGraphNodeGetDependentNodes(node, size_t pNumDependentNodes = 0): :py:obj:`~.cudaGraphNodeGetDependencies`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphAddDependencies`, :py:obj:`~.cudaGraphRemoveDependencies` """ cdef size_t _graph_length = pNumDependentNodes - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphNode_t* cpDependentNodes = NULL + cynode = pnode + cdef cyruntime.cudaGraphNode_t* cypDependentNodes = NULL pypDependentNodes = [] if _graph_length != 0: - cpDependentNodes = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cpDependentNodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - err = ccudart.cudaGraphNodeGetDependentNodes(cnode, cpDependentNodes, &pNumDependentNodes) + cypDependentNodes = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cypDependentNodes is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + err = cyruntime.cudaGraphNodeGetDependentNodes(cynode, cypDependentNodes, 
&pNumDependentNodes) if cudaError_t(err) == cudaError_t(0): - pypDependentNodes = [cudaGraphNode_t(init_value=cpDependentNodes[idx]) for idx in range(_graph_length)] - if cpDependentNodes is not NULL: - free(cpDependentNodes) + pypDependentNodes = [cudaGraphNode_t(init_value=cypDependentNodes[idx]) for idx in range(_graph_length)] + if cypDependentNodes is not NULL: + free(cypDependentNodes) return (cudaError_t(err), pypDependentNodes, pNumDependentNodes) {{endif}} @@ -27957,36 +27957,36 @@ def cudaGraphNodeGetDependentNodes_v2(node, size_t pNumDependentNodes = 0): :py:obj:`~.cudaGraphNodeGetDependencies`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphAddDependencies`, :py:obj:`~.cudaGraphRemoveDependencies` """ cdef size_t _graph_length = pNumDependentNodes - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphNode_t* cpDependentNodes = NULL + cynode = pnode + cdef cyruntime.cudaGraphNode_t* cypDependentNodes = NULL pypDependentNodes = [] if _graph_length != 0: - cpDependentNodes = calloc(_graph_length, sizeof(ccudart.cudaGraphNode_t)) - if cpDependentNodes is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) - cdef ccudart.cudaGraphEdgeData* cedgeData = NULL + cypDependentNodes = calloc(_graph_length, sizeof(cyruntime.cudaGraphNode_t)) + if cypDependentNodes is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) + cdef cyruntime.cudaGraphEdgeData* cyedgeData = NULL pyedgeData = [] if _graph_length != 0: - cedgeData = calloc(_graph_length, sizeof(ccudart.cudaGraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData))) - err = ccudart.cudaGraphNodeGetDependentNodes_v2(cnode, cpDependentNodes, cedgeData, &pNumDependentNodes) + cyedgeData = calloc(_graph_length, sizeof(cyruntime.cudaGraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(_graph_length) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) + err = cyruntime.cudaGraphNodeGetDependentNodes_v2(cynode, cypDependentNodes, cyedgeData, &pNumDependentNodes) if cudaError_t(err) == cudaError_t(0): - pypDependentNodes = [cudaGraphNode_t(init_value=cpDependentNodes[idx]) for idx in range(_graph_length)] - if cpDependentNodes is not NULL: - free(cpDependentNodes) + pypDependentNodes = [cudaGraphNode_t(init_value=cypDependentNodes[idx]) for idx in range(_graph_length)] + if cypDependentNodes is not NULL: + free(cypDependentNodes) if cudaError_t(err) == cudaError_t(0): - pyedgeData = [cudaGraphEdgeData(_ptr=&cedgeData[idx]) for idx in range(_graph_length)] - if cedgeData is not NULL: - free(cedgeData) + pyedgeData = [cudaGraphEdgeData(_ptr=&cyedgeData[idx]) for idx in range(_graph_length)] + if cyedgeData is not NULL: + free(cyedgeData) return (cudaError_t(err), pypDependentNodes, pyedgeData, pNumDependentNodes) {{endif}} @@ -28024,43 +28024,43 @@ def cudaGraphAddDependencies(graph, from_ : Optional[Tuple[cudaGraphNode_t] | 
Li :py:obj:`~.cudaGraphRemoveDependencies`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphNodeGetDependencies`, :py:obj:`~.cudaGraphNodeGetDependentNodes` """ to = [] if to is None else to - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in to): + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") from_ = [] if from_ is None else from_ - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in from_): + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cfrom_ = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), sizeof(ccudart.cudaGraphNode_t)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyfrom_ = calloc(len(from_), sizeof(cyruntime.cudaGraphNode_t)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(from_)): - cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccudart.cudaGraphNode_t* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cyruntime.cudaGraphNode_t* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccudart.cudaGraphNode_t)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyto = calloc(len(to), sizeof(cyruntime.cudaGraphNode_t)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] + cyto[idx] = (to[idx])._ptr[0] if numDependencies > len(from_): raise RuntimeError("List is too small: " + str(len(from_)) + " < " + str(numDependencies)) if numDependencies > len(to): raise RuntimeError("List is too small: " + str(len(to)) + " < " + str(numDependencies)) - err = ccudart.cudaGraphAddDependencies(cgraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) + err = cyruntime.cudaGraphAddDependencies(cygraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is 
not NULL: + free(cyto) return (cudaError_t(err),) {{endif}} @@ -28102,52 +28102,52 @@ def cudaGraphAddDependencies_v2(graph, from_ : Optional[Tuple[cudaGraphNode_t] | """ edgeData = [] if edgeData is None else edgeData if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in edgeData): - raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[ccudart.cudaGraphEdgeData,] or List[ccudart.cudaGraphEdgeData,]") + raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[cyruntime.cudaGraphEdgeData,] or List[cyruntime.cudaGraphEdgeData,]") to = [] if to is None else to - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in to): + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") from_ = [] if from_ is None else from_ - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in from_): + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cfrom_ = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), sizeof(ccudart.cudaGraphNode_t)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyfrom_ = calloc(len(from_), sizeof(cyruntime.cudaGraphNode_t)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(from_)): - cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccudart.cudaGraphNode_t* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cyruntime.cudaGraphNode_t* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccudart.cudaGraphNode_t)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyto = calloc(len(to), sizeof(cyruntime.cudaGraphNode_t)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] - cdef ccudart.cudaGraphEdgeData* cedgeData = NULL + cyto[idx] = (to[idx])._ptr[0] + cdef cyruntime.cudaGraphEdgeData* cyedgeData = NULL if len(edgeData) > 0: - cedgeData = calloc(len(edgeData), sizeof(ccudart.cudaGraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate 
length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData))) + cyedgeData = calloc(len(edgeData), sizeof(cyruntime.cudaGraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) for idx in range(len(edgeData)): - string.memcpy(&cedgeData[idx], (edgeData[idx])._ptr, sizeof(ccudart.cudaGraphEdgeData)) - err = ccudart.cudaGraphAddDependencies_v2(cgraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, (edgeData[0])._ptr if len(edgeData) == 1 else cedgeData, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) - if cedgeData is not NULL: - free(cedgeData) + string.memcpy(&cyedgeData[idx], (edgeData[idx])._ptr, sizeof(cyruntime.cudaGraphEdgeData)) + err = cyruntime.cudaGraphAddDependencies_v2(cygraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, (edgeData[0])._ptr if len(edgeData) == 1 else cyedgeData, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is not NULL: + free(cyto) + if cyedgeData is not NULL: + free(cyedgeData) return (cudaError_t(err),) {{endif}} @@ -28186,43 +28186,43 @@ def cudaGraphRemoveDependencies(graph, from_ : Optional[Tuple[cudaGraphNode_t] | :py:obj:`~.cudaGraphAddDependencies`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphNodeGetDependencies`, :py:obj:`~.cudaGraphNodeGetDependentNodes` """ to = [] if to is None else to - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in to): + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") from_ = [] if from_ is None else from_ - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in from_): + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cfrom_ = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), sizeof(ccudart.cudaGraphNode_t)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyfrom_ = calloc(len(from_), sizeof(cyruntime.cudaGraphNode_t)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(from_)): - 
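# [Editor's sketch] The dependency-edge hunks above all share one marshaling
# lifecycle: calloc a C array sized to the Python sequence, copy each
# wrapper's underlying handle into it, check numDependencies against the
# sequence length, call the runtime entry point (with a single-element fast
# path that passes the element's own _ptr), and free the array afterwards.
# A hedged ctypes equivalent of that lifecycle, independent of the generated
# Cython; marshal_node_handles is an illustrative name only:

import ctypes

def marshal_node_handles(nodes, num_dependencies):
    if num_dependencies > len(nodes):
        raise RuntimeError("List is too small: %d < %d" % (len(nodes), num_dependencies))
    arr = (ctypes.c_void_p * len(nodes))()   # zero-initialized, like calloc
    for idx, node in enumerate(nodes):
        arr[idx] = int(node)                 # copy the raw handle value
    return arr                               # released with the Python object; no manual free

print(list(marshal_node_handles([1, 2, 3], 2)))   # -> [1, 2, 3]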
cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccudart.cudaGraphNode_t* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cyruntime.cudaGraphNode_t* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccudart.cudaGraphNode_t)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyto = calloc(len(to), sizeof(cyruntime.cudaGraphNode_t)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] + cyto[idx] = (to[idx])._ptr[0] if numDependencies > len(from_): raise RuntimeError("List is too small: " + str(len(from_)) + " < " + str(numDependencies)) if numDependencies > len(to): raise RuntimeError("List is too small: " + str(len(to)) + " < " + str(numDependencies)) - err = ccudart.cudaGraphRemoveDependencies(cgraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) + err = cyruntime.cudaGraphRemoveDependencies(cygraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is not NULL: + free(cyto) return (cudaError_t(err),) {{endif}} @@ -28267,52 +28267,52 @@ def cudaGraphRemoveDependencies_v2(graph, from_ : Optional[Tuple[cudaGraphNode_t """ edgeData = [] if edgeData is None else edgeData if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in edgeData): - raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[ccudart.cudaGraphEdgeData,] or List[ccudart.cudaGraphEdgeData,]") + raise TypeError("Argument 'edgeData' is not instance of type (expected Tuple[cyruntime.cudaGraphEdgeData,] or List[cyruntime.cudaGraphEdgeData,]") to = [] if to is None else to - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in to): - raise TypeError("Argument 'to' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in to): + raise TypeError("Argument 'to' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") from_ = [] if from_ is None else from_ - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in from_): - raise TypeError("Argument 'from_' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in from_): + raise TypeError("Argument 'from_' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - cdef ccudart.cudaGraphNode_t* cfrom_ = NULL + cygraph = pgraph + cdef cyruntime.cudaGraphNode_t* cyfrom_ = NULL if len(from_) > 0: - cfrom_ = calloc(len(from_), 
sizeof(ccudart.cudaGraphNode_t)) - if cfrom_ is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyfrom_ = calloc(len(from_), sizeof(cyruntime.cudaGraphNode_t)) + if cyfrom_ is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(from_)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(from_)): - cfrom_[idx] = (from_[idx])._ptr[0] - cdef ccudart.cudaGraphNode_t* cto = NULL + cyfrom_[idx] = (from_[idx])._ptr[0] + cdef cyruntime.cudaGraphNode_t* cyto = NULL if len(to) > 0: - cto = calloc(len(to), sizeof(ccudart.cudaGraphNode_t)) - if cto is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cyto = calloc(len(to), sizeof(cyruntime.cudaGraphNode_t)) + if cyto is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(to)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(to)): - cto[idx] = (to[idx])._ptr[0] - cdef ccudart.cudaGraphEdgeData* cedgeData = NULL + cyto[idx] = (to[idx])._ptr[0] + cdef cyruntime.cudaGraphEdgeData* cyedgeData = NULL if len(edgeData) > 0: - cedgeData = calloc(len(edgeData), sizeof(ccudart.cudaGraphEdgeData)) - if cedgeData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData))) + cyedgeData = calloc(len(edgeData), sizeof(cyruntime.cudaGraphEdgeData)) + if cyedgeData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(edgeData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) for idx in range(len(edgeData)): - string.memcpy(&cedgeData[idx], (edgeData[idx])._ptr, sizeof(ccudart.cudaGraphEdgeData)) - err = ccudart.cudaGraphRemoveDependencies_v2(cgraph, (from_[0])._ptr if len(from_) == 1 else cfrom_, (to[0])._ptr if len(to) == 1 else cto, (edgeData[0])._ptr if len(edgeData) == 1 else cedgeData, numDependencies) - if cfrom_ is not NULL: - free(cfrom_) - if cto is not NULL: - free(cto) - if cedgeData is not NULL: - free(cedgeData) + string.memcpy(&cyedgeData[idx], (edgeData[idx])._ptr, sizeof(cyruntime.cudaGraphEdgeData)) + err = cyruntime.cudaGraphRemoveDependencies_v2(cygraph, (from_[0])._ptr if len(from_) == 1 else cyfrom_, (to[0])._ptr if len(to) == 1 else cyto, (edgeData[0])._ptr if len(edgeData) == 1 else cyedgeData, numDependencies) + if cyfrom_ is not NULL: + free(cyfrom_) + if cyto is not NULL: + free(cyto) + if cyedgeData is not NULL: + free(cyedgeData) return (cudaError_t(err),) {{endif}} @@ -28342,16 +28342,16 @@ def cudaGraphDestroyNode(node): -------- :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - err = ccudart.cudaGraphDestroyNode(cnode) + cynode = pnode + err = cyruntime.cudaGraphDestroyNode(cynode) return (cudaError_t(err),) {{endif}} @@ -28443,17 +28443,17 @@ def cudaGraphInstantiate(graph, unsigned long long flags): -------- 
:py:obj:`~.cudaGraphInstantiateWithFlags`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphUpload`, :py:obj:`~.cudaGraphLaunch`, :py:obj:`~.cudaGraphExecDestroy` """ - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphExec_t pGraphExec = cudaGraphExec_t() - err = ccudart.cudaGraphInstantiate(pGraphExec._ptr, cgraph, flags) + err = cyruntime.cudaGraphInstantiate(pGraphExec._ptr, cygraph, flags) return (cudaError_t(err), pGraphExec) {{endif}} @@ -28547,17 +28547,17 @@ def cudaGraphInstantiateWithFlags(graph, unsigned long long flags): -------- :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphUpload`, :py:obj:`~.cudaGraphLaunch`, :py:obj:`~.cudaGraphExecDestroy` """ - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphExec_t pGraphExec = cudaGraphExec_t() - err = ccudart.cudaGraphInstantiateWithFlags(pGraphExec._ptr, cgraph, flags) + err = cyruntime.cudaGraphInstantiateWithFlags(pGraphExec._ptr, cygraph, flags) return (cudaError_t(err), pGraphExec) {{endif}} @@ -28691,18 +28691,18 @@ def cudaGraphInstantiateWithParams(graph, instantiateParams : Optional[cudaGraph -------- :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphInstantiateWithFlags`, :py:obj:`~.cudaGraphExecDestroy` """ - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphExec_t pGraphExec = cudaGraphExec_t() - cdef ccudart.cudaGraphInstantiateParams* cinstantiateParams_ptr = instantiateParams._ptr if instantiateParams != None else NULL - err = ccudart.cudaGraphInstantiateWithParams(pGraphExec._ptr, cgraph, cinstantiateParams_ptr) + cdef cyruntime.cudaGraphInstantiateParams* cyinstantiateParams_ptr = instantiateParams._ptr if instantiateParams != None else NULL + err = cyruntime.cudaGraphInstantiateWithParams(pGraphExec._ptr, cygraph, cyinstantiateParams_ptr) return (cudaError_t(err), pGraphExec) {{endif}} @@ -28733,17 +28733,17 @@ def cudaGraphExecGetFlags(graphExec): -------- :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphInstantiateWithFlags`, :py:obj:`~.cudaGraphInstantiateWithParams` """ - cdef ccudart.cudaGraphExec_t cgraphExec + cdef cyruntime.cudaGraphExec_t cygraphExec if graphExec is None: - cgraphExec = 0 - elif isinstance(graphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cygraphExec = 0 + elif isinstance(graphExec, (cudaGraphExec_t,driver.CUgraphExec)): pgraphExec = int(graphExec) - cgraphExec = pgraphExec + cygraphExec = pgraphExec else: pgraphExec = int(cudaGraphExec_t(graphExec)) - cgraphExec = pgraphExec + cygraphExec = pgraphExec cdef unsigned long long flags = 0 - err = 
ccudart.cudaGraphExecGetFlags(cgraphExec, &flags) + err = cyruntime.cudaGraphExecGetFlags(cygraphExec, &flags) return (cudaError_t(err), flags) {{endif}} @@ -28807,26 +28807,26 @@ def cudaGraphExecKernelNodeSetParams(hGraphExec, node, pNodeParams : Optional[cu -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphExec_t chGraphExec + cynode = pnode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - cdef ccudart.cudaKernelNodeParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphExecKernelNodeSetParams(chGraphExec, cnode, cpNodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphExecKernelNodeSetParams(cyhGraphExec, cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -28873,26 +28873,26 @@ def cudaGraphExecMemcpyNodeSetParams(hGraphExec, node, pNodeParams : Optional[cu -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParamsToSymbol`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParamsFromSymbol`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams1D`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphExec_t chGraphExec + cynode = pnode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, 
(cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - cdef ccudart.cudaMemcpy3DParms* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphExecMemcpyNodeSetParams(chGraphExec, cnode, cpNodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cyruntime.cudaMemcpy3DParms* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphExecMemcpyNodeSetParams(cyhGraphExec, cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -28943,30 +28943,30 @@ def cudaGraphExecMemcpyNodeSetParams1D(hGraphExec, node, dst, src, size_t count, -------- :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemcpyNode1D`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams1D`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphExec_t chGraphExec + cynode = pnode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - cdst = utils.HelperInputVoidPtr(dst) - cdef void* cdst_ptr = cdst.cptr - csrc = utils.HelperInputVoidPtr(src) - cdef void* csrc_ptr = csrc.cptr - cdef ccudart.cudaMemcpyKind ckind = kind.value - err = ccudart.cudaGraphExecMemcpyNodeSetParams1D(chGraphExec, cnode, cdst_ptr, csrc_ptr, count, ckind) + cyhGraphExec = phGraphExec + cydst = utils.HelperInputVoidPtr(dst) + cdef void* cydst_ptr = cydst.cptr + cysrc = utils.HelperInputVoidPtr(src) + cdef void* cysrc_ptr = cysrc.cptr + cdef cyruntime.cudaMemcpyKind cykind = kind.value + err = cyruntime.cudaGraphExecMemcpyNodeSetParams1D(cyhGraphExec, cynode, cydst_ptr, cysrc_ptr, count, cykind) return (cudaError_t(err),) {{endif}} @@ -29018,26 +29018,26 @@ def cudaGraphExecMemsetNodeSetParams(hGraphExec, node, pNodeParams : Optional[cu -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, 
:py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphExec_t chGraphExec + cynode = pnode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - cdef ccudart.cudaMemsetParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphExecMemsetNodeSetParams(chGraphExec, cnode, cpNodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphExecMemsetNodeSetParams(cyhGraphExec, cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -29074,26 +29074,26 @@ def cudaGraphExecHostNodeSetParams(hGraphExec, node, pNodeParams : Optional[cuda -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphHostNodeSetParams`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphExec_t chGraphExec + cynode = pnode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - cdef ccudart.cudaHostNodeParams* cpNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL - err = ccudart.cudaGraphExecHostNodeSetParams(chGraphExec, cnode, cpNodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._ptr if pNodeParams != None else NULL + err = cyruntime.cudaGraphExecHostNodeSetParams(cyhGraphExec, cynode, cypNodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -29138,34 +29138,34 @@ def 
cudaGraphExecChildGraphNodeSetParams(hGraphExec, node, childGraph): -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphChildGraphNodeGetGraph`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraph_t cchildGraph + cdef cyruntime.cudaGraph_t cychildGraph if childGraph is None: - cchildGraph = 0 - elif isinstance(childGraph, (cudaGraph_t,cuda.CUgraph)): + cychildGraph = 0 + elif isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): pchildGraph = int(childGraph) - cchildGraph = pchildGraph + cychildGraph = pchildGraph else: pchildGraph = int(cudaGraph_t(childGraph)) - cchildGraph = pchildGraph - cdef ccudart.cudaGraphNode_t cnode + cychildGraph = pchildGraph + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphExec_t chGraphExec + cynode = pnode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - err = ccudart.cudaGraphExecChildGraphNodeSetParams(chGraphExec, cnode, cchildGraph) + cyhGraphExec = phGraphExec + err = cyruntime.cudaGraphExecChildGraphNodeSetParams(cyhGraphExec, cynode, cychildGraph) return (cudaError_t(err),) {{endif}} @@ -29203,34 +29203,34 @@ def cudaGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event): -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphEventRecordNodeGetEvent`, :py:obj:`~.cudaGraphEventWaitNodeSetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - cdef ccudart.cudaGraphNode_t chNode + cyevent = pevent + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + 
cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaGraphExec_t chGraphExec + cyhNode = phNode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - err = ccudart.cudaGraphExecEventRecordNodeSetEvent(chGraphExec, chNode, cevent) + cyhGraphExec = phGraphExec + err = cyruntime.cudaGraphExecEventRecordNodeSetEvent(cyhGraphExec, cyhNode, cyevent) return (cudaError_t(err),) {{endif}} @@ -29268,34 +29268,34 @@ def cudaGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event): -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaGraphEventWaitNodeGetEvent`, :py:obj:`~.cudaGraphEventRecordNodeSetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaEvent_t cevent + cdef cyruntime.cudaEvent_t cyevent if event is None: - cevent = 0 - elif isinstance(event, (cudaEvent_t,cuda.CUevent)): + cyevent = 0 + elif isinstance(event, (cudaEvent_t,driver.CUevent)): pevent = int(event) - cevent = pevent + cyevent = pevent else: pevent = int(cudaEvent_t(event)) - cevent = pevent - cdef ccudart.cudaGraphNode_t chNode + cyevent = pevent + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaGraphExec_t chGraphExec + cyhNode = phNode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - err = ccudart.cudaGraphExecEventWaitNodeSetEvent(chGraphExec, chNode, cevent) + cyhGraphExec = phGraphExec + err = cyruntime.cudaGraphExecEventWaitNodeSetEvent(cyhGraphExec, cyhNode, cyevent) return (cudaError_t(err),) {{endif}} @@ -29337,26 +29337,26 @@ def cudaGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, nodePa -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresSignalNode`, :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, 
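The two event setters follow the same shape; a hedged sketch, where recordNode and waitNode stand for nodes previously created with cudaGraphAddEventRecordNode / cudaGraphAddEventWaitNode:

    from cuda.bindings import runtime
    err, ev = runtime.cudaEventCreate()  # returns (cudaError_t, cudaEvent_t)
    err, = runtime.cudaGraphExecEventRecordNodeSetEvent(graphExec, recordNode, ev)
    err, = runtime.cudaGraphExecEventWaitNodeSetEvent(graphExec, waitNode, ev)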
:py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresWaitNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaGraphExec_t chGraphExec + cyhNode = phNode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - cdef ccudart.cudaExternalSemaphoreSignalNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphExecExternalSemaphoresSignalNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cyruntime.cudaExternalSemaphoreSignalNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphExecExternalSemaphoresSignalNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -29398,26 +29398,26 @@ def cudaGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, nodePara -------- :py:obj:`~.cudaGraphExecNodeSetParams`, :py:obj:`~.cudaGraphAddExternalSemaphoresWaitNode`, :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`, :py:obj:`~.cudaGraphExecKernelNodeSetParams`, :py:obj:`~.cudaGraphExecMemcpyNodeSetParams`, :py:obj:`~.cudaGraphExecMemsetNodeSetParams`, :py:obj:`~.cudaGraphExecHostNodeSetParams`, :py:obj:`~.cudaGraphExecChildGraphNodeSetParams`, :py:obj:`~.cudaGraphExecEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphExecEventWaitNodeSetEvent`, :py:obj:`~.cudaGraphExecExternalSemaphoresSignalNodeSetParams`, :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaGraphExec_t chGraphExec + cyhNode = phNode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - cdef ccudart.cudaExternalSemaphoreWaitNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = 
ccudart.cudaGraphExecExternalSemaphoresWaitNodeSetParams(chGraphExec, chNode, cnodeParams_ptr) + cyhGraphExec = phGraphExec + cdef cyruntime.cudaExternalSemaphoreWaitNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphExecExternalSemaphoresWaitNodeSetParams(cyhGraphExec, cyhNode, cynodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -29463,25 +29463,25 @@ def cudaGraphNodeSetEnabled(hGraphExec, hNode, unsigned int isEnabled): ----- Currently only kernel, memset and memcpy nodes are supported. """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaGraphExec_t chGraphExec + cyhNode = phNode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec - err = ccudart.cudaGraphNodeSetEnabled(chGraphExec, chNode, isEnabled) + cyhGraphExec = phGraphExec + err = cyruntime.cudaGraphNodeSetEnabled(cyhGraphExec, cyhNode, isEnabled) return (cudaError_t(err),) {{endif}} @@ -29520,26 +29520,26 @@ def cudaGraphNodeGetEnabled(hGraphExec, hNode): ----- Currently only kernel, memset and memcpy nodes are supported. """ - cdef ccudart.cudaGraphNode_t chNode + cdef cyruntime.cudaGraphNode_t cyhNode if hNode is None: - chNode = 0 - elif isinstance(hNode, (cudaGraphNode_t,cuda.CUgraphNode)): + cyhNode = 0 + elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): phNode = int(hNode) - chNode = phNode + cyhNode = phNode else: phNode = int(cudaGraphNode_t(hNode)) - chNode = phNode - cdef ccudart.cudaGraphExec_t chGraphExec + cyhNode = phNode + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec cdef unsigned int isEnabled = 0 - err = ccudart.cudaGraphNodeGetEnabled(chGraphExec, chNode, &isEnabled) + err = cyruntime.cudaGraphNodeGetEnabled(cyhGraphExec, cyhNode, &isEnabled) return (cudaError_t(err), isEnabled) {{endif}} @@ -29695,26 +29695,26 @@ def cudaGraphExecUpdate(hGraphExec, hGraph): -------- :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraph_t chGraph + cdef cyruntime.cudaGraph_t cyhGraph if hGraph is None: - chGraph = 0 - elif isinstance(hGraph, (cudaGraph_t,cuda.CUgraph)): + cyhGraph = 0 + elif isinstance(hGraph, (cudaGraph_t,driver.CUgraph)): phGraph = int(hGraph) - chGraph = phGraph + cyhGraph = phGraph else: phGraph = int(cudaGraph_t(hGraph)) - chGraph = phGraph - cdef ccudart.cudaGraphExec_t chGraphExec + cyhGraph = phGraph + cdef cyruntime.cudaGraphExec_t cyhGraphExec if hGraphExec is None: - chGraphExec = 0 - elif isinstance(hGraphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + 
cyhGraphExec = 0 + elif isinstance(hGraphExec, (cudaGraphExec_t,driver.CUgraphExec)): phGraphExec = int(hGraphExec) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec else: phGraphExec = int(cudaGraphExec_t(hGraphExec)) - chGraphExec = phGraphExec + cyhGraphExec = phGraphExec cdef cudaGraphExecUpdateResultInfo resultInfo = cudaGraphExecUpdateResultInfo() - err = ccudart.cudaGraphExecUpdate(chGraphExec, chGraph, resultInfo._ptr) + err = cyruntime.cudaGraphExecUpdate(cyhGraphExec, cyhGraph, resultInfo._ptr) return (cudaError_t(err), resultInfo) {{endif}} @@ -29746,25 +29746,25 @@ def cudaGraphUpload(graphExec, stream): -------- :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphLaunch`, :py:obj:`~.cudaGraphExecDestroy` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaGraphExec_t cgraphExec + cystream = pstream + cdef cyruntime.cudaGraphExec_t cygraphExec if graphExec is None: - cgraphExec = 0 - elif isinstance(graphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cygraphExec = 0 + elif isinstance(graphExec, (cudaGraphExec_t,driver.CUgraphExec)): pgraphExec = int(graphExec) - cgraphExec = pgraphExec + cygraphExec = pgraphExec else: pgraphExec = int(cudaGraphExec_t(graphExec)) - cgraphExec = pgraphExec - err = ccudart.cudaGraphUpload(cgraphExec, cstream) + cygraphExec = pgraphExec + err = cyruntime.cudaGraphUpload(cygraphExec, cystream) return (cudaError_t(err),) {{endif}} @@ -29801,25 +29801,25 @@ def cudaGraphLaunch(graphExec, stream): -------- :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphUpload`, :py:obj:`~.cudaGraphExecDestroy` """ - cdef ccudart.cudaStream_t cstream + cdef cyruntime.cudaStream_t cystream if stream is None: - cstream = 0 - elif isinstance(stream, (cudaStream_t,cuda.CUstream)): + cystream = 0 + elif isinstance(stream, (cudaStream_t,driver.CUstream)): pstream = int(stream) - cstream = pstream + cystream = pstream else: pstream = int(cudaStream_t(stream)) - cstream = pstream - cdef ccudart.cudaGraphExec_t cgraphExec + cystream = pstream + cdef cyruntime.cudaGraphExec_t cygraphExec if graphExec is None: - cgraphExec = 0 - elif isinstance(graphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cygraphExec = 0 + elif isinstance(graphExec, (cudaGraphExec_t,driver.CUgraphExec)): pgraphExec = int(graphExec) - cgraphExec = pgraphExec + cygraphExec = pgraphExec else: pgraphExec = int(cudaGraphExec_t(graphExec)) - cgraphExec = pgraphExec - err = ccudart.cudaGraphLaunch(cgraphExec, cstream) + cygraphExec = pgraphExec + err = cyruntime.cudaGraphLaunch(cygraphExec, cystream) return (cudaError_t(err),) {{endif}} @@ -29845,16 +29845,16 @@ def cudaGraphExecDestroy(graphExec): -------- :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphUpload`, :py:obj:`~.cudaGraphLaunch` """ - cdef ccudart.cudaGraphExec_t cgraphExec + cdef cyruntime.cudaGraphExec_t cygraphExec if graphExec is None: - cgraphExec = 0 - elif isinstance(graphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cygraphExec = 0 + elif isinstance(graphExec, (cudaGraphExec_t,driver.CUgraphExec)): pgraphExec = int(graphExec) - cgraphExec = pgraphExec + cygraphExec = pgraphExec else: pgraphExec = int(cudaGraphExec_t(graphExec)) - cgraphExec = pgraphExec - err = 
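cudaGraphExecUpdate hands back the result-info struct alongside the error code, so the usual update-or-relaunch pattern reads as below (a sketch; graph, graphExec and stream are assumed to exist):

    from cuda.bindings import runtime
    err, resultInfo = runtime.cudaGraphExecUpdate(graphExec, graph)
    if err == runtime.cudaError_t.cudaSuccess:
        # Topology unchanged: reuse the executable graph.
        err, = runtime.cudaGraphUpload(graphExec, stream)  # optional prefetch
        err, = runtime.cudaGraphLaunch(graphExec, stream)
    else:
        # resultInfo.result says why the in-place update was rejected;
        # fall back to re-instantiating the graph.
        err, graphExec = runtime.cudaGraphInstantiate(graph, 0)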
ccudart.cudaGraphExecDestroy(cgraphExec) + cygraphExec = pgraphExec + err = cyruntime.cudaGraphExecDestroy(cygraphExec) return (cudaError_t(err),) {{endif}} @@ -29880,16 +29880,16 @@ def cudaGraphDestroy(graph): -------- :py:obj:`~.cudaGraphCreate` """ - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - err = ccudart.cudaGraphDestroy(cgraph) + cygraph = pgraph + err = cyruntime.cudaGraphDestroy(cygraph) return (cudaError_t(err),) {{endif}} @@ -29920,16 +29920,16 @@ def cudaGraphDebugDotPrint(graph, char* path, unsigned int flags): cudaError_t :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorOperatingSystem` """ - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - err = ccudart.cudaGraphDebugDotPrint(cgraph, path, flags) + cygraph = pgraph + err = cyruntime.cudaGraphDebugDotPrint(cygraph, path, flags) return (cudaError_t(err),) {{endif}} @@ -29977,19 +29977,19 @@ def cudaUserObjectCreate(ptr, destroy, unsigned int initialRefcount, unsigned in -------- :py:obj:`~.cudaUserObjectRetain`, :py:obj:`~.cudaUserObjectRelease`, :py:obj:`~.cudaGraphRetainUserObject`, :py:obj:`~.cudaGraphReleaseUserObject`, :py:obj:`~.cudaGraphCreate` """ - cdef ccudart.cudaHostFn_t cdestroy + cdef cyruntime.cudaHostFn_t cydestroy if destroy is None: - cdestroy = 0 + cydestroy = 0 elif isinstance(destroy, (cudaHostFn_t,)): pdestroy = int(destroy) - cdestroy = pdestroy + cydestroy = pdestroy else: pdestroy = int(cudaHostFn_t(destroy)) - cdestroy = pdestroy + cydestroy = pdestroy cdef cudaUserObject_t object_out = cudaUserObject_t() - cptr = utils.HelperInputVoidPtr(ptr) - cdef void* cptr_ptr = cptr.cptr - err = ccudart.cudaUserObjectCreate(object_out._ptr, cptr_ptr, cdestroy, initialRefcount, flags) + cyptr = utils.HelperInputVoidPtr(ptr) + cdef void* cyptr_ptr = cyptr.cptr + err = cyruntime.cudaUserObjectCreate(object_out._ptr, cyptr_ptr, cydestroy, initialRefcount, flags) return (cudaError_t(err), object_out) {{endif}} @@ -30022,16 +30022,16 @@ def cudaUserObjectRetain(object, unsigned int count): -------- :py:obj:`~.cudaUserObjectCreate`, :py:obj:`~.cudaUserObjectRelease`, :py:obj:`~.cudaGraphRetainUserObject`, :py:obj:`~.cudaGraphReleaseUserObject`, :py:obj:`~.cudaGraphCreate` """ - cdef ccudart.cudaUserObject_t cobject + cdef cyruntime.cudaUserObject_t cyobject if object is None: - cobject = 0 - elif isinstance(object, (cudaUserObject_t,cuda.CUuserObject)): + cyobject = 0 + elif isinstance(object, (cudaUserObject_t,driver.CUuserObject)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(cudaUserObject_t(object)) - cobject = pobject - err = ccudart.cudaUserObjectRetain(cobject, count) + cyobject = pobject + err = cyruntime.cudaUserObjectRetain(cyobject, count) return (cudaError_t(err),) {{endif}} @@ -30067,16 +30067,16 @@ def cudaUserObjectRelease(object, unsigned int count): -------- :py:obj:`~.cudaUserObjectCreate`, :py:obj:`~.cudaUserObjectRetain`, 
:py:obj:`~.cudaGraphRetainUserObject`, :py:obj:`~.cudaGraphReleaseUserObject`, :py:obj:`~.cudaGraphCreate` """ - cdef ccudart.cudaUserObject_t cobject + cdef cyruntime.cudaUserObject_t cyobject if object is None: - cobject = 0 - elif isinstance(object, (cudaUserObject_t,cuda.CUuserObject)): + cyobject = 0 + elif isinstance(object, (cudaUserObject_t,driver.CUuserObject)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(cudaUserObject_t(object)) - cobject = pobject - err = ccudart.cudaUserObjectRelease(cobject, count) + cyobject = pobject + err = cyruntime.cudaUserObjectRelease(cyobject, count) return (cudaError_t(err),) {{endif}} @@ -30115,25 +30115,25 @@ def cudaGraphRetainUserObject(graph, object, unsigned int count, unsigned int fl -------- :py:obj:`~.cudaUserObjectCreate` :py:obj:`~.cudaUserObjectRetain`, :py:obj:`~.cudaUserObjectRelease`, :py:obj:`~.cudaGraphReleaseUserObject`, :py:obj:`~.cudaGraphCreate` """ - cdef ccudart.cudaUserObject_t cobject + cdef cyruntime.cudaUserObject_t cyobject if object is None: - cobject = 0 - elif isinstance(object, (cudaUserObject_t,cuda.CUuserObject)): + cyobject = 0 + elif isinstance(object, (cudaUserObject_t,driver.CUuserObject)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(cudaUserObject_t(object)) - cobject = pobject - cdef ccudart.cudaGraph_t cgraph + cyobject = pobject + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - err = ccudart.cudaGraphRetainUserObject(cgraph, cobject, count, flags) + cygraph = pgraph + err = cyruntime.cudaGraphRetainUserObject(cygraph, cyobject, count, flags) return (cudaError_t(err),) {{endif}} @@ -30167,25 +30167,25 @@ def cudaGraphReleaseUserObject(graph, object, unsigned int count): -------- :py:obj:`~.cudaUserObjectCreate` :py:obj:`~.cudaUserObjectRetain`, :py:obj:`~.cudaUserObjectRelease`, :py:obj:`~.cudaGraphRetainUserObject`, :py:obj:`~.cudaGraphCreate` """ - cdef ccudart.cudaUserObject_t cobject + cdef cyruntime.cudaUserObject_t cyobject if object is None: - cobject = 0 - elif isinstance(object, (cudaUserObject_t,cuda.CUuserObject)): + cyobject = 0 + elif isinstance(object, (cudaUserObject_t,driver.CUuserObject)): pobject = int(object) - cobject = pobject + cyobject = pobject else: pobject = int(cudaUserObject_t(object)) - cobject = pobject - cdef ccudart.cudaGraph_t cgraph + cyobject = pobject + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph - err = ccudart.cudaGraphReleaseUserObject(cgraph, cobject, count) + cygraph = pgraph + err = cyruntime.cudaGraphReleaseUserObject(cygraph, cyobject, count) return (cudaError_t(err),) {{endif}} @@ -30237,31 +30237,31 @@ def cudaGraphAddNode(graph, pDependencies : Optional[Tuple[cudaGraphNode_t] | Li :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaGraphExecNodeSetParams` """ pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' 
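User objects are pure refcount plumbing at this level. A call-shape sketch only: flags=1 stands for cudaUserObjectNoDestructorSync, and the binding does not itself check whether the C runtime accepts a NULL destroy callback, so a real program would pass a cudaHostFn_t here:

    from cuda.bindings import runtime
    err, uobj = runtime.cudaUserObjectCreate(0, None, 1, 1)  # ptr, destroy, initialRefcount, flags
    err, = runtime.cudaGraphRetainUserObject(graph, uobj, 1, 0)  # flags=0: graph copies a reference
    err, = runtime.cudaGraphReleaseUserObject(graph, uobj, 1)
    err, = runtime.cudaUserObjectRelease(uobj, 1)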
is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] + cypDependencies[idx] = (pDependencies[idx])._ptr[0] if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) - cdef ccudart.cudaGraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphAddNode(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, numDependencies, cnodeParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) + cdef cyruntime.cudaGraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphAddNode(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, numDependencies, cynodeParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) return (cudaError_t(err), pGraphNode) {{endif}} @@ -30317,43 +30317,43 @@ def cudaGraphAddNode_v2(graph, pDependencies : Optional[Tuple[cudaGraphNode_t] | """ dependencyData = [] if dependencyData is None else dependencyData if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): - raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[ccudart.cudaGraphEdgeData,] or List[ccudart.cudaGraphEdgeData,]") + raise TypeError("Argument 'dependencyData' is not instance of type (expected Tuple[cyruntime.cudaGraphEdgeData,] or List[cyruntime.cudaGraphEdgeData,]") pDependencies = [] if pDependencies is None else pDependencies - if not all(isinstance(_x, (cudaGraphNode_t,cuda.CUgraphNode)) for _x in pDependencies): - raise TypeError("Argument 'pDependencies' is not instance of type (expected Tuple[ccudart.cudaGraphNode_t,cuda.CUgraphNode] or List[ccudart.cudaGraphNode_t,cuda.CUgraphNode]") - cdef ccudart.cudaGraph_t cgraph + if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): + raise TypeError("Argument 'pDependencies' is not instance of type (expected 
Tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or List[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() - cdef ccudart.cudaGraphNode_t* cpDependencies = NULL + cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL if len(pDependencies) > 0: - cpDependencies = calloc(len(pDependencies), sizeof(ccudart.cudaGraphNode_t)) - if cpDependencies is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(ccudart.cudaGraphNode_t))) + cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) + if cypDependencies is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) else: for idx in range(len(pDependencies)): - cpDependencies[idx] = (pDependencies[idx])._ptr[0] - cdef ccudart.cudaGraphEdgeData* cdependencyData = NULL + cypDependencies[idx] = (pDependencies[idx])._ptr[0] + cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL if len(dependencyData) > 0: - cdependencyData = calloc(len(dependencyData), sizeof(ccudart.cudaGraphEdgeData)) - if cdependencyData is NULL: - raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(ccudart.cudaGraphEdgeData))) + cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) + if cydependencyData is NULL: + raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) for idx in range(len(dependencyData)): - string.memcpy(&cdependencyData[idx], (dependencyData[idx])._ptr, sizeof(ccudart.cudaGraphEdgeData)) + string.memcpy(&cydependencyData[idx], (dependencyData[idx])._ptr, sizeof(cyruntime.cudaGraphEdgeData)) if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) if numDependencies > len(dependencyData): raise RuntimeError("List is too small: " + str(len(dependencyData)) + " < " + str(numDependencies)) - cdef ccudart.cudaGraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphAddNode_v2(pGraphNode._ptr, cgraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cpDependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cdependencyData, numDependencies, cnodeParams_ptr) - if cpDependencies is not NULL: - free(cpDependencies) - if cdependencyData is not NULL: - free(cdependencyData) + cdef cyruntime.cudaGraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphAddNode_v2(pGraphNode._ptr, cygraph, (pDependencies[0])._ptr if len(pDependencies) == 1 else cypDependencies, (dependencyData[0])._ptr if len(dependencyData) == 1 else cydependencyData, numDependencies, cynodeParams_ptr) + if cypDependencies is not NULL: + free(cypDependencies) + if cydependencyData is not NULL: + free(cydependencyData) return (cudaError_t(err), pGraphNode) {{endif}} @@ -30387,17 +30387,17 @@ def cudaGraphNodeSetParams(node, nodeParams : Optional[cudaGraphNodeParams]): -------- 
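cudaGraphAddNode accepts the dependency list as an ordinary Python sequence and copies it into a temporary C array (a single element is passed through by pointer directly, as the hunk above shows). A sketch chaining two empty nodes; the .type attribute is assumed to mirror the C struct field:

    from cuda.bindings import runtime
    params = runtime.cudaGraphNodeParams()
    params.type = runtime.cudaGraphNodeType.cudaGraphNodeTypeEmpty
    err, first = runtime.cudaGraphAddNode(graph, [], 0, params)
    err, second = runtime.cudaGraphAddNode(graph, [first], 1, params)  # second depends on first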
:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphExecNodeSetParams` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphNodeSetParams(cnode, cnodeParams_ptr) + cynode = pnode + cdef cyruntime.cudaGraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphNodeSetParams(cynode, cynodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -30439,26 +30439,26 @@ def cudaGraphExecNodeSetParams(graphExec, node, nodeParams : Optional[cudaGraphN -------- :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphNodeSetParams` :py:obj:`~.cudaGraphExecUpdate`, :py:obj:`~.cudaGraphInstantiate` """ - cdef ccudart.cudaGraphNode_t cnode + cdef cyruntime.cudaGraphNode_t cynode if node is None: - cnode = 0 - elif isinstance(node, (cudaGraphNode_t,cuda.CUgraphNode)): + cynode = 0 + elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): pnode = int(node) - cnode = pnode + cynode = pnode else: pnode = int(cudaGraphNode_t(node)) - cnode = pnode - cdef ccudart.cudaGraphExec_t cgraphExec + cynode = pnode + cdef cyruntime.cudaGraphExec_t cygraphExec if graphExec is None: - cgraphExec = 0 - elif isinstance(graphExec, (cudaGraphExec_t,cuda.CUgraphExec)): + cygraphExec = 0 + elif isinstance(graphExec, (cudaGraphExec_t,driver.CUgraphExec)): pgraphExec = int(graphExec) - cgraphExec = pgraphExec + cygraphExec = pgraphExec else: pgraphExec = int(cudaGraphExec_t(graphExec)) - cgraphExec = pgraphExec - cdef ccudart.cudaGraphNodeParams* cnodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL - err = ccudart.cudaGraphExecNodeSetParams(cgraphExec, cnode, cnodeParams_ptr) + cygraphExec = pgraphExec + cdef cyruntime.cudaGraphNodeParams* cynodeParams_ptr = nodeParams._ptr if nodeParams != None else NULL + err = cyruntime.cudaGraphExecNodeSetParams(cygraphExec, cynode, cynodeParams_ptr) return (cudaError_t(err),) {{endif}} @@ -30496,17 +30496,17 @@ def cudaGraphConditionalHandleCreate(graph, unsigned int defaultLaunchValue, uns -------- :py:obj:`~.cuGraphAddNode`, """ - cdef ccudart.cudaGraph_t cgraph + cdef cyruntime.cudaGraph_t cygraph if graph is None: - cgraph = 0 - elif isinstance(graph, (cudaGraph_t,cuda.CUgraph)): + cygraph = 0 + elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): pgraph = int(graph) - cgraph = pgraph + cygraph = pgraph else: pgraph = int(cudaGraph_t(graph)) - cgraph = pgraph + cygraph = pgraph cdef cudaGraphConditionalHandle pHandle_out = cudaGraphConditionalHandle() - err = ccudart.cudaGraphConditionalHandleCreate(pHandle_out._ptr, cgraph, defaultLaunchValue, flags) + err = cyruntime.cudaGraphConditionalHandleCreate(pHandle_out._ptr, cygraph, defaultLaunchValue, flags) return (cudaError_t(err), pHandle_out) {{endif}} @@ -30600,8 +30600,8 @@ def cudaGetDriverEntryPoint(char* symbol, unsigned long long flags): :py:obj:`~.cuGetProcAddress` """ cdef void_ptr funcPtr = 0 - cdef ccudart.cudaDriverEntryPointQueryResult driverStatus - err = ccudart.cudaGetDriverEntryPoint(symbol, &funcPtr, flags, &driverStatus) + cdef cyruntime.cudaDriverEntryPointQueryResult driverStatus + err = cyruntime.cudaGetDriverEntryPoint(symbol, 
&funcPtr, flags, &driverStatus) return (cudaError_t(err), funcPtr, cudaDriverEntryPointQueryResult(driverStatus)) {{endif}} @@ -30703,8 +30703,8 @@ def cudaGetDriverEntryPointByVersion(char* symbol, unsigned int cudaVersion, uns :py:obj:`~.cuGetProcAddress` """ cdef void_ptr funcPtr = 0 - cdef ccudart.cudaDriverEntryPointQueryResult driverStatus - err = ccudart.cudaGetDriverEntryPointByVersion(symbol, &funcPtr, cudaVersion, flags, &driverStatus) + cdef cyruntime.cudaDriverEntryPointQueryResult driverStatus + err = cyruntime.cudaGetDriverEntryPointByVersion(symbol, &funcPtr, cudaVersion, flags, &driverStatus) return (cudaError_t(err), funcPtr, cudaDriverEntryPointQueryResult(driverStatus)) {{endif}} @@ -30714,8 +30714,8 @@ def cudaGetDriverEntryPointByVersion(char* symbol, unsigned int cudaVersion, uns def cudaGetExportTable(pExportTableId : Optional[cudaUUID_t]): """""" cdef void_ptr ppExportTable = 0 - cdef ccudart.cudaUUID_t* cpExportTableId_ptr = pExportTableId._ptr if pExportTableId != None else NULL - err = ccudart.cudaGetExportTable(&ppExportTable, cpExportTableId_ptr) + cdef cyruntime.cudaUUID_t* cypExportTableId_ptr = pExportTableId._ptr if pExportTableId != None else NULL + err = cyruntime.cudaGetExportTable(&ppExportTable, cypExportTableId_ptr) return (cudaError_t(err), ppExportTable) {{endif}} @@ -30745,9 +30745,9 @@ def cudaGetKernel(entryFuncAddr): cudaGetKernel (C++ API) """ cdef cudaKernel_t kernelPtr = cudaKernel_t() - centryFuncAddr = utils.HelperInputVoidPtr(entryFuncAddr) - cdef void* centryFuncAddr_ptr = centryFuncAddr.cptr - err = ccudart.cudaGetKernel(kernelPtr._ptr, centryFuncAddr_ptr) + cyentryFuncAddr = utils.HelperInputVoidPtr(entryFuncAddr) + cdef void* cyentryFuncAddr_ptr = cyentryFuncAddr.cptr + err = cyruntime.cudaGetKernel(kernelPtr._ptr, cyentryFuncAddr_ptr) return (cudaError_t(err), kernelPtr) {{endif}} @@ -30782,9 +30782,9 @@ def make_cudaPitchedPtr(d, size_t p, size_t xsz, size_t ysz): -------- make_cudaExtent, make_cudaPos """ - cd = utils.HelperInputVoidPtr(d) - cdef void* cd_ptr = cd.cptr - err = ccudart.make_cudaPitchedPtr(cd_ptr, p, xsz, ysz) + cyd = utils.HelperInputVoidPtr(d) + cdef void* cyd_ptr = cyd.cptr + err = cyruntime.make_cudaPitchedPtr(cyd_ptr, p, xsz, ysz) cdef cudaPitchedPtr wrapper = cudaPitchedPtr() wrapper._ptr[0] = err return wrapper @@ -30819,7 +30819,7 @@ def make_cudaPos(size_t x, size_t y, size_t z): -------- make_cudaExtent, make_cudaPitchedPtr """ - err = ccudart.make_cudaPos(x, y, z) + err = cyruntime.make_cudaPos(x, y, z) cdef cudaPos wrapper = cudaPos() wrapper._ptr[0] = err return wrapper @@ -30855,7 +30855,7 @@ def make_cudaExtent(size_t w, size_t h, size_t d): -------- make_cudaPitchedPtr, make_cudaPos """ - err = ccudart.make_cudaExtent(w, h, d) + err = cyruntime.make_cudaExtent(w, h, d) cdef cudaExtent wrapper = cudaExtent() wrapper._ptr[0] = err return wrapper @@ -30921,17 +30921,17 @@ def cudaGraphicsEGLRegisterImage(image, unsigned int flags): -------- :py:obj:`~.cudaGraphicsUnregisterResource`, :py:obj:`~.cudaGraphicsResourceGetMappedEglFrame`, :py:obj:`~.cuGraphicsEGLRegisterImage` """ - cdef ccudart.EGLImageKHR cimage + cdef cyruntime.EGLImageKHR cyimage if image is None: - cimage = 0 + cyimage = 0 elif isinstance(image, (EGLImageKHR,)): pimage = int(image) - cimage = pimage + cyimage = pimage else: pimage = int(EGLImageKHR(image)) - cimage = pimage + cyimage = pimage cdef cudaGraphicsResource_t pCudaResource = cudaGraphicsResource_t() - err = ccudart.cudaGraphicsEGLRegisterImage(pCudaResource._ptr, cimage, flags) 
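Both entry-point queries return the raw function pointer plus a query status, per the hunks above. A sketch, where flags=0 stands for cudaEnableDefault and the symbol keeps its cu prefix:

    from cuda.bindings import runtime
    err, fptr, status = runtime.cudaGetDriverEntryPointByVersion(b"cuMemAlloc", 12000, 0)
    if status == runtime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess:
        pass  # fptr is an integer address suitable for ctypes/cffi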
+ err = cyruntime.cudaGraphicsEGLRegisterImage(pCudaResource._ptr, cyimage, flags) return (cudaError_t(err), pCudaResource) {{endif}} @@ -30962,17 +30962,17 @@ def cudaEGLStreamConsumerConnect(eglStream): -------- :py:obj:`~.cudaEGLStreamConsumerDisconnect`, :py:obj:`~.cudaEGLStreamConsumerAcquireFrame`, :py:obj:`~.cudaEGLStreamConsumerReleaseFrame`, :py:obj:`~.cuEGLStreamConsumerConnect` """ - cdef ccudart.EGLStreamKHR ceglStream + cdef cyruntime.EGLStreamKHR cyeglStream if eglStream is None: - ceglStream = 0 + cyeglStream = 0 elif isinstance(eglStream, (EGLStreamKHR,)): peglStream = int(eglStream) - ceglStream = peglStream + cyeglStream = peglStream else: peglStream = int(EGLStreamKHR(eglStream)) - ceglStream = peglStream + cyeglStream = peglStream cdef cudaEglStreamConnection conn = cudaEglStreamConnection() - err = ccudart.cudaEGLStreamConsumerConnect(conn._ptr, ceglStream) + err = cyruntime.cudaEGLStreamConsumerConnect(conn._ptr, cyeglStream) return (cudaError_t(err), conn) {{endif}} @@ -31007,17 +31007,17 @@ def cudaEGLStreamConsumerConnectWithFlags(eglStream, unsigned int flags): -------- :py:obj:`~.cudaEGLStreamConsumerDisconnect`, :py:obj:`~.cudaEGLStreamConsumerAcquireFrame`, :py:obj:`~.cudaEGLStreamConsumerReleaseFrame`, :py:obj:`~.cuEGLStreamConsumerConnectWithFlags` """ - cdef ccudart.EGLStreamKHR ceglStream + cdef cyruntime.EGLStreamKHR cyeglStream if eglStream is None: - ceglStream = 0 + cyeglStream = 0 elif isinstance(eglStream, (EGLStreamKHR,)): peglStream = int(eglStream) - ceglStream = peglStream + cyeglStream = peglStream else: peglStream = int(EGLStreamKHR(eglStream)) - ceglStream = peglStream + cyeglStream = peglStream cdef cudaEglStreamConnection conn = cudaEglStreamConnection() - err = ccudart.cudaEGLStreamConsumerConnectWithFlags(conn._ptr, ceglStream, flags) + err = cyruntime.cudaEGLStreamConsumerConnectWithFlags(conn._ptr, cyeglStream, flags) return (cudaError_t(err), conn) {{endif}} @@ -31043,17 +31043,17 @@ def cudaEGLStreamConsumerDisconnect(conn): -------- :py:obj:`~.cudaEGLStreamConsumerConnect`, :py:obj:`~.cudaEGLStreamConsumerAcquireFrame`, :py:obj:`~.cudaEGLStreamConsumerReleaseFrame`, :py:obj:`~.cuEGLStreamConsumerDisconnect` """ - cdef ccudart.cudaEglStreamConnection *cconn + cdef cyruntime.cudaEglStreamConnection *cyconn if conn is None: - cconn = NULL - elif isinstance(conn, (cudaEglStreamConnection,cuda.CUeglStreamConnection)): + cyconn = NULL + elif isinstance(conn, (cudaEglStreamConnection,driver.CUeglStreamConnection)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccudart.cudaEGLStreamConsumerDisconnect(cconn) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cyruntime.cudaEGLStreamConsumerDisconnect(cyconn) return (cudaError_t(err),) {{endif}} @@ -31088,37 +31088,37 @@ def cudaEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, unsigned int -------- :py:obj:`~.cudaEGLStreamConsumerConnect`, :py:obj:`~.cudaEGLStreamConsumerDisconnect`, :py:obj:`~.cudaEGLStreamConsumerReleaseFrame`, :py:obj:`~.cuEGLStreamConsumerAcquireFrame` """ - cdef ccudart.cudaStream_t *cpStream + cdef cyruntime.cudaStream_t *cypStream if pStream is None: - cpStream = NULL - elif isinstance(pStream, (cudaStream_t,cuda.CUstream)): + cypStream = NULL + elif isinstance(pStream, (cudaStream_t,driver.CUstream)): ppStream = pStream.getPtr() 
- cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccudart.cudaGraphicsResource_t *cpCudaResource + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cyruntime.cudaGraphicsResource_t *cypCudaResource if pCudaResource is None: - cpCudaResource = NULL + cypCudaResource = NULL elif isinstance(pCudaResource, (cudaGraphicsResource_t,)): ppCudaResource = pCudaResource.getPtr() - cpCudaResource = ppCudaResource + cypCudaResource = ppCudaResource elif isinstance(pCudaResource, (int)): - cpCudaResource = pCudaResource + cypCudaResource = pCudaResource else: - raise TypeError("Argument 'pCudaResource' is not instance of type (expected , found " + str(type(pCudaResource))) - cdef ccudart.cudaEglStreamConnection *cconn + raise TypeError("Argument 'pCudaResource' is not instance of type (expected , found " + str(type(pCudaResource))) + cdef cyruntime.cudaEglStreamConnection *cyconn if conn is None: - cconn = NULL - elif isinstance(conn, (cudaEglStreamConnection,cuda.CUeglStreamConnection)): + cyconn = NULL + elif isinstance(conn, (cudaEglStreamConnection,driver.CUeglStreamConnection)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccudart.cudaEGLStreamConsumerAcquireFrame(cconn, cpCudaResource, cpStream, timeout) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cyruntime.cudaEGLStreamConsumerAcquireFrame(cyconn, cypCudaResource, cypStream, timeout) return (cudaError_t(err),) {{endif}} @@ -31149,36 +31149,36 @@ def cudaEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream): -------- :py:obj:`~.cudaEGLStreamConsumerConnect`, :py:obj:`~.cudaEGLStreamConsumerDisconnect`, :py:obj:`~.cudaEGLStreamConsumerAcquireFrame`, :py:obj:`~.cuEGLStreamConsumerReleaseFrame` """ - cdef ccudart.cudaStream_t *cpStream + cdef cyruntime.cudaStream_t *cypStream if pStream is None: - cpStream = NULL - elif isinstance(pStream, (cudaStream_t,cuda.CUstream)): + cypStream = NULL + elif isinstance(pStream, (cudaStream_t,driver.CUstream)): ppStream = pStream.getPtr() - cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccudart.cudaGraphicsResource_t cpCudaResource + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cyruntime.cudaGraphicsResource_t cypCudaResource if pCudaResource is None: - cpCudaResource = 0 + cypCudaResource = 0 elif isinstance(pCudaResource, (cudaGraphicsResource_t,)): ppCudaResource = int(pCudaResource) - cpCudaResource = ppCudaResource + cypCudaResource = ppCudaResource else: ppCudaResource = int(cudaGraphicsResource_t(pCudaResource)) - cpCudaResource = ppCudaResource - cdef ccudart.cudaEglStreamConnection *cconn + cypCudaResource = ppCudaResource + cdef cyruntime.cudaEglStreamConnection *cyconn if conn is None: - cconn = NULL - elif isinstance(conn, (cudaEglStreamConnection,cuda.CUeglStreamConnection)): + cyconn = NULL + elif isinstance(conn, 
(cudaEglStreamConnection,driver.CUeglStreamConnection)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccudart.cudaEGLStreamConsumerReleaseFrame(cconn, cpCudaResource, cpStream) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cyruntime.cudaEGLStreamConsumerReleaseFrame(cyconn, cypCudaResource, cypStream) return (cudaError_t(err),) {{endif}} @@ -31213,35 +31213,35 @@ def cudaEGLStreamProducerConnect(eglStream, width, height): -------- :py:obj:`~.cudaEGLStreamProducerDisconnect`, :py:obj:`~.cudaEGLStreamProducerPresentFrame`, :py:obj:`~.cudaEGLStreamProducerReturnFrame`, :py:obj:`~.cuEGLStreamProducerConnect` """ - cdef ccudart.EGLint cheight + cdef cyruntime.EGLint cyheight if height is None: - cheight = 0 + cyheight = 0 elif isinstance(height, (EGLint,)): pheight = int(height) - cheight = pheight + cyheight = pheight else: pheight = int(EGLint(height)) - cheight = pheight - cdef ccudart.EGLint cwidth + cyheight = pheight + cdef cyruntime.EGLint cywidth if width is None: - cwidth = 0 + cywidth = 0 elif isinstance(width, (EGLint,)): pwidth = int(width) - cwidth = pwidth + cywidth = pwidth else: pwidth = int(EGLint(width)) - cwidth = pwidth - cdef ccudart.EGLStreamKHR ceglStream + cywidth = pwidth + cdef cyruntime.EGLStreamKHR cyeglStream if eglStream is None: - ceglStream = 0 + cyeglStream = 0 elif isinstance(eglStream, (EGLStreamKHR,)): peglStream = int(eglStream) - ceglStream = peglStream + cyeglStream = peglStream else: peglStream = int(EGLStreamKHR(eglStream)) - ceglStream = peglStream + cyeglStream = peglStream cdef cudaEglStreamConnection conn = cudaEglStreamConnection() - err = ccudart.cudaEGLStreamProducerConnect(conn._ptr, ceglStream, cwidth, cheight) + err = cyruntime.cudaEGLStreamProducerConnect(conn._ptr, cyeglStream, cywidth, cyheight) return (cudaError_t(err), conn) {{endif}} @@ -31267,17 +31267,17 @@ def cudaEGLStreamProducerDisconnect(conn): -------- :py:obj:`~.cudaEGLStreamProducerConnect`, :py:obj:`~.cudaEGLStreamProducerPresentFrame`, :py:obj:`~.cudaEGLStreamProducerReturnFrame`, :py:obj:`~.cuEGLStreamProducerDisconnect` """ - cdef ccudart.cudaEglStreamConnection *cconn + cdef cyruntime.cudaEglStreamConnection *cyconn if conn is None: - cconn = NULL - elif isinstance(conn, (cudaEglStreamConnection,cuda.CUeglStreamConnection)): + cyconn = NULL + elif isinstance(conn, (cudaEglStreamConnection,driver.CUeglStreamConnection)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccudart.cudaEGLStreamProducerDisconnect(cconn) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cyruntime.cudaEGLStreamProducerDisconnect(cyconn) return (cudaError_t(err),) {{endif}} @@ -31316,27 +31316,27 @@ def cudaEGLStreamProducerPresentFrame(conn, eglframe not None : cudaEglFrame, pS -------- :py:obj:`~.cudaEGLStreamProducerConnect`, :py:obj:`~.cudaEGLStreamProducerDisconnect`, :py:obj:`~.cudaEGLStreamProducerReturnFrame`, :py:obj:`~.cuEGLStreamProducerPresentFrame` """ - cdef ccudart.cudaStream_t *cpStream + cdef cyruntime.cudaStream_t *cypStream if pStream is None: - cpStream = NULL - elif isinstance(pStream, 
(cudaStream_t,cuda.CUstream)): + cypStream = NULL + elif isinstance(pStream, (cudaStream_t,driver.CUstream)): ppStream = pStream.getPtr() - cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccudart.cudaEglStreamConnection *cconn + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cyruntime.cudaEglStreamConnection *cyconn if conn is None: - cconn = NULL - elif isinstance(conn, (cudaEglStreamConnection,cuda.CUeglStreamConnection)): + cyconn = NULL + elif isinstance(conn, (cudaEglStreamConnection,driver.CUeglStreamConnection)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - err = ccudart.cudaEGLStreamProducerPresentFrame(cconn, eglframe._ptr[0], cpStream) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + err = cyruntime.cudaEGLStreamProducerPresentFrame(cyconn, eglframe._ptr[0], cypStream) return (cudaError_t(err),) {{endif}} @@ -31369,28 +31369,28 @@ def cudaEGLStreamProducerReturnFrame(conn, eglframe : Optional[cudaEglFrame], pS -------- :py:obj:`~.cudaEGLStreamProducerConnect`, :py:obj:`~.cudaEGLStreamProducerDisconnect`, :py:obj:`~.cudaEGLStreamProducerPresentFrame`, :py:obj:`~.cuEGLStreamProducerReturnFrame` """ - cdef ccudart.cudaStream_t *cpStream + cdef cyruntime.cudaStream_t *cypStream if pStream is None: - cpStream = NULL - elif isinstance(pStream, (cudaStream_t,cuda.CUstream)): + cypStream = NULL + elif isinstance(pStream, (cudaStream_t,driver.CUstream)): ppStream = pStream.getPtr() - cpStream = ppStream + cypStream = ppStream elif isinstance(pStream, (int)): - cpStream = pStream + cypStream = pStream else: - raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) - cdef ccudart.cudaEglStreamConnection *cconn + raise TypeError("Argument 'pStream' is not instance of type (expected , found " + str(type(pStream))) + cdef cyruntime.cudaEglStreamConnection *cyconn if conn is None: - cconn = NULL - elif isinstance(conn, (cudaEglStreamConnection,cuda.CUeglStreamConnection)): + cyconn = NULL + elif isinstance(conn, (cudaEglStreamConnection,driver.CUeglStreamConnection)): pconn = conn.getPtr() - cconn = pconn + cyconn = pconn elif isinstance(conn, (int)): - cconn = conn + cyconn = conn else: - raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) - cdef ccudart.cudaEglFrame* ceglframe_ptr = eglframe._ptr if eglframe != None else NULL - err = ccudart.cudaEGLStreamProducerReturnFrame(cconn, ceglframe_ptr, cpStream) + raise TypeError("Argument 'conn' is not instance of type (expected , found " + str(type(conn))) + cdef cyruntime.cudaEglFrame* cyeglframe_ptr = eglframe._ptr if eglframe != None else NULL + err = cyruntime.cudaEGLStreamProducerReturnFrame(cyconn, cyeglframe_ptr, cypStream) return (cudaError_t(err),) {{endif}} @@ -31432,17 +31432,17 @@ def cudaGraphicsResourceGetMappedEglFrame(resource, unsigned int index, unsigned ----- Note that in case of multiplanar `*eglFrame`, pitch of only first plane (unsigned int :py:obj:`~.cudaEglPlaneDesc.pitch`) is to be considered by the application. 
""" - cdef ccudart.cudaGraphicsResource_t cresource + cdef cyruntime.cudaGraphicsResource_t cyresource if resource is None: - cresource = 0 + cyresource = 0 elif isinstance(resource, (cudaGraphicsResource_t,)): presource = int(resource) - cresource = presource + cyresource = presource else: presource = int(cudaGraphicsResource_t(resource)) - cresource = presource + cyresource = presource cdef cudaEglFrame eglFrame = cudaEglFrame() - err = ccudart.cudaGraphicsResourceGetMappedEglFrame(eglFrame._ptr, cresource, index, mipLevel) + err = cyruntime.cudaGraphicsResourceGetMappedEglFrame(eglFrame._ptr, cyresource, index, mipLevel) return (cudaError_t(err), eglFrame) {{endif}} @@ -31486,17 +31486,17 @@ def cudaEventCreateFromEGLSync(eglSync, unsigned int flags): -------- :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy` """ - cdef ccudart.EGLSyncKHR ceglSync + cdef cyruntime.EGLSyncKHR cyeglSync if eglSync is None: - ceglSync = 0 + cyeglSync = 0 elif isinstance(eglSync, (EGLSyncKHR,)): peglSync = int(eglSync) - ceglSync = peglSync + cyeglSync = peglSync else: peglSync = int(EGLSyncKHR(eglSync)) - ceglSync = peglSync + cyeglSync = peglSync cdef cudaEvent_t phEvent = cudaEvent_t() - err = ccudart.cudaEventCreateFromEGLSync(phEvent._ptr, ceglSync, flags) + err = cyruntime.cudaEventCreateFromEGLSync(phEvent._ptr, cyeglSync, flags) return (cudaError_t(err), phEvent) {{endif}} @@ -31523,7 +31523,7 @@ def cudaProfilerStart(): -------- :py:obj:`~.cudaProfilerStop`, :py:obj:`~.cuProfilerStart` """ - err = ccudart.cudaProfilerStart() + err = cyruntime.cudaProfilerStart() return (cudaError_t(err),) {{endif}} @@ -31550,7 +31550,7 @@ def cudaProfilerStop(): -------- :py:obj:`~.cudaProfilerStart`, :py:obj:`~.cuProfilerStop` """ - err = ccudart.cudaProfilerStop() + err = cyruntime.cudaProfilerStop() return (cudaError_t(err),) {{endif}} @@ -31604,18 +31604,18 @@ def cudaGLGetDevices(unsigned int cudaDeviceCount, deviceList not None : cudaGLD """ cdef unsigned int pCudaDeviceCount = 0 - cdef int* cpCudaDevices = NULL + cdef int* cypCudaDevices = NULL pypCudaDevices = [] if cudaDeviceCount != 0: - cpCudaDevices = calloc(cudaDeviceCount, sizeof(int)) - if cpCudaDevices is NULL: + cypCudaDevices = calloc(cudaDeviceCount, sizeof(int)) + if cypCudaDevices is NULL: raise MemoryError('Failed to allocate length x size memory: ' + str(cudaDeviceCount) + 'x' + str(sizeof(int))) - cdef ccudart.cudaGLDeviceList cdeviceList = deviceList.value - err = ccudart.cudaGLGetDevices(&pCudaDeviceCount, cpCudaDevices, cudaDeviceCount, cdeviceList) + cdef cyruntime.cudaGLDeviceList cydeviceList = deviceList.value + err = cyruntime.cudaGLGetDevices(&pCudaDeviceCount, cypCudaDevices, cudaDeviceCount, cydeviceList) if cudaError_t(err) == cudaError_t(0): - pypCudaDevices = [cpCudaDevices[idx] for idx in range(cudaDeviceCount)] - if cpCudaDevices is not NULL: - free(cpCudaDevices) + pypCudaDevices = [cypCudaDevices[idx] for idx in range(cudaDeviceCount)] + if cypCudaDevices is not NULL: + free(cypCudaDevices) return (cudaError_t(err), pCudaDeviceCount, pypCudaDevices) {{endif}} @@ -31695,26 +31695,26 @@ def cudaGraphicsGLRegisterImage(image, target, unsigned int flags): -------- :py:obj:`~.cudaGraphicsUnregisterResource`, :py:obj:`~.cudaGraphicsMapResources`, :py:obj:`~.cudaGraphicsSubResourceGetMappedArray`, :py:obj:`~.cuGraphicsGLRegisterImage` """ - cdef ccudart.GLenum ctarget + cdef cyruntime.GLenum cytarget if target is None: - ctarget = 0 + cytarget = 0 elif isinstance(target, (GLenum,)): ptarget 
= int(target) - ctarget = ptarget + cytarget = ptarget else: ptarget = int(GLenum(target)) - ctarget = ptarget - cdef ccudart.GLuint cimage + cytarget = ptarget + cdef cyruntime.GLuint cyimage if image is None: - cimage = 0 + cyimage = 0 elif isinstance(image, (GLuint,)): pimage = int(image) - cimage = pimage + cyimage = pimage else: pimage = int(GLuint(image)) - cimage = pimage + cyimage = pimage cdef cudaGraphicsResource_t resource = cudaGraphicsResource_t() - err = ccudart.cudaGraphicsGLRegisterImage(resource._ptr, cimage, ctarget, flags) + err = cyruntime.cudaGraphicsGLRegisterImage(resource._ptr, cyimage, cytarget, flags) return (cudaError_t(err), resource) {{endif}} @@ -31759,17 +31759,17 @@ def cudaGraphicsGLRegisterBuffer(buffer, unsigned int flags): -------- :py:obj:`~.cudaGraphicsUnregisterResource`, :py:obj:`~.cudaGraphicsMapResources`, :py:obj:`~.cudaGraphicsResourceGetMappedPointer`, :py:obj:`~.cuGraphicsGLRegisterBuffer` """ - cdef ccudart.GLuint cbuffer + cdef cyruntime.GLuint cybuffer if buffer is None: - cbuffer = 0 + cybuffer = 0 elif isinstance(buffer, (GLuint,)): pbuffer = int(buffer) - cbuffer = pbuffer + cybuffer = pbuffer else: pbuffer = int(GLuint(buffer)) - cbuffer = pbuffer + cybuffer = pbuffer cdef cudaGraphicsResource_t resource = cudaGraphicsResource_t() - err = ccudart.cudaGraphicsGLRegisterBuffer(resource._ptr, cbuffer, flags) + err = cyruntime.cudaGraphicsGLRegisterBuffer(resource._ptr, cybuffer, flags) return (cudaError_t(err), resource) {{endif}} @@ -31800,27 +31800,27 @@ def cudaVDPAUGetDevice(vdpDevice, vdpGetProcAddress): -------- :py:obj:`~.cudaVDPAUSetVDPAUDevice`, :py:obj:`~.cuVDPAUGetDevice` """ - cdef ccudart.VdpGetProcAddress *cvdpGetProcAddress + cdef cyruntime.VdpGetProcAddress *cyvdpGetProcAddress if vdpGetProcAddress is None: - cvdpGetProcAddress = NULL + cyvdpGetProcAddress = NULL elif isinstance(vdpGetProcAddress, (VdpGetProcAddress,)): pvdpGetProcAddress = vdpGetProcAddress.getPtr() - cvdpGetProcAddress = pvdpGetProcAddress + cyvdpGetProcAddress = pvdpGetProcAddress elif isinstance(vdpGetProcAddress, (int)): - cvdpGetProcAddress = vdpGetProcAddress + cyvdpGetProcAddress = vdpGetProcAddress else: - raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) - cdef ccudart.VdpDevice cvdpDevice + raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) + cdef cyruntime.VdpDevice cyvdpDevice if vdpDevice is None: - cvdpDevice = 0 + cyvdpDevice = 0 elif isinstance(vdpDevice, (VdpDevice,)): pvdpDevice = int(vdpDevice) - cvdpDevice = pvdpDevice + cyvdpDevice = pvdpDevice else: pvdpDevice = int(VdpDevice(vdpDevice)) - cvdpDevice = pvdpDevice + cyvdpDevice = pvdpDevice cdef int device = 0 - err = ccudart.cudaVDPAUGetDevice(&device, cvdpDevice, cvdpGetProcAddress) + err = cyruntime.cudaVDPAUGetDevice(&device, cyvdpDevice, cyvdpGetProcAddress) return (cudaError_t(err), device) {{endif}} @@ -31860,26 +31860,26 @@ def cudaVDPAUSetVDPAUDevice(int device, vdpDevice, vdpGetProcAddress): -------- :py:obj:`~.cudaGraphicsVDPAURegisterVideoSurface`, :py:obj:`~.cudaGraphicsVDPAURegisterOutputSurface`, :py:obj:`~.cudaDeviceReset` """ - cdef ccudart.VdpGetProcAddress *cvdpGetProcAddress + cdef cyruntime.VdpGetProcAddress *cyvdpGetProcAddress if vdpGetProcAddress is None: - cvdpGetProcAddress = NULL + cyvdpGetProcAddress = NULL elif isinstance(vdpGetProcAddress, (VdpGetProcAddress,)): pvdpGetProcAddress = vdpGetProcAddress.getPtr() - 
cvdpGetProcAddress = pvdpGetProcAddress + cyvdpGetProcAddress = pvdpGetProcAddress elif isinstance(vdpGetProcAddress, (int)): - cvdpGetProcAddress = vdpGetProcAddress + cyvdpGetProcAddress = vdpGetProcAddress else: - raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) - cdef ccudart.VdpDevice cvdpDevice + raise TypeError("Argument 'vdpGetProcAddress' is not instance of type (expected , found " + str(type(vdpGetProcAddress))) + cdef cyruntime.VdpDevice cyvdpDevice if vdpDevice is None: - cvdpDevice = 0 + cyvdpDevice = 0 elif isinstance(vdpDevice, (VdpDevice,)): pvdpDevice = int(vdpDevice) - cvdpDevice = pvdpDevice + cyvdpDevice = pvdpDevice else: pvdpDevice = int(VdpDevice(vdpDevice)) - cvdpDevice = pvdpDevice - err = ccudart.cudaVDPAUSetVDPAUDevice(device, cvdpDevice, cvdpGetProcAddress) + cyvdpDevice = pvdpDevice + err = cyruntime.cudaVDPAUSetVDPAUDevice(device, cyvdpDevice, cyvdpGetProcAddress) return (cudaError_t(err),) {{endif}} @@ -31924,17 +31924,17 @@ def cudaGraphicsVDPAURegisterVideoSurface(vdpSurface, unsigned int flags): -------- :py:obj:`~.cudaVDPAUSetVDPAUDevice`, :py:obj:`~.cudaGraphicsUnregisterResource`, :py:obj:`~.cudaGraphicsSubResourceGetMappedArray`, :py:obj:`~.cuGraphicsVDPAURegisterVideoSurface` """ - cdef ccudart.VdpVideoSurface cvdpSurface + cdef cyruntime.VdpVideoSurface cyvdpSurface if vdpSurface is None: - cvdpSurface = 0 + cyvdpSurface = 0 elif isinstance(vdpSurface, (VdpVideoSurface,)): pvdpSurface = int(vdpSurface) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface else: pvdpSurface = int(VdpVideoSurface(vdpSurface)) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface cdef cudaGraphicsResource_t resource = cudaGraphicsResource_t() - err = ccudart.cudaGraphicsVDPAURegisterVideoSurface(resource._ptr, cvdpSurface, flags) + err = cyruntime.cudaGraphicsVDPAURegisterVideoSurface(resource._ptr, cyvdpSurface, flags) return (cudaError_t(err), resource) {{endif}} @@ -31979,17 +31979,17 @@ def cudaGraphicsVDPAURegisterOutputSurface(vdpSurface, unsigned int flags): -------- :py:obj:`~.cudaVDPAUSetVDPAUDevice`, :py:obj:`~.cudaGraphicsUnregisterResource`, :py:obj:`~.cudaGraphicsSubResourceGetMappedArray`, :py:obj:`~.cuGraphicsVDPAURegisterOutputSurface` """ - cdef ccudart.VdpOutputSurface cvdpSurface + cdef cyruntime.VdpOutputSurface cyvdpSurface if vdpSurface is None: - cvdpSurface = 0 + cyvdpSurface = 0 elif isinstance(vdpSurface, (VdpOutputSurface,)): pvdpSurface = int(vdpSurface) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface else: pvdpSurface = int(VdpOutputSurface(vdpSurface)) - cvdpSurface = pvdpSurface + cyvdpSurface = pvdpSurface cdef cudaGraphicsResource_t resource = cudaGraphicsResource_t() - err = ccudart.cudaGraphicsVDPAURegisterOutputSurface(resource._ptr, cvdpSurface, flags) + err = cyruntime.cudaGraphicsVDPAURegisterOutputSurface(resource._ptr, cyvdpSurface, flags) return (cudaError_t(err), resource) {{endif}} @@ -32021,7 +32021,7 @@ def getLocalRuntimeVersion(): :py:obj:`~.cudaDriverGetVersion`, :py:obj:`~.cuDriverGetVersion` """ cdef int runtimeVersion = 0 - err = ccudart.getLocalRuntimeVersion(&runtimeVersion) + err = cyruntime.getLocalRuntimeVersion(&runtimeVersion) return (cudaError_t(err), runtimeVersion) @@ -32041,329 +32041,329 @@ def sizeof(objType): """ {{if 'struct dim3' in found_types}} if objType == dim3: - return sizeof(ccudart.dim3){{endif}} + return sizeof(cyruntime.dim3){{endif}} {{if 'struct cudaChannelFormatDesc' in found_types}} if objType == 
cudaChannelFormatDesc: - return sizeof(ccudart.cudaChannelFormatDesc){{endif}} + return sizeof(cyruntime.cudaChannelFormatDesc){{endif}} {{if 'cudaArray_t' in found_types}} if objType == cudaArray_t: - return sizeof(ccudart.cudaArray_t){{endif}} + return sizeof(cyruntime.cudaArray_t){{endif}} {{if 'cudaArray_const_t' in found_types}} if objType == cudaArray_const_t: - return sizeof(ccudart.cudaArray_const_t){{endif}} + return sizeof(cyruntime.cudaArray_const_t){{endif}} {{if 'cudaMipmappedArray_t' in found_types}} if objType == cudaMipmappedArray_t: - return sizeof(ccudart.cudaMipmappedArray_t){{endif}} + return sizeof(cyruntime.cudaMipmappedArray_t){{endif}} {{if 'cudaMipmappedArray_const_t' in found_types}} if objType == cudaMipmappedArray_const_t: - return sizeof(ccudart.cudaMipmappedArray_const_t){{endif}} + return sizeof(cyruntime.cudaMipmappedArray_const_t){{endif}} {{if 'struct cudaArraySparseProperties' in found_types}} if objType == cudaArraySparseProperties: - return sizeof(ccudart.cudaArraySparseProperties){{endif}} + return sizeof(cyruntime.cudaArraySparseProperties){{endif}} {{if 'struct cudaArrayMemoryRequirements' in found_types}} if objType == cudaArrayMemoryRequirements: - return sizeof(ccudart.cudaArrayMemoryRequirements){{endif}} + return sizeof(cyruntime.cudaArrayMemoryRequirements){{endif}} {{if 'struct cudaPitchedPtr' in found_types}} if objType == cudaPitchedPtr: - return sizeof(ccudart.cudaPitchedPtr){{endif}} + return sizeof(cyruntime.cudaPitchedPtr){{endif}} {{if 'struct cudaExtent' in found_types}} if objType == cudaExtent: - return sizeof(ccudart.cudaExtent){{endif}} + return sizeof(cyruntime.cudaExtent){{endif}} {{if 'struct cudaPos' in found_types}} if objType == cudaPos: - return sizeof(ccudart.cudaPos){{endif}} + return sizeof(cyruntime.cudaPos){{endif}} {{if 'struct cudaMemcpy3DParms' in found_types}} if objType == cudaMemcpy3DParms: - return sizeof(ccudart.cudaMemcpy3DParms){{endif}} + return sizeof(cyruntime.cudaMemcpy3DParms){{endif}} {{if 'struct cudaMemcpyNodeParams' in found_types}} if objType == cudaMemcpyNodeParams: - return sizeof(ccudart.cudaMemcpyNodeParams){{endif}} + return sizeof(cyruntime.cudaMemcpyNodeParams){{endif}} {{if 'struct cudaMemcpy3DPeerParms' in found_types}} if objType == cudaMemcpy3DPeerParms: - return sizeof(ccudart.cudaMemcpy3DPeerParms){{endif}} + return sizeof(cyruntime.cudaMemcpy3DPeerParms){{endif}} {{if 'struct cudaMemsetParams' in found_types}} if objType == cudaMemsetParams: - return sizeof(ccudart.cudaMemsetParams){{endif}} + return sizeof(cyruntime.cudaMemsetParams){{endif}} {{if 'struct cudaMemsetParamsV2' in found_types}} if objType == cudaMemsetParamsV2: - return sizeof(ccudart.cudaMemsetParamsV2){{endif}} + return sizeof(cyruntime.cudaMemsetParamsV2){{endif}} {{if 'struct cudaAccessPolicyWindow' in found_types}} if objType == cudaAccessPolicyWindow: - return sizeof(ccudart.cudaAccessPolicyWindow){{endif}} + return sizeof(cyruntime.cudaAccessPolicyWindow){{endif}} {{if 'cudaHostFn_t' in found_types}} if objType == cudaHostFn_t: - return sizeof(ccudart.cudaHostFn_t){{endif}} + return sizeof(cyruntime.cudaHostFn_t){{endif}} {{if 'struct cudaHostNodeParams' in found_types}} if objType == cudaHostNodeParams: - return sizeof(ccudart.cudaHostNodeParams){{endif}} + return sizeof(cyruntime.cudaHostNodeParams){{endif}} {{if 'struct cudaHostNodeParamsV2' in found_types}} if objType == cudaHostNodeParamsV2: - return sizeof(ccudart.cudaHostNodeParamsV2){{endif}} + return sizeof(cyruntime.cudaHostNodeParamsV2){{endif}} {{if 
'struct cudaResourceDesc' in found_types}} if objType == cudaResourceDesc: - return sizeof(ccudart.cudaResourceDesc){{endif}} + return sizeof(cyruntime.cudaResourceDesc){{endif}} {{if 'struct cudaResourceViewDesc' in found_types}} if objType == cudaResourceViewDesc: - return sizeof(ccudart.cudaResourceViewDesc){{endif}} + return sizeof(cyruntime.cudaResourceViewDesc){{endif}} {{if 'struct cudaPointerAttributes' in found_types}} if objType == cudaPointerAttributes: - return sizeof(ccudart.cudaPointerAttributes){{endif}} + return sizeof(cyruntime.cudaPointerAttributes){{endif}} {{if 'struct cudaFuncAttributes' in found_types}} if objType == cudaFuncAttributes: - return sizeof(ccudart.cudaFuncAttributes){{endif}} + return sizeof(cyruntime.cudaFuncAttributes){{endif}} {{if 'struct cudaMemLocation' in found_types}} if objType == cudaMemLocation: - return sizeof(ccudart.cudaMemLocation){{endif}} + return sizeof(cyruntime.cudaMemLocation){{endif}} {{if 'struct cudaMemAccessDesc' in found_types}} if objType == cudaMemAccessDesc: - return sizeof(ccudart.cudaMemAccessDesc){{endif}} + return sizeof(cyruntime.cudaMemAccessDesc){{endif}} {{if 'struct cudaMemPoolProps' in found_types}} if objType == cudaMemPoolProps: - return sizeof(ccudart.cudaMemPoolProps){{endif}} + return sizeof(cyruntime.cudaMemPoolProps){{endif}} {{if 'struct cudaMemPoolPtrExportData' in found_types}} if objType == cudaMemPoolPtrExportData: - return sizeof(ccudart.cudaMemPoolPtrExportData){{endif}} + return sizeof(cyruntime.cudaMemPoolPtrExportData){{endif}} {{if 'struct cudaMemAllocNodeParams' in found_types}} if objType == cudaMemAllocNodeParams: - return sizeof(ccudart.cudaMemAllocNodeParams){{endif}} + return sizeof(cyruntime.cudaMemAllocNodeParams){{endif}} {{if 'struct cudaMemAllocNodeParamsV2' in found_types}} if objType == cudaMemAllocNodeParamsV2: - return sizeof(ccudart.cudaMemAllocNodeParamsV2){{endif}} + return sizeof(cyruntime.cudaMemAllocNodeParamsV2){{endif}} {{if 'struct cudaMemFreeNodeParams' in found_types}} if objType == cudaMemFreeNodeParams: - return sizeof(ccudart.cudaMemFreeNodeParams){{endif}} + return sizeof(cyruntime.cudaMemFreeNodeParams){{endif}} {{if 'struct CUuuid_st' in found_types}} if objType == CUuuid_st: - return sizeof(ccudart.CUuuid_st){{endif}} + return sizeof(cyruntime.CUuuid_st){{endif}} {{if 'CUuuid' in found_types}} if objType == CUuuid: - return sizeof(ccudart.CUuuid){{endif}} + return sizeof(cyruntime.CUuuid){{endif}} {{if 'cudaUUID_t' in found_types}} if objType == cudaUUID_t: - return sizeof(ccudart.cudaUUID_t){{endif}} + return sizeof(cyruntime.cudaUUID_t){{endif}} {{if 'struct cudaDeviceProp' in found_types}} if objType == cudaDeviceProp: - return sizeof(ccudart.cudaDeviceProp){{endif}} + return sizeof(cyruntime.cudaDeviceProp){{endif}} {{if 'struct cudaIpcEventHandle_st' in found_types}} if objType == cudaIpcEventHandle_st: - return sizeof(ccudart.cudaIpcEventHandle_st){{endif}} + return sizeof(cyruntime.cudaIpcEventHandle_st){{endif}} {{if 'cudaIpcEventHandle_t' in found_types}} if objType == cudaIpcEventHandle_t: - return sizeof(ccudart.cudaIpcEventHandle_t){{endif}} + return sizeof(cyruntime.cudaIpcEventHandle_t){{endif}} {{if 'struct cudaIpcMemHandle_st' in found_types}} if objType == cudaIpcMemHandle_st: - return sizeof(ccudart.cudaIpcMemHandle_st){{endif}} + return sizeof(cyruntime.cudaIpcMemHandle_st){{endif}} {{if 'cudaIpcMemHandle_t' in found_types}} if objType == cudaIpcMemHandle_t: - return sizeof(ccudart.cudaIpcMemHandle_t){{endif}} + return 
sizeof(cyruntime.cudaIpcMemHandle_t){{endif}} {{if 'struct cudaMemFabricHandle_st' in found_types}} if objType == cudaMemFabricHandle_st: - return sizeof(ccudart.cudaMemFabricHandle_st){{endif}} + return sizeof(cyruntime.cudaMemFabricHandle_st){{endif}} {{if 'cudaMemFabricHandle_t' in found_types}} if objType == cudaMemFabricHandle_t: - return sizeof(ccudart.cudaMemFabricHandle_t){{endif}} + return sizeof(cyruntime.cudaMemFabricHandle_t){{endif}} {{if 'struct cudaExternalMemoryHandleDesc' in found_types}} if objType == cudaExternalMemoryHandleDesc: - return sizeof(ccudart.cudaExternalMemoryHandleDesc){{endif}} + return sizeof(cyruntime.cudaExternalMemoryHandleDesc){{endif}} {{if 'struct cudaExternalMemoryBufferDesc' in found_types}} if objType == cudaExternalMemoryBufferDesc: - return sizeof(ccudart.cudaExternalMemoryBufferDesc){{endif}} + return sizeof(cyruntime.cudaExternalMemoryBufferDesc){{endif}} {{if 'struct cudaExternalMemoryMipmappedArrayDesc' in found_types}} if objType == cudaExternalMemoryMipmappedArrayDesc: - return sizeof(ccudart.cudaExternalMemoryMipmappedArrayDesc){{endif}} + return sizeof(cyruntime.cudaExternalMemoryMipmappedArrayDesc){{endif}} {{if 'struct cudaExternalSemaphoreHandleDesc' in found_types}} if objType == cudaExternalSemaphoreHandleDesc: - return sizeof(ccudart.cudaExternalSemaphoreHandleDesc){{endif}} + return sizeof(cyruntime.cudaExternalSemaphoreHandleDesc){{endif}} {{if 'struct cudaExternalSemaphoreSignalParams' in found_types}} if objType == cudaExternalSemaphoreSignalParams: - return sizeof(ccudart.cudaExternalSemaphoreSignalParams){{endif}} + return sizeof(cyruntime.cudaExternalSemaphoreSignalParams){{endif}} {{if 'struct cudaExternalSemaphoreWaitParams' in found_types}} if objType == cudaExternalSemaphoreWaitParams: - return sizeof(ccudart.cudaExternalSemaphoreWaitParams){{endif}} + return sizeof(cyruntime.cudaExternalSemaphoreWaitParams){{endif}} {{if 'cudaStream_t' in found_types}} if objType == cudaStream_t: - return sizeof(ccudart.cudaStream_t){{endif}} + return sizeof(cyruntime.cudaStream_t){{endif}} {{if 'cudaEvent_t' in found_types}} if objType == cudaEvent_t: - return sizeof(ccudart.cudaEvent_t){{endif}} + return sizeof(cyruntime.cudaEvent_t){{endif}} {{if 'cudaGraphicsResource_t' in found_types}} if objType == cudaGraphicsResource_t: - return sizeof(ccudart.cudaGraphicsResource_t){{endif}} + return sizeof(cyruntime.cudaGraphicsResource_t){{endif}} {{if 'cudaExternalMemory_t' in found_types}} if objType == cudaExternalMemory_t: - return sizeof(ccudart.cudaExternalMemory_t){{endif}} + return sizeof(cyruntime.cudaExternalMemory_t){{endif}} {{if 'cudaExternalSemaphore_t' in found_types}} if objType == cudaExternalSemaphore_t: - return sizeof(ccudart.cudaExternalSemaphore_t){{endif}} + return sizeof(cyruntime.cudaExternalSemaphore_t){{endif}} {{if 'cudaGraph_t' in found_types}} if objType == cudaGraph_t: - return sizeof(ccudart.cudaGraph_t){{endif}} + return sizeof(cyruntime.cudaGraph_t){{endif}} {{if 'cudaGraphNode_t' in found_types}} if objType == cudaGraphNode_t: - return sizeof(ccudart.cudaGraphNode_t){{endif}} + return sizeof(cyruntime.cudaGraphNode_t){{endif}} {{if 'cudaUserObject_t' in found_types}} if objType == cudaUserObject_t: - return sizeof(ccudart.cudaUserObject_t){{endif}} + return sizeof(cyruntime.cudaUserObject_t){{endif}} {{if 'cudaGraphConditionalHandle' in found_types}} if objType == cudaGraphConditionalHandle: - return sizeof(ccudart.cudaGraphConditionalHandle){{endif}} + return 
sizeof(cyruntime.cudaGraphConditionalHandle){{endif}} {{if 'cudaFunction_t' in found_types}} if objType == cudaFunction_t: - return sizeof(ccudart.cudaFunction_t){{endif}} + return sizeof(cyruntime.cudaFunction_t){{endif}} {{if 'cudaKernel_t' in found_types}} if objType == cudaKernel_t: - return sizeof(ccudart.cudaKernel_t){{endif}} + return sizeof(cyruntime.cudaKernel_t){{endif}} {{if 'cudaMemPool_t' in found_types}} if objType == cudaMemPool_t: - return sizeof(ccudart.cudaMemPool_t){{endif}} + return sizeof(cyruntime.cudaMemPool_t){{endif}} {{if 'struct cudaKernelNodeParams' in found_types}} if objType == cudaKernelNodeParams: - return sizeof(ccudart.cudaKernelNodeParams){{endif}} + return sizeof(cyruntime.cudaKernelNodeParams){{endif}} {{if 'struct cudaKernelNodeParamsV2' in found_types}} if objType == cudaKernelNodeParamsV2: - return sizeof(ccudart.cudaKernelNodeParamsV2){{endif}} + return sizeof(cyruntime.cudaKernelNodeParamsV2){{endif}} {{if 'struct cudaExternalSemaphoreSignalNodeParams' in found_types}} if objType == cudaExternalSemaphoreSignalNodeParams: - return sizeof(ccudart.cudaExternalSemaphoreSignalNodeParams){{endif}} + return sizeof(cyruntime.cudaExternalSemaphoreSignalNodeParams){{endif}} {{if 'struct cudaExternalSemaphoreSignalNodeParamsV2' in found_types}} if objType == cudaExternalSemaphoreSignalNodeParamsV2: - return sizeof(ccudart.cudaExternalSemaphoreSignalNodeParamsV2){{endif}} + return sizeof(cyruntime.cudaExternalSemaphoreSignalNodeParamsV2){{endif}} {{if 'struct cudaExternalSemaphoreWaitNodeParams' in found_types}} if objType == cudaExternalSemaphoreWaitNodeParams: - return sizeof(ccudart.cudaExternalSemaphoreWaitNodeParams){{endif}} + return sizeof(cyruntime.cudaExternalSemaphoreWaitNodeParams){{endif}} {{if 'struct cudaExternalSemaphoreWaitNodeParamsV2' in found_types}} if objType == cudaExternalSemaphoreWaitNodeParamsV2: - return sizeof(ccudart.cudaExternalSemaphoreWaitNodeParamsV2){{endif}} + return sizeof(cyruntime.cudaExternalSemaphoreWaitNodeParamsV2){{endif}} {{if 'struct cudaConditionalNodeParams' in found_types}} if objType == cudaConditionalNodeParams: - return sizeof(ccudart.cudaConditionalNodeParams){{endif}} + return sizeof(cyruntime.cudaConditionalNodeParams){{endif}} {{if 'struct cudaChildGraphNodeParams' in found_types}} if objType == cudaChildGraphNodeParams: - return sizeof(ccudart.cudaChildGraphNodeParams){{endif}} + return sizeof(cyruntime.cudaChildGraphNodeParams){{endif}} {{if 'struct cudaEventRecordNodeParams' in found_types}} if objType == cudaEventRecordNodeParams: - return sizeof(ccudart.cudaEventRecordNodeParams){{endif}} + return sizeof(cyruntime.cudaEventRecordNodeParams){{endif}} {{if 'struct cudaEventWaitNodeParams' in found_types}} if objType == cudaEventWaitNodeParams: - return sizeof(ccudart.cudaEventWaitNodeParams){{endif}} + return sizeof(cyruntime.cudaEventWaitNodeParams){{endif}} {{if 'struct cudaGraphNodeParams' in found_types}} if objType == cudaGraphNodeParams: - return sizeof(ccudart.cudaGraphNodeParams){{endif}} + return sizeof(cyruntime.cudaGraphNodeParams){{endif}} {{if 'struct cudaGraphEdgeData_st' in found_types}} if objType == cudaGraphEdgeData_st: - return sizeof(ccudart.cudaGraphEdgeData_st){{endif}} + return sizeof(cyruntime.cudaGraphEdgeData_st){{endif}} {{if 'cudaGraphEdgeData' in found_types}} if objType == cudaGraphEdgeData: - return sizeof(ccudart.cudaGraphEdgeData){{endif}} + return sizeof(cyruntime.cudaGraphEdgeData){{endif}} {{if 'cudaGraphExec_t' in found_types}} if objType == cudaGraphExec_t: - return 
sizeof(ccudart.cudaGraphExec_t){{endif}} + return sizeof(cyruntime.cudaGraphExec_t){{endif}} {{if 'struct cudaGraphInstantiateParams_st' in found_types}} if objType == cudaGraphInstantiateParams_st: - return sizeof(ccudart.cudaGraphInstantiateParams_st){{endif}} + return sizeof(cyruntime.cudaGraphInstantiateParams_st){{endif}} {{if 'cudaGraphInstantiateParams' in found_types}} if objType == cudaGraphInstantiateParams: - return sizeof(ccudart.cudaGraphInstantiateParams){{endif}} + return sizeof(cyruntime.cudaGraphInstantiateParams){{endif}} {{if 'struct cudaGraphExecUpdateResultInfo_st' in found_types}} if objType == cudaGraphExecUpdateResultInfo_st: - return sizeof(ccudart.cudaGraphExecUpdateResultInfo_st){{endif}} + return sizeof(cyruntime.cudaGraphExecUpdateResultInfo_st){{endif}} {{if 'cudaGraphExecUpdateResultInfo' in found_types}} if objType == cudaGraphExecUpdateResultInfo: - return sizeof(ccudart.cudaGraphExecUpdateResultInfo){{endif}} + return sizeof(cyruntime.cudaGraphExecUpdateResultInfo){{endif}} {{if 'cudaGraphDeviceNode_t' in found_types}} if objType == cudaGraphDeviceNode_t: - return sizeof(ccudart.cudaGraphDeviceNode_t){{endif}} + return sizeof(cyruntime.cudaGraphDeviceNode_t){{endif}} {{if 'struct cudaGraphKernelNodeUpdate' in found_types}} if objType == cudaGraphKernelNodeUpdate: - return sizeof(ccudart.cudaGraphKernelNodeUpdate){{endif}} + return sizeof(cyruntime.cudaGraphKernelNodeUpdate){{endif}} {{if 'struct cudaLaunchMemSyncDomainMap_st' in found_types}} if objType == cudaLaunchMemSyncDomainMap_st: - return sizeof(ccudart.cudaLaunchMemSyncDomainMap_st){{endif}} + return sizeof(cyruntime.cudaLaunchMemSyncDomainMap_st){{endif}} {{if 'cudaLaunchMemSyncDomainMap' in found_types}} if objType == cudaLaunchMemSyncDomainMap: - return sizeof(ccudart.cudaLaunchMemSyncDomainMap){{endif}} + return sizeof(cyruntime.cudaLaunchMemSyncDomainMap){{endif}} {{if 'union cudaLaunchAttributeValue' in found_types}} if objType == cudaLaunchAttributeValue: - return sizeof(ccudart.cudaLaunchAttributeValue){{endif}} + return sizeof(cyruntime.cudaLaunchAttributeValue){{endif}} {{if 'struct cudaLaunchAttribute_st' in found_types}} if objType == cudaLaunchAttribute_st: - return sizeof(ccudart.cudaLaunchAttribute_st){{endif}} + return sizeof(cyruntime.cudaLaunchAttribute_st){{endif}} {{if 'cudaLaunchAttribute' in found_types}} if objType == cudaLaunchAttribute: - return sizeof(ccudart.cudaLaunchAttribute){{endif}} + return sizeof(cyruntime.cudaLaunchAttribute){{endif}} {{if 'cudaAsyncCallbackHandle_t' in found_types}} if objType == cudaAsyncCallbackHandle_t: - return sizeof(ccudart.cudaAsyncCallbackHandle_t){{endif}} + return sizeof(cyruntime.cudaAsyncCallbackHandle_t){{endif}} {{if 'struct cudaAsyncNotificationInfo' in found_types}} if objType == cudaAsyncNotificationInfo: - return sizeof(ccudart.cudaAsyncNotificationInfo){{endif}} + return sizeof(cyruntime.cudaAsyncNotificationInfo){{endif}} {{if 'cudaAsyncNotificationInfo_t' in found_types}} if objType == cudaAsyncNotificationInfo_t: - return sizeof(ccudart.cudaAsyncNotificationInfo_t){{endif}} + return sizeof(cyruntime.cudaAsyncNotificationInfo_t){{endif}} {{if 'cudaAsyncCallback' in found_types}} if objType == cudaAsyncCallback: - return sizeof(ccudart.cudaAsyncCallback){{endif}} + return sizeof(cyruntime.cudaAsyncCallback){{endif}} {{if 'cudaSurfaceObject_t' in found_types}} if objType == cudaSurfaceObject_t: - return sizeof(ccudart.cudaSurfaceObject_t){{endif}} + return sizeof(cyruntime.cudaSurfaceObject_t){{endif}} {{if 'struct 
cudaTextureDesc' in found_types}} if objType == cudaTextureDesc: - return sizeof(ccudart.cudaTextureDesc){{endif}} + return sizeof(cyruntime.cudaTextureDesc){{endif}} {{if 'cudaTextureObject_t' in found_types}} if objType == cudaTextureObject_t: - return sizeof(ccudart.cudaTextureObject_t){{endif}} + return sizeof(cyruntime.cudaTextureObject_t){{endif}} {{if 'cudaStreamCallback_t' in found_types}} if objType == cudaStreamCallback_t: - return sizeof(ccudart.cudaStreamCallback_t){{endif}} + return sizeof(cyruntime.cudaStreamCallback_t){{endif}} {{if True}} if objType == GLenum: - return sizeof(ccudart.GLenum){{endif}} + return sizeof(cyruntime.GLenum){{endif}} {{if True}} if objType == GLuint: - return sizeof(ccudart.GLuint){{endif}} + return sizeof(cyruntime.GLuint){{endif}} {{if True}} if objType == EGLImageKHR: - return sizeof(ccudart.EGLImageKHR){{endif}} + return sizeof(cyruntime.EGLImageKHR){{endif}} {{if True}} if objType == EGLStreamKHR: - return sizeof(ccudart.EGLStreamKHR){{endif}} + return sizeof(cyruntime.EGLStreamKHR){{endif}} {{if True}} if objType == EGLint: - return sizeof(ccudart.EGLint){{endif}} + return sizeof(cyruntime.EGLint){{endif}} {{if True}} if objType == EGLSyncKHR: - return sizeof(ccudart.EGLSyncKHR){{endif}} + return sizeof(cyruntime.EGLSyncKHR){{endif}} {{if True}} if objType == VdpDevice: - return sizeof(ccudart.VdpDevice){{endif}} + return sizeof(cyruntime.VdpDevice){{endif}} {{if True}} if objType == VdpGetProcAddress: - return sizeof(ccudart.VdpGetProcAddress){{endif}} + return sizeof(cyruntime.VdpGetProcAddress){{endif}} {{if True}} if objType == VdpVideoSurface: - return sizeof(ccudart.VdpVideoSurface){{endif}} + return sizeof(cyruntime.VdpVideoSurface){{endif}} {{if True}} if objType == VdpOutputSurface: - return sizeof(ccudart.VdpOutputSurface){{endif}} + return sizeof(cyruntime.VdpOutputSurface){{endif}} {{if True}} if objType == cudaStreamAttrValue: - return sizeof(ccudart.cudaStreamAttrValue){{endif}} + return sizeof(cyruntime.cudaStreamAttrValue){{endif}} {{if True}} if objType == cudaKernelNodeAttrValue: - return sizeof(ccudart.cudaKernelNodeAttrValue){{endif}} + return sizeof(cyruntime.cudaKernelNodeAttrValue){{endif}} {{if True}} if objType == cudaEglPlaneDesc_st: - return sizeof(ccudart.cudaEglPlaneDesc_st){{endif}} + return sizeof(cyruntime.cudaEglPlaneDesc_st){{endif}} {{if True}} if objType == cudaEglPlaneDesc: - return sizeof(ccudart.cudaEglPlaneDesc){{endif}} + return sizeof(cyruntime.cudaEglPlaneDesc){{endif}} {{if True}} if objType == cudaEglFrame_st: - return sizeof(ccudart.cudaEglFrame_st){{endif}} + return sizeof(cyruntime.cudaEglFrame_st){{endif}} {{if True}} if objType == cudaEglFrame: - return sizeof(ccudart.cudaEglFrame){{endif}} + return sizeof(cyruntime.cudaEglFrame){{endif}} {{if True}} if objType == cudaEglStreamConnection: - return sizeof(ccudart.cudaEglStreamConnection){{endif}} + return sizeof(cyruntime.cudaEglStreamConnection){{endif}} raise TypeError("Unknown type: " + str(objType)) diff --git a/cuda/cuda/bindings/tests/__init__.py b/cuda/cuda/bindings/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cuda/tests/test_ccuda.pyx b/cuda/cuda/bindings/tests/test_ccuda.pyx similarity index 91% rename from cuda/tests/test_ccuda.pyx rename to cuda/cuda/bindings/tests/test_ccuda.pyx index 9bd08650..0d90ba90 100644 --- a/cuda/tests/test_ccuda.pyx +++ b/cuda/cuda/bindings/tests/test_ccuda.pyx @@ -9,6 +9,8 @@ from libc.string cimport ( memset, memcmp ) +# TODO: update to new module once the old ones are 
removed, we use the +# tests to cover backward compatibility. cimport cuda.ccuda as ccuda def test_ccuda_memcpy(): @@ -53,4 +55,4 @@ def test_ccuda_memcpy(): err = ccuda.cuMemFree(dptr) assert(err == 0) err = ccuda.cuCtxDestroy(ctx) - assert(err == 0) \ No newline at end of file + assert(err == 0) diff --git a/cuda/tests/test_ccudart.pyx b/cuda/cuda/bindings/tests/test_ccudart.pyx similarity index 95% rename from cuda/tests/test_ccudart.pyx rename to cuda/cuda/bindings/tests/test_ccudart.pyx index a871fa6f..b228661c 100644 --- a/cuda/tests/test_ccudart.pyx +++ b/cuda/cuda/bindings/tests/test_ccudart.pyx @@ -9,6 +9,8 @@ from libc.string cimport ( memset, memcmp ) +# TODO: update to new module once the old ones are removed, we use the +# tests to cover backward compatibility. cimport cuda.ccudart as ccudart def test_ccudart_memcpy(): diff --git a/cuda/tests/test_cuda.py b/cuda/cuda/bindings/tests/test_cuda.py similarity index 100% rename from cuda/tests/test_cuda.py rename to cuda/cuda/bindings/tests/test_cuda.py diff --git a/cuda/tests/test_cudart.py b/cuda/cuda/bindings/tests/test_cudart.py similarity index 100% rename from cuda/tests/test_cudart.py rename to cuda/cuda/bindings/tests/test_cudart.py diff --git a/cuda/tests/test_cython.py b/cuda/cuda/bindings/tests/test_cython.py similarity index 85% rename from cuda/tests/test_cython.py rename to cuda/cuda/bindings/tests/test_cython.py index 4787742f..e5577ede 100644 --- a/cuda/tests/test_cython.py +++ b/cuda/cuda/bindings/tests/test_cython.py @@ -22,9 +22,9 @@ def wrapped(*args, **kwargs): return wrapped -cython_test_modules = ["cuda.tests.test_ccuda", - "cuda.tests.test_ccudart", - "cuda.tests.test_interoperability_cython"] +cython_test_modules = ["cuda.bindings.tests.test_ccuda", + "cuda.bindings.tests.test_ccudart", + "cuda.bindings.tests.test_interoperability_cython"] for mod in cython_test_modules: diff --git a/cuda/tests/test_interoperability.py b/cuda/cuda/bindings/tests/test_interoperability.py similarity index 100% rename from cuda/tests/test_interoperability.py rename to cuda/cuda/bindings/tests/test_interoperability.py diff --git a/cuda/tests/test_interoperability_cython.pyx b/cuda/cuda/bindings/tests/test_interoperability_cython.pyx similarity index 98% rename from cuda/tests/test_interoperability_cython.pyx rename to cuda/cuda/bindings/tests/test_interoperability_cython.pyx index 96b50ce5..5c58e8ea 100644 --- a/cuda/tests/test_interoperability_cython.pyx +++ b/cuda/cuda/bindings/tests/test_interoperability_cython.pyx @@ -5,6 +5,9 @@ # this software. Any use, reproduction, disclosure, or distribution of # this software and related documentation outside the terms of the EULA # is strictly prohibited. + +# TODO: update to new module once the old ones are removed, we use the +# tests to cover backward compatibility.
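[Reviewer note, not part of the patch] The TODO comments above intentionally keep these Cython tests cimporting the deprecated cuda.ccuda/cuda.ccudart paths, so the old entry points stay exercised until they are removed. A minimal pure-Python sketch of the same backward-compatibility idea, relying on the aliasing shims introduced below; the test name and set comparison are illustrative, not taken from this patch:

    # Hypothetical check: every public symbol of the new module should be
    # reachable through its deprecated alias as well.
    import cuda.bindings.driver as new_driver
    import cuda.cuda as old_driver  # deprecated alias, see the shims below

    def test_alias_reexports_driver_api():
        new = {n for n in dir(new_driver) if not n.startswith("_")}
        old = {n for n in dir(old_driver) if not n.startswith("_")}
        assert new <= old  # the alias must not lose any public names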
import pytest import cuda.cuda as cuda import cuda.cudart as cudart diff --git a/cuda/tests/test_kernelParams.py b/cuda/cuda/bindings/tests/test_kernelParams.py similarity index 100% rename from cuda/tests/test_kernelParams.py rename to cuda/cuda/bindings/tests/test_kernelParams.py diff --git a/cuda/tests/test_nvrtc.py b/cuda/cuda/bindings/tests/test_nvrtc.py similarity index 100% rename from cuda/tests/test_nvrtc.py rename to cuda/cuda/bindings/tests/test_nvrtc.py diff --git a/cuda/cuda/ccuda.pxd b/cuda/cuda/ccuda.pxd new file mode 100644 index 00000000..73f3fc5c --- /dev/null +++ b/cuda/cuda/ccuda.pxd @@ -0,0 +1,7 @@ +from cuda.bindings.cydriver cimport * + +cdef extern from *: + """ + #pragma message ( "The cuda.ccuda module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.cydriver module instead." ) + """ diff --git a/cuda/cuda/ccuda.pyx b/cuda/cuda/ccuda.pyx new file mode 100644 index 00000000..73f3fc5c --- /dev/null +++ b/cuda/cuda/ccuda.pyx @@ -0,0 +1,7 @@ +from cuda.bindings.cydriver cimport * + +cdef extern from *: + """ + #pragma message ( "The cuda.ccuda module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.cydriver module instead." ) + """ diff --git a/cuda/cuda/ccudart.pxd b/cuda/cuda/ccudart.pxd new file mode 100644 index 00000000..b32eece8 --- /dev/null +++ b/cuda/cuda/ccudart.pxd @@ -0,0 +1,7 @@ +from cuda.bindings.cyruntime cimport * + +cdef extern from *: + """ + #pragma message ( "The cuda.ccudart module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.cyruntime module instead." ) + """ diff --git a/cuda/cuda/ccudart.pyx b/cuda/cuda/ccudart.pyx new file mode 100644 index 00000000..b32eece8 --- /dev/null +++ b/cuda/cuda/ccudart.pyx @@ -0,0 +1,7 @@ +from cuda.bindings.cyruntime cimport * + +cdef extern from *: + """ + #pragma message ( "The cuda.ccudart module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.cyruntime module instead." ) + """ diff --git a/cuda/cuda/cnvrtc.pxd b/cuda/cuda/cnvrtc.pxd new file mode 100644 index 00000000..d4034084 --- /dev/null +++ b/cuda/cuda/cnvrtc.pxd @@ -0,0 +1,7 @@ +from cuda.bindings.cynvrtc cimport * + +cdef extern from *: + """ + #pragma message ( "The cuda.cnvrtc module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.cynvrtc module instead." ) + """ diff --git a/cuda/cuda/cnvrtc.pyx b/cuda/cuda/cnvrtc.pyx new file mode 100644 index 00000000..d4034084 --- /dev/null +++ b/cuda/cuda/cnvrtc.pyx @@ -0,0 +1,7 @@ +from cuda.bindings.cynvrtc cimport * + +cdef extern from *: + """ + #pragma message ( "The cuda.cnvrtc module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.cynvrtc module instead." ) + """ diff --git a/cuda/cuda/cuda.pyx b/cuda/cuda/cuda.pyx new file mode 100644 index 00000000..f8b197f7 --- /dev/null +++ b/cuda/cuda/cuda.pyx @@ -0,0 +1,14 @@ +import warnings as _warnings + +from cuda.bindings.driver import * + + +cdef extern from *: + """ + #pragma message ( "The cuda.cuda module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.driver module instead." 
) + """ + + +_warnings.warn("The cuda.cuda module is deprecated and will be removed in a future release, " + "please switch to use the cuda.bindings.driver module instead.", DeprecationWarning, stacklevel=2) diff --git a/cuda/cuda/cudart.pyx b/cuda/cuda/cudart.pyx new file mode 100644 index 00000000..8c342df8 --- /dev/null +++ b/cuda/cuda/cudart.pyx @@ -0,0 +1,14 @@ +import warnings as _warnings + +from cuda.bindings.runtime import * + + +cdef extern from *: + """ + #pragma message ( "The cuda.cudart module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.runtime module instead." ) + """ + + +_warnings.warn("The cuda.cudart module is deprecated and will be removed in a future release, " + "please switch to use the cuda.bindings.runtime module instead.", DeprecationWarning, stacklevel=2) diff --git a/cuda/cuda/nvrtc.pyx b/cuda/cuda/nvrtc.pyx new file mode 100644 index 00000000..4a9e048f --- /dev/null +++ b/cuda/cuda/nvrtc.pyx @@ -0,0 +1,14 @@ +import warnings as _warnings + +from cuda.bindings.nvrtc import * + + +cdef extern from *: + """ + #pragma message ( "The cuda.nvrtc module is deprecated and will be removed in a future release, " \ + "please switch to use the cuda.bindings.nvrtc module instead." ) + """ + + +_warnings.warn("The cuda.nvrtc module is deprecated and will be removed in a future release, " + "please switch to use the cuda.bindings.nvrtc module instead.", DeprecationWarning, stacklevel=2) diff --git a/pyproject.toml b/cuda/pyproject.toml similarity index 94% rename from pyproject.toml rename to cuda/pyproject.toml index 470fe642..f4c9c5bc 100644 --- a/pyproject.toml +++ b/cuda/pyproject.toml @@ -52,7 +52,7 @@ Documentation = "https://nvidia.github.io/cuda-python/" [tool.versioneer] VCS = "git" style = "pep440" -versionfile_source = "cuda/_version.py" -versionfile_build = "cuda/_version.py" +versionfile_source = "cuda/bindings/_version.py" +versionfile_build = "cuda/bindings/_version.py" tag_prefix = "v" parentdir_prefix = "cuda-python-" diff --git a/pytest.ini b/cuda/pytest.ini similarity index 100% rename from pytest.ini rename to cuda/pytest.ini diff --git a/requirements.txt b/cuda/requirements.txt similarity index 100% rename from requirements.txt rename to cuda/requirements.txt diff --git a/setup.py b/cuda/setup.py similarity index 78% rename from setup.py rename to cuda/setup.py index 62fec10f..ec523626 100644 --- a/setup.py +++ b/cuda/setup.py @@ -6,18 +6,21 @@ # this software and related documentation outside the terms of the EULA # is strictly prohibited. 
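[Reviewer note, not part of the patch] Each alias module above (cuda/cuda/cuda.pyx, cudart.pyx, nvrtc.pyx) pairs a compile-time #pragma message, surfaced to Cython users who still cimport the old names, with a module-level DeprecationWarning surfaced to Python importers. A smoke-test sketch of that behavior, assuming a wheel built from this patch, a fresh interpreter (the warning fires only on the first import), and a loadable CUDA runtime:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        import cuda.cudart  # deprecated alias for cuda.bindings.runtime

    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
    # The relocated API stays reachable through the old name:
    err, version = cuda.cudart.cudaRuntimeGetVersion()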
-from Cython import Tempita -from Cython.Build import cythonize +import glob import os import platform -from pyclibrary import CParser import sys import sysconfig + +from Cython import Tempita +from Cython.Build import cythonize +from pyclibrary import CParser from setuptools import find_packages, setup from setuptools.extension import Extension from setuptools.command.build_ext import build_ext import versioneer + # ---------------------------------------------------------------------- # Fetch configuration options @@ -116,6 +119,7 @@ def unwrapMembers(found_dict): # ---------------------------------------------------------------------- # Generate + def fetch_input_files(path): return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.in')] @@ -136,9 +140,10 @@ def generate_output(infile, local): f.write(pxdcontent) path_list = [os.path.join('cuda'), - os.path.join('cuda', '_cuda'), - os.path.join('cuda', '_lib'), - os.path.join('cuda', '_lib', 'ccudart')] + os.path.join('cuda', 'bindings'), + os.path.join('cuda', 'bindings', '_bindings'), + os.path.join('cuda', 'bindings', '_lib'), + os.path.join('cuda', 'bindings', '_lib', 'cyruntime')] input_files = [] for path in path_list: input_files += fetch_input_files(path) @@ -172,49 +177,59 @@ def generate_output(infile, local): extra_compile_args += ['-O3'] # For Setup -if sys.platform == 'win32': - from distutils import _msvccompiler - _msvccompiler.PLAT_TO_VCVARS['win-amd64'] = 'amd64' - extensions = [] cmdclass = {} # ---------------------------------------------------------------------- # Cythonize -def do_cythonize(sources): +def prep_extensions(sources): + pattern = sources[0] + files = glob.glob(pattern) + exts = [] + for pyx in files: + mod_name = pyx.replace(".pyx", "").replace(os.sep, ".").replace("/", ".") + exts.append( + Extension( + mod_name, + sources=[pyx, *sources[1:]], + include_dirs=include_dirs, + library_dirs=library_dirs, + runtime_library_dirs=[], + libraries=[], + language="c++", + extra_compile_args=extra_compile_args, + ) + ) + return exts + + +def do_cythonize(extensions): return cythonize( - [ - Extension( - "*", - sources=sources, - include_dirs=include_dirs, - library_dirs=library_dirs, - runtime_library_dirs=[], - libraries=[], - language="c++", - extra_compile_args=extra_compile_args, - ) - ], - nthreads=nthreads, - compiler_directives=dict( - profile=True, language_level=3, embedsignature=True, binding=True - ), - **extra_cythonize_kwargs) + extensions, + nthreads=nthreads, + compiler_directives=dict( + profile=True, language_level=3, embedsignature=True, binding=True + ), + **extra_cythonize_kwargs) + sources_list = [ # private - ["cuda/_cuda/*.pyx", "cuda/_cuda/loader.cpp"], + ["cuda/bindings/_bindings/*.pyx", "cuda/bindings/_bindings/loader.cpp"], # utils - ["cuda/_lib/*.pyx", "cuda/_lib/param_packer.cpp"], - ["cuda/_lib/ccudart/*.pyx"], + ["cuda/bindings/_lib/*.pyx", "cuda/bindings/_lib/param_packer.cpp"], + ["cuda/bindings/_lib/cyruntime/*.pyx"], # public + ["cuda/bindings/*.pyx"], + # public (deprecated, to be removed) ["cuda/*.pyx"], # tests - ["cuda/tests/*.pyx"]] + ["cuda/bindings/tests/*.pyx"], +] for sources in sources_list: - extensions += do_cythonize(sources) + extensions += prep_extensions(sources) # --------------------------------------------------------------------- # Custom build_ext command @@ -240,11 +255,11 @@ def finalize_options(self): setup( version=versioneer.get_version(), - ext_modules=extensions, - packages=find_packages(include=["cuda", "cuda.*"]), + 
ext_modules=do_cythonize(extensions), + packages=find_packages(include=["cuda.cuda", "cuda.cuda.*", "cuda.cuda.bindings", "cuda.cuda.bindings._bindings", "cuda.cuda.bindings._lib", "cuda.cuda.bindings._lib.cyruntime", "cuda.cuda.bindings.tests"]), package_data=dict.fromkeys( - find_packages(include=["cuda", "cuda.*"]), - ["*.pxd", "*.pyx", "*.h", "*.cpp"], + find_packages(include=["cuda.cuda", "cuda.cuda.*", "cuda.cuda.bindings", "cuda.cuda.bindings._bindings", "cuda.cuda.bindings._lib", "cuda.cuda.bindings._lib.cyruntime", "cuda.cuda.bindings.tests"]), + ["*.pxd", "*.pyx", "*.py", "*.h", "*.cpp"], ), cmdclass=cmdclass, zip_safe=False, diff --git a/docs/.buildinfo b/docs/.buildinfo index a04688fb..63736599 100644 --- a/docs/.buildinfo +++ b/docs/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: c62c0b70b25e180ebf5c2c55c04c14fd +config: b213ac36e34cef7446ec345411893f07 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_sources/api.rst.txt b/docs/_sources/api.rst.txt index aec35f5e..4dcac337 100644 --- a/docs/_sources/api.rst.txt +++ b/docs/_sources/api.rst.txt @@ -6,6 +6,6 @@ CUDA Python API Reference :maxdepth: 3 :caption: CaptionHolder: - module/cuda - module/cudart + module/driver + module/runtime module/nvrtc diff --git a/docs/_sources/module/cuda.rst.txt b/docs/_sources/module/cuda.rst.txt deleted file mode 100644 index 8aa8c8fc..00000000 --- a/docs/_sources/module/cuda.rst.txt +++ /dev/null @@ -1,6792 +0,0 @@ ----- -cuda ----- - -Data types used by CUDA driver ------------------------------- - - - -.. autoclass:: cuda.cuda.CUuuid_st -.. autoclass:: cuda.cuda.CUmemFabricHandle_st -.. autoclass:: cuda.cuda.CUipcEventHandle_st -.. autoclass:: cuda.cuda.CUipcMemHandle_st -.. autoclass:: cuda.cuda.CUstreamBatchMemOpParams_union -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUasyncNotificationInfo_st -.. autoclass:: cuda.cuda.CUdevprop_st -.. autoclass:: cuda.cuda.CUaccessPolicyWindow_st -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3_st -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_CONDITIONAL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUgraphEdgeData_st -.. autoclass:: cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS_st -.. autoclass:: cuda.cuda.CUlaunchMemSyncDomainMap_st -.. autoclass:: cuda.cuda.CUlaunchAttributeValue_union -.. autoclass:: cuda.cuda.CUlaunchAttribute_st -.. autoclass:: cuda.cuda.CUlaunchConfig_st -.. autoclass:: cuda.cuda.CUexecAffinitySmCount_st -.. autoclass:: cuda.cuda.CUexecAffinityParam_st -.. autoclass:: cuda.cuda.CUctxCigParam_st -.. autoclass:: cuda.cuda.CUctxCreateParams_st -.. autoclass:: cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY2D_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_PEER_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_ARRAY_DESCRIPTOR_st -.. autoclass:: cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_st -.. autoclass:: cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_st -.. 
autoclass:: cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_st -.. autoclass:: cuda.cuda.CUDA_RESOURCE_DESC_st -.. autoclass:: cuda.cuda.CUDA_TEXTURE_DESC_st -.. autoclass:: cuda.cuda.CUDA_RESOURCE_VIEW_DESC_st -.. autoclass:: cuda.cuda.CUtensorMap_st -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st -.. autoclass:: cuda.cuda.CUDA_LAUNCH_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUarrayMapInfo_st -.. autoclass:: cuda.cuda.CUmemLocation_st -.. autoclass:: cuda.cuda.CUmemAllocationProp_st -.. autoclass:: cuda.cuda.CUmulticastObjectProp_st -.. autoclass:: cuda.cuda.CUmemAccessDesc_st -.. autoclass:: cuda.cuda.CUgraphExecUpdateResultInfo_st -.. autoclass:: cuda.cuda.CUmemPoolProps_st -.. autoclass:: cuda.cuda.CUmemPoolPtrExportData_st -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUgraphNodeParams_st -.. autoclass:: cuda.cuda.CUeglFrame_st -.. autoclass:: cuda.cuda.CUipcMem_flags - - .. autoattribute:: cuda.cuda.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS - - - Automatically enable peer access between remote devices as needed - -.. autoclass:: cuda.cuda.CUmemAttach_flags - - .. autoattribute:: cuda.cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL - - - Memory can be accessed by any stream on any device - - - .. autoattribute:: cuda.cuda.CUmemAttach_flags.CU_MEM_ATTACH_HOST - - - Memory cannot be accessed by any stream on any device - - - .. autoattribute:: cuda.cuda.CUmemAttach_flags.CU_MEM_ATTACH_SINGLE - - - Memory can only be accessed by a single stream on the associated device - -.. autoclass:: cuda.cuda.CUctx_flags - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling [Deprecated] - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_MASK - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_MAP_HOST - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_LMEM_RESIZE_TO_MAX - - - Keep local memory allocation after launch - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_COREDUMP_ENABLE - - - Trigger coredumps from exceptions in this context - - - .. 
autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_USER_COREDUMP_ENABLE - - - Enable user pipe to trigger coredumps in this context - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SYNC_MEMOPS - - - Ensure synchronous memory operations on this context will synchronize - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_FLAGS_MASK - -.. autoclass:: cuda.cuda.CUevent_sched_flags - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - -.. autoclass:: cuda.cuda.cl_event_flags - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - -.. autoclass:: cuda.cuda.cl_context_flags - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - -.. autoclass:: cuda.cuda.CUstream_flags - - .. autoattribute:: cuda.cuda.CUstream_flags.CU_STREAM_DEFAULT - - - Default stream flag - - - .. autoattribute:: cuda.cuda.CUstream_flags.CU_STREAM_NON_BLOCKING - - - Stream does not synchronize with stream 0 (the NULL stream) - -.. autoclass:: cuda.cuda.CUevent_flags - - .. autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_DEFAULT - - - Default event flag - - - .. autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_BLOCKING_SYNC - - - Event uses blocking synchronization - - - .. autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_DISABLE_TIMING - - - Event will not record timing data - - - .. autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_INTERPROCESS - - - Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set - -.. autoclass:: cuda.cuda.CUevent_record_flags - - .. autoattribute:: cuda.cuda.CUevent_record_flags.CU_EVENT_RECORD_DEFAULT - - - Default event record flag - - - .. autoattribute:: cuda.cuda.CUevent_record_flags.CU_EVENT_RECORD_EXTERNAL - - - When using stream capture, create an event record node instead of the default behavior. This flag is invalid when used outside of capture. - -.. autoclass:: cuda.cuda.CUevent_wait_flags - - .. autoattribute:: cuda.cuda.CUevent_wait_flags.CU_EVENT_WAIT_DEFAULT - - - Default event wait flag - - - .. autoattribute:: cuda.cuda.CUevent_wait_flags.CU_EVENT_WAIT_EXTERNAL - - - When using stream capture, create an event wait node instead of the default behavior. This flag is invalid when used outside of capture. - -.. autoclass:: cuda.cuda.CUstreamWaitValue_flags - - .. 
autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ - - - Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit values). Note this is a cyclic comparison which ignores wraparound. (Default behavior.) - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_EQ - - - Wait until *addr == value. - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_AND - - - Wait until (*addr & value) != 0. - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_NOR - - - Wait until ~(*addr | value) != 0. Support for this operation can be queried with :py:obj:`~.cuDeviceGetAttribute()` and :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR`. - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_FLUSH - - - Follow the wait operation with a flush of outstanding remote writes. This means that, if a remote write operation is guaranteed to have reached the device before the wait can be satisfied, that write is guaranteed to be visible to downstream device work. The device is permitted to reorder remote writes internally. For example, this flag would be required if two remote writes arrive in a defined order, the wait is satisfied by the second write, and downstream work needs to observe the first write. Support for this operation is restricted to selected platforms and can be queried with :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES`. - -.. autoclass:: cuda.cuda.CUstreamWriteValue_flags - - .. autoattribute:: cuda.cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT - - - Default behavior - - - .. autoattribute:: cuda.cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER - - - Permits the write to be reordered with writes which were issued before it, as a performance optimization. Normally, :py:obj:`~.cuStreamWriteValue32` will provide a memory fence before the write, which has similar semantics to __threadfence_system() but is scoped to the stream rather than a CUDA thread. This flag is not supported in the v2 API. - -.. autoclass:: cuda.cuda.CUstreamBatchMemOpType - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 - - - Represents a :py:obj:`~.cuStreamWaitValue32` operation - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_32 - - - Represents a :py:obj:`~.cuStreamWriteValue32` operation - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_64 - - - Represents a :py:obj:`~.cuStreamWaitValue64` operation - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_64 - - - Represents a :py:obj:`~.cuStreamWriteValue64` operation - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_BARRIER - - - Insert a memory barrier of the specified type - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES - - - This has the same effect as :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH`, but as a standalone operation. - -.. autoclass:: cuda.cuda.CUstreamMemoryBarrier_flags - - .. autoattribute:: cuda.cuda.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_SYS - - - System-wide memory barrier. - - - .. autoattribute:: cuda.cuda.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_GPU - - - Limit memory barrier scope to the GPU. - -.. autoclass:: cuda.cuda.CUoccupancy_flags - - .. 
autoattribute:: cuda.cuda.CUoccupancy_flags.CU_OCCUPANCY_DEFAULT
-        Default behavior
-
-    .. autoattribute:: cuda.cuda.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE
-        Assume global caching is enabled and cannot be automatically turned off
-
-.. autoclass:: cuda.cuda.CUstreamUpdateCaptureDependencies_flags
-
-    .. autoattribute:: cuda.cuda.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_ADD_CAPTURE_DEPENDENCIES
-        Add new nodes to the dependency set
-
-    .. autoattribute:: cuda.cuda.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_SET_CAPTURE_DEPENDENCIES
-        Replace the dependency set with the new nodes
-
-.. autoclass:: cuda.cuda.CUasyncNotificationType
-
-    .. autoattribute:: cuda.cuda.CUasyncNotificationType.CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET
-
-.. autoclass:: cuda.cuda.CUarray_format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT8
-        Unsigned 8-bit integers
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT16
-        Unsigned 16-bit integers
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT32
-        Unsigned 32-bit integers
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SIGNED_INT8
-        Signed 8-bit integers
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SIGNED_INT16
-        Signed 16-bit integers
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SIGNED_INT32
-        Signed 32-bit integers
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_HALF
-        16-bit floating point
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_FLOAT
-        32-bit floating point
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_NV12
-        8-bit YUV planar format, with 4:2:0 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT8X1
-        1 channel unsigned 8-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT8X2
-        2 channel unsigned 8-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT8X4
-        4 channel unsigned 8-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT16X1
-        1 channel unsigned 16-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT16X2
-        2 channel unsigned 16-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT16X4
-        4 channel unsigned 16-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT8X1
-        1 channel signed 8-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT8X2
-        2 channel signed 8-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT8X4
-        4 channel signed 8-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT16X1
-        1 channel signed 16-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT16X2
-        2 channel signed 16-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT16X4
-        4 channel signed 16-bit normalized integer
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC1_UNORM
-        4 channel unsigned normalized block-compressed (BC1 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC1_UNORM_SRGB
-        4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC2_UNORM
-        4 channel unsigned normalized block-compressed (BC2 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC2_UNORM_SRGB
-        4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC3_UNORM
-        4 channel unsigned normalized block-compressed (BC3 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC3_UNORM_SRGB
-        4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC4_UNORM
-        1 channel unsigned normalized block-compressed (BC4 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC4_SNORM
-        1 channel signed normalized block-compressed (BC4 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC5_UNORM
-        2 channel unsigned normalized block-compressed (BC5 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC5_SNORM
-        2 channel signed normalized block-compressed (BC5 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC6H_UF16
-        3 channel unsigned half-float block-compressed (BC6H compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC6H_SF16
-        3 channel signed half-float block-compressed (BC6H compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC7_UNORM
-        4 channel unsigned normalized block-compressed (BC7 compression) format
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC7_UNORM_SRGB
-        4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P010
-        10-bit YUV planar format, with 4:2:0 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P016
-        16-bit YUV planar format, with 4:2:0 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_NV16
-        8-bit YUV planar format, with 4:2:2 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P210
-        10-bit YUV planar format, with 4:2:2 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P216
-        16-bit YUV planar format, with 4:2:2 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_YUY2
-        2 channel, 8-bit YUV packed planar format, with 4:2:2 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y210
-        2 channel, 10-bit YUV packed planar format, with 4:2:2 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y216
-        2 channel, 16-bit YUV packed planar format, with 4:2:2 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_AYUV
-        4 channel, 8-bit YUV packed planar format, with 4:4:4 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y410
-        10-bit YUV packed planar format, with 4:4:4 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y416
-        4 channel, 12-bit YUV packed planar format, with 4:4:4 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y444_PLANAR8
-        3 channel 8-bit YUV planar format, with 4:4:4 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y444_PLANAR10
-        3 channel 10-bit YUV planar format, with 4:4:4 sampling
-
-    .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_MAX
-
-.. autoclass:: cuda.cuda.CUaddress_mode
-
-    .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_WRAP
-        Wrapping address mode
-
-    .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP
-        Clamp to edge address mode
-
-    .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_MIRROR
-        Mirror address mode
-
-    .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_BORDER
-        Border address mode
-
-.. autoclass:: cuda.cuda.CUfilter_mode
-
-    .. autoattribute:: cuda.cuda.CUfilter_mode.CU_TR_FILTER_MODE_POINT
-        Point filter mode
-
-    .. autoattribute:: cuda.cuda.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR
-        Linear filter mode
-
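The format, address-mode, and filter-mode enums above are consumed by the array and texture descriptor structs these bindings expose. A minimal sketch (error checks elided, assuming a CUDA-capable device; the backward-compatible cuda.cuda import path is used) of creating a 2D single-channel float array with CU_AD_FORMAT_FLOAT; CUaddress_mode and CUfilter_mode fill CUDA_TEXTURE_DESC the same way:

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)

    # Describe a 64x64, single-channel, 32-bit float CUDA array.
    desc = cuda.CUDA_ARRAY_DESCRIPTOR()
    desc.Width = 64
    desc.Height = 64
    desc.Format = cuda.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1
    err, arr = cuda.cuArrayCreate(desc)

    err, = cuda.cuArrayDestroy(arr)
    err, = cuda.cuCtxDestroy(ctx)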
-.. autoclass:: cuda.cuda.CUdevice_attribute
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK
-        Maximum number of threads per block
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X
-        Maximum block dimension X
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y
-        Maximum block dimension Y
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z
-        Maximum block dimension Z
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X
-        Maximum grid dimension X
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y
-        Maximum grid dimension Y
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z
-        Maximum grid dimension Z
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
-        Maximum shared memory available per block in bytes
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK
-        Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY
-        Memory available on device for constant variables in a CUDA C kernel in bytes
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_WARP_SIZE
-        Warp size in threads
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PITCH
-        Maximum pitch in bytes allowed by memory copies
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
-        Maximum number of 32-bit registers available per block
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK
-        Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLOCK_RATE
-        Typical clock frequency in kilohertz
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT
-        Alignment requirement for textures
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP
-        Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT
-        Number of multiprocessors on device
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT
-        Specifies whether there is a run time limit on kernels
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_INTEGRATED
-        Device is integrated with host memory
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY
-        Device can map host memory into CUDA address space
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE
-        Compute mode (See :py:obj:`~.CUcomputemode` for details)
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH
-        Maximum 1D texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH
-        Maximum 2D texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT
-        Maximum 2D texture height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH
-        Maximum 3D texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT
-        Maximum 3D texture height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH
-        Maximum 3D texture depth
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
-        Maximum 2D layered texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
-        Maximum 2D layered texture height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
-        Maximum layers in a 2D layered texture
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH
-        Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT
-        Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES
-        Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT
-        Alignment requirement for surfaces
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS
-        Device can possibly execute multiple kernels concurrently
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ECC_ENABLED
-        Device has ECC support enabled
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID
-        PCI bus ID of the device
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID
-        PCI device ID of the device
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TCC_DRIVER
-        Device is using TCC driver model
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE
-        Peak memory clock frequency in kilohertz
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH
-        Global memory bus width in bits
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE
-        Size of L2 cache in bytes
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR
-        Maximum resident threads per multiprocessor
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT
-        Number of asynchronous engines
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING
-        Device shares a unified address space with the host
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH
-        Maximum 1D layered texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS
-        Maximum layers in a 1D layered texture
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER
-        Deprecated, do not use.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
-        Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT
-        Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE
-        Alternate maximum 3D texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE
-        Alternate maximum 3D texture height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE
-        Alternate maximum 3D texture depth
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID
-        PCI domain ID of the device
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT
-        Pitch alignment requirement for textures
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH
-        Maximum cubemap texture width/height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH
-        Maximum cubemap layered texture width/height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS
-        Maximum layers in a cubemap layered texture
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH
-        Maximum 1D surface width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH
-        Maximum 2D surface width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT
-        Maximum 2D surface height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH
-        Maximum 3D surface width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT
-        Maximum 3D surface height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH
-        Maximum 3D surface depth
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH
-        Maximum 1D layered surface width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS
-        Maximum layers in a 1D layered surface
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH
-        Maximum 2D layered surface width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT
-        Maximum 2D layered surface height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS
-        Maximum layers in a 2D layered surface
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH
-        Maximum cubemap surface width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH
-        Maximum cubemap layered surface width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS
-        Maximum layers in a cubemap layered surface
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH
-        Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or :py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth()` instead.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH
-        Maximum 2D linear texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT
-        Maximum 2D linear texture height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH
-        Maximum 2D linear texture pitch in bytes
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH
-        Maximum mipmapped 2D texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT
-        Maximum mipmapped 2D texture height
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR
-        Major compute capability version number
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR
-        Minor compute capability version number
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH
-        Maximum mipmapped 1D texture width
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED
-        Device supports stream priorities
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED
-        Device supports caching globals in L1
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED
-        Device supports caching locals in L1
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR
-        Maximum shared memory available per multiprocessor in bytes
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR
-        Maximum number of 32-bit registers available per multiprocessor
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY
-        Device can allocate managed memory on this system
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD
-        Device is on a multi-GPU board
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID
-        Unique id for a group of devices on the same multi-GPU board
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED
-        Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO
-        Ratio of single precision performance (in floating-point operations per second) to double precision performance
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS
-        Device supports coherently accessing pageable memory without calling cudaHostRegister on it
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS
-        Device can coherently access managed memory concurrently with the CPU
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED
-        Device supports compute preemption.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM
-        Device can access host registered memory at the same virtual address as the CPU
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1
-        Deprecated, along with v1 MemOps API, :py:obj:`~.cuStreamBatchMemOp` and related APIs are supported.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1
-        Deprecated, along with v1 MemOps API, 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related APIs.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1
-        Deprecated, along with v1 MemOps API, :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH
-        Device supports launching cooperative kernels via :py:obj:`~.cuLaunchCooperativeKernel`
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH
-        Deprecated, :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` is deprecated.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN
-        Maximum optin shared memory per block
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES
-        The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. See :py:obj:`~.Stream Memory Operations` for additional details.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED
-        Device supports host memory registration via :py:obj:`~.cudaHostRegister`.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES
-        Device accesses pageable memory via the host's page tables.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST
-        The host can directly access managed memory on the device without migration.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED
-        Deprecated, use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
-        Device supports virtual memory management APIs like :py:obj:`~.cuMemAddressReserve`, :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemMap` and related APIs
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED
-        Device supports exporting memory to a posix file descriptor with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate`
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED
-        Device supports exporting memory to a Win32 NT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate`
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED
-        Device supports exporting memory to a Win32 KMT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate`
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR
-        Maximum number of blocks per multiprocessor
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED
-        Device supports compression of memory
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE
-        Maximum L2 persisting lines capacity setting in bytes.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE
-        Maximum value of :py:obj:`~.CUaccessPolicyWindow.num_bytes`.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED
-        Device supports specifying the GPUDirect RDMA flag with :py:obj:`~.cuMemCreate`
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK
-        Shared memory reserved by CUDA driver per block in bytes
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED
-        Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED
-        Device supports using the :py:obj:`~.cuMemHostRegister` flag :py:obj:`~.CU_MEMHOSTREGISTER_READ_ONLY` to register memory that must be mapped as read-only to the GPU
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED
-        External timeline semaphore interop is supported on the device
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED
-        Device supports using the :py:obj:`~.cuMemAllocAsync` and :py:obj:`~.cuMemPool` family of APIs
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED
-        Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS
-        The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the :py:obj:`~.CUflushGPUDirectRDMAWritesOptions` enum
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING
-        GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.CUGPUDirectRDMAWritesOrdering` for the numerical values returned here.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES
-        Handle types supported with mempool based IPC
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH
-        Indicates device supports cluster launch
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED
-        Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS
-        64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related MemOp APIs.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR
-        :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported by MemOp APIs.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED
-        Device supports buffer sharing with dma_buf mechanism.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED
-        Device supports IPC Events.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT
-        Number of memory domains the device supports.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED
-        Device supports accessing memory using Tensor Map.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED
-        Device supports exporting memory to a fabric handle with :py:obj:`~.cuMemExportToShareableHandle()` or requested with :py:obj:`~.cuMemCreate()`
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS
-        Device supports unified function pointers.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG
-        NUMA configuration of a device: value is of type :py:obj:`~.CUdeviceNumaConfig` enum
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_ID
-        NUMA node ID of the GPU memory
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED
-        Device supports switch multicast and reduction operations.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MPS_ENABLED
-        Indicates if contexts created on this device will be shared via MPS
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID
-        NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED
-        Device supports CIG with D3D12.
-
-    .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX
-
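Every CUdevice_attribute above is queried the same way: cuDeviceGetAttribute returns the error code and the integer value as a tuple. A minimal sketch, with error handling elided:

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)

    # Each query returns (CUresult, int).
    attr = cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK
    err, max_threads = cuda.cuDeviceGetAttribute(attr, dev)

    attr = cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR
    err, cc_major = cuda.cuDeviceGetAttribute(attr, dev)
    print(max_threads, cc_major)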
-.. autoclass:: cuda.cuda.CUpointer_attribute
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_CONTEXT
-        The :py:obj:`~.CUcontext` on which a pointer was allocated or registered
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE
-        The :py:obj:`~.CUmemorytype` describing the physical location of a pointer
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_POINTER
-        The address at which a pointer's memory may be accessed on the device
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_HOST_POINTER
-        The address at which a pointer's memory may be accessed on the host
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_P2P_TOKENS
-        A pair of tokens for use with the nv-p2p.h Linux kernel interface
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS
-        Synchronize every synchronous memory operation initiated on this region
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_BUFFER_ID
-        A process-wide unique ID for an allocated memory region
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_MANAGED
-        Indicates if the pointer points to managed memory
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL
-        A device ordinal of a device on which a pointer was allocated or registered
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE
-        1 if this pointer maps to an allocation that is suitable for :py:obj:`~.cudaIpcGetMemHandle`, 0 otherwise
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR
-        Starting address for this requested pointer
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_SIZE
-        Size of the address range for this requested pointer
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPED
-        1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES
-        Bitmask of allowed :py:obj:`~.CUmemAllocationHandleType` for this allocation
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE
-        1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS
-        Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE
-        Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL.
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_SIZE
-        Size of the actual underlying mapping that the pointer belongs to
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR
-        The start address of the mapping that the pointer belongs to
-
-    .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID
-        A process-wide unique id corresponding to the physical allocation the pointer belongs to
-
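These attributes are read back through cuPointerGetAttribute; the Python type of the second return value depends on the attribute queried. A minimal sketch (error checks elided, assuming a CUDA-capable device):

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)
    err, dptr = cuda.cuMemAlloc(1 << 20)

    # A plain cuMemAlloc allocation reports CU_MEMORYTYPE_DEVICE.
    attr = cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE
    err, memtype = cuda.cuPointerGetAttribute(attr, dptr)
    print(memtype == cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE)

    err, = cuda.cuMemFree(dptr)
    err, = cuda.cuCtxDestroy(ctx)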
-.. autoclass:: cuda.cuda.CUfunction_attribute
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK
-        The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES
-        The size in bytes of statically-allocated shared memory required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES
-        The size in bytes of user-allocated constant memory required by this function.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES
-        The size in bytes of local memory used by each thread of this function.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS
-        The number of registers used by each thread of this function.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PTX_VERSION
-        The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_BINARY_VERSION
-        The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA
-        The attribute to indicate whether the function has been compiled with user specified option "-Xptxas --dlcm=ca" set.
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES
-        The maximum size in bytes of dynamically-allocated shared memory that can be used by this function. If the user-specified dynamic shared memory size is larger than this value, the launch will fail. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT
-        On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. Refer to :py:obj:`~.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR`. This is only a hint, and the driver can choose a different ratio if required to execute the function. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET
-        If this attribute is set, the kernel must launch with a valid cluster size specified. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH
-        The required cluster width in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.
-
-        If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT
-        The required cluster height in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.
-
-        If the value is set during compile time, it cannot be set at runtime. Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH
-        The required cluster depth in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.
-
-        If the value is set during compile time, it cannot be set at runtime. Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED
-        Whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform.
-
-        CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device.
-
-        Portable Cluster Size
-
-        A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities.
-
-        The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE
-        The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute`
-
-    .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX
-
-.. autoclass:: cuda.cuda.CUfunc_cache
-
-    .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_NONE
-        no preference for shared memory or L1 (default)
-
-    .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED
-        prefer larger shared memory and smaller L1 cache
-
-    .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_L1
-        prefer larger L1 cache and smaller shared memory
-
-    .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_EQUAL
-        prefer equal sized L1 cache and shared memory
-
-.. autoclass:: cuda.cuda.CUsharedconfig
-
-    .. autoattribute:: cuda.cuda.CUsharedconfig.CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE
-        set default shared memory bank size
-
-    .. autoattribute:: cuda.cuda.CUsharedconfig.CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE
-        set shared memory bank width to four bytes
-
-    .. autoattribute:: cuda.cuda.CUsharedconfig.CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE
-        set shared memory bank width to eight bytes
-
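The function attribute and cache enums above are exercised through cuFuncGetAttribute and cuFuncSetCacheConfig once a kernel is loaded. A minimal sketch (error checks elided; it compiles a trivial kernel with NVRTC and assumes the default PTX target is loadable on the local device; recent bindings accept a bytes object as the module image):

    from cuda import cuda, nvrtc

    # Compile a trivial kernel to PTX so there is a CUfunction to inspect.
    src = b'extern "C" __global__ void noop() {}'
    err, prog = nvrtc.nvrtcCreateProgram(src, b"noop.cu", 0, [], [])
    err, = nvrtc.nvrtcCompileProgram(prog, 0, [])
    err, size = nvrtc.nvrtcGetPTXSize(prog)
    ptx = b" " * size
    err, = nvrtc.nvrtcGetPTX(prog, ptx)

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)
    err, mod = cuda.cuModuleLoadData(ptx)
    err, func = cuda.cuModuleGetFunction(mod, b"noop")

    # Per-function limits come back through CUfunction_attribute.
    attr = cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK
    err, max_threads = cuda.cuFuncGetAttribute(attr, func)
    err, num_regs = cuda.cuFuncGetAttribute(
        cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS, func)

    # CUfunc_cache expresses the per-function L1/shared split preference.
    err, = cuda.cuFuncSetCacheConfig(
        func, cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED)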
-.. autoclass:: cuda.cuda.CUshared_carveout
-
-    .. autoattribute:: cuda.cuda.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_DEFAULT
-        No preference for shared memory or L1 (default)
-
-    .. autoattribute:: cuda.cuda.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_SHARED
-        Prefer maximum available shared memory, minimum L1 cache
-
-    .. autoattribute:: cuda.cuda.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_L1
-        Prefer maximum available L1 cache, minimum shared memory
-
-.. autoclass:: cuda.cuda.CUmemorytype
-
-    .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_HOST
-        Host memory
-
-    .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE
-        Device memory
-
-    .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_ARRAY
-        Array memory
-
-    .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_UNIFIED
-        Unified device or host memory
-
-.. autoclass:: cuda.cuda.CUcomputemode
-
-    .. autoattribute:: cuda.cuda.CUcomputemode.CU_COMPUTEMODE_DEFAULT
-        Default compute mode (Multiple contexts allowed per device)
-
-    .. autoattribute:: cuda.cuda.CUcomputemode.CU_COMPUTEMODE_PROHIBITED
-        Compute-prohibited mode (No contexts can be created on this device at this time)
-
-    .. autoattribute:: cuda.cuda.CUcomputemode.CU_COMPUTEMODE_EXCLUSIVE_PROCESS
-        Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time)
-
-.. autoclass:: cuda.cuda.CUmem_advise
-
-    .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY
-        Data will mostly be read and only occasionally be written to
-
-    .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_UNSET_READ_MOSTLY
-        Undo the effect of :py:obj:`~.CU_MEM_ADVISE_SET_READ_MOSTLY`
-
-    .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_SET_PREFERRED_LOCATION
-        Set the preferred location for the data as the specified device
-
-    .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION
-        Clear the preferred location for the data
-
-    .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_SET_ACCESSED_BY
-        Data will be accessed by the specified device, so prevent page faults as much as possible
-
-    .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_UNSET_ACCESSED_BY
-        Let the Unified Memory subsystem decide on the page faulting policy for the specified device
-
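CUmem_advise values are applied to a managed range with cuMemAdvise. A minimal sketch (error checks elided; assumes the device reports CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY):

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)

    size = 1 << 20
    err, dptr = cuda.cuMemAllocManaged(
        size, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL)

    # Mark the range read-mostly so read-only replicas may be created.
    err, = cuda.cuMemAdvise(
        dptr, size, cuda.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, dev)

    err, = cuda.cuMemFree(dptr)
    err, = cuda.cuCtxDestroy(ctx)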
-.. autoclass:: cuda.cuda.CUmem_range_attribute
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY
-        Whether the range will mostly be read and only occasionally be written to
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION
-        The preferred location of the range
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY
-        Memory range has :py:obj:`~.CU_MEM_ADVISE_SET_ACCESSED_BY` set for specified device
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION
-        The last location to which the range was prefetched
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE
-        The preferred location type of the range
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID
-        The preferred location id of the range
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE
-        The last location type to which the range was prefetched
-
-    .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID
-        The last location id to which the range was prefetched
-
-.. autoclass:: cuda.cuda.CUjit_option
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_MAX_REGISTERS
-        Max number of registers that a thread may use.
-        Option type: unsigned int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_THREADS_PER_BLOCK
-        IN: Specifies minimum number of threads per block to target compilation for
-        OUT: Returns the number of threads the compiler actually targeted. This restricts the resource utilization of the compiler (e.g. max registers) such that a block with the given number of threads should be able to launch based on register limitations. Note, this option does not currently take into account any other resource limitations, such as shared memory utilization.
-        Cannot be combined with :py:obj:`~.CU_JIT_TARGET`.
-        Option type: unsigned int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_WALL_TIME
-        Overwrites the option value with the total wall clock time, in milliseconds, spent in the compiler and linker
-        Option type: float
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_INFO_LOG_BUFFER
-        Pointer to a buffer in which to print any log messages that are informational in nature (the buffer size is specified via option :py:obj:`~.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES`)
-        Option type: char *
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES
-        IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator)
-        OUT: Amount of log buffer filled with messages
-        Option type: unsigned int
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_ERROR_LOG_BUFFER
-        Pointer to a buffer in which to print any log messages that reflect errors (the buffer size is specified via option :py:obj:`~.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES`)
-        Option type: char *
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES
-        IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator)
-        OUT: Amount of log buffer filled with messages
-        Option type: unsigned int
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_OPTIMIZATION_LEVEL
-        Level of optimizations to apply to generated code (0 - 4), with 4 being the default and highest level of optimizations.
-        Option type: unsigned int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_TARGET_FROM_CUCONTEXT
-        No option value required. Determines the target based on the current attached context (default)
-        Option type: No option value needed
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_TARGET
-        Target is chosen based on supplied :py:obj:`~.CUjit_target`. Cannot be combined with :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`.
-        Option type: unsigned int for enumerated type :py:obj:`~.CUjit_target`
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FALLBACK_STRATEGY
-        Specifies choice of fallback strategy if matching cubin is not found. Choice is based on supplied :py:obj:`~.CUjit_fallback`. This option cannot be used with cuLink* APIs as the linker requires exact matches.
-        Option type: unsigned int for enumerated type :py:obj:`~.CUjit_fallback`
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GENERATE_DEBUG_INFO
-        Specifies whether to create debug information in output (-g) (0: false, default)
-        Option type: int
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_LOG_VERBOSE
-        Generate verbose log messages (0: false, default)
-        Option type: int
-        Applies to: compiler and linker
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GENERATE_LINE_INFO
-        Generate line number information (-lineinfo) (0: false, default)
-        Option type: int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_CACHE_MODE
-        Specifies whether to enable caching explicitly (-dlcm)
-        Choice is based on supplied :py:obj:`~.CUjit_cacheMode_enum`.
-        Option type: unsigned int for enumerated type :py:obj:`~.CUjit_cacheMode_enum`
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_NEW_SM3X_OPT
-        [Deprecated]
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FAST_COMPILE
-        This jit option is used for internal purpose only.
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GLOBAL_SYMBOL_NAMES
-        Array of device symbol names that will be relocated to the corresponding host addresses stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES`.
-        Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries.
-        When loading a device module, driver will relocate all encountered unresolved symbols to the host addresses.
-        It is only allowed to register symbols that correspond to unresolved global variables.
-        It is illegal to register the same device symbol at multiple addresses.
-        Option type: const char **
-        Applies to: dynamic linker only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GLOBAL_SYMBOL_ADDRESSES
-        Array of host addresses that will be used to relocate corresponding device symbols stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES`.
-        Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries.
-        Option type: void **
-        Applies to: dynamic linker only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GLOBAL_SYMBOL_COUNT
-        Number of entries in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES` and :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES` arrays.
-        Option type: unsigned int
-        Applies to: dynamic linker only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_LTO
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FTZ
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_PREC_DIV
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_PREC_SQRT
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FMA
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_KERNEL_NAMES
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_KERNEL_COUNT
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_VARIABLE_NAMES
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_VARIABLE_COUNT
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_POSITION_INDEPENDENT_CODE
-        Generate position independent code (0: false)
-        Option type: int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_MIN_CTA_PER_SM
-        This option hints to the JIT compiler the minimum number of CTAs from the kernel's grid to be mapped to an SM. This option is ignored when used together with :py:obj:`~.CU_JIT_MAX_REGISTERS` or :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`. Optimizations based on this option need :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` to be specified as well. For kernels already using PTX directive .minnctapersm, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive.
-        Option type: unsigned int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_MAX_THREADS_PER_BLOCK
-        Maximum number of threads in a thread block, computed as the product of the maximum extent specified for each dimension of the block. This limit is guaranteed not to be exceeded in any invocation of the kernel. Exceeding the maximum number of threads results in runtime error or kernel launch failure. For kernels already using PTX directive .maxntid, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive.
-        Option type: int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_OVERRIDE_DIRECTIVE_VALUES
-        This option lets the values specified using :py:obj:`~.CU_JIT_MAX_REGISTERS`, :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`, :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` and :py:obj:`~.CU_JIT_MIN_CTA_PER_SM` take precedence over any PTX directives. (0: Disable, default; 1: Enable)
-        Option type: int
-        Applies to: compiler only
-
-    .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_NUM_OPTIONS
-
-.. autoclass:: cuda.cuda.CUjit_target
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_30
-        Compute device class 3.0
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_32
-        Compute device class 3.2
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_35
-        Compute device class 3.5
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_37
-        Compute device class 3.7
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_50
-        Compute device class 5.0
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_52
-        Compute device class 5.2
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_53
-        Compute device class 5.3
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_60
-        Compute device class 6.0.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_61
-        Compute device class 6.1.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_62
-        Compute device class 6.2.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_70
-        Compute device class 7.0.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_72
-        Compute device class 7.2.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_75
-        Compute device class 7.5.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_80
-        Compute device class 8.0.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_86
-        Compute device class 8.6.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_87
-        Compute device class 8.7.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_89
-        Compute device class 8.9.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_90
-        Compute device class 9.0.
-
-    .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_90A
-        Compute device class 9.0. with accelerated features.
-
-.. autoclass:: cuda.cuda.CUjit_fallback
-
-    .. autoattribute:: cuda.cuda.CUjit_fallback.CU_PREFER_PTX
-        Prefer to compile ptx if exact binary match not found
-
-    .. autoattribute:: cuda.cuda.CUjit_fallback.CU_PREFER_BINARY
-        Prefer to fall back to compatible binary code if exact match not found
-
-.. autoclass:: cuda.cuda.CUjit_cacheMode
-
-    .. autoattribute:: cuda.cuda.CUjit_cacheMode.CU_JIT_CACHE_OPTION_NONE
-        Compile with no -dlcm flag specified
-
-    .. autoattribute:: cuda.cuda.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CG
-        Compile with L1 cache disabled
-
-    .. autoattribute:: cuda.cuda.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CA
-        Compile with L1 cache enabled
-
-.. autoclass:: cuda.cuda.CUjitInputType
-
-    .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_CUBIN
-        Compiled device-class-specific device code
-        Applicable options: none
-
-    .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_PTX
-        PTX source code
-        Applicable options: PTX compiler options
-
-    .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_FATBINARY
-        Bundle of multiple cubins and/or PTX of some device code
-        Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY`
-
-    .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_OBJECT
-        Host object with embedded device code
-        Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY`
-
-    .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_LIBRARY
-        Archive of host objects with embedded device code
-        Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY`
-
-    .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_NVVM
-        [Deprecated]
-
-        Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0
-
-    .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_NUM_INPUT_TYPES
-
-.. autoclass:: cuda.cuda.CUgraphicsRegisterFlags
-
-    .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_NONE
-
-    .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY
-
-    .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD
-
-    .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST
-
-    .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER
-
-.. autoclass:: cuda.cuda.CUgraphicsMapResourceFlags
-
-    .. autoattribute:: cuda.cuda.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE
-
-    .. autoattribute:: cuda.cuda.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY
-
-    .. autoattribute:: cuda.cuda.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD
-
-.. autoclass:: cuda.cuda.CUarray_cubemap_face
-
-    .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_X
-        Positive X face of cubemap
-
-    .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_X
-        Negative X face of cubemap
-
-    .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Y
-        Positive Y face of cubemap
-
-    .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Y
-        Negative Y face of cubemap
-
-    .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Z
-        Positive Z face of cubemap
-
-    .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Z
-        Negative Z face of cubemap
-
-.. autoclass:: cuda.cuda.CUlimit
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_STACK_SIZE
-        GPU thread stack size
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE
-        GPU printf FIFO size
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_MALLOC_HEAP_SIZE
-        GPU malloc heap size
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH
-        GPU device runtime launch synchronize depth
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT
-        GPU device runtime pending launch count
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_MAX_L2_FETCH_GRANULARITY
-        A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_PERSISTING_L2_CACHE_SIZE
-        A size in bytes for L2 persisting lines cache size
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_SHMEM_SIZE
-        A maximum size in bytes of shared memory available to CUDA kernels on a CIG context. Can only be queried, cannot be set
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_CIG_ENABLED
-        A non-zero value indicates this CUDA context is a CIG-enabled context. Can only be queried, cannot be set
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED
-        When set to a non-zero value, CUDA will fail to launch a kernel on a CIG context, instead of using the fallback path, if the kernel uses more shared memory than available
-
-    .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_MAX
-
-.. autoclass:: cuda.cuda.CUresourcetype
-
-    .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
-        Array resource
-
-    .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY
-        Mipmapped array resource
-
-    .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_LINEAR
-        Linear resource
-
-    .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_PITCH2D
-        Pitch 2D resource
-
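Referring back to the CUlimit entries above: limits are read and written per context with cuCtxGetLimit and cuCtxSetLimit. A minimal sketch, error handling elided:

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)

    # Read the current per-thread stack size, then double it for this context.
    err, stack = cuda.cuCtxGetLimit(cuda.CUlimit.CU_LIMIT_STACK_SIZE)
    err, = cuda.cuCtxSetLimit(cuda.CUlimit.CU_LIMIT_STACK_SIZE, 2 * stack)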
-.. autoclass:: cuda.cuda.CUaccessProperty
-
-    .. autoattribute:: cuda.cuda.CUaccessProperty.CU_ACCESS_PROPERTY_NORMAL
-        Normal cache persistence.
-
-    .. autoattribute:: cuda.cuda.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING
-        Streaming access is less likely to persist in cache.
-
-    .. autoattribute:: cuda.cuda.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING
-        Persisting access is more likely to persist in cache.
-
-.. autoclass:: cuda.cuda.CUgraphConditionalNodeType
-
-    .. autoattribute:: cuda.cuda.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_IF
-        Conditional 'if' Node. Body executed once if condition value is non-zero.
-
-    .. autoattribute:: cuda.cuda.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_WHILE
-        Conditional 'while' Node. Body executed repeatedly while condition value is non-zero.
-
-.. autoclass:: cuda.cuda.CUgraphNodeType
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_KERNEL
-        GPU kernel node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMCPY
-        Memcpy node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMSET
-        Memset node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_HOST
-        Host (executable) node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_GRAPH
-        Node which executes an embedded graph
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EMPTY
-        Empty (no-op) node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_WAIT_EVENT
-        External event wait node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD
-        External event record node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL
-        External semaphore signal node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT
-        External semaphore wait node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_ALLOC
-        Memory Allocation Node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_FREE
-        Memory Free Node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_BATCH_MEM_OP
-        Batch MemOp Node
-
-    .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_CONDITIONAL
-        Conditional Node. May be used to implement a conditional execution path or loop inside of a graph. The graph(s) contained within the body of the conditional node can be selectively executed or iterated upon based on the value of a conditional variable.
-
-        Handles must be created in advance of creating the node using :py:obj:`~.cuGraphConditionalHandleCreate`.
-
-        The following restrictions apply to graphs which contain conditional nodes:
-        The graph cannot be used in a child node.
-        Only one instantiation of the graph may exist at any point in time.
-        The graph cannot be cloned.
-
-        To set the control value, supply a default value when creating the handle and/or call :py:obj:`~.cudaGraphSetConditional` from device code.
-
-.. autoclass:: cuda.cuda.CUgraphDependencyType
-
-    .. autoattribute:: cuda.cuda.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_DEFAULT
-        This is an ordinary dependency.
-
-    .. autoattribute:: cuda.cuda.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC
-        This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC` or :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER` outgoing port.
-
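The graph node types above appear whenever a graph is built and instantiated through the driver API. A minimal end-to-end sketch (error checks elided; assumes the CUDA 12 cuGraphInstantiate signature, which takes only the graph and a flags value):

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)

    err, graph = cuda.cuGraphCreate(0)
    err, node = cuda.cuGraphAddEmptyNode(graph, None, 0)
    err, ntype = cuda.cuGraphNodeGetType(node)  # CU_GRAPH_NODE_TYPE_EMPTY

    err, graph_exec = cuda.cuGraphInstantiate(graph, 0)
    err, stream = cuda.cuStreamCreate(0)
    err, = cuda.cuGraphLaunch(graph_exec, stream)
    err, = cuda.cuStreamSynchronize(stream)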
autoclass:: cuda.cuda.CUgraphInstantiateResult - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_SUCCESS - - - Instantiation succeeded - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_ERROR - - - Instantiation failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE - - - Instantiation failed due to invalid structure, such as cycles - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED - - - Instantiation for device launch failed because the graph contained an unsupported operation - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED - - - Instantiation for device launch failed due to the nodes belonging to different contexts - -.. autoclass:: cuda.cuda.CUsynchronizationPolicy - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_AUTO - - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_SPIN - - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_YIELD - - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_BLOCKING_SYNC - -.. autoclass:: cuda.cuda.CUclusterSchedulingPolicy - - .. autoattribute:: cuda.cuda.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_DEFAULT - - - the default policy - - - .. autoattribute:: cuda.cuda.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_SPREAD - - - spread the blocks within a cluster to the SMs - - - .. autoattribute:: cuda.cuda.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING - - - allow the hardware to load-balance the blocks in a cluster to the SMs - -.. autoclass:: cuda.cuda.CUlaunchMemSyncDomain - - .. autoattribute:: cuda.cuda.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT - - - Launch kernels in the default domain - - - .. autoattribute:: cuda.cuda.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE - - - Launch kernels in the remote domain - -.. autoclass:: cuda.cuda.CUlaunchAttributeID - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_IGNORE - - - Ignored entry, for convenient composition - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.accessPolicyWindow`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_COOPERATIVE - - - Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.cooperative`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY - - - Valid for streams. See :py:obj:`~.CUlaunchAttributeValue.syncPolicy`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION - - - Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterDim`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE - - - Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterSchedulingPolicyPreference`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION - - - Valid for launches. 
Setting :py:obj:`~.CUlaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT - - - Valid for launches. Set :py:obj:`~.CUlaunchAttributeValue.programmaticEvent` to record the event. An event recorded through this launch attribute is guaranteed to trigger only after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cuEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cuEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.priority`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomainMap`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomain`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT - - - Valid for launches. Set :py:obj:`~.CUlaunchAttributeValue.launchCompletionEvent` to record the event. - - Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B has a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. - - A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE - - - Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. - - :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`. - - Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cuGraphDestroyNode`. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cuGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cuGraphExecUpdate`. - - If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cuGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT - - - Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.CUlaunchAttributeValue.sharedMemCarveout` to a percentage between 0 and 100 signals the CUDA driver to set the shared memory carveout preference, in percent of the total shared memory for that kernel launch. This attribute takes precedence over :py:obj:`~.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT`. This is only a hint, and the CUDA driver can choose a different configuration if required for the launch. - -
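Launch attributes such as :py:obj:`~.CU_LAUNCH_ATTRIBUTE_PRIORITY` above are attached to a launch through a :py:obj:`~.CUlaunchConfig` passed to :py:obj:`~.cuLaunchKernelEx`. A minimal sketch, assuming a hypothetical `kernel` (a loaded CUfunction taking no parameters) and `stream` already exist:

.. code-block:: python

    # Sketch: attach a per-launch priority attribute via CUlaunchConfig.
    # `kernel` and `stream` are assumed to exist; error checks elided.
    from cuda import cuda

    attr = cuda.CUlaunchAttribute()
    attr.id = cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY
    attr.value.priority = 1  # illustrative priority value

    config = cuda.CUlaunchConfig()
    config.gridDimX = config.gridDimY = config.gridDimZ = 1
    config.blockDimX = 128
    config.blockDimY = config.blockDimZ = 1
    config.hStream = stream
    config.attrs = [attr]
    config.numAttrs = 1

    err, = cuda.cuLaunchKernelEx(config, kernel, 0, 0)  # no kernel params

The same pattern carries any of the attribute IDs above; only `attr.id` and the matching :py:obj:`~.CUlaunchAttributeValue` field change.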
.. autoclass:: cuda.cuda.CUstreamCaptureStatus - - .. autoattribute:: cuda.cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_NONE - - - Stream is not capturing - - - .. autoattribute:: cuda.cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE - - - Stream is actively capturing - - - .. autoattribute:: cuda.cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_INVALIDATED - - - Stream is part of a capture sequence that has been invalidated, but not terminated - -.. autoclass:: cuda.cuda.CUstreamCaptureMode - - .. autoattribute:: cuda.cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL - - - .. autoattribute:: cuda.cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_THREAD_LOCAL - - - .. autoattribute:: cuda.cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_RELAXED - -
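The capture status and mode values above appear in the stream capture workflow: begin capture, optionally query the status, then end capture to obtain a graph. A minimal sketch (no work is actually captured here):

.. code-block:: python

    # Sketch: capture an (empty) stretch of stream work into a CUgraph.
    from cuda import cuda

    err, stream = cuda.cuStreamCreate(0)
    err, = cuda.cuStreamBeginCapture(
        stream, cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL)

    # Work issued into `stream` here is recorded, not executed.
    err, status = cuda.cuStreamIsCapturing(stream)
    assert status == cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE

    err, graph = cuda.cuStreamEndCapture(stream)
    err, = cuda.cuGraphDestroy(graph)
    err, = cuda.cuStreamDestroy(stream)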
.. autoclass:: cuda.cuda.CUdriverProcAddress_flags - - .. autoattribute:: cuda.cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT - - - Default search mode for driver symbols. - - - .. autoattribute:: cuda.cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_LEGACY_STREAM - - - Search for legacy versions of driver symbols. - - - .. autoattribute:: cuda.cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM - - - Search for per-thread versions of driver symbols. - -.. autoclass:: cuda.cuda.CUdriverProcAddressQueryResult - - .. autoattribute:: cuda.cuda.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS - - - Symbol was successfully found - - - .. autoattribute:: cuda.cuda.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND - - - Symbol was not found in search - - - .. autoattribute:: cuda.cuda.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT - - - Symbol was found but version supplied was not sufficient - -.. autoclass:: cuda.cuda.CUexecAffinityType - - .. autoattribute:: cuda.cuda.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT - - - Create a context with limited SMs. - - - .. autoattribute:: cuda.cuda.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_MAX - -.. autoclass:: cuda.cuda.CUcigDataType - - .. autoattribute:: cuda.cuda.CUcigDataType.CIG_DATA_TYPE_D3D12_COMMAND_QUEUE - -.. autoclass:: cuda.cuda.CUlibraryOption - - .. autoattribute:: cuda.cuda.CUlibraryOption.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE - - - .. autoattribute:: cuda.cuda.CUlibraryOption.CU_LIBRARY_BINARY_IS_PRESERVED - - - Specifies that the argument `code` passed to :py:obj:`~.cuLibraryLoadData()` will be preserved. Specifying this option will let the driver know that `code` can be accessed at any point until :py:obj:`~.cuLibraryUnload()`. The default behavior is for the driver to allocate and maintain its own copy of `code`. Note that this is only a memory usage optimization hint and the driver can choose to ignore it if required. Specifying this option with :py:obj:`~.cuLibraryLoadFromFile()` is invalid and will return :py:obj:`~.CUDA_ERROR_INVALID_VALUE`. - - - .. autoattribute:: cuda.cuda.CUlibraryOption.CU_LIBRARY_NUM_OPTIONS - -.. autoclass:: cuda.cuda.CUresult - - .. autoattribute:: cuda.cuda.CUresult.CUDA_SUCCESS - - - The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`). - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_VALUE - - - This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_OUT_OF_MEMORY - - - The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_INITIALIZED - - - This indicates that the CUDA driver has not been initialized with :py:obj:`~.cuInit()` or that initialization has failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_DEINITIALIZED - - - This indicates that the CUDA driver is in the process of shutting down. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_DISABLED - - - This indicates the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like visual profiler. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_NOT_INITIALIZED - - - [Deprecated] - - - .. 
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_ALREADY_STARTED - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_ALREADY_STOPPED - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STUB_LIBRARY - - - This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will see CUDA API calls return this error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_DEVICE_UNAVAILABLE - - - This indicates that the requested CUDA device is unavailable at the current time. Devices are often unavailable due to use of :py:obj:`~.CU_COMPUTEMODE_EXCLUSIVE_PROCESS` or :py:obj:`~.CU_COMPUTEMODE_PROHIBITED`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NO_DEVICE - - - This indicates that no CUDA-capable devices were detected by the installed CUDA driver. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_DEVICE - - - This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_DEVICE_NOT_LICENSED - - - This error indicates that the Grid license is not applied. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_IMAGE - - - This indicates that the device kernel image is invalid. This can also indicate an invalid CUDA module. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_CONTEXT - - - This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. This can also be returned if the green context passed to an API call was not converted to a :py:obj:`~.CUcontext` using the :py:obj:`~.cuCtxFromGreenCtx` API. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CONTEXT_ALREADY_CURRENT - - - This indicated that the context being supplied as a parameter to the API call was already the active context. [Deprecated] - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MAP_FAILED - - - This indicates that a map or register operation has failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNMAP_FAILED - - - This indicates that an unmap or unregister operation has failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ARRAY_IS_MAPPED - - - This indicates that the specified array is currently mapped and thus cannot be destroyed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ALREADY_MAPPED - - - This indicates that the resource is already mapped. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NO_BINARY_FOR_GPU - - - This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ALREADY_ACQUIRED - - - This indicates that a resource has already been acquired. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_MAPPED - - - This indicates that a resource is not mapped. - - - .. 
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_MAPPED_AS_ARRAY - - - This indicates that a mapped resource is not available for access as an array. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_MAPPED_AS_POINTER - - - This indicates that a mapped resource is not available for access as a pointer. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ECC_UNCORRECTABLE - - - This indicates that an uncorrectable ECC error was detected during execution. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_LIMIT - - - This indicates that the :py:obj:`~.CUlimit` passed to the API call is not supported by the active device. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CONTEXT_ALREADY_IN_USE - - - This indicates that the :py:obj:`~.CUcontext` passed to the API call can only be bound to a single CPU thread at a time but is already bound to a CPU thread. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PEER_ACCESS_UNSUPPORTED - - - This indicates that peer access is not supported across the given devices. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_PTX - - - This indicates that a PTX JIT compilation failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_GRAPHICS_CONTEXT - - - This indicates an error with the OpenGL or DirectX context. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NVLINK_UNCORRECTABLE - - - This indicates that an uncorrectable NVLink error was detected during the execution. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_JIT_COMPILER_NOT_FOUND - - - This indicates that the PTX JIT compiler library was not found. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_PTX_VERSION - - - This indicates that the provided PTX was compiled with an unsupported toolchain. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_JIT_COMPILATION_DISABLED - - - This indicates that the PTX JIT compilation was disabled. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY - - - This indicates that the :py:obj:`~.CUexecAffinityType` passed to the API call is not supported by the active device. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC - - - This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_SOURCE - - - This indicates that the device kernel source is invalid. This includes compilation/linker errors encountered in device code or user error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_FILE_NOT_FOUND - - - This indicates that the file specified was not found. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND - - - This indicates that a link to a shared object failed to resolve. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - - - This indicates that initialization of a shared object failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_OPERATING_SYSTEM - - - This indicates that an OS call failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_HANDLE - - - This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like :py:obj:`~.CUstream` and :py:obj:`~.CUevent`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ILLEGAL_STATE - - - This indicates that a resource required by the API call is not in a valid state to perform the requested operation. - - - .. 
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LOSSY_QUERY - - - This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_FOUND - - - This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_READY - - - This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than :py:obj:`~.CUDA_SUCCESS` (which indicates completion). Calls that may return this value include :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ILLEGAL_ADDRESS - - - While executing a kernel, the device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES - - - This indicates that a launch did not occur because it did not have appropriate resources. This error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. Passing arguments of the wrong size (i.e. a 64-bit pointer when a 32-bit int is expected) is equivalent to passing too many arguments and can also result in this error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_TIMEOUT - - - This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT` for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING - - - This error indicates a kernel launch that uses an incompatible texturing mode. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED - - - This error indicates that a call to :py:obj:`~.cuCtxEnablePeerAccess()` is trying to re-enable peer access to a context which has already had peer access to it enabled. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PEER_ACCESS_NOT_ENABLED - - - This error indicates that :py:obj:`~.cuCtxDisablePeerAccess()` is trying to disable peer access which has not been enabled yet via :py:obj:`~.cuCtxEnablePeerAccess()`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE - - - This error indicates that the primary context for the specified device has already been initialized. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CONTEXT_IS_DESTROYED - - - This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ASSERT - - - A device-side assert triggered during kernel execution. 
The context cannot be used anymore, and must be destroyed. All existing device memory allocations from this context are invalid and must be reconstructed if the program is to continue using CUDA. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_TOO_MANY_PEERS - - - This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cuCtxEnablePeerAccess()`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED - - - This error indicates that the memory range passed to :py:obj:`~.cuMemHostRegister()` has already been registered. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED - - - This error indicates that the pointer passed to :py:obj:`~.cuMemHostUnregister()` does not correspond to any currently registered memory region. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_HARDWARE_STACK_ERROR - - - While executing a kernel, the device encountered a stack error. This can be due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ILLEGAL_INSTRUCTION - - - While executing a kernel, the device encountered an illegal instruction. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MISALIGNED_ADDRESS - - - While executing a kernel, the device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_ADDRESS_SPACE - - - While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_PC - - - While executing a kernel, the device program counter wrapped its address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_FAILED - - - An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. 
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE - - - This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cuLaunchCooperativeKernel` or :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_PERMITTED - - - This error indicates that the attempted operation is not permitted. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_SUPPORTED - - - This error indicates that the attempted operation is not supported on the current system or device. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SYSTEM_NOT_READY - - - This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH - - - This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE - - - This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_CONNECTION_FAILED - - - This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_RPC_FAILURE - - - This error indicates that the remote procedure call between the MPS server and the MPS client failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_SERVER_NOT_READY - - - This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_MAX_CLIENTS_REACHED - - - This error indicates that the hardware resources required to create an MPS client have been exhausted. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED - - - This error indicates that the hardware resources required to support device connections have been exhausted. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_CLIENT_TERMINATED - - - This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CDP_NOT_SUPPORTED - - - This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. - - - .. 
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CDP_VERSION_MISMATCH - - - This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED - - - This error indicates that the operation is not permitted when the stream is capturing. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_INVALIDATED - - - This error indicates that the current capture sequence on the stream has been invalidated due to a previous error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_MERGE - - - This error indicates that the operation would have resulted in a merge of two independent capture sequences. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNMATCHED - - - This error indicates that the capture was not initiated in this stream. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNJOINED - - - This error indicates that the capture sequence contains a fork that was not joined to the primary stream. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_ISOLATION - - - This error indicates that a dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_IMPLICIT - - - This error indicates a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CAPTURED_EVENT - - - This error indicates that the operation is not permitted on an event which was last recorded in a capturing stream. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD - - - A stream capture sequence not initiated with the :py:obj:`~.CU_STREAM_CAPTURE_MODE_RELAXED` argument to :py:obj:`~.cuStreamBeginCapture` was passed to :py:obj:`~.cuStreamEndCapture` in a different thread. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_TIMEOUT - - - This error indicates that the timeout specified for the wait operation has lapsed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE - - - This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_EXTERNAL_DEVICE - - - This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_CLUSTER_SIZE - - - Indicates a kernel launch error due to cluster misconfiguration. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_FUNCTION_NOT_LOADED - - - Indicates that a function handle is not loaded when calling an API that requires a loaded function. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_RESOURCE_TYPE - - - This error indicates one or more resources passed in are not valid resource types for the operation. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION - - - This error indicates one or more resources are insufficient or non-applicable for the operation. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNKNOWN - - - This indicates that an unknown internal error has occurred. - -
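Because every call in these bindings returns a :py:obj:`~.CUresult` as the first element of its result tuple, a small checker that raises on failure is a common pattern; a sketch (the helper name `check` is arbitrary):

.. code-block:: python

    # Sketch: translate a non-successful CUresult into a Python exception.
    from cuda import cuda

    def check(err):
        if err != cuda.CUresult.CUDA_SUCCESS:
            _, name = cuda.cuGetErrorName(err)
            _, desc = cuda.cuGetErrorString(err)
            raise RuntimeError(f"{name.decode()}: {desc.decode()}")

    err, = cuda.cuInit(0)
    check(err)

As noted above, :py:obj:`~.CUDA_ERROR_NOT_READY` from :py:obj:`~.cuEventQuery()` or :py:obj:`~.cuStreamQuery()` signals incompletion rather than failure, so a production helper may wish to let it pass through.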
.. autoclass:: cuda.cuda.CUdevice_P2PAttribute - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK - - - A relative value indicating the performance of the link between two devices - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED - - - P2P Access is enabled - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED - - - Atomic operation over the link supported - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED - - - Accessing CUDA arrays over the link supported - -.. autoclass:: cuda.cuda.CUresourceViewFormat - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_NONE - - - No resource view format (use underlying resource format) - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X8 - - - 1 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X8 - - - 2 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X8 - - - 4 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X8 - - - 1 channel signed 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X8 - - - 2 channel signed 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X8 - - - 4 channel signed 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X16 - - - 1 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X16 - - - 2 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X16 - - - 4 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X16 - - - 1 channel signed 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X16 - - - 2 channel signed 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X16 - - - 4 channel signed 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X32 - - - 1 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X32 - - - 2 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X32 - - - 4 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X32 - - - 1 channel signed 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X32 - - - 2 channel signed 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X32 - - - 4 channel signed 32-bit integers - - - .. 
autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X16 - - - 1 channel 16-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X16 - - - 2 channel 16-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X16 - - - 4 channel 16-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X32 - - - 1 channel 32-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X32 - - - 2 channel 32-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X32 - - - 4 channel 32-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC1 - - - Block compressed 1 - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC2 - - - Block compressed 2 - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC3 - - - Block compressed 3 - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC4 - - - Block compressed 4 unsigned - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC4 - - - Block compressed 4 signed - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC5 - - - Block compressed 5 unsigned - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC5 - - - Block compressed 5 signed - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC6H - - - Block compressed 6 unsigned half-float - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC6H - - - Block compressed 6 signed half-float - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC7 - - - Block compressed 7 - -.. autoclass:: cuda.cuda.CUtensorMapDataType - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT8 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT16 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT32 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT32 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT64 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT64 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT16 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT64 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ - -.. autoclass:: cuda.cuda.CUtensorMapInterleave - - .. autoattribute:: cuda.cuda.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_NONE - - - .. autoattribute:: cuda.cuda.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_16B - - - .. autoattribute:: cuda.cuda.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_32B - -.. autoclass:: cuda.cuda.CUtensorMapSwizzle - - .. autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_NONE - - - .. 
autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_32B - - - .. autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_64B - - - .. autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_128B - -.. autoclass:: cuda.cuda.CUtensorMapL2promotion - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_NONE - - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_64B - - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_128B - - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_256B - -.. autoclass:: cuda.cuda.CUtensorMapFloatOOBfill - - .. autoattribute:: cuda.cuda.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE - - - .. autoattribute:: cuda.cuda.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA - -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS - - .. autoattribute:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE - - - No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations - - - .. autoattribute:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ - - - Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case. - - - .. autoattribute:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE - - - Read-write access, the device has full read-write access to the memory - -.. autoclass:: cuda.cuda.CUexternalMemoryHandleType - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT - - - Handle is an opaque, globally shared handle - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP - - - Handle is a D3D12 heap object - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE - - - Handle is a D3D12 committed resource - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE - - - Handle is a shared NT handle to a D3D11 resource - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT - - - Handle is a globally shared handle to a D3D11 resource - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF - - - Handle is an NvSciBuf object - -.. autoclass:: cuda.cuda.CUexternalSemaphoreHandleType - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT - - - Handle is an opaque, globally shared handle - - - .. 
autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE - - - Handle is a shared NT handle referencing a D3D12 fence object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE - - - Handle is a shared NT handle referencing a D3D11 fence object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC - - - Opaque handle to NvSciSync Object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX - - - Handle is a shared NT handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT - - - Handle is a globally shared handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD - - - Handle is an opaque file descriptor referencing a timeline semaphore - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 - - - Handle is an opaque shared NT handle referencing a timeline semaphore - -.. autoclass:: cuda.cuda.CUmemAllocationHandleType - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_NONE - - - Does not allow any export mechanism. - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR - - - Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32 - - - Allows a Win32 NT handle to be used for exporting. (HANDLE) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32_KMT - - - Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_FABRIC - - - Allows a fabric handle to be used for exporting. (CUmemFabricHandle) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_MAX - -.. autoclass:: cuda.cuda.CUmemAccess_flags - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_NONE - - - Default, make the address range not accessible - - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READ - - - Make the address range read accessible - - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE - - - Make the address range read-write accessible - - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_MAX - -.. autoclass:: cuda.cuda.CUmemLocationType - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_INVALID - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE - - - Location is a device location, thus id is a device ordinal - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST - - - Location is host, id is ignored - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA - - - Location is a host NUMA node, thus id is a host NUMA node id - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT - - - Location is a host NUMA node of the current thread, id is ignored - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_MAX - -
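The handle-type, access-flag and location enums above, together with the allocation-type and granularity enums documented just below, are consumed through a :py:obj:`~.CUmemAllocationProp` in the virtual memory management APIs. A minimal sketch, assuming device ordinal 0 on a POSIX platform:

.. code-block:: python

    # Sketch: create a physical allocation exportable as a POSIX fd.
    from cuda import cuda

    prop = cuda.CUmemAllocationProp()
    prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0  # device ordinal (assumed)
    prop.requestedHandleTypes = (
        cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR)

    err, granularity = cuda.cuMemGetAllocationGranularity(
        prop,
        cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)

    # Sizes passed to cuMemCreate must be a multiple of the granularity.
    err, handle = cuda.cuMemCreate(granularity, prop, 0)
    err, = cuda.cuMemRelease(handle)

.. 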
autoclass:: cuda.cuda.CUmemAllocationType - - .. autoattribute:: cuda.cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_INVALID - - - .. autoattribute:: cuda.cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED - - - This allocation type is 'pinned', i.e. cannot migrate from its current location while the application is actively using it - - - .. autoattribute:: cuda.cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_MAX - -.. autoclass:: cuda.cuda.CUmemAllocationGranularity_flags - - .. autoattribute:: cuda.cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM - - - Minimum required granularity for allocation - - - .. autoattribute:: cuda.cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED - - - Recommended granularity for allocation for best performance - -.. autoclass:: cuda.cuda.CUmemRangeHandleType - - .. autoattribute:: cuda.cuda.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD - - - .. autoattribute:: cuda.cuda.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_MAX - -.. autoclass:: cuda.cuda.CUarraySparseSubresourceType - - .. autoattribute:: cuda.cuda.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL - - - .. autoattribute:: cuda.cuda.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL - -.. autoclass:: cuda.cuda.CUmemOperationType - - .. autoattribute:: cuda.cuda.CUmemOperationType.CU_MEM_OPERATION_TYPE_MAP - - - .. autoattribute:: cuda.cuda.CUmemOperationType.CU_MEM_OPERATION_TYPE_UNMAP - -.. autoclass:: cuda.cuda.CUmemHandleType - - .. autoattribute:: cuda.cuda.CUmemHandleType.CU_MEM_HANDLE_TYPE_GENERIC - -.. autoclass:: cuda.cuda.CUmemAllocationCompType - - .. autoattribute:: cuda.cuda.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_NONE - - - Allocating non-compressible memory - - - .. autoattribute:: cuda.cuda.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_GENERIC - - - Allocating compressible memory - -.. autoclass:: cuda.cuda.CUmulticastGranularity_flags - - .. autoattribute:: cuda.cuda.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_MINIMUM - - - Minimum required granularity - - - .. autoattribute:: cuda.cuda.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_RECOMMENDED - - - Recommended granularity for best performance - -.. autoclass:: cuda.cuda.CUgraphExecUpdateResult - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_SUCCESS - - - The update succeeded - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR - - - The update failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED - - - The update failed because the topology changed - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED - - - The update failed because a node type changed - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED - - - The update failed because the function of a kernel node changed (CUDA driver < 11.2) - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED - - - The update failed because the parameters changed in a way that is not supported - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED - - - The update failed because something about the node is not supported - - - .. 
autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE - - - The update failed because the function of a kernel node changed in an unsupported way - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED - - - The update failed because the node attributes changed in a way that is not supported - -.. autoclass:: cuda.cuda.CUmemPool_attribute - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES - - - (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC - - - (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES - - - (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync (default enabled). - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD - - - (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. - -
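A sketch of how :py:obj:`~.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD` above is typically set on a device's default pool; the 64 MiB threshold is purely illustrative, and wrapping the value in `cuda.cuuint64_t` is assumed to match this binding's convention for cuuint64_t-typed attributes:

.. code-block:: python

    # Sketch: keep freed memory cached in the default pool up to a threshold.
    from cuda import cuda

    err, dev = cuda.cuDeviceGet(0)
    err, pool = cuda.cuDeviceGetDefaultMemPool(dev)

    # Illustrative 64 MiB threshold; 0 (the default) releases at every sync.
    err, = cuda.cuMemPoolSetAttribute(
        pool,
        cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
        cuda.cuuint64_t(64 * 1024 * 1024))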
.. autoclass:: cuda.cuda.CUgraphMem_attribute - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs - - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. - - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - -.. autoclass:: cuda.cuda.CUflushGPUDirectRDMAWritesOptions - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST - - - :py:obj:`~.cuFlushGPUDirectRDMAWrites()` and its CUDA Runtime API counterpart are supported on the device. - - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS - - - The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. - -.. autoclass:: cuda.cuda.CUGPUDirectRDMAWritesOrdering - - .. autoattribute:: cuda.cuda.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE - - - The device does not natively support ordering of remote writes. :py:obj:`~.cuFlushGPUDirectRDMAWrites()` can be leveraged if supported. - - - .. autoattribute:: cuda.cuda.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER - - - Natively, the device can consistently consume remote writes, although other CUDA devices may not. - - - .. autoattribute:: cuda.cuda.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES - - - Any CUDA device in the system can consistently consume remote writes to this device. - -.. autoclass:: cuda.cuda.CUflushGPUDirectRDMAWritesScope - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER - - - Blocks until remote writes are visible to the CUDA device context owning the data. - - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES - - - Blocks until remote writes are visible to all CUDA device contexts. - -.. autoclass:: cuda.cuda.CUflushGPUDirectRDMAWritesTarget - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesTarget.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX - - - Sets the target for :py:obj:`~.cuFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. - -.. autoclass:: cuda.cuda.CUgraphDebugDot_flags - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE - - - Output all debug data as if every debug flag is enabled - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES - - - Use CUDA Runtime structures for output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS - - - Adds CUDA_KERNEL_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS - - - Adds CUDA_MEMCPY3D values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS - - - Adds CUDA_MEMSET_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS - - - Adds CUDA_HOST_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS - - - Adds CUevent handle from record and wait nodes to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS - - - Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output - - - .. 
autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS - - - Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES - - - Adds CUkernelNodeAttrValue values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES - - - Adds node handles and every kernel function handle to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS - - - Adds memory alloc node parameters to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS - - - Adds memory free node parameters to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS - - - Adds batch mem op node parameters to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO - - - Adds edge numbering information - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS - - - Adds conditional node parameters to output - -.. autoclass:: cuda.cuda.CUuserObject_flags - - .. autoattribute:: cuda.cuda.CUuserObject_flags.CU_USER_OBJECT_NO_DESTRUCTOR_SYNC - - - Indicates the destructor execution is not synchronized by any CUDA handle. - -.. autoclass:: cuda.cuda.CUuserObjectRetain_flags - - .. autoattribute:: cuda.cuda.CUuserObjectRetain_flags.CU_GRAPH_USER_OBJECT_MOVE - - - Transfer references from the caller rather than creating new references. - -.. autoclass:: cuda.cuda.CUgraphInstantiate_flags - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH - - - Automatically free memory allocated in a graph before relaunching. - - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD - - - Automatically upload the graph after instantiation. Only supported by :py:obj:`~.cuGraphInstantiateWithParams`. The upload will be performed using the stream provided in `instantiateParams`. - - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH - - - Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. - - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY - - - Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. - -.. autoclass:: cuda.cuda.CUdeviceNumaConfig - - .. autoattribute:: cuda.cuda.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NONE - - - The GPU is not a NUMA node - - - .. autoattribute:: cuda.cuda.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NUMA_NODE - - - The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID - -.. autoclass:: cuda.cuda.CUeglFrameType - - .. autoattribute:: cuda.cuda.CUeglFrameType.CU_EGL_FRAME_TYPE_ARRAY - - - Frame type CUDA array - - - .. autoattribute:: cuda.cuda.CUeglFrameType.CU_EGL_FRAME_TYPE_PITCH - - - Frame type pointer - -.. autoclass:: cuda.cuda.CUeglResourceLocationFlags - - .. autoattribute:: cuda.cuda.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_SYSMEM - - - Resource location sysmem - - - .. 
autoattribute:: cuda.cuda.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_VIDMEM - - - Resource location vidmem - -.. autoclass:: cuda.cuda.CUeglColorFormat - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR - - - Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGB - - - R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGR - - - R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ARGB - - - R/G/B/A four channels in one surface with BGRA byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGBA - - - R/G/B/A four channels in one surface with ABGR byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_L - - - single luminance channel in one surface. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_R - - - single color channel in one surface. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR - - - Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_422 - - - Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_422 - - - Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ABGR - - - R/G/B/A four channels in one surface with RGBA byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGRA - - - R/G/B/A four channels in one surface with ARGB byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_A - - - Alpha color format - one channel in one surface. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RG - - - R/G color format - two channels in one surface with GR byte ordering - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV - - - Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY_ER - - - Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_ER - - - Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_ER - - - Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU_ER - - - Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV_ER - - - Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER - - - Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER - - - Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RGGB - - - Bayer format - one channel in one surface with interleaved RGGB ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BGGR - - - Bayer format - one channel in one surface with interleaved BGGR ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GRBG - - - Bayer format - one channel in one surface with interleaved GRBG ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GBRG - - - Bayer format - one channel in one surface with interleaved GBRG ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_RGGB - - - Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_BGGR - - - Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GRBG - - - Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GBRG - - - Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RGGB - - - Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BGGR - - - Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GRBG - - - Bayer12 format - one channel in one surface with interleaved GRBG ordering. 
Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GBRG - - - Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_RGGB - - - Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_BGGR - - - Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GRBG - - - Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GBRG - - - Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_RGGB - - - Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_BGGR - - - Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GRBG - - - Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GBRG - - - Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR - - - Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BCCR - - - Bayer format - one channel in one surface with interleaved BCCR ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RCCB - - - Bayer format - one channel in one surface with interleaved RCCB ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CRBC - - - Bayer format - one channel in one surface with interleaved CRBC ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CBRC - - - Bayer format - one channel in one surface with interleaved CBRC ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_CCCC - - - Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BCCR - - - Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RCCB - - - Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CRBC - - - Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CBRC - - - Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CCCC - - - Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y - - - Color format for single Y plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 - - - Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 - - - Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 - - - Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 - - - Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 - - - Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR - - - Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 - - - Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_ER - - - Extended Range Color format for single Y plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_709_ER - - - Extended Range Color format for single Y plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_709_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_709_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA - - - Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV - - - Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU - - - Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY - - - Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER - - - Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER - - - Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_MAX - -.. autoclass:: cuda.cuda.CUdeviceptr_v2 -.. autoclass:: cuda.cuda.CUdeviceptr -.. autoclass:: cuda.cuda.CUdevice_v1 -.. autoclass:: cuda.cuda.CUdevice -.. autoclass:: cuda.cuda.CUcontext -.. autoclass:: cuda.cuda.CUmodule -.. autoclass:: cuda.cuda.CUfunction -.. autoclass:: cuda.cuda.CUlibrary -.. autoclass:: cuda.cuda.CUkernel -.. autoclass:: cuda.cuda.CUarray -.. autoclass:: cuda.cuda.CUmipmappedArray -.. autoclass:: cuda.cuda.CUtexref -.. autoclass:: cuda.cuda.CUsurfref -.. autoclass:: cuda.cuda.CUevent -.. autoclass:: cuda.cuda.CUstream -.. autoclass:: cuda.cuda.CUgraphicsResource -.. autoclass:: cuda.cuda.CUtexObject_v1 -.. autoclass:: cuda.cuda.CUtexObject -.. autoclass:: cuda.cuda.CUsurfObject_v1 -.. autoclass:: cuda.cuda.CUsurfObject -.. autoclass:: cuda.cuda.CUexternalMemory -.. autoclass:: cuda.cuda.CUexternalSemaphore -.. autoclass:: cuda.cuda.CUgraph -.. autoclass:: cuda.cuda.CUgraphNode -.. autoclass:: cuda.cuda.CUgraphExec -.. autoclass:: cuda.cuda.CUmemoryPool -.. autoclass:: cuda.cuda.CUuserObject -.. autoclass:: cuda.cuda.CUgraphConditionalHandle -.. autoclass:: cuda.cuda.CUgraphDeviceNode -.. autoclass:: cuda.cuda.CUasyncCallbackHandle -.. autoclass:: cuda.cuda.CUgreenCtx -.. autoclass:: cuda.cuda.CUuuid -.. autoclass:: cuda.cuda.CUmemFabricHandle_v1 -.. autoclass:: cuda.cuda.CUmemFabricHandle -.. autoclass:: cuda.cuda.CUipcEventHandle_v1 -.. autoclass:: cuda.cuda.CUipcEventHandle -.. autoclass:: cuda.cuda.CUipcMemHandle_v1 -.. autoclass:: cuda.cuda.CUipcMemHandle -.. autoclass:: cuda.cuda.CUstreamBatchMemOpParams_v1 -.. autoclass:: cuda.cuda.CUstreamBatchMemOpParams -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUasyncNotificationInfo -.. autoclass:: cuda.cuda.CUasyncCallback -.. autoclass:: cuda.cuda.CUdevprop_v1 -.. autoclass:: cuda.cuda.CUdevprop -.. autoclass:: cuda.cuda.CUlinkState -.. autoclass:: cuda.cuda.CUhostFn -.. autoclass:: cuda.cuda.CUaccessPolicyWindow_v1 -.. autoclass:: cuda.cuda.CUaccessPolicyWindow -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3 -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_CONDITIONAL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUgraphEdgeData -.. autoclass:: cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS -.. 
autoclass:: cuda.cuda.CUlaunchMemSyncDomainMap -.. autoclass:: cuda.cuda.CUlaunchAttributeValue -.. autoclass:: cuda.cuda.CUlaunchAttribute -.. autoclass:: cuda.cuda.CUlaunchConfig -.. autoclass:: cuda.cuda.CUkernelNodeAttrID -.. autoclass:: cuda.cuda.CUkernelNodeAttrValue_v1 -.. autoclass:: cuda.cuda.CUkernelNodeAttrValue -.. autoclass:: cuda.cuda.CUstreamAttrID -.. autoclass:: cuda.cuda.CUstreamAttrValue_v1 -.. autoclass:: cuda.cuda.CUstreamAttrValue -.. autoclass:: cuda.cuda.CUexecAffinitySmCount_v1 -.. autoclass:: cuda.cuda.CUexecAffinitySmCount -.. autoclass:: cuda.cuda.CUexecAffinityParam_v1 -.. autoclass:: cuda.cuda.CUexecAffinityParam -.. autoclass:: cuda.cuda.CUctxCigParam -.. autoclass:: cuda.cuda.CUctxCreateParams -.. autoclass:: cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable -.. autoclass:: cuda.cuda.CUstreamCallback -.. autoclass:: cuda.cuda.CUoccupancyB2DSize -.. autoclass:: cuda.cuda.CUDA_MEMCPY2D_v2 -.. autoclass:: cuda.cuda.CUDA_MEMCPY2D -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_v2 -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_PEER_v1 -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_PEER -.. autoclass:: cuda.cuda.CUDA_MEMCPY_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_ARRAY_DESCRIPTOR_v2 -.. autoclass:: cuda.cuda.CUDA_ARRAY_DESCRIPTOR -.. autoclass:: cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_v2 -.. autoclass:: cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR -.. autoclass:: cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_v1 -.. autoclass:: cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES -.. autoclass:: cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 -.. autoclass:: cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS -.. autoclass:: cuda.cuda.CUDA_RESOURCE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_RESOURCE_DESC -.. autoclass:: cuda.cuda.CUDA_TEXTURE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_TEXTURE_DESC -.. autoclass:: cuda.cuda.CUDA_RESOURCE_VIEW_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_RESOURCE_VIEW_DESC -.. autoclass:: cuda.cuda.CUtensorMap -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS -.. autoclass:: cuda.cuda.CUDA_LAUNCH_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_LAUNCH_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUmemGenericAllocationHandle_v1 -.. autoclass:: cuda.cuda.CUmemGenericAllocationHandle -.. autoclass:: cuda.cuda.CUarrayMapInfo_v1 -.. autoclass:: cuda.cuda.CUarrayMapInfo -.. autoclass:: cuda.cuda.CUmemLocation_v1 -.. 
autoclass:: cuda.cuda.CUmemLocation -.. autoclass:: cuda.cuda.CUmemAllocationProp_v1 -.. autoclass:: cuda.cuda.CUmemAllocationProp -.. autoclass:: cuda.cuda.CUmulticastObjectProp_v1 -.. autoclass:: cuda.cuda.CUmulticastObjectProp -.. autoclass:: cuda.cuda.CUmemAccessDesc_v1 -.. autoclass:: cuda.cuda.CUmemAccessDesc -.. autoclass:: cuda.cuda.CUgraphExecUpdateResultInfo_v1 -.. autoclass:: cuda.cuda.CUgraphExecUpdateResultInfo -.. autoclass:: cuda.cuda.CUmemPoolProps_v1 -.. autoclass:: cuda.cuda.CUmemPoolProps -.. autoclass:: cuda.cuda.CUmemPoolPtrExportData_v1 -.. autoclass:: cuda.cuda.CUmemPoolPtrExportData -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS -.. autoclass:: cuda.cuda.CUgraphNodeParams -.. autoclass:: cuda.cuda.CUeglFrame_v1 -.. autoclass:: cuda.cuda.CUeglFrame -.. autoclass:: cuda.cuda.CUeglStreamConnection -.. autoattribute:: cuda.cuda.CUDA_VERSION - - CUDA API version number - -.. autoattribute:: cuda.cuda.CU_UUID_HAS_BEEN_DEFINED - - CUDA UUID types - -.. autoattribute:: cuda.cuda.CU_IPC_HANDLE_SIZE - - CUDA IPC handle size - -.. autoattribute:: cuda.cuda.CU_STREAM_LEGACY - - Legacy stream handle - - - - Stream handle that can be passed as a CUstream to use an implicit stream with legacy synchronization behavior. - - - - See details of the stream synchronization behavior. - -.. autoattribute:: cuda.cuda.CU_STREAM_PER_THREAD - - Per-thread stream handle - - - - Stream handle that can be passed as a CUstream to use an implicit stream with per-thread synchronization behavior. - - - - See details of the stream synchronization behavior. - -.. autoattribute:: cuda.cuda.CU_COMPUTE_ACCELERATED_TARGET_BASE -.. autoattribute:: cuda.cuda.CUDA_CB -.. autoattribute:: cuda.cuda.CU_GRAPH_COND_ASSIGN_DEFAULT - - Conditional node handle flags. The default value is applied when the graph is launched. - -.. autoattribute:: cuda.cuda.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT - - This port activates when the kernel has finished executing. - -.. autoattribute:: cuda.cuda.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC - - This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC`. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT`. - -.. autoattribute:: cuda.cuda.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER - - This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT`. - -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT -.. 
autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_PRIORITY -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN -.. autoattribute:: cuda.cuda.CU_MEMHOSTALLOC_PORTABLE - - If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostAlloc()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTALLOC_DEVICEMAP - - If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostAlloc()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTALLOC_WRITECOMBINED - - If set, host memory is allocated as write-combined - fast to write, faster to DMA, slow to read except via SSE4 streaming load instruction (MOVNTDQA). Flag for :py:obj:`~.cuMemHostAlloc()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_PORTABLE - - If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostRegister()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_DEVICEMAP - - If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostRegister()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_IOMEMORY - - If set, the passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device. On Windows the flag is a no-op. On Linux that memory is marked as non cache-coherent for the GPU and is expected to be physically contiguous. It may return :py:obj:`~.CUDA_ERROR_NOT_PERMITTED` if run as an unprivileged user, :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` on older Linux kernel versions. On all other platforms, it is not supported and :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` is returned. Flag for :py:obj:`~.cuMemHostRegister()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_READ_ONLY - - If set, the passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without :py:obj:`~.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES`, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED`. Using this flag with a current context associated with a device that does not have this attribute set will cause :py:obj:`~.cuMemHostRegister` to error with :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED`. - -.. autoattribute:: cuda.cuda.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL - - Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers - -.. autoattribute:: cuda.cuda.CU_TENSOR_MAP_NUM_QWORDS - - Size of tensor map descriptor - -.. autoattribute:: cuda.cuda.CUDA_EXTERNAL_MEMORY_DEDICATED - - Indicates that the external memory object is a dedicated resource - -.. 
autoattribute:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC - - When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -.. autoattribute:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC - - When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS` contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -.. autoattribute:: cuda.cuda.CUDA_NVSCISYNC_ATTR_SIGNAL - - When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that application needs signaler specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cuda.CUDA_NVSCISYNC_ATTR_WAIT - - When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that application needs waiter specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cuda.CU_MEM_CREATE_USAGE_TILE_POOL - - This flag if set indicates that the memory will be used as a tile pool. - -.. autoattribute:: cuda.cuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC - - If set, each kernel launched as part of :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. - -.. autoattribute:: cuda.cuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC - - If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_LAYERED - - If set, the CUDA array is a collection of layers, where each layer is either a 1D or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number of layers, not the depth of a 3D array. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_2DARRAY - - Deprecated, use CUDA_ARRAY3D_LAYERED - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_SURFACE_LDST - - This flag must be set in order to bind a surface reference to the CUDA array - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_CUBEMAP - - If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The width of such a CUDA array must be equal to its height, and Depth must be six. If :py:obj:`~.CUDA_ARRAY3D_LAYERED` flag is also set, then the CUDA array is a collection of cubemaps and Depth must be a multiple of six. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_TEXTURE_GATHER - - This flag must be set in order to perform texture gather operations on a CUDA array. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_DEPTH_TEXTURE - - This flag if set indicates that the CUDA array is a DEPTH_TEXTURE. - -.. 
autoattribute:: cuda.cuda.CUDA_ARRAY3D_COLOR_ATTACHMENT - - This flag indicates that the CUDA array may be bound as a color target in an external graphics API - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_SPARSE - - This flag if set indicates that the CUDA array or CUDA mipmapped array is a sparse CUDA array or CUDA mipmapped array respectively - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_DEFERRED_MAPPING - - This flag if set indicates that the CUDA array or CUDA mipmapped array will allow deferred memory mapping - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE - - This flag indicates that the CUDA array will be used for hardware accelerated video encode/decode operations. - -.. autoattribute:: cuda.cuda.CU_TRSA_OVERRIDE_FORMAT - - Override the texref format with a format inferred from the array. Flag for :py:obj:`~.cuTexRefSetArray()` - -.. autoattribute:: cuda.cuda.CU_TRSF_READ_AS_INTEGER - - Read the texture as integers rather than promoting the values to floats in the range [0,1]. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_NORMALIZED_COORDINATES - - Use normalized texture coordinates in the range [0,1) instead of [0,dim). Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_SRGB - - Perform sRGB->linear conversion during texture read. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION - - Disable any trilinear filtering optimizations. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_SEAMLESS_CUBEMAP - - Enable seamless cube map filtering. Flag for :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_END_AS_INT - - C++ compile time constant for CU_LAUNCH_PARAM_END - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_END - - End of array terminator for the `extra` parameter to :py:obj:`~.cuLaunchKernel` - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT - - C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_POINTER - - Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a buffer containing all kernel parameters used for launching kernel `f`. This buffer needs to honor all alignment/padding requirements of the individual parameters. If :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not also specified in the `extra` array, then :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` will have no effect. - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT - - C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_SIZE - - Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a size_t which contains the size of the buffer specified with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER`. It is required that :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` also be specified in the `extra` array if the value associated with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not zero. - -.. autoattribute:: cuda.cuda.CU_PARAM_TR_DEFAULT - - For texture references loaded into the module, use default texunit from texture reference. - -.. autoattribute:: cuda.cuda.CU_DEVICE_CPU - - Device that represents the CPU - -.. 
autoattribute:: cuda.cuda.CU_DEVICE_INVALID - - Device that represents an invalid device - -.. autoattribute:: cuda.cuda.MAX_PLANES - - Maximum number of planes per frame - -.. autoattribute:: cuda.cuda.CUDA_EGL_INFINITE_TIMEOUT - - Indicates that the timeout for :py:obj:`~.cuEGLStreamConsumerAcquireFrame` is infinite. - - -Error Handling -------------- - -This section describes the error handling functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGetErrorString -.. autofunction:: cuda.cuda.cuGetErrorName - -Initialization -------------- - -This section describes the initialization functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuInit - -Version Management ------------------ - -This section describes the version management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuDriverGetVersion - -Device Management ----------------- - -This section describes the device management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuDeviceGet -.. autofunction:: cuda.cuda.cuDeviceGetCount -.. autofunction:: cuda.cuda.cuDeviceGetName -.. autofunction:: cuda.cuda.cuDeviceGetUuid -.. autofunction:: cuda.cuda.cuDeviceGetUuid_v2 -.. autofunction:: cuda.cuda.cuDeviceGetLuid -.. autofunction:: cuda.cuda.cuDeviceTotalMem -.. autofunction:: cuda.cuda.cuDeviceGetTexture1DLinearMaxWidth -.. autofunction:: cuda.cuda.cuDeviceGetAttribute -.. autofunction:: cuda.cuda.cuDeviceGetNvSciSyncAttributes -.. autofunction:: cuda.cuda.cuDeviceSetMemPool -.. autofunction:: cuda.cuda.cuDeviceGetMemPool -.. autofunction:: cuda.cuda.cuDeviceGetDefaultMemPool -.. autofunction:: cuda.cuda.cuDeviceGetExecAffinitySupport -.. autofunction:: cuda.cuda.cuFlushGPUDirectRDMAWrites - -Primary Context Management -------------------------- - -This section describes the primary context management functions of the low-level CUDA driver application programming interface. - - - -The primary context is unique per device and shared with the CUDA runtime API. These functions allow integration with other libraries using CUDA. - -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxRetain -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxRelease -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxSetFlags -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxGetState -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxReset - -Context Management ------------------ - -This section describes the context management functions of the low-level CUDA driver application programming interface. - - - -Please note that some functions are described in the Primary Context Management section. - -.. autofunction:: cuda.cuda.cuCtxCreate -.. autofunction:: cuda.cuda.cuCtxCreate_v3 -.. autofunction:: cuda.cuda.cuCtxCreate_v4 -.. autofunction:: cuda.cuda.cuCtxDestroy -.. autofunction:: cuda.cuda.cuCtxPushCurrent -.. autofunction:: cuda.cuda.cuCtxPopCurrent -.. autofunction:: cuda.cuda.cuCtxSetCurrent -.. autofunction:: cuda.cuda.cuCtxGetCurrent -.. autofunction:: cuda.cuda.cuCtxGetDevice -.. autofunction:: cuda.cuda.cuCtxGetFlags -.. autofunction:: cuda.cuda.cuCtxSetFlags -.. autofunction:: cuda.cuda.cuCtxGetId -.. autofunction:: cuda.cuda.cuCtxSynchronize -.. autofunction:: cuda.cuda.cuCtxSetLimit -.. autofunction:: cuda.cuda.cuCtxGetLimit -.. autofunction:: cuda.cuda.cuCtxGetCacheConfig -.. autofunction:: cuda.cuda.cuCtxSetCacheConfig -.. 
autofunction:: cuda.cuda.cuCtxGetApiVersion -.. autofunction:: cuda.cuda.cuCtxGetStreamPriorityRange -.. autofunction:: cuda.cuda.cuCtxResetPersistingL2Cache -.. autofunction:: cuda.cuda.cuCtxGetExecAffinity -.. autofunction:: cuda.cuda.cuCtxRecordEvent -.. autofunction:: cuda.cuda.cuCtxWaitEvent - -Module Management ------------------ - -This section describes the module management functions of the low-level CUDA driver application programming interface. - -.. autoclass:: cuda.cuda.CUmoduleLoadingMode - - .. autoattribute:: cuda.cuda.CUmoduleLoadingMode.CU_MODULE_EAGER_LOADING - - - Lazy Kernel Loading is not enabled - - - .. autoattribute:: cuda.cuda.CUmoduleLoadingMode.CU_MODULE_LAZY_LOADING - - - Lazy Kernel Loading is enabled - -.. autofunction:: cuda.cuda.cuModuleLoad -.. autofunction:: cuda.cuda.cuModuleLoadData -.. autofunction:: cuda.cuda.cuModuleLoadDataEx -.. autofunction:: cuda.cuda.cuModuleLoadFatBinary -.. autofunction:: cuda.cuda.cuModuleUnload -.. autofunction:: cuda.cuda.cuModuleGetLoadingMode -.. autofunction:: cuda.cuda.cuModuleGetFunction -.. autofunction:: cuda.cuda.cuModuleGetFunctionCount -.. autofunction:: cuda.cuda.cuModuleEnumerateFunctions -.. autofunction:: cuda.cuda.cuModuleGetGlobal -.. autofunction:: cuda.cuda.cuLinkCreate -.. autofunction:: cuda.cuda.cuLinkAddData -.. autofunction:: cuda.cuda.cuLinkAddFile -.. autofunction:: cuda.cuda.cuLinkComplete -.. autofunction:: cuda.cuda.cuLinkDestroy - -Library Management ------------------- - -This section describes the library management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuLibraryLoadData -.. autofunction:: cuda.cuda.cuLibraryLoadFromFile -.. autofunction:: cuda.cuda.cuLibraryUnload -.. autofunction:: cuda.cuda.cuLibraryGetKernel -.. autofunction:: cuda.cuda.cuLibraryGetKernelCount -.. autofunction:: cuda.cuda.cuLibraryEnumerateKernels -.. autofunction:: cuda.cuda.cuLibraryGetModule -.. autofunction:: cuda.cuda.cuKernelGetFunction -.. autofunction:: cuda.cuda.cuKernelGetLibrary -.. autofunction:: cuda.cuda.cuLibraryGetGlobal -.. autofunction:: cuda.cuda.cuLibraryGetManaged -.. autofunction:: cuda.cuda.cuLibraryGetUnifiedFunction -.. autofunction:: cuda.cuda.cuKernelGetAttribute -.. autofunction:: cuda.cuda.cuKernelSetAttribute -.. autofunction:: cuda.cuda.cuKernelSetCacheConfig -.. autofunction:: cuda.cuda.cuKernelGetName -.. autofunction:: cuda.cuda.cuKernelGetParamInfo - -Memory Management ------------------ - -This section describes the memory management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuMemGetInfo -.. autofunction:: cuda.cuda.cuMemAlloc -.. autofunction:: cuda.cuda.cuMemAllocPitch -.. autofunction:: cuda.cuda.cuMemFree -.. autofunction:: cuda.cuda.cuMemGetAddressRange -.. autofunction:: cuda.cuda.cuMemAllocHost -.. autofunction:: cuda.cuda.cuMemFreeHost -.. autofunction:: cuda.cuda.cuMemHostAlloc -.. autofunction:: cuda.cuda.cuMemHostGetDevicePointer -.. autofunction:: cuda.cuda.cuMemHostGetFlags -.. autofunction:: cuda.cuda.cuMemAllocManaged -.. autofunction:: cuda.cuda.cuDeviceRegisterAsyncNotification -.. autofunction:: cuda.cuda.cuDeviceUnregisterAsyncNotification -.. autofunction:: cuda.cuda.cuDeviceGetByPCIBusId -.. autofunction:: cuda.cuda.cuDeviceGetPCIBusId -.. autofunction:: cuda.cuda.cuIpcGetEventHandle -.. autofunction:: cuda.cuda.cuIpcOpenEventHandle -.. autofunction:: cuda.cuda.cuIpcGetMemHandle -.. autofunction:: cuda.cuda.cuIpcOpenMemHandle -.. 
autofunction:: cuda.cuda.cuIpcCloseMemHandle -.. autofunction:: cuda.cuda.cuMemHostRegister -.. autofunction:: cuda.cuda.cuMemHostUnregister -.. autofunction:: cuda.cuda.cuMemcpy -.. autofunction:: cuda.cuda.cuMemcpyPeer -.. autofunction:: cuda.cuda.cuMemcpyHtoD -.. autofunction:: cuda.cuda.cuMemcpyDtoH -.. autofunction:: cuda.cuda.cuMemcpyDtoD -.. autofunction:: cuda.cuda.cuMemcpyDtoA -.. autofunction:: cuda.cuda.cuMemcpyAtoD -.. autofunction:: cuda.cuda.cuMemcpyHtoA -.. autofunction:: cuda.cuda.cuMemcpyAtoH -.. autofunction:: cuda.cuda.cuMemcpyAtoA -.. autofunction:: cuda.cuda.cuMemcpy2D -.. autofunction:: cuda.cuda.cuMemcpy2DUnaligned -.. autofunction:: cuda.cuda.cuMemcpy3D -.. autofunction:: cuda.cuda.cuMemcpy3DPeer -.. autofunction:: cuda.cuda.cuMemcpyAsync -.. autofunction:: cuda.cuda.cuMemcpyPeerAsync -.. autofunction:: cuda.cuda.cuMemcpyHtoDAsync -.. autofunction:: cuda.cuda.cuMemcpyDtoHAsync -.. autofunction:: cuda.cuda.cuMemcpyDtoDAsync -.. autofunction:: cuda.cuda.cuMemcpyHtoAAsync -.. autofunction:: cuda.cuda.cuMemcpyAtoHAsync -.. autofunction:: cuda.cuda.cuMemcpy2DAsync -.. autofunction:: cuda.cuda.cuMemcpy3DAsync -.. autofunction:: cuda.cuda.cuMemcpy3DPeerAsync -.. autofunction:: cuda.cuda.cuMemsetD8 -.. autofunction:: cuda.cuda.cuMemsetD16 -.. autofunction:: cuda.cuda.cuMemsetD32 -.. autofunction:: cuda.cuda.cuMemsetD2D8 -.. autofunction:: cuda.cuda.cuMemsetD2D16 -.. autofunction:: cuda.cuda.cuMemsetD2D32 -.. autofunction:: cuda.cuda.cuMemsetD8Async -.. autofunction:: cuda.cuda.cuMemsetD16Async -.. autofunction:: cuda.cuda.cuMemsetD32Async -.. autofunction:: cuda.cuda.cuMemsetD2D8Async -.. autofunction:: cuda.cuda.cuMemsetD2D16Async -.. autofunction:: cuda.cuda.cuMemsetD2D32Async -.. autofunction:: cuda.cuda.cuArrayCreate -.. autofunction:: cuda.cuda.cuArrayGetDescriptor -.. autofunction:: cuda.cuda.cuArrayGetSparseProperties -.. autofunction:: cuda.cuda.cuMipmappedArrayGetSparseProperties -.. autofunction:: cuda.cuda.cuArrayGetMemoryRequirements -.. autofunction:: cuda.cuda.cuMipmappedArrayGetMemoryRequirements -.. autofunction:: cuda.cuda.cuArrayGetPlane -.. autofunction:: cuda.cuda.cuArrayDestroy -.. autofunction:: cuda.cuda.cuArray3DCreate -.. autofunction:: cuda.cuda.cuArray3DGetDescriptor -.. autofunction:: cuda.cuda.cuMipmappedArrayCreate -.. autofunction:: cuda.cuda.cuMipmappedArrayGetLevel -.. autofunction:: cuda.cuda.cuMipmappedArrayDestroy -.. autofunction:: cuda.cuda.cuMemGetHandleForAddressRange - -Virtual Memory Management -------------------------- - -This section describes the virtual memory management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuMemAddressReserve -.. autofunction:: cuda.cuda.cuMemAddressFree -.. autofunction:: cuda.cuda.cuMemCreate -.. autofunction:: cuda.cuda.cuMemRelease -.. autofunction:: cuda.cuda.cuMemMap -.. autofunction:: cuda.cuda.cuMemMapArrayAsync -.. autofunction:: cuda.cuda.cuMemUnmap -.. autofunction:: cuda.cuda.cuMemSetAccess -.. autofunction:: cuda.cuda.cuMemGetAccess -.. autofunction:: cuda.cuda.cuMemExportToShareableHandle -.. autofunction:: cuda.cuda.cuMemImportFromShareableHandle -.. autofunction:: cuda.cuda.cuMemGetAllocationGranularity -.. autofunction:: cuda.cuda.cuMemGetAllocationPropertiesFromHandle -.. autofunction:: cuda.cuda.cuMemRetainAllocationHandle - -Stream Ordered Memory Allocator -------------------------------- - -This section describes the stream ordered memory allocator exposed by the low-level CUDA driver application programming interface. 
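As a quick orientation before the overview, a minimal sketch of a stream-ordered allocate/free cycle through these bindings; it assumes a single device and elides the CUresult that real code should check after every call:

.. code-block:: python

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, device = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, device)

    err, stream = cuda.cuStreamCreate(0)
    # Allocate 1 MiB in stream order; the pointer may only be used by
    # work that is ordered after this allocation in the stream.
    err, dptr = cuda.cuMemAllocAsync(1 << 20, stream)
    # ... enqueue kernels or copies that use dptr on `stream` here ...
    # Free in stream order; any access ordered after this point is a
    # use-after-free.
    err, = cuda.cuMemFreeAsync(dptr, stream)
    err, = cuda.cuStreamSynchronize(stream)
    err, = cuda.cuStreamDestroy(stream)
    err, = cuda.cuCtxDestroy(ctx)

Note that cuMemFreeAsync only enqueues the free; it is the cuStreamSynchronize that guarantees the stream's work, and therefore the free, has completed.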
- - - - - -**Overview** - - - -The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use-before-allocation / use-after-free error will cause undefined behavior. - -The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. - - - - - -**Supported Platforms** - - - -Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED. - -.. autofunction:: cuda.cuda.cuMemFreeAsync -.. autofunction:: cuda.cuda.cuMemAllocAsync -.. autofunction:: cuda.cuda.cuMemPoolTrimTo -.. autofunction:: cuda.cuda.cuMemPoolSetAttribute -.. autofunction:: cuda.cuda.cuMemPoolGetAttribute -.. autofunction:: cuda.cuda.cuMemPoolSetAccess -.. autofunction:: cuda.cuda.cuMemPoolGetAccess -.. autofunction:: cuda.cuda.cuMemPoolCreate -.. autofunction:: cuda.cuda.cuMemPoolDestroy -.. autofunction:: cuda.cuda.cuMemAllocFromPoolAsync -.. autofunction:: cuda.cuda.cuMemPoolExportToShareableHandle -.. autofunction:: cuda.cuda.cuMemPoolImportFromShareableHandle -.. autofunction:: cuda.cuda.cuMemPoolExportPointer -.. autofunction:: cuda.cuda.cuMemPoolImportPointer - -Multicast Object Management --------------------------- - -This section describes the CUDA multicast object operations exposed by the low-level CUDA driver application programming interface. - - - - - -**Overview** - - - -A multicast object created via cuMulticastCreate enables certain memory operations to be broadcast to a team of devices. Devices can be added to a multicast object via cuMulticastAddDevice. Memory can be bound on each participating device via either cuMulticastBindMem or cuMulticastBindAddr. Multicast objects can be mapped into a device's virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess). - - - - - -**Supported Platforms** - - - -Support for multicast on a specific device can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED. - -.. autofunction:: cuda.cuda.cuMulticastCreate -.. autofunction:: cuda.cuda.cuMulticastAddDevice -.. autofunction:: cuda.cuda.cuMulticastBindMem -.. autofunction:: cuda.cuda.cuMulticastBindAddr -.. autofunction:: cuda.cuda.cuMulticastUnbind -.. autofunction:: cuda.cuda.cuMulticastGetGranularity - -Unified Addressing ------------------ - -This section describes the unified addressing functions of the low-level CUDA driver application programming interface. - - - - - -**Overview** - - - -CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). 
- - - - - -**Supported Platforms** - - - -Whether or not a device supports unified addressing may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING. - -Unified addressing is automatically enabled in 64-bit processes - - - - - -**Looking Up Information from Pointer Values** - - - -It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cuPointerGetAttribute() - -Since pointers are unique, it is not necessary to specify information about the pointers specified to the various copy functions in the CUDA API. The function cuMemcpy() may be used to perform a copy between two pointers, ignoring whether they point to host or device memory (making cuMemcpyHtoD(), cuMemcpyDtoD(), and cuMemcpyDtoH() unnecessary for devices supporting unified addressing). For multidimensional copies, the memory type CU_MEMORYTYPE_UNIFIED may be used to specify that the CUDA driver should infer the location of the pointer from its value. - - - - - -**Automatic Mapping of Host Allocated Host Memory** - - - -All host memory allocated in all contexts using cuMemAllocHost() and cuMemHostAlloc() is always directly accessible from all contexts on all devices that support unified addressing. This is the case regardless of whether or not the flags CU_MEMHOSTALLOC_PORTABLE and CU_MEMHOSTALLOC_DEVICEMAP are specified. - -The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host, so it is not necessary to call cuMemHostGetDevicePointer() to get the device pointer for these allocations. - -Note that this is not the case for memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below. - - - - - -**Automatic Registration of Peer Memory** - - - -Upon enabling direct access from a context that supports unified addressing to another peer context that supports unified addressing using cuCtxEnablePeerAccess() all memory allocated in the peer context using cuMemAlloc() and cuMemAllocPitch() will immediately be accessible by the current context. The device pointer value through which any peer memory may be accessed in the current context is the same pointer value through which that memory may be accessed in the peer context. - - - - - -**Exceptions, Disjoint Addressing** - - - -Not all memory may be accessed on devices through the same pointer value through which they are accessed on the host. These exceptions are host memory registered using cuMemHostRegister() and host memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all contexts that support unified addressing. - -This device address may be queried using cuMemHostGetDevicePointer() when a context using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory through cuMemcpy() and similar functions using the CU_MEMORYTYPE_UNIFIED memory type. - -.. autofunction:: cuda.cuda.cuPointerGetAttribute -.. 
autofunction:: cuda.cuda.cuMemPrefetchAsync -.. autofunction:: cuda.cuda.cuMemPrefetchAsync_v2 -.. autofunction:: cuda.cuda.cuMemAdvise -.. autofunction:: cuda.cuda.cuMemAdvise_v2 -.. autofunction:: cuda.cuda.cuMemRangeGetAttribute -.. autofunction:: cuda.cuda.cuMemRangeGetAttributes -.. autofunction:: cuda.cuda.cuPointerSetAttribute -.. autofunction:: cuda.cuda.cuPointerGetAttributes - -Stream Management ------------------ - -This section describes the stream management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuStreamCreate -.. autofunction:: cuda.cuda.cuStreamCreateWithPriority -.. autofunction:: cuda.cuda.cuStreamGetPriority -.. autofunction:: cuda.cuda.cuStreamGetFlags -.. autofunction:: cuda.cuda.cuStreamGetId -.. autofunction:: cuda.cuda.cuStreamGetCtx -.. autofunction:: cuda.cuda.cuStreamGetCtx_v2 -.. autofunction:: cuda.cuda.cuStreamWaitEvent -.. autofunction:: cuda.cuda.cuStreamAddCallback -.. autofunction:: cuda.cuda.cuStreamBeginCapture -.. autofunction:: cuda.cuda.cuStreamBeginCaptureToGraph -.. autofunction:: cuda.cuda.cuThreadExchangeStreamCaptureMode -.. autofunction:: cuda.cuda.cuStreamEndCapture -.. autofunction:: cuda.cuda.cuStreamIsCapturing -.. autofunction:: cuda.cuda.cuStreamGetCaptureInfo -.. autofunction:: cuda.cuda.cuStreamGetCaptureInfo_v3 -.. autofunction:: cuda.cuda.cuStreamUpdateCaptureDependencies -.. autofunction:: cuda.cuda.cuStreamUpdateCaptureDependencies_v2 -.. autofunction:: cuda.cuda.cuStreamAttachMemAsync -.. autofunction:: cuda.cuda.cuStreamQuery -.. autofunction:: cuda.cuda.cuStreamSynchronize -.. autofunction:: cuda.cuda.cuStreamDestroy -.. autofunction:: cuda.cuda.cuStreamCopyAttributes -.. autofunction:: cuda.cuda.cuStreamGetAttribute -.. autofunction:: cuda.cuda.cuStreamSetAttribute - -Event Management ----------------- - -This section describes the event management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuEventCreate -.. autofunction:: cuda.cuda.cuEventRecord -.. autofunction:: cuda.cuda.cuEventRecordWithFlags -.. autofunction:: cuda.cuda.cuEventQuery -.. autofunction:: cuda.cuda.cuEventSynchronize -.. autofunction:: cuda.cuda.cuEventDestroy -.. autofunction:: cuda.cuda.cuEventElapsedTime - -External Resource Interoperability ----------------------------------- - -This section describes the external resource interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuImportExternalMemory -.. autofunction:: cuda.cuda.cuExternalMemoryGetMappedBuffer -.. autofunction:: cuda.cuda.cuExternalMemoryGetMappedMipmappedArray -.. autofunction:: cuda.cuda.cuDestroyExternalMemory -.. autofunction:: cuda.cuda.cuImportExternalSemaphore -.. autofunction:: cuda.cuda.cuSignalExternalSemaphoresAsync -.. autofunction:: cuda.cuda.cuWaitExternalSemaphoresAsync -.. autofunction:: cuda.cuda.cuDestroyExternalSemaphore - -Stream Memory Operations ------------------------- - -This section describes the stream memory operations of the low-level CUDA driver application programming interface. - - - -Support for the CU_STREAM_WAIT_VALUE_NOR flag can be queried with ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. - - - -Support for the cuStreamWriteValue64() and cuStreamWaitValue64() functions, as well as for the CU_STREAM_MEM_OP_WAIT_VALUE_64 and CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. 
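The capability queries described above translate directly to Python. A minimal sketch, assuming the flag enums are exposed as ``CUstreamWriteValue_flags`` and ``CUstreamWaitValue_flags`` and that the device supports stream memory operations; the write/wait pair is self-satisfying and serves only to show the call shapes:

.. code-block:: python

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)

    # Query 64-bit stream memory op support, as described above.
    err, has_64bit = cuda.cuDeviceGetAttribute(
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS, dev)

    err, stream = cuda.cuStreamCreate(0)
    err, flag = cuda.cuMemAlloc(4)  # these operations require device pointers
    err, = cuda.cuStreamWriteValue32(
        stream, flag, 1, cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT)
    err, = cuda.cuStreamWaitValue32(
        stream, flag, 1, cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ)
    err, = cuda.cuStreamSynchronize(stream)
    err, = cuda.cuMemFree(flag)
    err, = cuda.cuCtxDestroy(ctx)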
- - - -Support for both CU_STREAM_WAIT_VALUE_FLUSH and CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform hardware features and can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES. - - - -Note that all memory pointers passed as parameters to these operations are device pointers. Where necessary a device pointer should be obtained, for example with cuMemHostGetDevicePointer(). - - - -None of the operations accepts pointers to managed memory buffers (cuMemAllocManaged). - - - -Warning: Improper use of these APIs may deadlock the application. Synchronization ordering established through these APIs is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by these APIs should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. - -.. autofunction:: cuda.cuda.cuStreamWaitValue32 -.. autofunction:: cuda.cuda.cuStreamWaitValue64 -.. autofunction:: cuda.cuda.cuStreamWriteValue32 -.. autofunction:: cuda.cuda.cuStreamWriteValue64 -.. autofunction:: cuda.cuda.cuStreamBatchMemOp - -Execution Control ------------------ - -This section describes the execution control functions of the low-level CUDA driver application programming interface. - -.. autoclass:: cuda.cuda.CUfunctionLoadingState - - .. autoattribute:: cuda.cuda.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_UNLOADED - - - .. autoattribute:: cuda.cuda.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_LOADED - - - .. autoattribute:: cuda.cuda.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_MAX - -.. autofunction:: cuda.cuda.cuFuncGetAttribute -.. autofunction:: cuda.cuda.cuFuncSetAttribute -.. autofunction:: cuda.cuda.cuFuncSetCacheConfig -.. autofunction:: cuda.cuda.cuFuncGetModule -.. autofunction:: cuda.cuda.cuFuncGetName -.. autofunction:: cuda.cuda.cuFuncGetParamInfo -.. autofunction:: cuda.cuda.cuFuncIsLoaded -.. autofunction:: cuda.cuda.cuFuncLoad -.. autofunction:: cuda.cuda.cuLaunchKernel -.. autofunction:: cuda.cuda.cuLaunchKernelEx -.. autofunction:: cuda.cuda.cuLaunchCooperativeKernel -.. autofunction:: cuda.cuda.cuLaunchCooperativeKernelMultiDevice -.. autofunction:: cuda.cuda.cuLaunchHostFunc - -Graph Management ----------------- - -This section describes the graph management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGraphCreate -.. autofunction:: cuda.cuda.cuGraphAddKernelNode -.. autofunction:: cuda.cuda.cuGraphKernelNodeGetParams -.. autofunction:: cuda.cuda.cuGraphKernelNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddMemcpyNode -.. autofunction:: cuda.cuda.cuGraphMemcpyNodeGetParams -.. autofunction:: cuda.cuda.cuGraphMemcpyNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddMemsetNode -.. autofunction:: cuda.cuda.cuGraphMemsetNodeGetParams -.. autofunction:: cuda.cuda.cuGraphMemsetNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddHostNode -.. autofunction:: cuda.cuda.cuGraphHostNodeGetParams -.. autofunction:: cuda.cuda.cuGraphHostNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddChildGraphNode -.. autofunction:: cuda.cuda.cuGraphChildGraphNodeGetGraph -.. autofunction:: cuda.cuda.cuGraphAddEmptyNode -.. autofunction:: cuda.cuda.cuGraphAddEventRecordNode -.. autofunction:: cuda.cuda.cuGraphEventRecordNodeGetEvent -.. autofunction:: cuda.cuda.cuGraphEventRecordNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphAddEventWaitNode -.. 
autofunction:: cuda.cuda.cuGraphEventWaitNodeGetEvent -.. autofunction:: cuda.cuda.cuGraphEventWaitNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphAddExternalSemaphoresSignalNode -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresSignalNodeGetParams -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddExternalSemaphoresWaitNode -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresWaitNodeGetParams -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddBatchMemOpNode -.. autofunction:: cuda.cuda.cuGraphBatchMemOpNodeGetParams -.. autofunction:: cuda.cuda.cuGraphBatchMemOpNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecBatchMemOpNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddMemAllocNode -.. autofunction:: cuda.cuda.cuGraphMemAllocNodeGetParams -.. autofunction:: cuda.cuda.cuGraphAddMemFreeNode -.. autofunction:: cuda.cuda.cuGraphMemFreeNodeGetParams -.. autofunction:: cuda.cuda.cuDeviceGraphMemTrim -.. autofunction:: cuda.cuda.cuDeviceGetGraphMemAttribute -.. autofunction:: cuda.cuda.cuDeviceSetGraphMemAttribute -.. autofunction:: cuda.cuda.cuGraphClone -.. autofunction:: cuda.cuda.cuGraphNodeFindInClone -.. autofunction:: cuda.cuda.cuGraphNodeGetType -.. autofunction:: cuda.cuda.cuGraphGetNodes -.. autofunction:: cuda.cuda.cuGraphGetRootNodes -.. autofunction:: cuda.cuda.cuGraphGetEdges -.. autofunction:: cuda.cuda.cuGraphGetEdges_v2 -.. autofunction:: cuda.cuda.cuGraphNodeGetDependencies -.. autofunction:: cuda.cuda.cuGraphNodeGetDependencies_v2 -.. autofunction:: cuda.cuda.cuGraphNodeGetDependentNodes -.. autofunction:: cuda.cuda.cuGraphNodeGetDependentNodes_v2 -.. autofunction:: cuda.cuda.cuGraphAddDependencies -.. autofunction:: cuda.cuda.cuGraphAddDependencies_v2 -.. autofunction:: cuda.cuda.cuGraphRemoveDependencies -.. autofunction:: cuda.cuda.cuGraphRemoveDependencies_v2 -.. autofunction:: cuda.cuda.cuGraphDestroyNode -.. autofunction:: cuda.cuda.cuGraphInstantiate -.. autofunction:: cuda.cuda.cuGraphInstantiateWithParams -.. autofunction:: cuda.cuda.cuGraphExecGetFlags -.. autofunction:: cuda.cuda.cuGraphExecKernelNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecMemcpyNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecMemsetNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecHostNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecChildGraphNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecEventRecordNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphExecEventWaitNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphExecExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cuda.cuGraphNodeSetEnabled -.. autofunction:: cuda.cuda.cuGraphNodeGetEnabled -.. autofunction:: cuda.cuda.cuGraphUpload -.. autofunction:: cuda.cuda.cuGraphLaunch -.. autofunction:: cuda.cuda.cuGraphExecDestroy -.. autofunction:: cuda.cuda.cuGraphDestroy -.. autofunction:: cuda.cuda.cuGraphExecUpdate -.. autofunction:: cuda.cuda.cuGraphKernelNodeCopyAttributes -.. autofunction:: cuda.cuda.cuGraphKernelNodeGetAttribute -.. autofunction:: cuda.cuda.cuGraphKernelNodeSetAttribute -.. autofunction:: cuda.cuda.cuGraphDebugDotPrint -.. autofunction:: cuda.cuda.cuUserObjectCreate -.. autofunction:: cuda.cuda.cuUserObjectRetain -.. autofunction:: cuda.cuda.cuUserObjectRelease -.. autofunction:: cuda.cuda.cuGraphRetainUserObject -.. autofunction:: cuda.cuda.cuGraphReleaseUserObject -.. 
autofunction:: cuda.cuda.cuGraphAddNode -.. autofunction:: cuda.cuda.cuGraphAddNode_v2 -.. autofunction:: cuda.cuda.cuGraphNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecNodeSetParams -.. autofunction:: cuda.cuda.cuGraphConditionalHandleCreate - -Occupancy --------- - -This section describes the occupancy calculation functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuOccupancyMaxActiveBlocksPerMultiprocessor -.. autofunction:: cuda.cuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags -.. autofunction:: cuda.cuda.cuOccupancyMaxPotentialBlockSize -.. autofunction:: cuda.cuda.cuOccupancyMaxPotentialBlockSizeWithFlags -.. autofunction:: cuda.cuda.cuOccupancyAvailableDynamicSMemPerBlock -.. autofunction:: cuda.cuda.cuOccupancyMaxPotentialClusterSize -.. autofunction:: cuda.cuda.cuOccupancyMaxActiveClusters - -Texture Object Management ------------------------- - -This section describes the texture object management functions of the low-level CUDA driver application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cuda.cuTexObjectCreate -.. autofunction:: cuda.cuda.cuTexObjectDestroy -.. autofunction:: cuda.cuda.cuTexObjectGetResourceDesc -.. autofunction:: cuda.cuda.cuTexObjectGetTextureDesc -.. autofunction:: cuda.cuda.cuTexObjectGetResourceViewDesc - -Surface Object Management ------------------------- - -This section describes the surface object management functions of the low-level CUDA driver application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cuda.cuSurfObjectCreate -.. autofunction:: cuda.cuda.cuSurfObjectDestroy -.. autofunction:: cuda.cuda.cuSurfObjectGetResourceDesc - -Tensor Map Object Management ---------------------------- - -This section describes the tensor map object management functions of the low-level CUDA driver application programming interface. The tensor core API is only supported on devices of compute capability 9.0 or higher. - -.. autofunction:: cuda.cuda.cuTensorMapEncodeTiled -.. autofunction:: cuda.cuda.cuTensorMapEncodeIm2col -.. autofunction:: cuda.cuda.cuTensorMapReplaceAddress - -Peer Context Memory Access -------------------------- - -This section describes the direct peer context memory access functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuDeviceCanAccessPeer -.. autofunction:: cuda.cuda.cuCtxEnablePeerAccess -.. autofunction:: cuda.cuda.cuCtxDisablePeerAccess -.. autofunction:: cuda.cuda.cuDeviceGetP2PAttribute - -Graphics Interoperability ------------------------- - -This section describes the graphics interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGraphicsUnregisterResource -.. autofunction:: cuda.cuda.cuGraphicsSubResourceGetMappedArray -.. autofunction:: cuda.cuda.cuGraphicsResourceGetMappedMipmappedArray -.. autofunction:: cuda.cuda.cuGraphicsResourceGetMappedPointer -.. autofunction:: cuda.cuda.cuGraphicsResourceSetMapFlags -.. autofunction:: cuda.cuda.cuGraphicsMapResources -.. autofunction:: cuda.cuda.cuGraphicsUnmapResources - -Driver Entry Point Access ------------------------- - -This section describes the driver entry point access functions of the low-level CUDA driver application programming interface. - -..
autofunction:: cuda.cuda.cuGetProcAddress - -Coredump Attributes Control API ------------------------------- - -This section describes the coredump attribute control functions of the low-level CUDA driver application programming interface. - -.. autoclass:: cuda.cuda.CUcoredumpSettings - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_ENABLE_ON_EXCEPTION - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_TRIGGER_HOST - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_LIGHTWEIGHT - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_ENABLE_USER_TRIGGER - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_FILE - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_PIPE - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_GENERATION_FLAGS - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_MAX - -.. autoclass:: cuda.cuda.CUCoredumpGenerationFlags - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_DEFAULT_FLAGS - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_GLOBAL_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_SHARED_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_LOCAL_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_ABORT - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_CONSTBANK_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_LIGHTWEIGHT_FLAGS - -.. autofunction:: cuda.cuda.cuCoredumpGetAttribute -.. autofunction:: cuda.cuda.cuCoredumpGetAttributeGlobal -.. autofunction:: cuda.cuda.cuCoredumpSetAttribute -.. autofunction:: cuda.cuda.cuCoredumpSetAttributeGlobal - -Green Contexts -------------- - -This section describes the APIs for creation and manipulation of green contexts in the CUDA driver. Green contexts are a lightweight alternative to traditional contexts, with the ability to pass in a set of resources that they should be initialized with. This allows the developer to represent distinct spatial partitions of the GPU, provision resources for them, and target them via the same programming model that CUDA exposes (streams, kernel launches, etc.). - - - -There are 4 main steps to using this new set of APIs. - -- (1) Start with an initial set of resources, for example via cuDeviceGetDevResource. Only SM type is supported today. - - - - - - - -- (2) Partition this set of resources by providing them as input to a partition API, for example: cuDevSmResourceSplitByCount. - - - - - - - -- (3) Finalize the specification of resources by creating a descriptor via cuDevResourceGenerateDesc. - - - - - - - -- (4) Provision the resources and create a green context via cuGreenCtxCreate. - - - - - - - - - - - -For ``CU_DEV_RESOURCE_TYPE_SM``\ , the partitions created have minimum SM count requirements, often rounding up and aligning the minCount provided to cuDevSmResourceSplitByCount. The following is a guideline for each architecture and may be subject to change: - -- On Compute Architecture 6.X: The minimum count is 1 SM. - - - - - - - -- On Compute Architecture 7.X: The minimum count is 2 SMs and must be a multiple of 2. - - - - - - - -- On Compute Architecture 8.X: The minimum count is 4 SMs and must be a multiple of 2.
- - - - - - - -- On Compute Architecture 9.0+: The minimum count is 8 SMs and must be a multiple of 8. - - - - - - - - - - - -In the future, flags can be provided to trade off functional and performance characteristics versus finer-grained SM partitions. - - -Even if the green contexts have disjoint SM partitions, it is not guaranteed that the kernels launched in them will run concurrently or have forward progress guarantees. This is due to other resources (like HW connections, see ::CUDA_DEVICE_MAX_CONNECTIONS) that could cause a dependency. Additionally, in certain scenarios, it is possible for the workload to run on more SMs than was provisioned (but never less). The following are two scenarios which can exhibit this behavior: -- On Volta+ MPS: When ``CUDA_MPS_ACTIVE_THREAD_PERCENTAGE`` is used, the set of SMs that are used for running kernels can be scaled up to the value of SMs used for the MPS client. - - - - - - - -- On Compute Architecture 9.x: When a module with dynamic parallelism (CDP) is loaded, all future kernels running under green contexts may use and share an additional set of 2 SMs. - -.. autoclass:: cuda.cuda.CUdevSmResource_st -.. autoclass:: cuda.cuda.CUdevResource_st -.. autoclass:: cuda.cuda.CUdevSmResource -.. autoclass:: cuda.cuda.CUdevResource -.. autoclass:: cuda.cuda.CUgreenCtxCreate_flags - - .. autoattribute:: cuda.cuda.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM - - - Required. Creates a default stream to use inside the green context - -.. autoclass:: cuda.cuda.CUdevSmResourceSplit_flags - - .. autoattribute:: cuda.cuda.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING - - - .. autoattribute:: cuda.cuda.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE - -.. autoclass:: cuda.cuda.CUdevResourceType - - .. autoattribute:: cuda.cuda.CUdevResourceType.CU_DEV_RESOURCE_TYPE_INVALID - - - .. autoattribute:: cuda.cuda.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM - - - Streaming multiprocessors related information - -.. autoclass:: cuda.cuda.CUdevResourceDesc -.. autoclass:: cuda.cuda.CUdevSmResource -.. autofunction:: cuda.cuda._CONCAT_OUTER -.. autofunction:: cuda.cuda.cuGreenCtxCreate -.. autofunction:: cuda.cuda.cuGreenCtxDestroy -.. autofunction:: cuda.cuda.cuCtxFromGreenCtx -.. autofunction:: cuda.cuda.cuDeviceGetDevResource -.. autofunction:: cuda.cuda.cuCtxGetDevResource -.. autofunction:: cuda.cuda.cuGreenCtxGetDevResource -.. autofunction:: cuda.cuda.cuDevSmResourceSplitByCount -.. autofunction:: cuda.cuda.cuDevResourceGenerateDesc -.. autofunction:: cuda.cuda.cuGreenCtxRecordEvent -.. autofunction:: cuda.cuda.cuGreenCtxWaitEvent -.. autofunction:: cuda.cuda.cuStreamGetGreenCtx -.. autofunction:: cuda.cuda.cuGreenCtxStreamCreate -.. autoattribute:: cuda.cuda.RESOURCE_ABI_VERSION -.. autoattribute:: cuda.cuda.RESOURCE_ABI_EXTERNAL_BYTES -.. autoattribute:: cuda.cuda._CONCAT_INNER -.. autoattribute:: cuda.cuda._CONCAT_OUTER - -EGL Interoperability -------------------- - -This section describes the EGL interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGraphicsEGLRegisterImage -.. autofunction:: cuda.cuda.cuEGLStreamConsumerConnect -.. autofunction:: cuda.cuda.cuEGLStreamConsumerConnectWithFlags -.. autofunction:: cuda.cuda.cuEGLStreamConsumerDisconnect -.. autofunction:: cuda.cuda.cuEGLStreamConsumerAcquireFrame -.. autofunction:: cuda.cuda.cuEGLStreamConsumerReleaseFrame -..
autofunction:: cuda.cuda.cuEGLStreamProducerConnect -.. autofunction:: cuda.cuda.cuEGLStreamProducerDisconnect -.. autofunction:: cuda.cuda.cuEGLStreamProducerPresentFrame -.. autofunction:: cuda.cuda.cuEGLStreamProducerReturnFrame -.. autofunction:: cuda.cuda.cuGraphicsResourceGetMappedEglFrame -.. autofunction:: cuda.cuda.cuEventCreateFromEGLSync - -OpenGL Interoperability ------------------------ - -This section describes the OpenGL interoperability functions of the low-level CUDA driver application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability. - -.. autoclass:: cuda.cuda.CUGLDeviceList - - .. autoattribute:: cuda.cuda.CUGLDeviceList.CU_GL_DEVICE_LIST_ALL - - - The CUDA devices for all GPUs used by the current OpenGL context - - - .. autoattribute:: cuda.cuda.CUGLDeviceList.CU_GL_DEVICE_LIST_CURRENT_FRAME - - - The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame - - - .. autoattribute:: cuda.cuda.CUGLDeviceList.CU_GL_DEVICE_LIST_NEXT_FRAME - - - The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame - -.. autofunction:: cuda.cuda.cuGraphicsGLRegisterBuffer -.. autofunction:: cuda.cuda.cuGraphicsGLRegisterImage -.. autofunction:: cuda.cuda.cuGLGetDevices - -Profiler Control ----------------- - -This section describes the profiler control functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuProfilerStart -.. autofunction:: cuda.cuda.cuProfilerStop - -VDPAU Interoperability ----------------------- - -This section describes the VDPAU interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuVDPAUGetDevice -.. autofunction:: cuda.cuda.cuVDPAUCtxCreate -.. autofunction:: cuda.cuda.cuGraphicsVDPAURegisterVideoSurface -.. autofunction:: cuda.cuda.cuGraphicsVDPAURegisterOutputSurface diff --git a/docs/_sources/module/cudart.rst.txt b/docs/_sources/module/cudart.rst.txt deleted file mode 100644 index fc7e6676..00000000 --- a/docs/_sources/module/cudart.rst.txt +++ /dev/null @@ -1,5274 +0,0 @@ ------- -cudart ------- - -Profiler Control ----------------- - -This section describes the profiler control functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaProfilerStart -.. autofunction:: cuda.cudart.cudaProfilerStop - -Device Management ------------------ - -impl_private - - - - - - - -This section describes the device management functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaDeviceReset -.. autofunction:: cuda.cudart.cudaDeviceSynchronize -.. autofunction:: cuda.cudart.cudaDeviceSetLimit -.. autofunction:: cuda.cudart.cudaDeviceGetLimit -.. autofunction:: cuda.cudart.cudaDeviceGetTexture1DLinearMaxWidth -.. autofunction:: cuda.cudart.cudaDeviceGetCacheConfig -.. autofunction:: cuda.cudart.cudaDeviceGetStreamPriorityRange -.. autofunction:: cuda.cudart.cudaDeviceSetCacheConfig -.. autofunction:: cuda.cudart.cudaDeviceGetByPCIBusId -.. autofunction:: cuda.cudart.cudaDeviceGetPCIBusId -.. autofunction:: cuda.cudart.cudaIpcGetEventHandle -.. autofunction:: cuda.cudart.cudaIpcOpenEventHandle -.. autofunction:: cuda.cudart.cudaIpcGetMemHandle -.. autofunction:: cuda.cudart.cudaIpcOpenMemHandle -.. autofunction:: cuda.cudart.cudaIpcCloseMemHandle -.. 
autofunction:: cuda.cudart.cudaDeviceFlushGPUDirectRDMAWrites -.. autofunction:: cuda.cudart.cudaDeviceRegisterAsyncNotification -.. autofunction:: cuda.cudart.cudaDeviceUnregisterAsyncNotification -.. autofunction:: cuda.cudart.cudaGetDeviceCount -.. autofunction:: cuda.cudart.cudaGetDeviceProperties -.. autofunction:: cuda.cudart.cudaDeviceGetAttribute -.. autofunction:: cuda.cudart.cudaDeviceGetDefaultMemPool -.. autofunction:: cuda.cudart.cudaDeviceSetMemPool -.. autofunction:: cuda.cudart.cudaDeviceGetMemPool -.. autofunction:: cuda.cudart.cudaDeviceGetNvSciSyncAttributes -.. autofunction:: cuda.cudart.cudaDeviceGetP2PAttribute -.. autofunction:: cuda.cudart.cudaChooseDevice -.. autofunction:: cuda.cudart.cudaInitDevice -.. autofunction:: cuda.cudart.cudaSetDevice -.. autofunction:: cuda.cudart.cudaGetDevice -.. autofunction:: cuda.cudart.cudaSetDeviceFlags -.. autofunction:: cuda.cudart.cudaGetDeviceFlags - -Error Handling --------------- - -This section describes the error handling functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGetLastError -.. autofunction:: cuda.cudart.cudaPeekAtLastError -.. autofunction:: cuda.cudart.cudaGetErrorName -.. autofunction:: cuda.cudart.cudaGetErrorString - -Stream Management ------------------ - -This section describes the stream management functions of the CUDA runtime application programming interface. - -.. autoclass:: cuda.cudart.cudaStreamCallback_t -.. autofunction:: cuda.cudart.cudaStreamCreate -.. autofunction:: cuda.cudart.cudaStreamCreateWithFlags -.. autofunction:: cuda.cudart.cudaStreamCreateWithPriority -.. autofunction:: cuda.cudart.cudaStreamGetPriority -.. autofunction:: cuda.cudart.cudaStreamGetFlags -.. autofunction:: cuda.cudart.cudaStreamGetId -.. autofunction:: cuda.cudart.cudaCtxResetPersistingL2Cache -.. autofunction:: cuda.cudart.cudaStreamCopyAttributes -.. autofunction:: cuda.cudart.cudaStreamGetAttribute -.. autofunction:: cuda.cudart.cudaStreamSetAttribute -.. autofunction:: cuda.cudart.cudaStreamDestroy -.. autofunction:: cuda.cudart.cudaStreamWaitEvent -.. autofunction:: cuda.cudart.cudaStreamAddCallback -.. autofunction:: cuda.cudart.cudaStreamSynchronize -.. autofunction:: cuda.cudart.cudaStreamQuery -.. autofunction:: cuda.cudart.cudaStreamAttachMemAsync -.. autofunction:: cuda.cudart.cudaStreamBeginCapture -.. autofunction:: cuda.cudart.cudaStreamBeginCaptureToGraph -.. autofunction:: cuda.cudart.cudaThreadExchangeStreamCaptureMode -.. autofunction:: cuda.cudart.cudaStreamEndCapture -.. autofunction:: cuda.cudart.cudaStreamIsCapturing -.. autofunction:: cuda.cudart.cudaStreamGetCaptureInfo -.. autofunction:: cuda.cudart.cudaStreamGetCaptureInfo_v3 -.. autofunction:: cuda.cudart.cudaStreamUpdateCaptureDependencies -.. autofunction:: cuda.cudart.cudaStreamUpdateCaptureDependencies_v2 - -Event Management ----------------- - -This section describes the event management functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaEventCreate -.. autofunction:: cuda.cudart.cudaEventCreateWithFlags -.. autofunction:: cuda.cudart.cudaEventRecord -.. autofunction:: cuda.cudart.cudaEventRecordWithFlags -.. autofunction:: cuda.cudart.cudaEventQuery -.. autofunction:: cuda.cudart.cudaEventSynchronize -.. autofunction:: cuda.cudart.cudaEventDestroy -.. 
autofunction:: cuda.cudart.cudaEventElapsedTime - -External Resource Interoperability ---------------------------------- - -This section describes the external resource interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaImportExternalMemory -.. autofunction:: cuda.cudart.cudaExternalMemoryGetMappedBuffer -.. autofunction:: cuda.cudart.cudaExternalMemoryGetMappedMipmappedArray -.. autofunction:: cuda.cudart.cudaDestroyExternalMemory -.. autofunction:: cuda.cudart.cudaImportExternalSemaphore -.. autofunction:: cuda.cudart.cudaSignalExternalSemaphoresAsync -.. autofunction:: cuda.cudart.cudaWaitExternalSemaphoresAsync -.. autofunction:: cuda.cudart.cudaDestroyExternalSemaphore - -Execution Control ----------------- - -This section describes the execution control functions of the CUDA runtime application programming interface. - - -Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. - -.. autofunction:: cuda.cudart.cudaFuncSetCacheConfig -.. autofunction:: cuda.cudart.cudaFuncGetAttributes -.. autofunction:: cuda.cudart.cudaFuncSetAttribute -.. autofunction:: cuda.cudart.cudaLaunchHostFunc - -Occupancy --------- - -This section describes the occupancy calculation functions of the CUDA runtime application programming interface. - - -Besides the occupancy calculator functions (cudaOccupancyMaxActiveBlocksPerMultiprocessor and cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags), there are also C++ only occupancy-based launch configuration functions documented in the C++ API Routines module. - - -See cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), and cudaOccupancyAvailableDynamicSMemPerBlock (C++ API). - -.. autofunction:: cuda.cudart.cudaOccupancyMaxActiveBlocksPerMultiprocessor -.. autofunction:: cuda.cudart.cudaOccupancyAvailableDynamicSMemPerBlock -.. autofunction:: cuda.cudart.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags - -Memory Management ----------------- - -This section describes the memory management functions of the CUDA runtime application programming interface. - - -Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. - -.. autofunction:: cuda.cudart.cudaMallocManaged -.. autofunction:: cuda.cudart.cudaMalloc -.. autofunction:: cuda.cudart.cudaMallocHost -.. autofunction:: cuda.cudart.cudaMallocPitch -.. autofunction:: cuda.cudart.cudaMallocArray -.. autofunction:: cuda.cudart.cudaFree -.. autofunction:: cuda.cudart.cudaFreeHost -.. autofunction:: cuda.cudart.cudaFreeArray -.. autofunction:: cuda.cudart.cudaFreeMipmappedArray -.. autofunction:: cuda.cudart.cudaHostAlloc -.. autofunction:: cuda.cudart.cudaHostRegister -.. autofunction:: cuda.cudart.cudaHostUnregister -.. autofunction:: cuda.cudart.cudaHostGetDevicePointer -.. autofunction:: cuda.cudart.cudaHostGetFlags -.. autofunction:: cuda.cudart.cudaMalloc3D -.. autofunction:: cuda.cudart.cudaMalloc3DArray -.. autofunction:: cuda.cudart.cudaMallocMipmappedArray -.. autofunction:: cuda.cudart.cudaGetMipmappedArrayLevel -.. autofunction:: cuda.cudart.cudaMemcpy3D -.. autofunction:: cuda.cudart.cudaMemcpy3DPeer -.. autofunction:: cuda.cudart.cudaMemcpy3DAsync -.. autofunction:: cuda.cudart.cudaMemcpy3DPeerAsync -.. autofunction:: cuda.cudart.cudaMemGetInfo -..
autofunction:: cuda.cudart.cudaArrayGetInfo -.. autofunction:: cuda.cudart.cudaArrayGetPlane -.. autofunction:: cuda.cudart.cudaArrayGetMemoryRequirements -.. autofunction:: cuda.cudart.cudaMipmappedArrayGetMemoryRequirements -.. autofunction:: cuda.cudart.cudaArrayGetSparseProperties -.. autofunction:: cuda.cudart.cudaMipmappedArrayGetSparseProperties -.. autofunction:: cuda.cudart.cudaMemcpy -.. autofunction:: cuda.cudart.cudaMemcpyPeer -.. autofunction:: cuda.cudart.cudaMemcpy2D -.. autofunction:: cuda.cudart.cudaMemcpy2DToArray -.. autofunction:: cuda.cudart.cudaMemcpy2DFromArray -.. autofunction:: cuda.cudart.cudaMemcpy2DArrayToArray -.. autofunction:: cuda.cudart.cudaMemcpyAsync -.. autofunction:: cuda.cudart.cudaMemcpyPeerAsync -.. autofunction:: cuda.cudart.cudaMemcpy2DAsync -.. autofunction:: cuda.cudart.cudaMemcpy2DToArrayAsync -.. autofunction:: cuda.cudart.cudaMemcpy2DFromArrayAsync -.. autofunction:: cuda.cudart.cudaMemset -.. autofunction:: cuda.cudart.cudaMemset2D -.. autofunction:: cuda.cudart.cudaMemset3D -.. autofunction:: cuda.cudart.cudaMemsetAsync -.. autofunction:: cuda.cudart.cudaMemset2DAsync -.. autofunction:: cuda.cudart.cudaMemset3DAsync -.. autofunction:: cuda.cudart.cudaMemPrefetchAsync -.. autofunction:: cuda.cudart.cudaMemPrefetchAsync_v2 -.. autofunction:: cuda.cudart.cudaMemAdvise -.. autofunction:: cuda.cudart.cudaMemAdvise_v2 -.. autofunction:: cuda.cudart.cudaMemRangeGetAttribute -.. autofunction:: cuda.cudart.cudaMemRangeGetAttributes -.. autofunction:: cuda.cudart.make_cudaPitchedPtr -.. autofunction:: cuda.cudart.make_cudaPos -.. autofunction:: cuda.cudart.make_cudaExtent - -Stream Ordered Memory Allocator -------------------------------- - -**overview** - - - -The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use before allocation / use after free error will cause undefined behavior. - -The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. - - - - - -**Supported Platforms** - - - -Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cudaDeviceGetAttribute() with the device attribute cudaDevAttrMemoryPoolsSupported. - -.. autofunction:: cuda.cudart.cudaMallocAsync -.. autofunction:: cuda.cudart.cudaFreeAsync -.. autofunction:: cuda.cudart.cudaMemPoolTrimTo -.. autofunction:: cuda.cudart.cudaMemPoolSetAttribute -.. autofunction:: cuda.cudart.cudaMemPoolGetAttribute -.. autofunction:: cuda.cudart.cudaMemPoolSetAccess -.. autofunction:: cuda.cudart.cudaMemPoolGetAccess -.. autofunction:: cuda.cudart.cudaMemPoolCreate -.. autofunction:: cuda.cudart.cudaMemPoolDestroy -.. autofunction:: cuda.cudart.cudaMallocFromPoolAsync -.. autofunction:: cuda.cudart.cudaMemPoolExportToShareableHandle -.. autofunction:: cuda.cudart.cudaMemPoolImportFromShareableHandle -.. autofunction:: cuda.cudart.cudaMemPoolExportPointer -.. 
autofunction:: cuda.cudart.cudaMemPoolImportPointer - -Unified Addressing ------------------ - -This section describes the unified addressing functions of the CUDA runtime application programming interface. - - - - - -**Overview** - - - -CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). - - - - - -**Supported Platforms** - - - -Whether or not a device supports unified addressing may be queried by calling cudaGetDeviceProperties() with the device property cudaDeviceProp::unifiedAddressing. - -Unified addressing is automatically enabled in 64-bit processes. - - - - - -**Looking Up Information from Pointer Values** - - - -It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cudaPointerGetAttributes() - -Since pointers are unique, it is not necessary to specify information about the pointers specified to cudaMemcpy() and other copy functions. The copy direction cudaMemcpyDefault may be used to specify that the CUDA runtime should infer the location of the pointer from its value. - - - - - -**Automatic Mapping of Host Allocated Host Memory** - - - -All host memory allocated through all devices using cudaMallocHost() and cudaHostAlloc() is always directly accessible from all devices that support unified addressing. This is the case regardless of whether or not the flags cudaHostAllocPortable and cudaHostAllocMapped are specified. - -The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host. It is not necessary to call cudaHostGetDevicePointer() to get the device pointer for these allocations. - - -Note that this is not the case for memory allocated using the flag cudaHostAllocWriteCombined, as discussed below. - - - - - -**Direct Access of Peer Memory** - - - -Upon enabling direct access from a device that supports unified addressing to another peer device that supports unified addressing using cudaDeviceEnablePeerAccess() all memory allocated in the peer device using cudaMalloc() and cudaMallocPitch() will immediately be accessible by the current device. The device pointer value through which any peer's memory may be accessed in the current device is the same pointer value through which that memory may be accessed from the peer device. - - - - - -**Exceptions, Disjoint Addressing** - - - -Not all memory may be accessed on devices through the same pointer value through which they are accessed on the host. These exceptions are host memory registered using cudaHostRegister() and host memory allocated using the flag cudaHostAllocWriteCombined. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all devices that support unified addressing. - - -This device address may be queried using cudaHostGetDevicePointer() when a device using unified addressing is current.
Either the host or the unified device pointer value may be used to refer to this memory in cudaMemcpy() and similar functions using the cudaMemcpyDefault memory direction. - -.. autofunction:: cuda.cudart.cudaPointerGetAttributes - -Peer Device Memory Access ------------------------- - -This section describes the peer device memory access functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaDeviceCanAccessPeer -.. autofunction:: cuda.cudart.cudaDeviceEnablePeerAccess -.. autofunction:: cuda.cudart.cudaDeviceDisablePeerAccess - -OpenGL Interoperability ----------------------- - -impl_private - - -This section describes the OpenGL interoperability functions of the CUDA runtime application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability. - -.. autoclass:: cuda.cudart.cudaGLDeviceList - - .. autoattribute:: cuda.cudart.cudaGLDeviceList.cudaGLDeviceListAll - - - The CUDA devices for all GPUs used by the current OpenGL context - - - .. autoattribute:: cuda.cudart.cudaGLDeviceList.cudaGLDeviceListCurrentFrame - - - The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame - - - .. autoattribute:: cuda.cudart.cudaGLDeviceList.cudaGLDeviceListNextFrame - - - The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame - -.. autofunction:: cuda.cudart.cudaGLGetDevices -.. autofunction:: cuda.cudart.cudaGraphicsGLRegisterImage -.. autofunction:: cuda.cudart.cudaGraphicsGLRegisterBuffer - -Direct3D 9 Interoperability --------------------------- - - - - -Direct3D 10 Interoperability ---------------------------- - - - - -Direct3D 11 Interoperability ---------------------------- - - - - -VDPAU Interoperability ---------------------- - -This section describes the VDPAU interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaVDPAUGetDevice -.. autofunction:: cuda.cudart.cudaVDPAUSetVDPAUDevice -.. autofunction:: cuda.cudart.cudaGraphicsVDPAURegisterVideoSurface -.. autofunction:: cuda.cudart.cudaGraphicsVDPAURegisterOutputSurface - -EGL Interoperability -------------------- - -This section describes the EGL interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGraphicsEGLRegisterImage -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerConnect -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerConnectWithFlags -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerDisconnect -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerAcquireFrame -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerReleaseFrame -.. autofunction:: cuda.cudart.cudaEGLStreamProducerConnect -.. autofunction:: cuda.cudart.cudaEGLStreamProducerDisconnect -.. autofunction:: cuda.cudart.cudaEGLStreamProducerPresentFrame -.. autofunction:: cuda.cudart.cudaEGLStreamProducerReturnFrame -.. autofunction:: cuda.cudart.cudaGraphicsResourceGetMappedEglFrame -.. autofunction:: cuda.cudart.cudaEventCreateFromEGLSync - -Graphics Interoperability ------------------------- - -This section describes the graphics interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGraphicsUnregisterResource -.. autofunction:: cuda.cudart.cudaGraphicsResourceSetMapFlags -.. autofunction:: cuda.cudart.cudaGraphicsMapResources -..
autofunction:: cuda.cudart.cudaGraphicsUnmapResources -.. autofunction:: cuda.cudart.cudaGraphicsResourceGetMappedPointer -.. autofunction:: cuda.cudart.cudaGraphicsSubResourceGetMappedArray -.. autofunction:: cuda.cudart.cudaGraphicsResourceGetMappedMipmappedArray - -Texture Object Management ------------------------- - -This section describes the low level texture object management functions of the CUDA runtime application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cudart.cudaGetChannelDesc -.. autofunction:: cuda.cudart.cudaCreateChannelDesc -.. autofunction:: cuda.cudart.cudaCreateTextureObject -.. autofunction:: cuda.cudart.cudaDestroyTextureObject -.. autofunction:: cuda.cudart.cudaGetTextureObjectResourceDesc -.. autofunction:: cuda.cudart.cudaGetTextureObjectTextureDesc -.. autofunction:: cuda.cudart.cudaGetTextureObjectResourceViewDesc - -Surface Object Management ------------------------- - -This section describes the low level surface object management functions of the CUDA runtime application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cudart.cudaCreateSurfaceObject -.. autofunction:: cuda.cudart.cudaDestroySurfaceObject -.. autofunction:: cuda.cudart.cudaGetSurfaceObjectResourceDesc - -Version Management ------------------ - - - -.. autofunction:: cuda.cudart.cudaDriverGetVersion -.. autofunction:: cuda.cudart.cudaRuntimeGetVersion -.. autofunction:: cuda.cudart.getLocalRuntimeVersion - -Graph Management ---------------- - -This section describes the graph management functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGraphCreate -.. autofunction:: cuda.cudart.cudaGraphAddKernelNode -.. autofunction:: cuda.cudart.cudaGraphKernelNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphKernelNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphKernelNodeCopyAttributes -.. autofunction:: cuda.cudart.cudaGraphKernelNodeGetAttribute -.. autofunction:: cuda.cudart.cudaGraphKernelNodeSetAttribute -.. autofunction:: cuda.cudart.cudaGraphAddMemcpyNode -.. autofunction:: cuda.cudart.cudaGraphAddMemcpyNode1D -.. autofunction:: cuda.cudart.cudaGraphMemcpyNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphMemcpyNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphMemcpyNodeSetParams1D -.. autofunction:: cuda.cudart.cudaGraphAddMemsetNode -.. autofunction:: cuda.cudart.cudaGraphMemsetNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphMemsetNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddHostNode -.. autofunction:: cuda.cudart.cudaGraphHostNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphHostNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddChildGraphNode -.. autofunction:: cuda.cudart.cudaGraphChildGraphNodeGetGraph -.. autofunction:: cuda.cudart.cudaGraphAddEmptyNode -.. autofunction:: cuda.cudart.cudaGraphAddEventRecordNode -.. autofunction:: cuda.cudart.cudaGraphEventRecordNodeGetEvent -.. autofunction:: cuda.cudart.cudaGraphEventRecordNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphAddEventWaitNode -.. autofunction:: cuda.cudart.cudaGraphEventWaitNodeGetEvent -.. autofunction:: cuda.cudart.cudaGraphEventWaitNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphAddExternalSemaphoresSignalNode -.. autofunction:: cuda.cudart.cudaGraphExternalSemaphoresSignalNodeGetParams -..
autofunction:: cuda.cudart.cudaGraphExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddExternalSemaphoresWaitNode -.. autofunction:: cuda.cudart.cudaGraphExternalSemaphoresWaitNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddMemAllocNode -.. autofunction:: cuda.cudart.cudaGraphMemAllocNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphAddMemFreeNode -.. autofunction:: cuda.cudart.cudaGraphMemFreeNodeGetParams -.. autofunction:: cuda.cudart.cudaDeviceGraphMemTrim -.. autofunction:: cuda.cudart.cudaDeviceGetGraphMemAttribute -.. autofunction:: cuda.cudart.cudaDeviceSetGraphMemAttribute -.. autofunction:: cuda.cudart.cudaGraphClone -.. autofunction:: cuda.cudart.cudaGraphNodeFindInClone -.. autofunction:: cuda.cudart.cudaGraphNodeGetType -.. autofunction:: cuda.cudart.cudaGraphGetNodes -.. autofunction:: cuda.cudart.cudaGraphGetRootNodes -.. autofunction:: cuda.cudart.cudaGraphGetEdges -.. autofunction:: cuda.cudart.cudaGraphGetEdges_v2 -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependencies -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependencies_v2 -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependentNodes -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependentNodes_v2 -.. autofunction:: cuda.cudart.cudaGraphAddDependencies -.. autofunction:: cuda.cudart.cudaGraphAddDependencies_v2 -.. autofunction:: cuda.cudart.cudaGraphRemoveDependencies -.. autofunction:: cuda.cudart.cudaGraphRemoveDependencies_v2 -.. autofunction:: cuda.cudart.cudaGraphDestroyNode -.. autofunction:: cuda.cudart.cudaGraphInstantiate -.. autofunction:: cuda.cudart.cudaGraphInstantiateWithFlags -.. autofunction:: cuda.cudart.cudaGraphInstantiateWithParams -.. autofunction:: cuda.cudart.cudaGraphExecGetFlags -.. autofunction:: cuda.cudart.cudaGraphExecKernelNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecMemcpyNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecMemcpyNodeSetParams1D -.. autofunction:: cuda.cudart.cudaGraphExecMemsetNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecHostNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecChildGraphNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecEventRecordNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphExecEventWaitNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphExecExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphNodeSetEnabled -.. autofunction:: cuda.cudart.cudaGraphNodeGetEnabled -.. autofunction:: cuda.cudart.cudaGraphExecUpdate -.. autofunction:: cuda.cudart.cudaGraphUpload -.. autofunction:: cuda.cudart.cudaGraphLaunch -.. autofunction:: cuda.cudart.cudaGraphExecDestroy -.. autofunction:: cuda.cudart.cudaGraphDestroy -.. autofunction:: cuda.cudart.cudaGraphDebugDotPrint -.. autofunction:: cuda.cudart.cudaUserObjectCreate -.. autofunction:: cuda.cudart.cudaUserObjectRetain -.. autofunction:: cuda.cudart.cudaUserObjectRelease -.. autofunction:: cuda.cudart.cudaGraphRetainUserObject -.. autofunction:: cuda.cudart.cudaGraphReleaseUserObject -.. autofunction:: cuda.cudart.cudaGraphAddNode -.. autofunction:: cuda.cudart.cudaGraphAddNode_v2 -.. autofunction:: cuda.cudart.cudaGraphNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecNodeSetParams -.. 
autofunction:: cuda.cudart.cudaGraphConditionalHandleCreate - -Driver Entry Point Access ------------------------- - -This section describes the driver entry point access functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGetDriverEntryPoint -.. autofunction:: cuda.cudart.cudaGetDriverEntryPointByVersion - -C++ API Routines ----------------- -C++-style interface built on top of CUDA runtime API. -impl_private - - - - - - - -This section describes the C++ high level API functions of the CUDA runtime application programming interface. To use these functions, your application needs to be compiled with the ``nvcc`` compiler. - - -Interactions with the CUDA Driver API ------------------------------------- - -This section describes the interactions between the CUDA Driver API and the CUDA Runtime API. - - - - - -**Primary Contexts** - - - -There exists a one-to-one relationship between CUDA devices in the CUDA Runtime API and ::CUcontext s in the CUDA Driver API within a process. The specific context which the CUDA Runtime API uses for a device is called the device's primary context. From the perspective of the CUDA Runtime API, a device and its primary context are synonymous. - - - - - -**Initialization and Tear-Down** - - - -CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current to the calling host thread. - -The function cudaInitDevice() ensures that the primary context is initialized for the requested device but does not make it current to the calling thread. - -The function cudaSetDevice() initializes the primary context for the specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent(). - -The CUDA Runtime API will automatically initialize the primary context for a device at the first CUDA Runtime API call which requires an active context. If no ::CUcontext is current to the calling thread when a CUDA Runtime API call which requires an active context is made, then the primary context for a device will be selected, made current to the calling thread, and initialized. - -The context which the CUDA Runtime API initializes will be initialized using the parameters specified by the CUDA Runtime API functions cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), ::cudaD3D10SetDirect3DDevice(), ::cudaD3D11SetDirect3DDevice(), cudaGLSetGLDevice(), and cudaVDPAUSetVDPAUDevice(). Note that these functions will fail with cudaErrorSetOnActiveProcess if they are called when the primary context for the specified device has already been initialized (or if the current device has already been initialized, in the case of cudaSetDeviceFlags()). - -Primary contexts will remain active until they are explicitly deinitialized using cudaDeviceReset(). The function cudaDeviceReset() will deinitialize the primary context for the calling thread's current device immediately. The context will remain current to all of the threads that it was current to. The next CUDA Runtime API call on any thread which requires an active context will trigger the reinitialization of that device's primary context. - -Note that primary contexts are shared resources. It is recommended that the primary context not be reset except just before exit or to recover from an unspecified launch failure. - - - - - -**Context Interoperability** - - - -Note that the use of multiple ::CUcontext s per device within a single process will substantially degrade performance and is strongly discouraged.
Instead, it is highly recommended that the implicit one-to-one device-to-context mapping for the process provided by the CUDA Runtime API be used. - -If a non-primary ::CUcontext created by the CUDA Driver API is current to a thread then the CUDA Runtime API calls to that thread will operate on that ::CUcontext, with some exceptions listed below. Interoperability between data types is discussed in the following sections. - -The function cudaPointerGetAttributes() will return the error cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a non-primary context. The function cudaDeviceEnablePeerAccess() and the rest of the peer access API may not be called when a non-primary ::CUcontext is current. - - To use the pointer query and peer access APIs with a context created using the CUDA Driver API, it is necessary that the CUDA Driver API be used to access these features. - -All CUDA Runtime API state (e.g., global variables' addresses and values) travels with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one thread to another then all CUDA Runtime API state will move to that thread as well. - -Please note that attaching to legacy contexts (those with a version of 3010 as returned by ::cuCtxGetApiVersion()) is not possible. The CUDA Runtime will return cudaErrorIncompatibleDriverContext in such cases. - - - - - -**Interactions between CUstream and cudaStream_t** - - - -The types ::CUstream and cudaStream_t are identical and may be used interchangeably. - - - - - -**Interactions between CUevent and cudaEvent_t** - - - -The types ::CUevent and cudaEvent_t are identical and may be used interchangeably. - - - - - -**Interactions between CUarray and cudaArray_t** - - - -The types ::CUarray and struct ::cudaArray * represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *. - -In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray. - - - - - -**Interactions between CUgraphicsResource and cudaGraphicsResource_t** - - - -The types ::CUgraphicsResource and cudaGraphicsResource_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource to a cudaGraphicsResource_t. - -In order to use a cudaGraphicsResource_t in a CUDA Driver API function which takes a ::CUgraphicsResource, it is necessary to explicitly cast the cudaGraphicsResource_t to a ::CUgraphicsResource. - - - - - -**Interactions between CUtexObject and cudaTextureObject_t** - - - -The types ::CUtexObject and cudaTextureObject_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUtexObject in a CUDA Runtime API function which takes a cudaTextureObject_t, it is necessary to explicitly cast the ::CUtexObject to a cudaTextureObject_t. - -In order to use a cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, it is necessary to explicitly cast the cudaTextureObject_t to a ::CUtexObject.
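Because ::CUstream and cudaStream_t are identical, a stream created through the runtime can be handed to a driver entry point. A minimal sketch, assuming the binding handle types expose their raw value via ``int()`` and that rewrapping through ``cuda.CUstream`` is acceptable (both are assumptions of this sketch, not statements from the reference above):

.. code-block:: python

    from cuda import cuda, cudart

    # The runtime call initializes and binds the device's primary context,
    # as described above, so a driver API call can follow immediately.
    err, stream = cudart.cudaStreamCreate()

    # Same underlying handle: rewrap the runtime stream for the driver API.
    err, = cuda.cuStreamSynchronize(cuda.CUstream(int(stream)))

    err, = cudart.cudaStreamDestroy(stream)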
- - - - - -**Interactions between CUsurfObject and cudaSurfaceObject_t** - - - -The types ::CUsurfObject and cudaSurfaceObject_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a cudaSurfaceObject_t, it is necessary to explicitly cast the ::CUsurfObject to a cudaSurfaceObject_t. - -In order to use a cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, it is necessary to explicitly cast the cudaSurfaceObject_t to a ::CUsurfObject. - - - - - -**Interactions between CUfunction and cudaFunction_t** - - - -The types ::CUfunction and cudaFunction_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, it is necessary to explicitly cast the cudaFunction_t to a ::CUfunction. - -.. autofunction:: cuda.cudart.cudaGetKernel - -Data types used by CUDA Runtime -------------------------------- - - - -.. autoclass:: cuda.cudart.cudaEglPlaneDesc_st -.. autoclass:: cuda.cudart.cudaEglFrame_st -.. autoclass:: cuda.cudart.cudaChannelFormatDesc -.. autoclass:: cuda.cudart.cudaArraySparseProperties -.. autoclass:: cuda.cudart.cudaArrayMemoryRequirements -.. autoclass:: cuda.cudart.cudaPitchedPtr -.. autoclass:: cuda.cudart.cudaExtent -.. autoclass:: cuda.cudart.cudaPos -.. autoclass:: cuda.cudart.cudaMemcpy3DParms -.. autoclass:: cuda.cudart.cudaMemcpyNodeParams -.. autoclass:: cuda.cudart.cudaMemcpy3DPeerParms -.. autoclass:: cuda.cudart.cudaMemsetParams -.. autoclass:: cuda.cudart.cudaMemsetParamsV2 -.. autoclass:: cuda.cudart.cudaAccessPolicyWindow -.. autoclass:: cuda.cudart.cudaHostNodeParams -.. autoclass:: cuda.cudart.cudaHostNodeParamsV2 -.. autoclass:: cuda.cudart.cudaResourceDesc -.. autoclass:: cuda.cudart.cudaResourceViewDesc -.. autoclass:: cuda.cudart.cudaPointerAttributes -.. autoclass:: cuda.cudart.cudaFuncAttributes -.. autoclass:: cuda.cudart.cudaMemLocation -.. autoclass:: cuda.cudart.cudaMemAccessDesc -.. autoclass:: cuda.cudart.cudaMemPoolProps -.. autoclass:: cuda.cudart.cudaMemPoolPtrExportData -.. autoclass:: cuda.cudart.cudaMemAllocNodeParams -.. autoclass:: cuda.cudart.cudaMemAllocNodeParamsV2 -.. autoclass:: cuda.cudart.cudaMemFreeNodeParams -.. autoclass:: cuda.cudart.CUuuid_st -.. autoclass:: cuda.cudart.cudaDeviceProp -.. autoclass:: cuda.cudart.cudaIpcEventHandle_st -.. autoclass:: cuda.cudart.cudaIpcMemHandle_st -.. autoclass:: cuda.cudart.cudaMemFabricHandle_st -.. autoclass:: cuda.cudart.cudaExternalMemoryHandleDesc -.. autoclass:: cuda.cudart.cudaExternalMemoryBufferDesc -.. autoclass:: cuda.cudart.cudaExternalMemoryMipmappedArrayDesc -.. autoclass:: cuda.cudart.cudaExternalSemaphoreHandleDesc -.. autoclass:: cuda.cudart.cudaExternalSemaphoreSignalParams -.. autoclass:: cuda.cudart.cudaExternalSemaphoreWaitParams -.. autoclass:: cuda.cudart.cudaKernelNodeParams -.. autoclass:: cuda.cudart.cudaKernelNodeParamsV2 -.. autoclass:: cuda.cudart.cudaExternalSemaphoreSignalNodeParams -.. autoclass:: cuda.cudart.cudaExternalSemaphoreSignalNodeParamsV2 -.. autoclass:: cuda.cudart.cudaExternalSemaphoreWaitNodeParams -.. autoclass:: cuda.cudart.cudaExternalSemaphoreWaitNodeParamsV2 -.. autoclass:: cuda.cudart.cudaConditionalNodeParams -.. autoclass:: cuda.cudart.cudaChildGraphNodeParams -.. autoclass:: cuda.cudart.cudaEventRecordNodeParams -.. 
autoclass:: cuda.cudart.cudaEventWaitNodeParams -.. autoclass:: cuda.cudart.cudaGraphNodeParams -.. autoclass:: cuda.cudart.cudaGraphEdgeData_st -.. autoclass:: cuda.cudart.cudaGraphInstantiateParams_st -.. autoclass:: cuda.cudart.cudaGraphExecUpdateResultInfo_st -.. autoclass:: cuda.cudart.cudaGraphKernelNodeUpdate -.. autoclass:: cuda.cudart.cudaLaunchMemSyncDomainMap_st -.. autoclass:: cuda.cudart.cudaLaunchAttributeValue -.. autoclass:: cuda.cudart.cudaLaunchAttribute_st -.. autoclass:: cuda.cudart.cudaAsyncNotificationInfo -.. autoclass:: cuda.cudart.cudaTextureDesc -.. autoclass:: cuda.cudart.cudaEglFrameType - - .. autoattribute:: cuda.cudart.cudaEglFrameType.cudaEglFrameTypeArray - - - Frame type CUDA array - - - .. autoattribute:: cuda.cudart.cudaEglFrameType.cudaEglFrameTypePitch - - - Frame type CUDA pointer - -.. autoclass:: cuda.cudart.cudaEglResourceLocationFlags - - .. autoattribute:: cuda.cudart.cudaEglResourceLocationFlags.cudaEglResourceLocationSysmem - - - Resource location sysmem - - - .. autoattribute:: cuda.cudart.cudaEglResourceLocationFlags.cudaEglResourceLocationVidmem - - - Resource location vidmem - -.. autoclass:: cuda.cudart.cudaEglColorFormat - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422Planar - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar - - - Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatARGB - - - R/G/B/A four channels in one surface with BGRA byte ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatRGBA - - - R/G/B/A four channels in one surface with ABGR byte ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatL - - - single luminance channel in one surface. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatR - - - single color channel in one surface. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444Planar - - - Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUYV422 - - - Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatUYVY422 - - - Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatABGR - - - R/G/B/A four channels in one surface with RGBA byte ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBGRA - - - R/G/B/A four channels in one surface with ARGB byte ordering. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatA - - - Alpha color format - one channel in one surface. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatRG - - - R/G color format - two channels in one surface with GR byte ordering - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatAYUV - - - Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatVYUY_ER - - - Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatUYVY_ER - - - Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUYV_ER - - - Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVYU_ER - - - Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUVA_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatAYUV_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444Planar_ER - - - Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422Planar_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444Planar_ER - - - Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422Planar_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerRGGB - - - Bayer format - one channel in one surface with interleaved RGGB ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerBGGR - - - Bayer format - one channel in one surface with interleaved BGGR ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerGRBG - - - Bayer format - one channel in one surface with interleaved GRBG ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerGBRG - - - Bayer format - one channel in one surface with interleaved GBRG ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10RGGB - - - Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10BGGR - - - Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10GRBG - - - Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10GBRG - - - Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12RGGB - - - Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12BGGR - - - Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12GRBG - - - Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12GBRG - - - Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14RGGB - - - Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14BGGR - - - Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14GRBG - - - Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14GBRG - - - Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20RGGB - - - Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20BGGR - - - Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20GRBG - - - Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20GBRG - - - Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444Planar - - - Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422Planar - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspRGGB - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspBGGR - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspGRBG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspGBRG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerBCCR - - - Bayer format - one channel in one surface with interleaved BCCR ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerRCCB - - - Bayer format - one channel in one surface with interleaved RCCB ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerCRBC - - - Bayer format - one channel in one surface with interleaved CRBC ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerCBRC - - - Bayer format - one channel in one surface with interleaved CBRC ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10CCCC - - - Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12BCCR - - - Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12RCCB - - - Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12CRBC - - - Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12CBRC - - - Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12CCCC - - - Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY - - - Color format for single Y plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_2020 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_2020 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_2020 - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_2020 - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_709 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_709 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_709 - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_709 - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_2020 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_2020 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_709 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY_ER - - - Extended Range Color format for single Y plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY_709_ER - - - Extended Range Color format for single Y plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10_709_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12_709_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUVA - - - Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVYU - - - Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatVYUY - - - Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - -.. autoclass:: cuda.cudart.cudaError_t - - .. autoattribute:: cuda.cudart.cudaError_t.cudaSuccess - - - The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`). - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidValue - - - This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMemoryAllocation - - - The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInitializationError - - - The API call failed because the CUDA driver and runtime could not be initialized. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCudartUnloading - - - This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shutdown, at a point in time after the CUDA driver has been unloaded. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerDisabled - - - This indicates the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like the Visual Profiler. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerNotInitialized - - - [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerAlreadyStarted - - - [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerAlreadyStopped - - - [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidConfiguration - - - This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See :py:obj:`~.cudaDeviceProp` for more device limitations. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidPitchValue - - - This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidSymbol - - - This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidHostPointer - - - This indicates that at least one host pointer passed to the API call is not a valid host pointer. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidDevicePointer - - - This indicates that at least one device pointer passed to the API call is not a valid device pointer. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidTexture - - - This indicates that the texture passed to the API call is not a valid texture. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidTextureBinding - - - This indicates that the texture binding is not valid. This occurs if you call :py:obj:`~.cudaGetTextureAlignmentOffset()` with an unbound texture. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidChannelDescriptor - - - This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by :py:obj:`~.cudaChannelFormatKind`, or if one of the dimensions is invalid. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidMemcpyDirection - - - This indicates that the direction of the memcpy passed to the API call is not one of the types specified by :py:obj:`~.cudaMemcpyKind`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAddressOfConstant - - - This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorTextureFetchFailed - - - This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorTextureNotBound - - - This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSynchronizationError - - - This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidFilterSetting - - - This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidNormSetting - - - This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMixedDeviceExecution - - - Mixing of device and device emulation code was not allowed. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotYetImplemented - - - This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMemoryValueTooLarge - - - This indicated that an emulated device pointer exceeded the 32-bit address range. [Deprecated] - - - .. 
autoattribute:: cuda.cudart.cudaError_t.cudaErrorStubLibrary - - - This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in the CUDA API returning this error. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInsufficientDriver - - - This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCallRequiresNewerDriver - - - This indicates that the API call requires a newer CUDA driver than the one currently installed. Users should install an updated NVIDIA CUDA driver to allow the API call to succeed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidSurface - - - This indicates that the surface passed to the API call is not a valid surface. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDuplicateVariableName - - - This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDuplicateTextureName - - - This indicates that multiple textures (across separate CUDA source files in the application) share the same string name. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDuplicateSurfaceName - - - This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDevicesUnavailable - - - This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of :py:obj:`~.cudaComputeModeProhibited`, :py:obj:`~.cudaComputeModeExclusiveProcess`, or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorIncompatibleDriverContext - - - This indicates that the current context is not compatible with the CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see "Interactions with the CUDA Driver API" for more information. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMissingConfiguration - - - The device function being invoked (usually via :py:obj:`~.cudaLaunchKernel()`) was not previously configured via the :py:obj:`~.cudaConfigureCall()` function. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPriorLaunchFailure - - - This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchMaxDepthExceeded - - - This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchFileScopedTex - - - This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchFileScopedSurf - - - This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSyncDepthExceeded - - - This error indicates that a call to :py:obj:`~.cudaDeviceSynchronize` made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or user-specified device limit :py:obj:`~.cudaLimitDevRuntimeSyncDepth`. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which :py:obj:`~.cudaDeviceSynchronize` will be called must be specified with the :py:obj:`~.cudaLimitDevRuntimeSyncDepth` limit to the :py:obj:`~.cudaDeviceSetLimit` API before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. Note that :py:obj:`~.cudaDeviceSynchronize` made from the device runtime is only supported on devices of compute capability < 9.0. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchPendingCountExceeded - - - This error indicates that a device runtime grid launch failed because the launch would exceed the limit :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount`. For this launch to proceed successfully, :py:obj:`~.cudaDeviceSetLimit` must be called to set the :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount` to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidDeviceFunction - - - The requested device function does not exist or is not compiled for the proper device architecture. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNoDevice - - - This indicates that no CUDA-capable devices were detected by the installed CUDA driver. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidDevice - - - This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDeviceNotLicensed - - - This indicates that the device doesn't have a valid Grid License. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSoftwareValidityNotEstablished - - - By default, the CUDA runtime may perform a minimal set of self-tests, as well as CUDA driver tests, to establish the validity of both. Introduced in CUDA 11.2, this error return indicates that at least one of these tests has failed and the validity of either the runtime or the driver could not be established. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorStartupFailure - - - This indicates an internal startup failure in the CUDA runtime. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidKernelImage - - - This indicates that the device kernel image is invalid. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDeviceUninitialized - - - This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMapBufferObjectFailed - - - This indicates that the buffer object could not be mapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnmapBufferObjectFailed - - - This indicates that the buffer object could not be unmapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorArrayIsMapped - - - This indicates that the specified array is currently mapped and thus cannot be destroyed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAlreadyMapped - - - This indicates that the resource is already mapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNoKernelImageForDevice - - - This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAlreadyAcquired - - - This indicates that a resource has already been acquired. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotMapped - - - This indicates that a resource is not mapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotMappedAsArray - - - This indicates that a mapped resource is not available for access as an array. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotMappedAsPointer - - - This indicates that a mapped resource is not available for access as a pointer. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorECCUncorrectable - - - This indicates that an uncorrectable ECC error was detected during execution. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedLimit - - - This indicates that the :py:obj:`~.cudaLimit` passed to the API call is not supported by the active device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDeviceAlreadyInUse - - - This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPeerAccessUnsupported - - - This error indicates that P2P access is not supported across the given devices. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidPtx - - - A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidGraphicsContext - - - This indicates an error with the OpenGL or DirectX context. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNvlinkUncorrectable - - - This indicates that an uncorrectable NVLink error was detected during the execution. - - - .. 
autoattribute:: cuda.cudart.cudaError_t.cudaErrorJitCompilerNotFound - - - This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedPtxVersion - - - This indicates that the provided PTX was compiled with an unsupported toolchain. The most common reason for this is that the PTX was generated by a compiler newer than what is supported by the CUDA driver and PTX JIT compiler. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorJitCompilationDisabled - - - This indicates that JIT compilation was disabled. JIT compilation is used to compile PTX. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedExecAffinity - - - This indicates that the provided execution affinity is not supported by the device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedDevSideSync - - - This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidSource - - - This indicates that the device kernel source is invalid. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorFileNotFound - - - This indicates that the file specified was not found. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSharedObjectSymbolNotFound - - - This indicates that a link to a shared object failed to resolve. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSharedObjectInitFailed - - - This indicates that initialization of a shared object failed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorOperatingSystem - - - This error indicates that an OS call failed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidResourceHandle - - - This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like :py:obj:`~.cudaStream_t` and :py:obj:`~.cudaEvent_t`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorIllegalState - - - This indicates that a resource required by the API call is not in a valid state to perform the requested operation. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLossyQuery - - - This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSymbolNotFound - - - This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotReady - - - This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than :py:obj:`~.cudaSuccess` (which indicates completion). Calls that may return this value include :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorIllegalAddress - - - The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchOutOfResources - - - This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to :py:obj:`~.cudaErrorInvalidConfiguration`, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchTimeout - - - This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property :py:obj:`~.kernelExecTimeoutEnabled` for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchIncompatibleTexturing - - - This error indicates a kernel launch that uses an incompatible texturing mode. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPeerAccessAlreadyEnabled - - - This error indicates that a call to :py:obj:`~.cudaDeviceEnablePeerAccess()` is trying to re-enable peer addressing from a context which has already had peer addressing enabled. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPeerAccessNotEnabled - - - This error indicates that :py:obj:`~.cudaDeviceDisablePeerAccess()` is trying to disable peer addressing which has not been enabled yet via :py:obj:`~.cudaDeviceEnablePeerAccess()`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSetOnActiveProcess - - - This indicates that the user has called :py:obj:`~.cudaSetValidDevices()`, :py:obj:`~.cudaSetDeviceFlags()`, :py:obj:`~.cudaD3D9SetDirect3DDevice()`, :py:obj:`~.cudaD3D10SetDirect3DDevice`, :py:obj:`~.cudaD3D11SetDirect3DDevice()`, or :py:obj:`~.cudaVDPAUSetVDPAUDevice()` after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing :py:obj:`~.CUcontext` active on the host thread. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorContextIsDestroyed - - - This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAssert - - - An assert triggered in device code during kernel execution. The device cannot be used again. All existing allocations are invalid. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorTooManyPeers - - - This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cudaEnablePeerAccess()`. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorHostMemoryAlreadyRegistered - - - This error indicates that the memory range passed to :py:obj:`~.cudaHostRegister()` has already been registered. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorHostMemoryNotRegistered - - - This error indicates that the pointer passed to :py:obj:`~.cudaHostUnregister()` does not correspond to any currently registered memory region. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorHardwareStackError - - - Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorIllegalInstruction - - - The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMisalignedAddress - - - The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidAddressSpace - - - While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidPc - - - The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchFailure - - - An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCooperativeLaunchTooLarge - - - This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cudaLaunchCooperativeKernel` or :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.cudaDevAttrMultiProcessorCount`. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotPermitted - - - This error indicates the attempted operation is not permitted. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotSupported - - - This error indicates the attempted operation is not supported on the current system or device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSystemNotReady - - - This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSystemDriverMismatch - - - This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCompatNotSupportedOnDevice - - - This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsConnectionFailed - - - This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsRpcFailure - - - This error indicates that the remote procedure call between the MPS server and the MPS client failed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsServerNotReady - - - This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsMaxClientsReached - - - This error indicates that the hardware resources required to create an MPS client have been exhausted. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsMaxConnectionsReached - - - This error indicates that the hardware resources required to support device connections have been exhausted. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsClientTerminated - - - This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCdpNotSupported - - - This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCdpVersionMismatch - - - This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureUnsupported - - - The operation is not permitted when the stream is capturing. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureInvalidated - - - The current capture sequence on the stream has been invalidated due to a previous error. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureMerge - - - The operation would have resulted in a merge of two independent capture sequences.
- - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureUnmatched - - - The capture was not initiated in this stream. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureUnjoined - - - The capture sequence contains a fork that was not joined to the primary stream. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureIsolation - - - A dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureImplicit - - - The operation would have resulted in a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCapturedEvent - - - The operation is not permitted on an event which was last recorded in a capturing stream. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureWrongThread - - - A stream capture sequence not initiated with the :py:obj:`~.cudaStreamCaptureModeRelaxed` argument to :py:obj:`~.cudaStreamBeginCapture` was passed to :py:obj:`~.cudaStreamEndCapture` in a different thread. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorTimeout - - - This indicates that the wait operation has timed out. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorGraphExecUpdateFailure - - - This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorExternalDevice - - - This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidClusterSize - - - This indicates that a kernel launch error has occurred due to cluster misconfiguration. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorFunctionNotLoaded - - - Indicates that a function handle is not loaded when calling an API that requires a loaded function. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidResourceType - - - This error indicates one or more resources passed in are not valid resource types for the operation. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidResourceConfiguration - - - This error indicates one or more resources are insufficient or non-applicable for the operation. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnknown - - - This indicates that an unknown internal error has occurred. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorApiFailureBase
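Given the breadth of status codes above, a small checking helper is a common pattern when calling these bindings. The sketch below is illustrative only; ``check`` is a hypothetical helper, not part of this module, and statuses such as :py:obj:`~.cudaErrorNotReady` may deserve non-fatal handling in real code:

.. code-block:: python

    from cuda import cudart

    def check(result):
        # Every binding returns its cudaError_t status first; raise on any
        # non-success status and pass through the remaining return values.
        err, *values = result
        if err != cudart.cudaError_t.cudaSuccess:
            _, name = cudart.cudaGetErrorName(err)
            raise RuntimeError(f"CUDA runtime error: {name}")
        return values

    free, total = check(cudart.cudaMemGetInfo())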
autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindNV12 - - - Unsigned 8-bit integers, planar 4:2:0 YUV format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X1 - - - 1 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X2 - - - 2 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X4 - - - 4 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X1 - - - 1 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X2 - - - 2 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X4 - - - 4 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X1 - - - 1 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X2 - - - 2 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X4 - - - 4 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X1 - - - 1 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X2 - - - 2 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X4 - - - 4 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1 - - - 4 channel unsigned normalized block-compressed (BC1 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB - - - 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2 - - - 4 channel unsigned normalized block-compressed (BC2 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB - - - 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3 - - - 4 channel unsigned normalized block-compressed (BC3 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB - - - 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4 - - - 1 channel unsigned normalized block-compressed (BC4 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4 - - - 1 channel signed normalized block-compressed (BC4 compression) format - - - .. 
autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5 - - - 2 channel unsigned normalized block-compressed (BC5 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5 - - - 2 channel signed normalized block-compressed (BC5 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H - - - 3 channel unsigned half-float block-compressed (BC6H compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H - - - 3 channel signed half-float block-compressed (BC6H compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7 - - - 4 channel unsigned normalized block-compressed (BC7 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB - - - 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding - -.. autoclass:: cuda.cudart.cudaMemoryType - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeUnregistered - - - Unregistered memory - - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeHost - - - Host memory - - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeDevice - - - Device memory - - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeManaged - - - Managed memory - -.. autoclass:: cuda.cudart.cudaMemcpyKind - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyHostToHost - - - Host -> Host - - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyHostToDevice - - - Host -> Device - - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost - - - Device -> Host - - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice - - - Device -> Device - - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyDefault - - - Direction of the transfer is inferred from the pointer values. Requires unified virtual addressing - -.. autoclass:: cuda.cudart.cudaAccessProperty - - .. autoattribute:: cuda.cudart.cudaAccessProperty.cudaAccessPropertyNormal - - - Normal cache persistence. - - - .. autoattribute:: cuda.cudart.cudaAccessProperty.cudaAccessPropertyStreaming - - - Streaming access is less likely to persist in cache. - - - .. autoattribute:: cuda.cudart.cudaAccessProperty.cudaAccessPropertyPersisting - - - Persisting access is more likely to persist in cache. - -.. autoclass:: cuda.cudart.cudaStreamCaptureStatus - - .. autoattribute:: cuda.cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone - - - Stream is not capturing - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive - - - Stream is actively capturing - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusInvalidated - - - Stream is part of a capture sequence that has been invalidated, but not terminated - -.. autoclass:: cuda.cudart.cudaStreamCaptureMode - - .. autoattribute:: cuda.cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed
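The capture status and capture mode values above drive the stream capture workflow. A minimal sketch of a capture sequence with the bindings (error handling elided; assumes a device has already been set):

.. code-block:: python

    from cuda import cudart

    err, stream = cudart.cudaStreamCreate()
    err, = cudart.cudaStreamBeginCapture(
        stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)

    # Work issued into `stream` here is recorded into a graph, not executed.
    err, status = cudart.cudaStreamIsCapturing(stream)
    assert status == cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive

    err, graph = cudart.cudaStreamEndCapture(stream)

- -.. autoclass:: cuda.cudart.cudaSynchronizationPolicy - - .. autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicyAuto - - - .. 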
autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicySpin - - - .. autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicyYield - - - .. autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicyBlockingSync - -.. autoclass:: cuda.cudart.cudaClusterSchedulingPolicy - - .. autoattribute:: cuda.cudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyDefault - - - the default policy - - - .. autoattribute:: cuda.cudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicySpread - - - spread the blocks within a cluster to the SMs - - - .. autoattribute:: cuda.cudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyLoadBalancing - - - allow the hardware to load-balance the blocks in a cluster to the SMs - -.. autoclass:: cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags - - .. autoattribute:: cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamAddCaptureDependencies - - - Add new nodes to the dependency set - - - .. autoattribute:: cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamSetCaptureDependencies - - - Replace the dependency set with the new nodes - -.. autoclass:: cuda.cudart.cudaUserObjectFlags - - .. autoattribute:: cuda.cudart.cudaUserObjectFlags.cudaUserObjectNoDestructorSync - - - Indicates the destructor execution is not synchronized by any CUDA handle. - -.. autoclass:: cuda.cudart.cudaUserObjectRetainFlags - - .. autoattribute:: cuda.cudart.cudaUserObjectRetainFlags.cudaGraphUserObjectMove - - - Transfer references from the caller rather than creating new references. - -.. autoclass:: cuda.cudart.cudaGraphicsRegisterFlags - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone - - - Default - - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly - - - CUDA will not write to this resource - - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard - - - CUDA will only write to and will not read from this resource - - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsSurfaceLoadStore - - - CUDA will bind this resource to a surface reference - - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsTextureGather - - - CUDA will perform texture gather operations on this resource - -.. autoclass:: cuda.cudart.cudaGraphicsMapFlags - - .. autoattribute:: cuda.cudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsNone - - - Default; Assume resource can be read/written - - - .. autoattribute:: cuda.cudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsReadOnly - - - CUDA will not write to this resource - - - .. autoattribute:: cuda.cudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsWriteDiscard - - - CUDA will only write to and will not read from this resource - -.. autoclass:: cuda.cudart.cudaGraphicsCubeFace - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveX - - - Positive X face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeX - - - Negative X face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveY - - - Positive Y face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeY - - - Negative Y face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveZ - - - Positive Z face of cubemap - - - .. 
autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeZ - - - Negative Z face of cubemap - -.. autoclass:: cuda.cudart.cudaResourceType - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypeArray - - - Array resource - - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypeMipmappedArray - - - Mipmapped array resource - - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypeLinear - - - Linear resource - - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypePitch2D - - - Pitch 2D resource - -.. autoclass:: cuda.cudart.cudaResourceViewFormat - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatNone - - - No resource view format (use underlying resource format) - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar1 - - - 1 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar2 - - - 2 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar4 - - - 4 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedChar1 - - - 1 channel signed 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedChar2 - - - 2 channel signed 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedChar4 - - - 4 channel signed 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort1 - - - 1 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort2 - - - 2 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort4 - - - 4 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedShort1 - - - 1 channel signed 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedShort2 - - - 2 channel signed 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedShort4 - - - 4 channel signed 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt1 - - - 1 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt2 - - - 2 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt4 - - - 4 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedInt1 - - - 1 channel signed 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedInt2 - - - 2 channel signed 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedInt4 - - - 4 channel signed 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatHalf1 - - - 1 channel 16-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatHalf2 - - - 2 channel 16-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatHalf4 - - - 4 channel 16-bit floating point - - - .. 
autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatFloat1 - - - 1 channel 32-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatFloat2 - - - 2 channel 32-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatFloat4 - - - 4 channel 32-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed1 - - - Block compressed 1 - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed2 - - - Block compressed 2 - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed3 - - - Block compressed 3 - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed4 - - - Block compressed 4 unsigned - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed4 - - - Block compressed 4 signed - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed5 - - - Block compressed 5 unsigned - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed5 - - - Block compressed 5 signed - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed6H - - - Block compressed 6 unsigned half-float - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed6H - - - Block compressed 6 signed half-float - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed7 - - - Block compressed 7 - -.. autoclass:: cuda.cudart.cudaFuncAttribute - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize - - - Maximum dynamic shared memory size - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributePreferredSharedMemoryCarveout - - - Preferred shared memory-L1 cache split - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeClusterDimMustBeSet - - - Indicator to enforce valid cluster dimension specification on kernel launch - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterWidth - - - Required cluster width - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterHeight - - - Required cluster height - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterDepth - - - Required cluster depth - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeNonPortableClusterSizeAllowed - - - Whether non-portable cluster scheduling policy is supported - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeClusterSchedulingPolicyPreference - - - Required cluster scheduling policy preference - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeMax - -.. autoclass:: cuda.cudart.cudaFuncCache - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferNone - - - Default function cache configuration, no preference - - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferShared - - - Prefer larger shared memory and smaller L1 cache - - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferL1 - - - Prefer larger L1 cache and smaller shared memory - - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferEqual - - - Prefer equal size L1 cache and shared memory - -.. 
autoclass:: cuda.cudart.cudaSharedMemConfig - - .. autoattribute:: cuda.cudart.cudaSharedMemConfig.cudaSharedMemBankSizeDefault - - - .. autoattribute:: cuda.cudart.cudaSharedMemConfig.cudaSharedMemBankSizeFourByte - - - .. autoattribute:: cuda.cudart.cudaSharedMemConfig.cudaSharedMemBankSizeEightByte - -.. autoclass:: cuda.cudart.cudaSharedCarveout - - .. autoattribute:: cuda.cudart.cudaSharedCarveout.cudaSharedmemCarveoutDefault - - - No preference for shared memory or L1 (default) - - - .. autoattribute:: cuda.cudart.cudaSharedCarveout.cudaSharedmemCarveoutMaxShared - - - Prefer maximum available shared memory, minimum L1 cache - - - .. autoattribute:: cuda.cudart.cudaSharedCarveout.cudaSharedmemCarveoutMaxL1 - - - Prefer maximum available L1 cache, minimum shared memory - -.. autoclass:: cuda.cudart.cudaComputeMode - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeDefault - - - Default compute mode (Multiple threads can use :py:obj:`~.cudaSetDevice()` with this device) - - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeExclusive - - - Compute-exclusive-thread mode (Only one thread in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) - - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeProhibited - - - Compute-prohibited mode (No threads can use :py:obj:`~.cudaSetDevice()` with this device) - - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeExclusiveProcess - - - Compute-exclusive-process mode (Many threads in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) - -.. autoclass:: cuda.cudart.cudaLimit - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitStackSize - - - GPU thread stack size - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitPrintfFifoSize - - - GPU printf FIFO size - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitMallocHeapSize - - - GPU malloc heap size - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitDevRuntimeSyncDepth - - - GPU device runtime synchronize depth - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitDevRuntimePendingLaunchCount - - - GPU device runtime pending launch count - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitMaxL2FetchGranularity - - - A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitPersistingL2CacheSize - - - A size in bytes for L2 persisting lines cache size - -.. autoclass:: cuda.cudart.cudaMemoryAdvise - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseSetReadMostly - - - Data will mostly be read and only occasionally be written to - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseUnsetReadMostly - - - Undo the effect of :py:obj:`~.cudaMemAdviseSetReadMostly` - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseSetPreferredLocation - - - Set the preferred location for the data as the specified device - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseUnsetPreferredLocation - - - Clear the preferred location for the data - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseSetAccessedBy - - - Data will be accessed by the specified device, so prevent page faults as much as possible - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseUnsetAccessedBy - - - Let the Unified Memory subsystem decide on the page faulting policy for the specified device
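As a usage sketch for the advice values above, applied to a managed allocation (device 0 assumed; error handling elided for brevity):

.. code-block:: python

    from cuda import cudart

    nbytes = 1 << 20
    # cudaMallocManaged takes an attach flag; cudaMemAttachGlobal makes the
    # allocation accessible from any stream on any device.
    err, ptr = cudart.cudaMallocManaged(nbytes, cudart.cudaMemAttachGlobal)

    # Mark the range read-mostly so the driver may keep read-only replicas.
    err, = cudart.cudaMemAdvise(
        ptr, nbytes, cudart.cudaMemoryAdvise.cudaMemAdviseSetReadMostly, 0)

    err, = cudart.cudaFree(ptr)

- -.. 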
autoclass:: cuda.cudart.cudaMemRangeAttribute - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly - - - Whether the range will mostly be read and only occasionally be written to - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocation - - - The preferred location of the range - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeAccessedBy - - - Memory range has :py:obj:`~.cudaMemAdviseSetAccessedBy` set for specified device - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocation - - - The last location to which the range was prefetched - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationType - - - The preferred location type of the range - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationId - - - The preferred location id of the range - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationType - - - The last location type to which the range was prefetched - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationId - - - The last location id to which the range was prefetched - -.. autoclass:: cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost - - - :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` and its CUDA Driver API counterpart are supported on the device. - - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionMemOps - - - The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the CUDA device. - -.. autoclass:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering - - .. autoattribute:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingNone - - - The device does not natively support ordering of GPUDirect RDMA writes. :py:obj:`~.cudaFlushGPUDirectRDMAWrites()` can be leveraged if supported. - - - .. autoattribute:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingOwner - - - Natively, the device can consistently consume GPUDirect RDMA writes, although other CUDA devices may not. - - - .. autoattribute:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingAllDevices - - - Any CUDA device in the system can consistently consume GPUDirect RDMA writes to this device. - -.. autoclass:: cuda.cudart.cudaFlushGPUDirectRDMAWritesScope - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner - - - Blocks until remote writes are visible to the CUDA device context owning the data. - - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToAllDevices - - - Blocks until remote writes are visible to all CUDA device contexts. - -.. autoclass:: cuda.cudart.cudaFlushGPUDirectRDMAWritesTarget - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice - - - Sets the target for :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. - -.. autoclass:: cuda.cudart.cudaDeviceAttr - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock - - - Maximum number of threads per block - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimX - - - Maximum block dimension X - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimY - - - Maximum block dimension Y - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimZ - - - Maximum block dimension Z - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxGridDimX - - - Maximum grid dimension X - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxGridDimY - - - Maximum grid dimension Y - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxGridDimZ - - - Maximum grid dimension Z - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock - - - Maximum shared memory available per block in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTotalConstantMemory - - - Memory available on device for constant variables in a CUDA C kernel in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrWarpSize - - - Warp size in threads - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxPitch - - - Maximum pitch in bytes allowed by memory copies - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock - - - Maximum number of 32-bit registers available per block - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrClockRate - - - Peak clock frequency in kilohertz - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTextureAlignment - - - Alignment requirement for textures - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGpuOverlap - - - Device can possibly copy memory and execute a kernel concurrently - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMultiProcessorCount - - - Number of multiprocessors on device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrKernelExecTimeout - - - Specifies whether there is a run time limit on kernels - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrIntegrated - - - Device is integrated with host memory - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCanMapHostMemory - - - Device can map host memory into CUDA address space - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputeMode - - - Compute mode (See :py:obj:`~.cudaComputeMode` for details) - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth - - - Maximum 1D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth - - - Maximum 2D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight - - - Maximum 2D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth - - - Maximum 3D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight - - - Maximum 3D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth - - - Maximum 3D texture depth - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth - - - Maximum 2D layered texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight - - - Maximum 2D layered texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers - - - Maximum layers in a 2D layered texture - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrSurfaceAlignment - - - Alignment requirement for surfaces - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrConcurrentKernels - - - Device can possibly execute multiple kernels concurrently - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrEccEnabled - - - Device has ECC support enabled - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPciBusId - - - PCI bus ID of the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPciDeviceId - - - PCI device ID of the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTccDriver - - - Device is using TCC driver model - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemoryClockRate - - - Peak memory clock frequency in kilohertz - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth - - - Global memory bus width in bits - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrL2CacheSize - - - Size of L2 cache in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor - - - Maximum resident threads per multiprocessor - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrAsyncEngineCount - - - Number of asynchronous engines - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrUnifiedAddressing - - - Device shares a unified address space with the host - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth - - - Maximum 1D layered texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers - - - Maximum layers in a 1D layered texture - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth - - - Maximum 2D texture width if cudaArrayTextureGather is set - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight - - - Maximum 2D texture height if cudaArrayTextureGather is set - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt - - - Alternate maximum 3D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt - - - Alternate maximum 3D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt - - - Alternate maximum 3D texture depth - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPciDomainId - - - PCI domain ID of the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTexturePitchAlignment - - - Pitch alignment requirement for textures - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth - - - Maximum cubemap texture width/height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth - - - Maximum cubemap layered texture width/height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers - - - Maximum layers in a cubemap layered texture - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth - - - Maximum 1D surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth - - - Maximum 2D surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight - - - Maximum 2D surface height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth - - - Maximum 3D surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight - - - Maximum 3D surface height - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth - - - Maximum 3D surface depth - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth - - - Maximum 1D layered surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers - - - Maximum layers in a 1D layered surface - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth - - - Maximum 2D layered surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight - - - Maximum 2D layered surface height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers - - - Maximum layers in a 2D layered surface - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth - - - Maximum cubemap surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth - - - Maximum cubemap layered surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers - - - Maximum layers in a cubemap layered surface - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth - - - Maximum 1D linear texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth - - - Maximum 2D linear texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight - - - Maximum 2D linear texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch - - - Maximum 2D linear texture pitch in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth - - - Maximum mipmapped 2D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight - - - Maximum mipmapped 2D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor - - - Major compute capability version number - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor - - - Minor compute capability version number - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth - - - Maximum mipmapped 1D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported - - - Device supports stream priorities - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported - - - Device supports caching globals in L1 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported - - - Device supports caching locals in L1 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor - - - Maximum shared memory available per multiprocessor in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor - - - Maximum number of 32-bit registers available per multiprocessor - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrManagedMemory - - - Device can allocate managed memory on this system - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard - - - Device is on a multi-GPU board - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID - - - Unique identifier for a group of devices on the same multi-GPU board - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported - - - Link between the device and the host supports native atomic operations - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio - - - Ratio of single precision performance (in floating-point operations per second) to double precision performance - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPageableMemoryAccess - - - Device supports coherently accessing pageable memory without calling cudaHostRegister on it - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess - - - Device can coherently access managed memory concurrently with the CPU - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputePreemptionSupported - - - Device supports Compute Preemption - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem - - - Device can access host registered memory at the same virtual address as the CPU - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved92 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved93 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved94 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCooperativeLaunch - - - Device supports launching cooperative kernels via :py:obj:`~.cudaLaunchCooperativeKernel` - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch - - - Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin - - - The maximum optin shared memory per block. This value may vary by chip. See :py:obj:`~.cudaFuncSetAttribute` - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites - - - Device supports flushing of outstanding remote writes. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostRegisterSupported - - - Device supports host memory registration via :py:obj:`~.cudaHostRegister`. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables - - - Device accesses pageable memory via the host's page tables. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost - - - Host can directly access managed memory on the device without migration. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlocksPerMultiprocessor - - - Maximum number of blocks per multiprocessor - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxPersistingL2CacheSize - - - Maximum L2 persisting lines capacity setting in bytes. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxAccessPolicyWindowSize - - - Maximum value of :py:obj:`~.cudaAccessPolicyWindow.num_bytes`. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReservedSharedMemoryPerBlock - - - Shared memory reserved by CUDA driver per block in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrSparseCudaArraySupported - - - Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostRegisterReadOnlySupported - - - Device supports using the :py:obj:`~.cudaHostRegister` flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTimelineSemaphoreInteropSupported - - - External timeline semaphore interop is supported on the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTimelineSemaphoreInteropSupported - - - Deprecated, External timeline semaphore interop is supported on the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported - - - Device supports using the :py:obj:`~.cudaMallocAsync` and :py:obj:`~.cudaMemPool` family of APIs - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMASupported - - - Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions - - - The returned attribute shall be interpreted as a bitmask, where the individual bits are listed in the :py:obj:`~.cudaFlushGPUDirectRDMAWritesOptions` enum - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAWritesOrdering - - - GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.cudaGPUDirectRDMAWritesOrdering` for the numerical values returned here. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemoryPoolSupportedHandleTypes - - - Handle types supported with mempool based IPC - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrClusterLaunch - - - Indicates device supports cluster launch - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrDeferredMappingCudaArraySupported - - - Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved122 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved123 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved124 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrIpcEventSupport - - - Device supports IPC Events. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemSyncDomainCount - - - Number of memory synchronization domains the device supports. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved127 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved128 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved129 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrNumaConfig - - - NUMA configuration of a device: value is of type :py:obj:`~.cudaDeviceNumaConfig` enum - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrNumaId - - - NUMA node ID of the GPU memory - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved132 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMpsEnabled - - - Contexts created on this device will be shared via MPS - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostNumaId - - - NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrD3D12CigSupported - - - Device supports CIG with D3D12. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMax - -.. autoclass:: cuda.cudart.cudaMemPoolAttr - - .. 
autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies - - - (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic - - - (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies - - - (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync (default enabled). - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold - - - (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent - - - (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemHigh - - - (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent - - - (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemHigh - - - (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. - -.. autoclass:: cuda.cudart.cudaMemLocationType - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeInvalid - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeDevice - - - Location is a device location, thus id is a device ordinal - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeHost - - - Location is host, id is ignored - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeHostNuma - - - Location is a host NUMA node, thus id is a host NUMA node id - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeHostNumaCurrent - - - Location is the host NUMA node closest to the current thread's CPU, id is ignored - -.. autoclass:: cuda.cudart.cudaMemAccessFlags - - .. autoattribute:: cuda.cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtNone - - - Default, make the address range not accessible - - - .. autoattribute:: cuda.cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtRead - - - Make the address range read accessible - - - .. autoattribute:: cuda.cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite - - - Make the address range read-write accessible
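The pool reuse attributes above govern stream-ordered allocation. A minimal sketch of the allocate/free pattern they apply to (device 0 assumed; error handling elided):

.. code-block:: python

    from cuda import cudart

    err, stream = cudart.cudaStreamCreate()
    nbytes = 1 << 20

    # Both calls are ordered with respect to `stream`; memory freed here may
    # be reused by later stream-ordered allocations per the pool's policy.
    err, dptr = cudart.cudaMallocAsync(nbytes, stream)
    err, = cudart.cudaFreeAsync(dptr, stream)
    err, = cudart.cudaStreamSynchronize(stream)

- -.. autoclass:: cuda.cudart.cudaMemAllocationType - - .. 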
autoattribute:: cuda.cudart.cudaMemAllocationType.cudaMemAllocationTypeInvalid - - - .. autoattribute:: cuda.cudart.cudaMemAllocationType.cudaMemAllocationTypePinned - - - This allocation type is 'pinned', i.e. cannot migrate from its current location while the application is actively using it - - - .. autoattribute:: cuda.cudart.cudaMemAllocationType.cudaMemAllocationTypeMax - -.. autoclass:: cuda.cudart.cudaMemAllocationHandleType - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeNone - - - Does not allow any export mechanism. > - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor - - - Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeWin32 - - - Allows a Win32 NT handle to be used for exporting. (HANDLE) - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeWin32Kmt - - - Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeFabric - - - Allows a fabric handle to be used for exporting. (cudaMemFabricHandle_t) - -.. autoclass:: cuda.cudart.cudaGraphMemAttributeType - - .. autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemCurrent - - - (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs. - - - .. autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh - - - (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. - - - .. autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemCurrent - - - (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - - - .. autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemHigh - - - (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - -.. autoclass:: cuda.cudart.cudaDeviceP2PAttr - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrPerformanceRank - - - A relative value indicating the performance of the link between two devices - - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported - - - Peer access is enabled - - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrNativeAtomicSupported - - - Native atomic operation over the link supported - - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrCudaArrayAccessSupported - - - Accessing CUDA arrays over the link supported - -.. autoclass:: cuda.cudart.cudaExternalMemoryHandleType - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32Kmt - - - Handle is an opaque, globally shared handle - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Heap - - - Handle is a D3D12 heap object - - - .. 
autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Resource - - - Handle is a D3D12 committed resource - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11Resource - - - Handle is a shared NT handle to a D3D11 resource - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11ResourceKmt - - - Handle is a globally shared handle to a D3D11 resource - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeNvSciBuf - - - Handle is an NvSciBuf object - -.. autoclass:: cuda.cudart.cudaExternalSemaphoreHandleType - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt - - - Handle is an opaque, globally shared handle - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D12Fence - - - Handle is a shared NT handle referencing a D3D12 fence object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D11Fence - - - Handle is a shared NT handle referencing a D3D11 fence object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeNvSciSync - - - Opaque handle to NvSciSync Object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutex - - - Handle is a shared NT handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutexKmt - - - Handle is a shared KMT handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd - - - Handle is an opaque file descriptor referencing a timeline semaphore - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 - - - Handle is an opaque shared NT handle referencing a timeline semaphore - -.. autoclass:: cuda.cudart.cudaCGScope - - .. autoattribute:: cuda.cudart.cudaCGScope.cudaCGScopeInvalid - - - Invalid cooperative group scope - - - .. autoattribute:: cuda.cudart.cudaCGScope.cudaCGScopeGrid - - - Scope represented by a grid_group - - - .. autoattribute:: cuda.cudart.cudaCGScope.cudaCGScopeMultiGrid - - - Scope represented by a multi_grid_group - -.. autoclass:: cuda.cudart.cudaGraphConditionalHandleFlags - - .. autoattribute:: cuda.cudart.cudaGraphConditionalHandleFlags.cudaGraphCondAssignDefault - - - Apply default handle value when graph is launched. - -.. autoclass:: cuda.cudart.cudaGraphConditionalNodeType - - .. autoattribute:: cuda.cudart.cudaGraphConditionalNodeType.cudaGraphCondTypeIf - - - Conditional 'if' Node. Body executed once if condition value is non-zero. - - - .. autoattribute:: cuda.cudart.cudaGraphConditionalNodeType.cudaGraphCondTypeWhile - - - Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. - -.. autoclass:: cuda.cudart.cudaGraphNodeType - - .. 
autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeKernel - - - GPU kernel node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemcpy - - - Memcpy node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemset - - - Memset node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeHost - - - Host (executable) node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeGraph - - - Node which executes an embedded graph - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeEmpty - - - Empty (no-op) node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeWaitEvent - - - External event wait node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeEventRecord - - - External event record node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreSignal - - - External semaphore signal node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreWait - - - External semaphore wait node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemAlloc - - - Memory allocation node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemFree - - - Memory free node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeConditional - - - Conditional node. May be used to implement a conditional execution path or loop inside of a graph. The graph(s) contained within the body of the conditional node can be selectively executed or iterated upon based on the value of a conditional variable. - - - - Handles must be created in advance of creating the node using :py:obj:`~.cudaGraphConditionalHandleCreate`. - - - - The following restrictions apply to graphs which contain conditional nodes: - - The graph cannot be used in a child node. - - Only one instantiation of the graph may exist at any point in time. - - The graph cannot be cloned. - - - - To set the control value, supply a default value when creating the handle and/or call :py:obj:`~.cudaGraphSetConditional` from device code. - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeCount - -.. autoclass:: cuda.cudart.cudaGraphDependencyType - - .. autoattribute:: cuda.cudart.cudaGraphDependencyType.cudaGraphDependencyTypeDefault - - - This is an ordinary dependency. - - - .. autoattribute:: cuda.cudart.cudaGraphDependencyType.cudaGraphDependencyTypeProgrammatic - - - This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.cudaGraphKernelNodePortProgrammatic` or :py:obj:`~.cudaGraphKernelNodePortLaunchCompletion` outgoing port. - -.. autoclass:: cuda.cudart.cudaGraphExecUpdateResult - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess - - - The update succeeded - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateError - - - The update failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorTopologyChanged - - - The update failed because the topology changed - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNodeTypeChanged - - - The update failed because a node type changed - - - .. 
autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorFunctionChanged - - - The update failed because the function of a kernel node changed (CUDA driver < 11.2) - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorParametersChanged - - - The update failed because the parameters changed in a way that is not supported - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNotSupported - - - The update failed because something about the node is not supported - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorUnsupportedFunctionChange - - - The update failed because the function of a kernel node changed in an unsupported way - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorAttributesChanged - - - The update failed because the node attributes changed in a way that is not supported - -.. autoclass:: cuda.cudart.cudaGraphInstantiateResult - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess - - - Instantiation succeeded - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateError - - - Instantiation failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateInvalidStructure - - - Instantiation failed due to invalid structure, such as cycles - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateNodeOperationNotSupported - - - Instantiation for device launch failed because the graph contained an unsupported operation - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateMultipleDevicesNotSupported - - - Instantiation for device launch failed due to the nodes belonging to different contexts - -.. autoclass:: cuda.cudart.cudaGraphKernelNodeField - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldInvalid - - - Invalid field - - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldGridDim - - - Grid dimension update - - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldParam - - - Kernel parameter update - - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldEnabled - - - Node enable/disable - -.. autoclass:: cuda.cudart.cudaGetDriverEntryPointFlags - - .. autoattribute:: cuda.cudart.cudaGetDriverEntryPointFlags.cudaEnableDefault - - - Default search mode for driver symbols. - - - .. autoattribute:: cuda.cudart.cudaGetDriverEntryPointFlags.cudaEnableLegacyStream - - - Search for legacy versions of driver symbols. - - - .. autoattribute:: cuda.cudart.cudaGetDriverEntryPointFlags.cudaEnablePerThreadDefaultStream - - - Search for per-thread versions of driver symbols. - -.. autoclass:: cuda.cudart.cudaDriverEntryPointQueryResult - - .. autoattribute:: cuda.cudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess - - - Search for symbol found a match - - - .. autoattribute:: cuda.cudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSymbolNotFound - - - Search for symbol was not found - - - .. autoattribute:: cuda.cudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointVersionNotSufficent - - - Search for symbol was found but the version was not sufficient - -.. autoclass:: cuda.cudart.cudaGraphDebugDotFlags - - .. 
autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose - - - Output all debug data as if every debug flag is enabled - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams - - - Adds :py:obj:`~.cudaKernelNodeParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemcpyNodeParams - - - Adds :py:obj:`~.cudaMemcpy3DParms` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemsetNodeParams - - - Adds :py:obj:`~.cudaMemsetParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHostNodeParams - - - Adds :py:obj:`~.cudaHostNodeParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsEventNodeParams - - - Adds cudaEvent_t handle from record and wait nodes to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasSignalNodeParams - - - Adds :py:obj:`~.cudaExternalSemaphoreSignalNodeParams` values to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasWaitNodeParams - - - Adds :py:obj:`~.cudaExternalSemaphoreWaitNodeParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeAttributes - - - Adds cudaKernelNodeAttrID values to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHandles - - - Adds node handles and every kernel function handle to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsConditionalNodeParams - - - Adds :py:obj:`~.cudaConditionalNodeParams` to output - -.. autoclass:: cuda.cudart.cudaGraphInstantiateFlags - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch - - - Automatically free memory allocated in a graph before relaunching. - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload - - - Automatically upload the graph after instantiation. Only supported by - - :py:obj:`~.cudaGraphInstantiateWithParams`. The upload will be performed using the - - stream provided in `instantiateParams`. - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagDeviceLaunch - - - Instantiate the graph to be launchable from the device. This flag can only - - be used on platforms which support unified addressing. This flag cannot be - - used in conjunction with cudaGraphInstantiateFlagAutoFreeOnLaunch. - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority - - - Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. - -.. autoclass:: cuda.cudart.cudaLaunchMemSyncDomain - - .. autoattribute:: cuda.cudart.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainDefault - - - Launch kernels in the default domain - - - .. autoattribute:: cuda.cudart.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainRemote - - - Launch kernels in the remote domain - -.. autoclass:: cuda.cudart.cudaLaunchAttributeID - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeIgnore - - - Ignored entry, for convenient composition - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.accessPolicyWindow`. - - - .. 
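Debug-dot flags and instantiate flags are both plain bitmasks. A sketch of the two calls that consume them, assuming `cudaGraphDebugDotPrint` accepts a bytes path and an integer flag mask as in the C API:

.. code-block:: python

    from cuda import cudart

    err, graph = cudart.cudaGraphCreate(0)
    # ... add nodes ...
    # Dump the graph topology for offline inspection with graphviz.
    err, = cudart.cudaGraphDebugDotPrint(
        graph, b"graph.dot",
        cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose.value)
    # Instantiate so that graph-owned allocations are freed before relaunch.
    err, graph_exec = cudart.cudaGraphInstantiateWithFlags(
        graph,
        cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch.value)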
autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeCooperative - - - Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.cooperative`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy - - - Valid for streams. See :py:obj:`~.cudaLaunchAttributeValue.syncPolicy`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension - - - Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterDim`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference - - - Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization - - - Valid for launches. Setting :py:obj:`~.cudaLaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent - - - Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.programmaticEvent` to record the event. The event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event programmatically in a future CUDA release. A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cudaEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cudaEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set). - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributePriority - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.priority`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomainMap`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomain`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent - - - Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.launchCompletionEvent` to record the event.
- - Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. - - A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set). - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode - - - Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. - - :py:obj:`~.cudaLaunchAttributeValue`::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via :py:obj:`~.cudaLaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`. - - Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cudaGraphDestroyNode`. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cudaGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cudaGraphExecUpdate`. - - If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cuGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout - - - Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.cudaLaunchAttributeValue.sharedMemCarveout` to a percentage between 0 and 100 signals the CUDA driver to set the shared memory carveout preference, in percent of the total shared memory for that kernel launch. This attribute takes precedence over :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout`. This is only a hint, and the driver can choose a different configuration if required for the launch. - -.. autoclass:: cuda.cudart.cudaDeviceNumaConfig - - ..
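Launch attributes are ordinary structs in the bindings. A construction sketch for the programmatic-event attribute described above; the `id`/`val` field names mirror the C `cudaLaunchAttribute` layout and should be treated as assumptions here:

.. code-block:: python

    from cuda import cudart

    # The event must have timing disabled, per the attribute's contract.
    err, ev = cudart.cudaEventCreateWithFlags(cudart.cudaEventDisableTiming)

    attr = cudart.cudaLaunchAttribute()
    attr.id = cudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent
    attr.val.programmaticEvent.event = ev
    attr.val.programmaticEvent.triggerAtBlockStart = 0
    # attr would then populate the attrs/numAttrs fields of a launch
    # configuration passed to the extended launch APIs.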
autoattribute:: cuda.cudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNone - - - The GPU is not a NUMA node - - - .. autoattribute:: cuda.cudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode - - - The GPU is a NUMA node, cudaDevAttrNumaId contains its NUMA ID - -.. autoclass:: cuda.cudart.cudaAsyncNotificationType - - .. autoattribute:: cuda.cudart.cudaAsyncNotificationType.cudaAsyncNotificationTypeOverBudget - -.. autoclass:: cuda.cudart.cudaSurfaceBoundaryMode - - .. autoattribute:: cuda.cudart.cudaSurfaceBoundaryMode.cudaBoundaryModeZero - - - Zero boundary mode - - - .. autoattribute:: cuda.cudart.cudaSurfaceBoundaryMode.cudaBoundaryModeClamp - - - Clamp boundary mode - - - .. autoattribute:: cuda.cudart.cudaSurfaceBoundaryMode.cudaBoundaryModeTrap - - - Trap boundary mode - -.. autoclass:: cuda.cudart.cudaSurfaceFormatMode - - .. autoattribute:: cuda.cudart.cudaSurfaceFormatMode.cudaFormatModeForced - - - Forced format mode - - - .. autoattribute:: cuda.cudart.cudaSurfaceFormatMode.cudaFormatModeAuto - - - Auto format mode - -.. autoclass:: cuda.cudart.cudaTextureAddressMode - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeWrap - - - Wrapping address mode - - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeClamp - - - Clamp to edge address mode - - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeMirror - - - Mirror address mode - - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeBorder - - - Border address mode - -.. autoclass:: cuda.cudart.cudaTextureFilterMode - - .. autoattribute:: cuda.cudart.cudaTextureFilterMode.cudaFilterModePoint - - - Point filter mode - - - .. autoattribute:: cuda.cudart.cudaTextureFilterMode.cudaFilterModeLinear - - - Linear filter mode - -.. autoclass:: cuda.cudart.cudaTextureReadMode - - .. autoattribute:: cuda.cudart.cudaTextureReadMode.cudaReadModeElementType - - - Read texture as specified element type - - - .. autoattribute:: cuda.cudart.cudaTextureReadMode.cudaReadModeNormalizedFloat - - - Read texture as normalized float - -.. autoclass:: cuda.cudart.cudaEglPlaneDesc -.. autoclass:: cuda.cudart.cudaEglFrame -.. autoclass:: cuda.cudart.cudaEglStreamConnection -.. autoclass:: cuda.cudart.cudaArray_t -.. autoclass:: cuda.cudart.cudaArray_const_t -.. autoclass:: cuda.cudart.cudaMipmappedArray_t -.. autoclass:: cuda.cudart.cudaMipmappedArray_const_t -.. autoclass:: cuda.cudart.cudaHostFn_t -.. autoclass:: cuda.cudart.CUuuid -.. autoclass:: cuda.cudart.cudaUUID_t -.. autoclass:: cuda.cudart.cudaIpcEventHandle_t -.. autoclass:: cuda.cudart.cudaIpcMemHandle_t -.. autoclass:: cuda.cudart.cudaMemFabricHandle_t -.. autoclass:: cuda.cudart.cudaStream_t -.. autoclass:: cuda.cudart.cudaEvent_t -.. autoclass:: cuda.cudart.cudaGraphicsResource_t -.. autoclass:: cuda.cudart.cudaExternalMemory_t -.. autoclass:: cuda.cudart.cudaExternalSemaphore_t -.. autoclass:: cuda.cudart.cudaGraph_t -.. autoclass:: cuda.cudart.cudaGraphNode_t -.. autoclass:: cuda.cudart.cudaUserObject_t -.. autoclass:: cuda.cudart.cudaGraphConditionalHandle -.. autoclass:: cuda.cudart.cudaFunction_t -.. autoclass:: cuda.cudart.cudaKernel_t -.. autoclass:: cuda.cudart.cudaMemPool_t -.. autoclass:: cuda.cudart.cudaGraphEdgeData -.. autoclass:: cuda.cudart.cudaGraphExec_t -.. autoclass:: cuda.cudart.cudaGraphInstantiateParams -.. autoclass:: cuda.cudart.cudaGraphExecUpdateResultInfo -.. autoclass:: cuda.cudart.cudaGraphDeviceNode_t -.. autoclass:: cuda.cudart.cudaLaunchMemSyncDomainMap -.. 
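The surface and texture enums above are consumed through descriptor structs. A sketch of filling a `cudaTextureDesc`; whether the `addressMode` array field accepts per-index assignment is an assumption of this sketch:

.. code-block:: python

    from cuda import cudart

    tex_desc = cudart.cudaTextureDesc()
    tex_desc.addressMode[0] = cudart.cudaTextureAddressMode.cudaAddressModeClamp
    tex_desc.addressMode[1] = cudart.cudaTextureAddressMode.cudaAddressModeClamp
    tex_desc.filterMode = cudart.cudaTextureFilterMode.cudaFilterModeLinear
    tex_desc.readMode = cudart.cudaTextureReadMode.cudaReadModeNormalizedFloat
    tex_desc.normalizedCoords = 1
    # Paired with a cudaResourceDesc, this would be passed to
    # cudaCreateTextureObject to obtain a cudaTextureObject_t.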
autoclass:: cuda.cudart.cudaLaunchAttributeValue -.. autoclass:: cuda.cudart.cudaLaunchAttribute -.. autoclass:: cuda.cudart.cudaAsyncCallbackHandle_t -.. autoclass:: cuda.cudart.cudaAsyncNotificationInfo_t -.. autoclass:: cuda.cudart.cudaAsyncCallback -.. autoclass:: cuda.cudart.cudaSurfaceObject_t -.. autoclass:: cuda.cudart.cudaTextureObject_t -.. autoattribute:: cuda.cudart.CUDA_EGL_MAX_PLANES - - Maximum number of planes per frame - -.. autoattribute:: cuda.cudart.cudaHostAllocDefault - - Default page-locked allocation flag - -.. autoattribute:: cuda.cudart.cudaHostAllocPortable - - Pinned memory accessible by all CUDA contexts - -.. autoattribute:: cuda.cudart.cudaHostAllocMapped - - Map allocation into device space - -.. autoattribute:: cuda.cudart.cudaHostAllocWriteCombined - - Write-combined memory - -.. autoattribute:: cuda.cudart.cudaHostRegisterDefault - - Default host memory registration flag - -.. autoattribute:: cuda.cudart.cudaHostRegisterPortable - - Pinned memory accessible by all CUDA contexts - -.. autoattribute:: cuda.cudart.cudaHostRegisterMapped - - Map registered memory into device space - -.. autoattribute:: cuda.cudart.cudaHostRegisterIoMemory - - Memory-mapped I/O space - -.. autoattribute:: cuda.cudart.cudaHostRegisterReadOnly - - Memory-mapped read-only - -.. autoattribute:: cuda.cudart.cudaPeerAccessDefault - - Default peer addressing enable flag - -.. autoattribute:: cuda.cudart.cudaStreamDefault - - Default stream flag - -.. autoattribute:: cuda.cudart.cudaStreamNonBlocking - - Stream does not synchronize with stream 0 (the NULL stream) - -.. autoattribute:: cuda.cudart.cudaStreamLegacy - - Legacy stream handle - - - - Stream handle that can be passed as a cudaStream_t to use an implicit stream with legacy synchronization behavior. - - - - See details of the synchronization behavior. - -.. autoattribute:: cuda.cudart.cudaStreamPerThread - - Per-thread stream handle - - - - Stream handle that can be passed as a cudaStream_t to use an implicit stream with per-thread synchronization behavior. - - - - See details of the synchronization behavior. - -.. autoattribute:: cuda.cudart.cudaEventDefault - - Default event flag - -.. autoattribute:: cuda.cudart.cudaEventBlockingSync - - Event uses blocking synchronization - -.. autoattribute:: cuda.cudart.cudaEventDisableTiming - - Event will not record timing data - -.. autoattribute:: cuda.cudart.cudaEventInterprocess - - Event is suitable for interprocess use. cudaEventDisableTiming must be set - -.. autoattribute:: cuda.cudart.cudaEventRecordDefault - - Default event record flag - -.. autoattribute:: cuda.cudart.cudaEventRecordExternal - - Event is captured in the graph as an external event node when performing stream capture - -.. autoattribute:: cuda.cudart.cudaEventWaitDefault - - Default event wait flag - -.. autoattribute:: cuda.cudart.cudaEventWaitExternal - - Event is captured in the graph as an external event node when performing stream capture - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleAuto - - Device flag - Automatic scheduling - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleSpin - - Device flag - Spin default scheduling - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleYield - - Device flag - Yield default scheduling - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleBlockingSync - - Device flag - Use blocking synchronization - -.. autoattribute:: cuda.cudart.cudaDeviceBlockingSync - - Device flag - Use blocking synchronization [Deprecated] - -..
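These flag constants are exposed as module-level integers in the bindings, so they can be combined with a bitwise OR. A short sketch:

.. code-block:: python

    from cuda import cudart

    # Pinned host buffer, mapped into the device address space.
    err, host_ptr = cudart.cudaHostAlloc(
        1 << 20, cudart.cudaHostAllocMapped | cudart.cudaHostAllocPortable)

    # A stream that does not synchronize with the NULL stream.
    err, stream = cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)

    # A timing-free event, as required for interprocess or programmatic use.
    err, ev = cudart.cudaEventCreateWithFlags(cudart.cudaEventDisableTiming)

    err, = cudart.cudaEventRecord(ev, stream)
    err, = cudart.cudaEventSynchronize(ev)
    err, = cudart.cudaFreeHost(host_ptr)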
autoattribute:: cuda.cudart.cudaDeviceScheduleMask - - Device schedule flags mask - -.. autoattribute:: cuda.cudart.cudaDeviceMapHost - - Device flag - Support mapped pinned allocations - -.. autoattribute:: cuda.cudart.cudaDeviceLmemResizeToMax - - Device flag - Keep local memory allocation after launch - -.. autoattribute:: cuda.cudart.cudaDeviceSyncMemops - - Device flag - Ensure synchronous memory operations on this context will synchronize - -.. autoattribute:: cuda.cudart.cudaDeviceMask - - Device flags mask - -.. autoattribute:: cuda.cudart.cudaArrayDefault - - Default CUDA array allocation flag - -.. autoattribute:: cuda.cudart.cudaArrayLayered - - Must be set in cudaMalloc3DArray to create a layered CUDA array - -.. autoattribute:: cuda.cudart.cudaArraySurfaceLoadStore - - Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array - -.. autoattribute:: cuda.cudart.cudaArrayCubemap - - Must be set in cudaMalloc3DArray to create a cubemap CUDA array - -.. autoattribute:: cuda.cudart.cudaArrayTextureGather - - Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array - -.. autoattribute:: cuda.cudart.cudaArrayColorAttachment - - Must be set in cudaExternalMemoryGetMappedMipmappedArray if the mipmapped array is used as a color target in a graphics API - -.. autoattribute:: cuda.cudart.cudaArraySparse - - Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA mipmapped array - -.. autoattribute:: cuda.cudart.cudaArrayDeferredMapping - - Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a deferred mapping CUDA array or CUDA mipmapped array - -.. autoattribute:: cuda.cudart.cudaIpcMemLazyEnablePeerAccess - - Automatically enable peer access between remote devices as needed - -.. autoattribute:: cuda.cudart.cudaMemAttachGlobal - - Memory can be accessed by any stream on any device - -.. autoattribute:: cuda.cudart.cudaMemAttachHost - - Memory cannot be accessed by any stream on any device - -.. autoattribute:: cuda.cudart.cudaMemAttachSingle - - Memory can only be accessed by a single stream on the associated device - -.. autoattribute:: cuda.cudart.cudaOccupancyDefault - - Default behavior - -.. autoattribute:: cuda.cudart.cudaOccupancyDisableCachingOverride - - Assume global caching is enabled and cannot be automatically turned off - -.. autoattribute:: cuda.cudart.cudaCpuDeviceId - - Device id that represents the CPU - -.. autoattribute:: cuda.cudart.cudaInvalidDeviceId - - Device id that represents an invalid device - -.. autoattribute:: cuda.cudart.cudaInitDeviceFlagsAreValid - - Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice call - -.. autoattribute:: cuda.cudart.cudaCooperativeLaunchMultiDeviceNoPreSync - - If set, each kernel launched as part of :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. - -.. autoattribute:: cuda.cudart.cudaCooperativeLaunchMultiDeviceNoPostSync - - If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. - -.. 
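The device-flag and memory-attach constants combine the same way. A sketch, with the caveat that `cudaSetDeviceFlags` must run before the device's context exists:

.. code-block:: python

    from cuda import cudart

    # Choose blocking synchronization and mapped pinned allocations up front.
    err, = cudart.cudaSetDeviceFlags(
        cudart.cudaDeviceScheduleBlockingSync | cudart.cudaDeviceMapHost)

    # Managed allocation attached to the host, then narrowed to one stream.
    err, stream = cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)
    err, managed = cudart.cudaMallocManaged(1 << 20, cudart.cudaMemAttachHost)
    err, = cudart.cudaStreamAttachMemAsync(
        stream, managed, 0, cudart.cudaMemAttachSingle)  # 0 = entire region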
autoattribute:: cuda.cudart.cudaArraySparsePropertiesSingleMipTail - - Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers - -.. autoattribute:: cuda.cudart.CUDART_CB -.. autoattribute:: cuda.cudart.CU_UUID_HAS_BEEN_DEFINED - - CUDA UUID types - -.. autoattribute:: cuda.cudart.CUDA_IPC_HANDLE_SIZE - - CUDA IPC Handle Size - -.. autoattribute:: cuda.cudart.cudaExternalMemoryDedicated - - Indicates that the external memory object is a dedicated resource - -.. autoattribute:: cuda.cudart.cudaExternalSemaphoreSignalSkipNvSciBufMemSync - - When the `flags` parameter of :py:obj:`~.cudaExternalSemaphoreSignalParams` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -.. autoattribute:: cuda.cudart.cudaExternalSemaphoreWaitSkipNvSciBufMemSync - - When the `flags` parameter of :py:obj:`~.cudaExternalSemaphoreWaitParams` contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -.. autoattribute:: cuda.cudart.cudaNvSciSyncAttrSignal - - When the `flags` parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs signaler-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cudart.cudaNvSciSyncAttrWait - - When the `flags` parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs waiter-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cudart.cudaGraphKernelNodePortDefault - - This port activates when the kernel has finished executing. - -.. autoattribute:: cuda.cudart.cudaGraphKernelNodePortProgrammatic - - This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.cudaGraphDependencyTypeProgrammatic`. See also :py:obj:`~.cudaLaunchAttributeProgrammaticEvent`. - -.. autoattribute:: cuda.cudart.cudaGraphKernelNodePortLaunchCompletion - - This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.cudaLaunchAttributeLaunchCompletionEvent`. - -.. autoattribute:: cuda.cudart.cudaStreamAttrID -.. autoattribute:: cuda.cudart.cudaStreamAttributeAccessPolicyWindow -.. autoattribute:: cuda.cudart.cudaStreamAttributeSynchronizationPolicy -.. autoattribute:: cuda.cudart.cudaStreamAttributeMemSyncDomainMap -.. autoattribute:: cuda.cudart.cudaStreamAttributeMemSyncDomain -.. autoattribute:: cuda.cudart.cudaStreamAttributePriority -.. autoattribute:: cuda.cudart.cudaStreamAttrValue -.. autoattribute:: cuda.cudart.cudaKernelNodeAttrID -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeAccessPolicyWindow -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeCooperative -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributePriority -..
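The kernel-node ports above are used by filling a `cudaGraphEdgeData` for a dependency edge. A sketch; the `from_port`/`type` field names mirror the C struct and are assumptions here:

.. code-block:: python

    from cuda import cudart

    edge = cudart.cudaGraphEdgeData()
    edge.from_port = cudart.cudaGraphKernelNodePortProgrammatic
    edge.type = cudart.cudaGraphDependencyType.cudaGraphDependencyTypeProgrammatic
    # This edge data would accompany the dependency arrays passed to the
    # _v2 graph APIs that accept cudaGraphEdgeData.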
autoattribute:: cuda.cudart.cudaKernelNodeAttributeClusterDimension -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeClusterSchedulingPolicyPreference -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeMemSyncDomainMap -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeMemSyncDomain -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributePreferredSharedMemoryCarveout -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeDeviceUpdatableKernelNode -.. autoattribute:: cuda.cudart.cudaKernelNodeAttrValue -.. autoattribute:: cuda.cudart.cudaSurfaceType1D -.. autoattribute:: cuda.cudart.cudaSurfaceType2D -.. autoattribute:: cuda.cudart.cudaSurfaceType3D -.. autoattribute:: cuda.cudart.cudaSurfaceTypeCubemap -.. autoattribute:: cuda.cudart.cudaSurfaceType1DLayered -.. autoattribute:: cuda.cudart.cudaSurfaceType2DLayered -.. autoattribute:: cuda.cudart.cudaSurfaceTypeCubemapLayered -.. autoattribute:: cuda.cudart.cudaTextureType1D -.. autoattribute:: cuda.cudart.cudaTextureType2D -.. autoattribute:: cuda.cudart.cudaTextureType3D -.. autoattribute:: cuda.cudart.cudaTextureTypeCubemap -.. autoattribute:: cuda.cudart.cudaTextureType1DLayered -.. autoattribute:: cuda.cudart.cudaTextureType2DLayered -.. autoattribute:: cuda.cudart.cudaTextureTypeCubemapLayered diff --git a/docs/_sources/module/driver.rst.txt b/docs/_sources/module/driver.rst.txt new file mode 100644 index 00000000..694c81c7 --- /dev/null +++ b/docs/_sources/module/driver.rst.txt @@ -0,0 +1,6792 @@ +------ +driver +------ + +Data types used by CUDA driver +------------------------------ + + + +.. autoclass:: cuda.bindings.driver.CUuuid_st +.. autoclass:: cuda.bindings.driver.CUmemFabricHandle_st +.. autoclass:: cuda.bindings.driver.CUipcEventHandle_st +.. autoclass:: cuda.bindings.driver.CUipcMemHandle_st +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpParams_union +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUasyncNotificationInfo_st +.. autoclass:: cuda.bindings.driver.CUdevprop_st +.. autoclass:: cuda.bindings.driver.CUaccessPolicyWindow_st +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_CONDITIONAL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUgraphEdgeData_st +.. autoclass:: cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUlaunchMemSyncDomainMap_st +.. autoclass:: cuda.bindings.driver.CUlaunchAttributeValue_union +.. autoclass:: cuda.bindings.driver.CUlaunchAttribute_st +.. autoclass:: cuda.bindings.driver.CUlaunchConfig_st +.. autoclass:: cuda.bindings.driver.CUexecAffinitySmCount_st +.. autoclass:: cuda.bindings.driver.CUexecAffinityParam_st +.. autoclass:: cuda.bindings.driver.CUctxCigParam_st +.. autoclass:: cuda.bindings.driver.CUctxCreateParams_st +.. autoclass:: cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY2D_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_st +.. 
autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_PEER_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_st +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_TEXTURE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_st +.. autoclass:: cuda.bindings.driver.CUtensorMap_st +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st +.. autoclass:: cuda.bindings.driver.CUDA_LAUNCH_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUarrayMapInfo_st +.. autoclass:: cuda.bindings.driver.CUmemLocation_st +.. autoclass:: cuda.bindings.driver.CUmemAllocationProp_st +.. autoclass:: cuda.bindings.driver.CUmulticastObjectProp_st +.. autoclass:: cuda.bindings.driver.CUmemAccessDesc_st +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResultInfo_st +.. autoclass:: cuda.bindings.driver.CUmemPoolProps_st +.. autoclass:: cuda.bindings.driver.CUmemPoolPtrExportData_st +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUgraphNodeParams_st +.. autoclass:: cuda.bindings.driver.CUeglFrame_st +.. autoclass:: cuda.bindings.driver.CUipcMem_flags + + .. autoattribute:: cuda.bindings.driver.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS + + + Automatically enable peer access between remote devices as needed + +.. autoclass:: cuda.bindings.driver.CUmemAttach_flags + + .. autoattribute:: cuda.bindings.driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL + + + Memory can be accessed by any stream on any device + + + .. autoattribute:: cuda.bindings.driver.CUmemAttach_flags.CU_MEM_ATTACH_HOST + + + Memory cannot be accessed by any stream on any device + + + .. autoattribute:: cuda.bindings.driver.CUmemAttach_flags.CU_MEM_ATTACH_SINGLE + + + Memory can only be accessed by a single stream on the associated device + +.. autoclass:: cuda.bindings.driver.CUctx_flags + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_SPIN + + + Set spin as default scheduling + + + .. 
autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_YIELD + + + Set yield as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_MASK + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_MAP_HOST + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_LMEM_RESIZE_TO_MAX + + + Keep local memory allocation after launch + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_COREDUMP_ENABLE + + + Trigger coredumps from exceptions in this context + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_USER_COREDUMP_ENABLE + + + Enable user pipe to trigger coredumps in this context + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SYNC_MEMOPS + + + Ensure synchronous memory operations on this context will synchronize + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_FLAGS_MASK + +.. autoclass:: cuda.bindings.driver.CUevent_sched_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_SPIN + + + Set spin as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_YIELD + + + Set yield as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + +.. autoclass:: cuda.bindings.driver.cl_event_flags + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_SPIN + + + Set spin as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_YIELD + + + Set yield as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + +.. autoclass:: cuda.bindings.driver.cl_context_flags + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_SPIN + + + Set spin as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_YIELD + + + Set yield as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + +.. autoclass:: cuda.bindings.driver.CUstream_flags + + .. autoattribute:: cuda.bindings.driver.CUstream_flags.CU_STREAM_DEFAULT + + + Default stream flag + + + .. autoattribute:: cuda.bindings.driver.CUstream_flags.CU_STREAM_NON_BLOCKING + + + Stream does not synchronize with stream 0 (the NULL stream) + +.. autoclass:: cuda.bindings.driver.CUevent_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_DEFAULT + + + Default event flag + + + .. autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_BLOCKING_SYNC + + + Event uses blocking synchronization + + + .. 
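On the driver side, the context flags are consumed by `cuCtxCreate`. A minimal sketch (passing the enum members' integer values; whether the wrappers also accept the enum objects directly is not assumed):

.. code-block:: python

    from cuda.bindings import driver

    err, = driver.cuInit(0)
    err, dev = driver.cuDeviceGet(0)
    flags = (driver.CUctx_flags.CU_CTX_SCHED_BLOCKING_SYNC.value
             | driver.CUctx_flags.CU_CTX_LMEM_RESIZE_TO_MAX.value)
    err, ctx = driver.cuCtxCreate(flags, dev)
    # ... work ...
    err, = driver.cuCtxDestroy(ctx)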
autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_DISABLE_TIMING + + + Event will not record timing data + + + .. autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_INTERPROCESS + + + Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set + +.. autoclass:: cuda.bindings.driver.CUevent_record_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_record_flags.CU_EVENT_RECORD_DEFAULT + + + Default event record flag + + + .. autoattribute:: cuda.bindings.driver.CUevent_record_flags.CU_EVENT_RECORD_EXTERNAL + + + When using stream capture, create an event record node instead of the default behavior. This flag is invalid when used outside of capture. + +.. autoclass:: cuda.bindings.driver.CUevent_wait_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_wait_flags.CU_EVENT_WAIT_DEFAULT + + + Default event wait flag + + + .. autoattribute:: cuda.bindings.driver.CUevent_wait_flags.CU_EVENT_WAIT_EXTERNAL + + + When using stream capture, create an event wait node instead of the default behavior. This flag is invalid when used outside of capture. + +.. autoclass:: cuda.bindings.driver.CUstreamWaitValue_flags + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ + + + Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit values). Note this is a cyclic comparison which ignores wraparound. (Default behavior.) + + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_EQ + + + Wait until *addr == value. + + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_AND + + + Wait until (*addr & value) != 0. + + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_NOR + + + Wait until ~(*addr | value) != 0. Support for this operation can be queried with :py:obj:`~.cuDeviceGetAttribute()` and :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR`. + + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_FLUSH + + + Follow the wait operation with a flush of outstanding remote writes. This means that, if a remote write operation is guaranteed to have reached the device before the wait can be satisfied, that write is guaranteed to be visible to downstream device work. The device is permitted to reorder remote writes internally. For example, this flag would be required if two remote writes arrive in a defined order, the wait is satisfied by the second write, and downstream work needs to observe the first write. Support for this operation is restricted to selected platforms and can be queried with :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES`. + +.. autoclass:: cuda.bindings.driver.CUstreamWriteValue_flags + + .. autoattribute:: cuda.bindings.driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT + + + Default behavior + + + .. autoattribute:: cuda.bindings.driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER + + + Permits the write to be reordered with writes which were issued before it, as a performance optimization. Normally, :py:obj:`~.cuStreamWriteValue32` will provide a memory fence before the write, which has similar semantics to __threadfence_system() but is scoped to the stream rather than a CUDA thread. This flag is not supported in the v2 API. + +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpType + + .. 
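The wait-value and write-value flags drive the stream memory operations. A sketch of a cross-stream handshake; note these operations require device support (queryable via the device attributes referenced above) and an initialized context:

.. code-block:: python

    from cuda.bindings import driver

    err, waiter = driver.cuStreamCreate(0)
    err, signaler = driver.cuStreamCreate(0)
    err, flag_ptr = driver.cuMemAlloc(4)
    err, = driver.cuMemsetD32(flag_ptr, 0, 1)  # start unsignaled

    # Block `waiter` until *flag_ptr >= 1 (cyclic 32-bit comparison).
    err, = driver.cuStreamWaitValue32(
        waiter, flag_ptr, 1,
        driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ.value)

    # Release it from the other stream.
    err, = driver.cuStreamWriteValue32(
        signaler, flag_ptr, 1,
        driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT.value)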
autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 + + + Represents a :py:obj:`~.cuStreamWaitValue32` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_32 + + + Represents a :py:obj:`~.cuStreamWriteValue32` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_64 + + + Represents a :py:obj:`~.cuStreamWaitValue64` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_64 + + + Represents a :py:obj:`~.cuStreamWriteValue64` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_BARRIER + + + Insert a memory barrier of the specified type + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES + + + This has the same effect as :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH`, but as a standalone operation. + +.. autoclass:: cuda.bindings.driver.CUstreamMemoryBarrier_flags + + .. autoattribute:: cuda.bindings.driver.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_SYS + + + System-wide memory barrier. + + + .. autoattribute:: cuda.bindings.driver.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_GPU + + + Limit memory barrier scope to the GPU. + +.. autoclass:: cuda.bindings.driver.CUoccupancy_flags + + .. autoattribute:: cuda.bindings.driver.CUoccupancy_flags.CU_OCCUPANCY_DEFAULT + + + Default behavior + + + .. autoattribute:: cuda.bindings.driver.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE + + + Assume global caching is enabled and cannot be automatically turned off + +.. autoclass:: cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags + + .. autoattribute:: cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_ADD_CAPTURE_DEPENDENCIES + + + Add new nodes to the dependency set + + + .. autoattribute:: cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_SET_CAPTURE_DEPENDENCIES + + + Replace the dependency set with the new nodes + +.. autoclass:: cuda.bindings.driver.CUasyncNotificationType + + .. autoattribute:: cuda.bindings.driver.CUasyncNotificationType.CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET + +.. autoclass:: cuda.bindings.driver.CUarray_format + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT8 + + + Unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT16 + + + Unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT32 + + + Unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SIGNED_INT8 + + + Signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SIGNED_INT16 + + + Signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SIGNED_INT32 + + + Signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_HALF + + + 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_FLOAT + + + 32-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_NV12 + + + 8-bit YUV planar format, with 4:2:0 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT8X1 + + + 1 channel unsigned 8-bit normalized integer + + + .. 
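The occupancy flags adjust the occupancy calculator's caching assumptions. A sketch; `kernel` stands in for a `CUfunction` obtained elsewhere (e.g. via `cuModuleGetFunction`), so this fragment is illustrative rather than directly runnable:

.. code-block:: python

    from cuda.bindings import driver

    # kernel: a CUfunction loaded earlier (placeholder assumption).
    block_size, dyn_smem = 256, 0
    err, num_blocks = driver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
        kernel, block_size, dyn_smem,
        driver.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE.value)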
autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT8X2 + + + 2 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT8X4 + + + 4 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT16X1 + + + 1 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT16X2 + + + 2 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT16X4 + + + 4 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT8X1 + + + 1 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT8X2 + + + 2 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT8X4 + + + 4 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT16X1 + + + 1 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT16X2 + + + 2 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT16X4 + + + 4 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC1_UNORM + + + 4 channel unsigned normalized block-compressed (BC1 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC1_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC2_UNORM + + + 4 channel unsigned normalized block-compressed (BC2 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC2_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC3_UNORM + + + 4 channel unsigned normalized block-compressed (BC3 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC3_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC4_UNORM + + + 1 channel unsigned normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC4_SNORM + + + 1 channel signed normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC5_UNORM + + + 2 channel unsigned normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC5_SNORM + + + 2 channel signed normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC6H_UF16 + + + 3 channel unsigned half-float block-compressed (BC6H compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC6H_SF16 + + + 3 channel signed half-float block-compressed (BC6H compression) format + + + .. 
autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC7_UNORM + + + 4 channel unsigned normalized block-compressed (BC7 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC7_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P010 + + + 10-bit YUV planar format, with 4:2:0 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P016 + + + 16-bit YUV planar format, with 4:2:0 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_NV16 + + + 8-bit YUV planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P210 + + + 10-bit YUV planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P216 + + + 16-bit YUV planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_YUY2 + + + 2 channel, 8-bit YUV packed planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y210 + + + 2 channel, 10-bit YUV packed planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y216 + + + 2 channel, 16-bit YUV packed planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_AYUV + + + 4 channel, 8-bit YUV packed planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y410 + + + 10-bit YUV packed planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y416 + + + 4 channel, 12-bit YUV packed planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y444_PLANAR8 + + + 3 channel 8-bit YUV planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y444_PLANAR10 + + + 3 channel 10-bit YUV planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_MAX + +.. autoclass:: cuda.bindings.driver.CUaddress_mode + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_WRAP + + + Wrapping address mode + + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP + + + Clamp to edge address mode + + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_MIRROR + + + Mirror address mode + + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_BORDER + + + Border address mode + +.. autoclass:: cuda.bindings.driver.CUfilter_mode + + .. autoattribute:: cuda.bindings.driver.CUfilter_mode.CU_TR_FILTER_MODE_POINT + + + Point filter mode + + + .. autoattribute:: cuda.bindings.driver.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR + + + Linear filter mode + +.. autoclass:: cuda.bindings.driver.CUdevice_attribute + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK + + + Maximum number of threads per block + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X + + + Maximum block dimension X + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y + + + Maximum block dimension Y + + + .. 
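Array formats are consumed through `CUDA_ARRAY_DESCRIPTOR`. A sketch of creating a single-channel float 2D array (a current context is assumed):

.. code-block:: python

    from cuda.bindings import driver

    desc = driver.CUDA_ARRAY_DESCRIPTOR()
    desc.Width = 1024
    desc.Height = 1024
    desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1
    err, cu_array = driver.cuArrayCreate(desc)
    # ... bind to texture/surface APIs ...
    err, = driver.cuArrayDestroy(cu_array)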
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z + + + Maximum block dimension Z + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X + + + Maximum grid dimension X + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y + + + Maximum grid dimension Y + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z + + + Maximum grid dimension Z + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK + + + Maximum shared memory available per block in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY + + + Memory available on device for constant variables in a CUDA C kernel in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_WARP_SIZE + + + Warp size in threads + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PITCH + + + Maximum pitch in bytes allowed by memory copies + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK + + + Maximum number of 32-bit registers available per block + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLOCK_RATE + + + Typical clock frequency in kilohertz + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT + + + Alignment requirement for textures + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP + + + Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT + + + Number of multiprocessors on device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT + + + Specifies whether there is a run time limit on kernels + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_INTEGRATED + + + Device is integrated with host memory + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY + + + Device can map host memory into CUDA address space + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE + + + Compute mode (See :py:obj:`~.CUcomputemode` for details) + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH + + + Maximum 1D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH + + + Maximum 2D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT + + + Maximum 2D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH + + + Maximum 3D texture width + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT + + + Maximum 3D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH + + + Maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH + + + Maximum 2D layered texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT + + + Maximum 2D layered texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS + + + Maximum layers in a 2D layered texture + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT + + + Alignment requirement for surfaces + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS + + + Device can possibly execute multiple kernels concurrently + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ECC_ENABLED + + + Device has ECC support enabled + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID + + + PCI bus ID of the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID + + + PCI device ID of the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TCC_DRIVER + + + Device is using TCC driver model + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE + + + Peak memory clock frequency in kilohertz + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH + + + Global memory bus width in bits + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE + + + Size of L2 cache in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR + + + Maximum resident threads per multiprocessor + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT + + + Number of asynchronous engines + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING + + + Device shares a unified address space with the host + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH + + + Maximum 1D layered texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS + + + Maximum layers in a 1D layered texture + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER + + + Deprecated, do not use. + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH + + + Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT + + + Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE + + + Alternate maximum 3D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE + + + Alternate maximum 3D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE + + + Alternate maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID + + + PCI domain ID of the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT + + + Pitch alignment requirement for textures + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH + + + Maximum cubemap texture width/height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH + + + Maximum cubemap layered texture width/height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS + + + Maximum layers in a cubemap layered texture + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH + + + Maximum 1D surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH + + + Maximum 2D surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT + + + Maximum 2D surface height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH + + + Maximum 3D surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT + + + Maximum 3D surface height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH + + + Maximum 3D surface depth + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH + + + Maximum 1D layered surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS + + + Maximum layers in a 1D layered surface + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH + + + Maximum 2D layered surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT + + + Maximum 2D layered surface height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS + + + Maximum layers in a 2D layered surface + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH + + + Maximum cubemap surface width + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH + + + Maximum cubemap layered surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS + + + Maximum layers in a cubemap layered surface + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH + + + Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or :py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth()` instead. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH + + + Maximum 2D linear texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT + + + Maximum 2D linear texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH + + + Maximum 2D linear texture pitch in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH + + + Maximum mipmapped 2D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT + + + Maximum mipmapped 2D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR + + + Major compute capability version number + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR + + + Minor compute capability version number + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH + + + Maximum mipmapped 1D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED + + + Device supports stream priorities + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED + + + Device supports caching globals in L1 + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED + + + Device supports caching locals in L1 + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + + + Maximum shared memory available per multiprocessor in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR + + + Maximum number of 32-bit registers available per multiprocessor + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY + + + Device can allocate managed memory on this system + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD + + + Device is on a multi-GPU board + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID + + + Unique id for a group of devices on the same multi-GPU board + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED + + + Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware) + + + .. 
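For readers new to these bindings, values from this enum are read back with :py:obj:`~.cuDeviceGetAttribute`. A minimal sketch, assuming device 0 exists and reducing error handling to a bare comparison against the returned :py:obj:`~.CUresult` (a fuller checker in the spirit of the bundled examples is sketched at the end of this section):

.. code-block:: python

    from cuda.bindings import driver

    # Initialize the driver API and take the first device.
    err, = driver.cuInit(0)
    assert err == driver.CUresult.CUDA_SUCCESS
    err, dev = driver.cuDeviceGet(0)

    # Read the compute capability through CUdevice_attribute.
    attr = driver.CUdevice_attribute
    err, major = driver.cuDeviceGetAttribute(
        attr.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, dev)
    err, minor = driver.cuDeviceGetAttribute(
        attr.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, dev)
    print(f"device 0 reports sm_{major}{minor}")

Every binding returns a tuple whose first element is the :py:obj:`~.CUresult`; the remaining elements carry the C out-parameters.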
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO + + + Ratio of single precision performance (in floating-point operations per second) to double precision performance + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS + + + Device supports coherently accessing pageable memory without calling cudaHostRegister on it + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + + + Device can coherently access managed memory concurrently with the CPU + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED + + + Device supports compute preemption. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM + + + Device can access host registered memory at the same virtual address as the CPU + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 + + + Deprecated, along with v1 MemOps API, :py:obj:`~.cuStreamBatchMemOp` and related APIs are supported. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 + + + Deprecated, along with v1 MemOps API, 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related APIs. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 + + + Deprecated, along with v1 MemOps API, :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH + + + Device supports launching cooperative kernels via :py:obj:`~.cuLaunchCooperativeKernel` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH + + + Deprecated, :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` is deprecated. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN + + + Maximum optin shared memory per block + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES + + + The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. See :py:obj:`~.Stream Memory Operations` for additional details. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED + + + Device supports host memory registration via :py:obj:`~.cudaHostRegister`. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES + + + Device accesses pageable memory via the host's page tables. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST + + + The host can directly access managed memory on the device without migration. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED + + + Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED + + + Device supports virtual memory management APIs like :py:obj:`~.cuMemAddressReserve`, :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemMap` and related APIs + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED + + + Device supports exporting memory to a posix file descriptor with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED + + + Device supports exporting memory to a Win32 NT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED + + + Device supports exporting memory to a Win32 KMT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR + + + Maximum number of blocks per multiprocessor + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED + + + Device supports compression of memory + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE + + + Maximum L2 persisting lines capacity setting in bytes. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE + + + Maximum value of :py:obj:`~.CUaccessPolicyWindow.num_bytes`. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED + + + Device supports specifying the GPUDirect RDMA flag with :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK + + + Shared memory reserved by CUDA driver per block in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED + + + Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED + + + Device supports using the :py:obj:`~.cuMemHostRegister` flag :py:obj:`~.CU_MEMHOSTREGISTER_READ_ONLY` to register memory that must be mapped as read-only to the GPU + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED + + + External timeline semaphore interop is supported on the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED + + + Device supports using the :py:obj:`~.cuMemAllocAsync` and :py:obj:`~.cuMemPool` family of APIs + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED + + + Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS + + + The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the :py:obj:`~.CUflushGPUDirectRDMAWritesOptions` enum + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING + + + GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.CUGPUDirectRDMAWritesOrdering` for the numerical values returned here. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES + + + Handle types supported with mempool based IPC + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH + + + Indicates device supports cluster launch + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED + + + Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS + + + 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related MemOp APIs. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR + + + :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported by MemOp APIs. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED + + + Device supports buffer sharing with dma_buf mechanism. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + + + Device supports IPC Events. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT + + + Number of memory domains the device supports. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED + + + Device supports accessing memory using Tensor Map. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED + + + Device supports exporting memory to a fabric handle with :py:obj:`~.cuMemExportToShareableHandle()` or requested with :py:obj:`~.cuMemCreate()` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS + + + Device supports unified function pointers. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG + + + NUMA configuration of a device: value is of type :py:obj:`~.CUdeviceNumaConfig` enum + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_ID + + + NUMA node ID of the GPU memory + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED + + + Device supports switch multicast and reduction operations. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MPS_ENABLED + + + Indicates if contexts created on this device will be shared via MPS + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID + + + NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA. + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED + + + Device supports CIG with D3D12. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX + +.. autoclass:: cuda.bindings.driver.CUpointer_attribute + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_CONTEXT + + + The :py:obj:`~.CUcontext` on which a pointer was allocated or registered + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE + + + The :py:obj:`~.CUmemorytype` describing the physical location of a pointer + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_POINTER + + + The address at which a pointer's memory may be accessed on the device + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_HOST_POINTER + + + The address at which a pointer's memory may be accessed on the host + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_P2P_TOKENS + + + A pair of tokens for use with the nv-p2p.h Linux kernel interface + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS + + + Synchronize every synchronous memory operation initiated on this region + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_BUFFER_ID + + + A process-wide unique ID for an allocated memory region + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_MANAGED + + + Indicates if the pointer points to managed memory + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL + + + A device ordinal of a device on which a pointer was allocated or registered + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE + + + 1 if this pointer maps to an allocation that is suitable for :py:obj:`~.cudaIpcGetMemHandle`, 0 otherwise + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR + + + Starting address for this requested pointer + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_SIZE + + + Size of the address range for this requested pointer + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPED + + + 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES + + + Bitmask of allowed :py:obj:`~.CUmemAllocationHandleType` for this allocation + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE + + + 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS + + + Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE + + + Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL. + + + .. 
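The pointer attributes above are queried with :py:obj:`~.cuPointerGetAttribute`. A minimal sketch, assuming a context has already been created on device 0 (e.g. via :py:obj:`~.cuCtxCreate`):

.. code-block:: python

    from cuda.bindings import driver

    err, dptr = driver.cuMemAlloc(1 << 20)

    # Ask the driver where the allocation lives; for cuMemAlloc memory
    # this reports CU_MEMORYTYPE_DEVICE.
    err, memtype = driver.cuPointerGetAttribute(
        driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, dptr)
    print("memory type:", memtype)

    err, = driver.cuMemFree(dptr)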
autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_SIZE + + + Size of the actual underlying mapping that the pointer belongs to + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR + + + The start address of the mapping that the pointer belongs to + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID + + + A process-wide unique id corresponding to the physical allocation the pointer belongs to + +.. autoclass:: cuda.bindings.driver.CUfunction_attribute + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK + + + The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES + + + The size in bytes of statically-allocated shared memory required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES + + + The size in bytes of user-allocated constant memory required by this function. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES + + + The size in bytes of local memory used by each thread of this function. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS + + + The number of registers used by each thread of this function. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PTX_VERSION + + + The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_BINARY_VERSION + + + The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA + + + The attribute to indicate whether the function has been compiled with the user-specified option "-Xptxas --dlcm=ca" set. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES + + + The maximum size in bytes of dynamically-allocated shared memory that can be used by this function. If the user-specified dynamic shared memory size is larger than this value, the launch will fail. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT + + + On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. Refer to :py:obj:`~.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR`. 
This is only a hint, and the driver can choose a different ratio if required to execute the function. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET + + + If this attribute is set, the kernel must launch with a valid cluster size specified. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH + + + The required cluster width in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. + + + + If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT + + + The required cluster height in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. + + + + If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH + + + The required cluster depth in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. + + + + If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED + + + Whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform. + + + + CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device. + + + + Portable Cluster Size + + + + A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities. + + + + The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + + + The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX + +.. autoclass:: cuda.bindings.driver.CUfunc_cache + + .. autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_NONE + + + no preference for shared memory or L1 (default) + + + .. 
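The function attributes and cache preferences documented here are exercised through :py:obj:`~.cuFuncGetAttribute` and :py:obj:`~.cuFuncSetCacheConfig`. A minimal sketch; `kernel` is a hypothetical :py:obj:`~.CUfunction`, assumed to have been obtained earlier from :py:obj:`~.cuModuleGetFunction`:

.. code-block:: python

    from cuda.bindings import driver

    # `kernel` is assumed to come from, e.g.:
    #   err, kernel = driver.cuModuleGetFunction(module, b"my_kernel")
    fattr = driver.CUfunction_attribute
    err, max_threads = driver.cuFuncGetAttribute(
        fattr.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel)
    err, num_regs = driver.cuFuncGetAttribute(
        fattr.CU_FUNC_ATTRIBUTE_NUM_REGS, kernel)

    # Bias the L1/shared-memory split toward shared memory for this kernel.
    err, = driver.cuFuncSetCacheConfig(
        kernel, driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED)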
autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED + + + prefer larger shared memory and smaller L1 cache + + + .. autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_L1 + + + prefer larger L1 cache and smaller shared memory + + + .. autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_EQUAL + + + prefer equal sized L1 cache and shared memory + +.. autoclass:: cuda.bindings.driver.CUsharedconfig + + .. autoattribute:: cuda.bindings.driver.CUsharedconfig.CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE + + + set default shared memory bank size + + + .. autoattribute:: cuda.bindings.driver.CUsharedconfig.CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE + + + set shared memory bank width to four bytes + + + .. autoattribute:: cuda.bindings.driver.CUsharedconfig.CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE + + + set shared memory bank width to eight bytes + +.. autoclass:: cuda.bindings.driver.CUshared_carveout + + .. autoattribute:: cuda.bindings.driver.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_DEFAULT + + + No preference for shared memory or L1 (default) + + + .. autoattribute:: cuda.bindings.driver.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_SHARED + + + Prefer maximum available shared memory, minimum L1 cache + + + .. autoattribute:: cuda.bindings.driver.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_L1 + + + Prefer maximum available L1 cache, minimum shared memory + +.. autoclass:: cuda.bindings.driver.CUmemorytype + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_HOST + + + Host memory + + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_DEVICE + + + Device memory + + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_ARRAY + + + Array memory + + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_UNIFIED + + + Unified device or host memory + +.. autoclass:: cuda.bindings.driver.CUcomputemode + + .. autoattribute:: cuda.bindings.driver.CUcomputemode.CU_COMPUTEMODE_DEFAULT + + + Default compute mode (Multiple contexts allowed per device) + + + .. autoattribute:: cuda.bindings.driver.CUcomputemode.CU_COMPUTEMODE_PROHIBITED + + + Compute-prohibited mode (No contexts can be created on this device at this time) + + + .. autoattribute:: cuda.bindings.driver.CUcomputemode.CU_COMPUTEMODE_EXCLUSIVE_PROCESS + + + Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) + +.. autoclass:: cuda.bindings.driver.CUmem_advise + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY + + + Data will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_UNSET_READ_MOSTLY + + + Undo the effect of :py:obj:`~.CU_MEM_ADVISE_SET_READ_MOSTLY` + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_SET_PREFERRED_LOCATION + + + Set the preferred location for the data as the specified device + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION + + + Clear the preferred location for the data + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_SET_ACCESSED_BY + + + Data will be accessed by the specified device, so prevent page faults as much as possible + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_UNSET_ACCESSED_BY + + + Let the Unified Memory subsystem decide on the page faulting policy for the specified device + +.. 
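The advice values above feed :py:obj:`~.cuMemAdvise` on managed allocations. A minimal sketch, assuming an active context on a device that reports :py:obj:`~.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY`:

.. code-block:: python

    from cuda.bindings import driver

    nbytes = 1 << 20
    err, managed = driver.cuMemAllocManaged(
        nbytes, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL)

    # Mark the range read-mostly so the driver may replicate pages;
    # device 0 is named as the expected reader.
    err, = driver.cuMemAdvise(
        managed, nbytes, driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, 0)

    err, = driver.cuMemFree(managed)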
autoclass:: cuda.bindings.driver.CUmem_range_attribute + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY + + + Whether the range will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION + + + The preferred location of the range + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY + + + Memory range has :py:obj:`~.CU_MEM_ADVISE_SET_ACCESSED_BY` set for specified device + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION + + + The last location to which the range was prefetched + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE + + + The preferred location type of the range + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID + + + The preferred location id of the range + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE + + + The last location type to which the range was prefetched + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID + + + The last location id to which the range was prefetched + +.. autoclass:: cuda.bindings.driver.CUjit_option + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_MAX_REGISTERS + + + Max number of registers that a thread may use. + + Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_THREADS_PER_BLOCK + + + IN: Specifies minimum number of threads per block to target compilation for + + OUT: Returns the number of threads the compiler actually targeted. This restricts the resource utilization of the compiler (e.g. max registers) such that a block with the given number of threads should be able to launch based on register limitations. Note, this option does not currently take into account any other resource limitations, such as shared memory utilization. + + Cannot be combined with :py:obj:`~.CU_JIT_TARGET`. + + Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_WALL_TIME + + + Overwrites the option value with the total wall clock time, in milliseconds, spent in the compiler and linker + + Option type: float + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_INFO_LOG_BUFFER + + + Pointer to a buffer in which to print any log messages that are informational in nature (the buffer size is specified via option :py:obj:`~.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES`) + + Option type: char * + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES + + + IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator) + + OUT: Amount of log buffer filled with messages + + Option type: unsigned int + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_ERROR_LOG_BUFFER + + + Pointer to a buffer in which to print any log messages that reflect errors (the buffer size is specified via option :py:obj:`~.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES`) + + Option type: char * + + Applies to: compiler and linker + + + .. 
autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES + + + IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator) + + OUT: Amount of log buffer filled with messages + + Option type: unsigned int + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_OPTIMIZATION_LEVEL + + + Level of optimizations to apply to generated code (0 - 4), with 4 being the default and highest level of optimizations. + + Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_TARGET_FROM_CUCONTEXT + + + No option value required. Determines the target based on the current attached context (default) + + Option type: No option value needed + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_TARGET + + + Target is chosen based on supplied :py:obj:`~.CUjit_target`. Cannot be combined with :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`. + + Option type: unsigned int for enumerated type :py:obj:`~.CUjit_target` + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FALLBACK_STRATEGY + + + Specifies choice of fallback strategy if matching cubin is not found. Choice is based on supplied :py:obj:`~.CUjit_fallback`. This option cannot be used with cuLink* APIs as the linker requires exact matches. + + Option type: unsigned int for enumerated type :py:obj:`~.CUjit_fallback` + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GENERATE_DEBUG_INFO + + + Specifies whether to create debug information in output (-g) (0: false, default) + + Option type: int + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_LOG_VERBOSE + + + Generate verbose log messages (0: false, default) + + Option type: int + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GENERATE_LINE_INFO + + + Generate line number information (-lineinfo) (0: false, default) + + Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_CACHE_MODE + + + Specifies whether to enable caching explicitly (-dlcm) + + Choice is based on supplied :py:obj:`~.CUjit_cacheMode_enum`. + + Option type: unsigned int for enumerated type :py:obj:`~.CUjit_cacheMode_enum` + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_NEW_SM3X_OPT + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FAST_COMPILE + + + This jit option is used for internal purpose only. + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GLOBAL_SYMBOL_NAMES + + + Array of device symbol names that will be relocated to the corresponding host addresses stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES`. + + Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries. + + When loading a device module, driver will relocate all encountered unresolved symbols to the host addresses. + + It is only allowed to register symbols that correspond to unresolved global variables. + + It is illegal to register the same device symbol at multiple addresses. + + Option type: const char ** + + Applies to: dynamic linker only + + + .. 
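Options from this enum are typically handed to :py:obj:`~.cuModuleLoadDataEx` in two parallel lists. A minimal sketch, assuming `ptx` is a null-terminated PTX byte string (e.g. produced by nvrtc) and assuming scalar option values may be passed as plain Python ints, which the bindings' option-packing helpers then place in the void* slots the driver expects:

.. code-block:: python

    from cuda.bindings import driver

    opts = [driver.CUjit_option.CU_JIT_MAX_REGISTERS,
            driver.CUjit_option.CU_JIT_GENERATE_LINE_INFO]
    vals = [32, 1]  # cap registers at 32, emit line info

    # `ptx` is a hypothetical bytes object holding PTX source.
    err, module = driver.cuModuleLoadDataEx(ptx, len(opts), opts, vals)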
autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GLOBAL_SYMBOL_ADDRESSES + + + Array of host addresses that will be used to relocate corresponding device symbols stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES`. + + Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries. + + Option type: void ** + + Applies to: dynamic linker only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GLOBAL_SYMBOL_COUNT + + + Number of entries in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES` and :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES` arrays. + + Option type: unsigned int + + Applies to: dynamic linker only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_LTO + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FTZ + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_PREC_DIV + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_PREC_SQRT + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FMA + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_KERNEL_NAMES + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_KERNEL_COUNT + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_VARIABLE_NAMES + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_VARIABLE_COUNT + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_POSITION_INDEPENDENT_CODE + + + Generate position independent code (0: false) + + Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_MIN_CTA_PER_SM + + + This option hints to the JIT compiler the minimum number of CTAs from the kernel’s grid to be mapped to a SM. This option is ignored when used together with :py:obj:`~.CU_JIT_MAX_REGISTERS` or :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`. Optimizations based on this option need :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` to be specified as well. For kernels already using PTX directive .minnctapersm, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive. Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_MAX_THREADS_PER_BLOCK + + + Maximum number of threads in a thread block, computed as the product of the maximum extent specified for each dimension of the block. This limit is guaranteed not to be exceeded in any invocation of the kernel. 
Exceeding the maximum number of threads results in runtime error or kernel launch failure. For kernels already using PTX directive .maxntid, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive. Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_OVERRIDE_DIRECTIVE_VALUES + + + This option lets the values specified using :py:obj:`~.CU_JIT_MAX_REGISTERS`, :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`, :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` and :py:obj:`~.CU_JIT_MIN_CTA_PER_SM` take precedence over any PTX directives. (0: Disable, default; 1: Enable) Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_NUM_OPTIONS + +.. autoclass:: cuda.bindings.driver.CUjit_target + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_30 + + + Compute device class 3.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_32 + + + Compute device class 3.2 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_35 + + + Compute device class 3.5 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_37 + + + Compute device class 3.7 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_50 + + + Compute device class 5.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_52 + + + Compute device class 5.2 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_53 + + + Compute device class 5.3 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_60 + + + Compute device class 6.0. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_61 + + + Compute device class 6.1. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_62 + + + Compute device class 6.2. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_70 + + + Compute device class 7.0. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_72 + + + Compute device class 7.2. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_75 + + + Compute device class 7.5. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_80 + + + Compute device class 8.0. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_86 + + + Compute device class 8.6. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_87 + + + Compute device class 8.7. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_89 + + + Compute device class 8.9. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_90 + + + Compute device class 9.0. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_90A + + + Compute device class 9.0 with accelerated features. + +.. autoclass:: cuda.bindings.driver.CUjit_fallback + + .. autoattribute:: cuda.bindings.driver.CUjit_fallback.CU_PREFER_PTX + + + Prefer to compile ptx if exact binary match not found + + + .. autoattribute:: cuda.bindings.driver.CUjit_fallback.CU_PREFER_BINARY + + + Prefer to fall back to compatible binary code if exact match not found + +.. autoclass:: cuda.bindings.driver.CUjit_cacheMode + + .. 
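A common use of :py:obj:`~.CUjit_target` is picking the entry that matches a device's compute capability. A minimal sketch, assuming the enum's numeric values mirror cuda.h, where CU_TARGET_COMPUTE_NN is the integer NN:

.. code-block:: python

    from cuda.bindings import driver

    err, = driver.cuInit(0)
    err, dev = driver.cuDeviceGet(0)
    attr = driver.CUdevice_attribute
    err, major = driver.cuDeviceGetAttribute(
        attr.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, dev)
    err, minor = driver.cuDeviceGetAttribute(
        attr.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, dev)

    # e.g. an sm_86 device maps to CU_TARGET_COMPUTE_86.
    target = driver.CUjit_target(major * 10 + minor)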
autoattribute:: cuda.bindings.driver.CUjit_cacheMode.CU_JIT_CACHE_OPTION_NONE + + + Compile with no -dlcm flag specified + + + .. autoattribute:: cuda.bindings.driver.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CG + + + Compile with L1 cache disabled + + + .. autoattribute:: cuda.bindings.driver.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CA + + + Compile with L1 cache enabled + +.. autoclass:: cuda.bindings.driver.CUjitInputType + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_CUBIN + + + Compiled device-class-specific device code + + Applicable options: none + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_PTX + + + PTX source code + + Applicable options: PTX compiler options + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_FATBINARY + + + Bundle of multiple cubins and/or PTX of some device code + + Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_OBJECT + + + Host object with embedded device code + + Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_LIBRARY + + + Archive of host objects with embedded device code + + Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_NVVM + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_NUM_INPUT_TYPES + +.. autoclass:: cuda.bindings.driver.CUgraphicsRegisterFlags + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_NONE + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER + +.. autoclass:: cuda.bindings.driver.CUgraphicsMapResourceFlags + + .. autoattribute:: cuda.bindings.driver.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD + +.. autoclass:: cuda.bindings.driver.CUarray_cubemap_face + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_X + + + Positive X face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_X + + + Negative X face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Y + + + Positive Y face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Y + + + Negative Y face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Z + + + Positive Z face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Z + + + Negative Z face of cubemap + +.. autoclass:: cuda.bindings.driver.CUlimit + + .. 
autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_STACK_SIZE + + + GPU thread stack size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE + + + GPU printf FIFO size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_MALLOC_HEAP_SIZE + + + GPU malloc heap size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH + + + GPU device runtime launch synchronize depth + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT + + + GPU device runtime pending launch count + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_MAX_L2_FETCH_GRANULARITY + + + A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_PERSISTING_L2_CACHE_SIZE + + + A size in bytes for L2 persisting lines cache size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_SHMEM_SIZE + + + A maximum size in bytes of shared memory available to CUDA kernels on a CIG context. Can only be queried, cannot be set + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_CIG_ENABLED + + + A non-zero value indicates this CUDA context is a CIG-enabled context. Can only be queried, cannot be set + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED + + + When set to a non-zero value, CUDA will fail to launch a kernel on a CIG context, instead of using the fallback path, if the kernel uses more shared memory than available + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_MAX + +.. autoclass:: cuda.bindings.driver.CUresourcetype + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_ARRAY + + + Array resource + + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY + + + Mipmapped array resource + + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_LINEAR + + + Linear resource + + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_PITCH2D + + + Pitch 2D resource + +.. autoclass:: cuda.bindings.driver.CUaccessProperty + + .. autoattribute:: cuda.bindings.driver.CUaccessProperty.CU_ACCESS_PROPERTY_NORMAL + + + Normal cache persistence. + + + .. autoattribute:: cuda.bindings.driver.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING + + + Streaming access is less likely to persist in cache. + + + .. autoattribute:: cuda.bindings.driver.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING + + + Persisting access is more likely to persist in cache. + +.. autoclass:: cuda.bindings.driver.CUgraphConditionalNodeType + + .. autoattribute:: cuda.bindings.driver.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_IF + + + Conditional 'if' Node. Body executed once if condition value is non-zero. + + + .. autoattribute:: cuda.bindings.driver.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_WHILE + + + Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. + +.. autoclass:: cuda.bindings.driver.CUgraphNodeType + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_KERNEL + + + GPU kernel node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMCPY + + + Memcpy node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMSET + + + Memset node + + + .. 
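The :py:obj:`~.CUlimit` values documented above are set and read per context with :py:obj:`~.cuCtxSetLimit` and :py:obj:`~.cuCtxGetLimit`. A minimal sketch, assuming a current context:

.. code-block:: python

    from cuda.bindings import driver

    # Grow the per-thread stack, then read the value back.
    err, = driver.cuCtxSetLimit(driver.CUlimit.CU_LIMIT_STACK_SIZE, 8 * 1024)
    err, stack = driver.cuCtxGetLimit(driver.CUlimit.CU_LIMIT_STACK_SIZE)

    # Query-only limits such as CU_LIMIT_CIG_ENABLED can be read the same
    # way but reject cuCtxSetLimit.
    err, fifo = driver.cuCtxGetLimit(driver.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE)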
autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_HOST + + + Host (executable) node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_GRAPH + + + Node which executes an embedded graph + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EMPTY + + + Empty (no-op) node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_WAIT_EVENT + + + External event wait node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD + + + External event record node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL + + + External semaphore signal node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT + + + External semaphore wait node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_ALLOC + + + Memory Allocation Node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_FREE + + + Memory Free Node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_BATCH_MEM_OP + + + Batch MemOp Node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_CONDITIONAL + + + Conditional Node. May be used to implement a conditional execution path or loop + + inside of a graph. The graph(s) contained within the body of the conditional node + + can be selectively executed or iterated upon based on the value of a conditional + + variable. + + + + Handles must be created in advance of creating the node + + using :py:obj:`~.cuGraphConditionalHandleCreate`. + + + + The following restrictions apply to graphs which contain conditional nodes: + + The graph cannot be used in a child node. + + Only one instantiation of the graph may exist at any point in time. + + The graph cannot be cloned. + + + + To set the control value, supply a default value when creating the handle and/or + + call :py:obj:`~.cudaGraphSetConditional` from device code. + +.. autoclass:: cuda.bindings.driver.CUgraphDependencyType + + .. autoattribute:: cuda.bindings.driver.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_DEFAULT + + + This is an ordinary dependency. + + + .. autoattribute:: cuda.bindings.driver.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC + + + This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC` or :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER` outgoing port. + +.. autoclass:: cuda.bindings.driver.CUgraphInstantiateResult + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_SUCCESS + + + Instantiation succeeded + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_ERROR + + + Instantiation failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE + + + Instantiation failed due to invalid structure, such as cycles + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED + + + Instantiation for device launch failed because the graph contained an unsupported operation + + + .. 
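Node types and instantiation results come together when a graph is built and launched explicitly. A minimal sketch, assuming an active context; the empty node stands in for real work:

.. code-block:: python

    from cuda.bindings import driver

    err, graph = driver.cuGraphCreate(0)
    # An empty node (CU_GRAPH_NODE_TYPE_EMPTY) with no dependencies.
    err, node = driver.cuGraphAddEmptyNode(graph, [], 0)

    # cuGraphInstantiate surfaces failures such as
    # CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE through its CUresult.
    err, graph_exec = driver.cuGraphInstantiate(graph, 0)

    err, stream = driver.cuStreamCreate(0)
    err, = driver.cuGraphLaunch(graph_exec, stream)
    err, = driver.cuStreamSynchronize(stream)

    err, = driver.cuGraphExecDestroy(graph_exec)
    err, = driver.cuGraphDestroy(graph)
    err, = driver.cuStreamDestroy(stream)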
autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED + + + Instantiation for device launch failed due to the nodes belonging to different contexts + +.. autoclass:: cuda.bindings.driver.CUsynchronizationPolicy + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_AUTO + + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_SPIN + + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_YIELD + + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_BLOCKING_SYNC + +.. autoclass:: cuda.bindings.driver.CUclusterSchedulingPolicy + + .. autoattribute:: cuda.bindings.driver.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_DEFAULT + + + the default policy + + + .. autoattribute:: cuda.bindings.driver.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_SPREAD + + + spread the blocks within a cluster to the SMs + + + .. autoattribute:: cuda.bindings.driver.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING + + + allow the hardware to load-balance the blocks in a cluster to the SMs + +.. autoclass:: cuda.bindings.driver.CUlaunchMemSyncDomain + + .. autoattribute:: cuda.bindings.driver.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT + + + Launch kernels in the default domain + + + .. autoattribute:: cuda.bindings.driver.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE + + + Launch kernels in the remote domain + +.. autoclass:: cuda.bindings.driver.CUlaunchAttributeID + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_IGNORE + + + Ignored entry, for convenient composition + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.accessPolicyWindow`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_COOPERATIVE + + + Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.cooperative`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY + + + Valid for streams. See :py:obj:`~.CUlaunchAttributeValue.syncPolicy`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION + + + Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterDim`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + + + Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterSchedulingPolicyPreference`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION + + + Valid for launches. Setting :py:obj:`~.CUlaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT + + + Valid for launches. 
Set :py:obj:`~.CUlaunchAttributeValue.programmaticEvent` to record the event. Event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cuEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cuEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.priority`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomainMap`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomain`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT + + + Valid for launches. Set :py:obj:`~.CUlaunchAttributeValue.launchCompletionEvent` to record the event. + + Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. + + A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE + + + Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. + + :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. 
On success, a handle will be returned via :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`. + + Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cuGraphDestroyNode`. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cuGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cuGraphExecUpdate`. + + If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cuGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT + + + Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.CUlaunchAttributeValue.sharedMemCarveout` to a percentage between 0 and 100 signals the CUDA driver to set the shared memory carveout preference, in percent of the total shared memory for that kernel launch. This attribute takes precedence over :py:obj:`~.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT`. This is only a hint, and the CUDA driver can choose a different configuration if required for the launch. + +.. autoclass:: cuda.bindings.driver.CUstreamCaptureStatus + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_NONE + + + Stream is not capturing + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE + + + Stream is actively capturing + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_INVALIDATED + + + Stream is part of a capture sequence that has been invalidated, but not terminated + +.. autoclass:: cuda.bindings.driver.CUstreamCaptureMode + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_THREAD_LOCAL + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_RELAXED + +.. autoclass:: cuda.bindings.driver.CUdriverProcAddress_flags + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT + + + Default search mode for driver symbols. + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_LEGACY_STREAM + + + Search for legacy versions of driver symbols. + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM + + + Search for per-thread versions of driver symbols. + +
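As an illustration of how the launch attributes above are attached to a kernel launch through these bindings, a minimal sketch (the ``kernel`` handle and ``stream`` are placeholders assumed to already exist, and the kernel is assumed to take no parameters):

.. code-block:: python

    from cuda.bindings import driver

    # One attribute requesting programmatic stream serialization.
    attr = driver.CUlaunchAttribute()
    attr.id = driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION
    attr.value.programmaticStreamSerializationAllowed = 1

    # A deliberately tiny 1x1x1 launch, for illustration only.
    config = driver.CUlaunchConfig()
    config.gridDimX = config.gridDimY = config.gridDimZ = 1
    config.blockDimX = config.blockDimY = config.blockDimZ = 1
    config.hStream = stream          # assumed: an existing CUstream
    config.attrs = [attr]
    config.numAttrs = 1

    # cuLaunchKernelEx returns a one-element tuple holding the CUresult;
    # ((), ()) passes no kernel arguments.
    err, = driver.cuLaunchKernelEx(config, kernel, ((), ()), 0)
    assert err == driver.CUresult.CUDA_SUCCESS

.. 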
autoclass:: cuda.bindings.driver.CUdriverProcAddressQueryResult + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS + + + Symbol was successfully found + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND + + + Symbol was not found in search + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT + + + Symbol was found but version supplied was not sufficient + +.. autoclass:: cuda.bindings.driver.CUexecAffinityType + + .. autoattribute:: cuda.bindings.driver.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT + + + Create a context with limited SMs. + + + .. autoattribute:: cuda.bindings.driver.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUcigDataType + + .. autoattribute:: cuda.bindings.driver.CUcigDataType.CIG_DATA_TYPE_D3D12_COMMAND_QUEUE + +.. autoclass:: cuda.bindings.driver.CUlibraryOption + + .. autoattribute:: cuda.bindings.driver.CUlibraryOption.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE + + + .. autoattribute:: cuda.bindings.driver.CUlibraryOption.CU_LIBRARY_BINARY_IS_PRESERVED + + + Specifies that the argument `code` passed to :py:obj:`~.cuLibraryLoadData()` will be preserved. Specifying this option will let the driver know that `code` can be accessed at any point until :py:obj:`~.cuLibraryUnload()`. The default behavior is for the driver to allocate and maintain its own copy of `code`. Note that this is only a memory usage optimization hint and the driver can choose to ignore it if required. Specifying this option with :py:obj:`~.cuLibraryLoadFromFile()` is invalid and will return :py:obj:`~.CUDA_ERROR_INVALID_VALUE`. + + + .. autoattribute:: cuda.bindings.driver.CUlibraryOption.CU_LIBRARY_NUM_OPTIONS + +.. autoclass:: cuda.bindings.driver.CUresult + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_SUCCESS + + + The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`). + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_VALUE + + + This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_OUT_OF_MEMORY + + + The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_INITIALIZED + + + This indicates that the CUDA driver has not been initialized with :py:obj:`~.cuInit()` or that initialization has failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_DEINITIALIZED + + + This indicates that the CUDA driver is in the process of shutting down. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_DISABLED + + + This indicates the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like the visual profiler. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_NOT_INITIALIZED + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_ALREADY_STARTED + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_ALREADY_STOPPED + + + [Deprecated] + + + 
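The :py:obj:`~.CUresult` codes in this enumeration come back as the first element of every binding's result tuple, so callers typically funnel them through a small checking helper; a minimal sketch:

.. code-block:: python

    from cuda.bindings import driver

    def check(err):
        # Translate a non-success CUresult into a Python exception,
        # using the driver's own error-name lookup.
        if err != driver.CUresult.CUDA_SUCCESS:
            _, name = driver.cuGetErrorName(err)
            raise RuntimeError(f"CUDA error: {name.decode()}")

    err, = driver.cuInit(0)
    check(err)
    err, device = driver.cuDeviceGet(0)
    check(err)

.. 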
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STUB_LIBRARY + + + This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in CUDA API returning this error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_DEVICE_UNAVAILABLE + + + This indicates that the requested CUDA device is unavailable at the current time. Devices are often unavailable due to use of :py:obj:`~.CU_COMPUTEMODE_EXCLUSIVE_PROCESS` or :py:obj:`~.CU_COMPUTEMODE_PROHIBITED`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NO_DEVICE + + + This indicates that no CUDA-capable devices were detected by the installed CUDA driver. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_DEVICE + + + This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_DEVICE_NOT_LICENSED + + + This error indicates that the Grid license is not applied. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_IMAGE + + + This indicates that the device kernel image is invalid. This can also indicate an invalid CUDA module. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_CONTEXT + + + This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. This can also be returned if the green context passed to an API call was not converted to a :py:obj:`~.CUcontext` using :py:obj:`~.cuCtxFromGreenCtx` API. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CONTEXT_ALREADY_CURRENT + + + This indicates that the context being supplied as a parameter to the API call was already the active context. [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MAP_FAILED + + + This indicates that a map or register operation has failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNMAP_FAILED + + + This indicates that an unmap or unregister operation has failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ARRAY_IS_MAPPED + + + This indicates that the specified array is currently mapped and thus cannot be destroyed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ALREADY_MAPPED + + + This indicates that the resource is already mapped. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NO_BINARY_FOR_GPU + + + This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ALREADY_ACQUIRED + + + This indicates that a resource has already been acquired. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_MAPPED + + + This indicates that a resource is not mapped. + + + .. 
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_MAPPED_AS_ARRAY + + + This indicates that a mapped resource is not available for access as an array. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_MAPPED_AS_POINTER + + + This indicates that a mapped resource is not available for access as a pointer. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ECC_UNCORRECTABLE + + + This indicates that an uncorrectable ECC error was detected during execution. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_LIMIT + + + This indicates that the :py:obj:`~.CUlimit` passed to the API call is not supported by the active device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CONTEXT_ALREADY_IN_USE + + + This indicates that the :py:obj:`~.CUcontext` passed to the API call can only be bound to a single CPU thread at a time but is already bound to a CPU thread. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PEER_ACCESS_UNSUPPORTED + + + This indicates that peer access is not supported across the given devices. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_PTX + + + This indicates that a PTX JIT compilation failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_GRAPHICS_CONTEXT + + + This indicates an error with the OpenGL or DirectX context. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NVLINK_UNCORRECTABLE + + + This indicates that an uncorrectable NVLink error was detected during the execution. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_JIT_COMPILER_NOT_FOUND + + + This indicates that the PTX JIT compiler library was not found. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_PTX_VERSION + + + This indicates that the provided PTX was compiled with an unsupported toolchain. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_JIT_COMPILATION_DISABLED + + + This indicates that the PTX JIT compilation was disabled. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY + + + This indicates that the :py:obj:`~.CUexecAffinityType` passed to the API call is not supported by the active device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC + + + This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_SOURCE + + + This indicates that the device kernel source is invalid. This includes compilation/linker errors encountered in device code or user error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_FILE_NOT_FOUND + + + This indicates that the file specified was not found. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND + + + This indicates that a link to a shared object failed to resolve. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + + + This indicates that initialization of a shared object failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_OPERATING_SYSTEM + + + This indicates that an OS call failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_HANDLE + + + This indicates that a resource handle passed to the API call was not valid. 
Resource handles are opaque types like :py:obj:`~.CUstream` and :py:obj:`~.CUevent`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ILLEGAL_STATE + + + This indicates that a resource required by the API call is not in a valid state to perform the requested operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LOSSY_QUERY + + + This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or the omission of optional return arguments. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_FOUND + + + This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_READY + + + This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than :py:obj:`~.CUDA_SUCCESS` (which indicates completion). Calls that may return this value include :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ILLEGAL_ADDRESS + + + While executing a kernel, the device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES + + + This indicates that a launch did not occur because it did not have appropriate resources. This error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. Passing arguments of the wrong size (i.e. a 64-bit pointer when a 32-bit int is expected) is equivalent to passing too many arguments and can also result in this error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_TIMEOUT + + + This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT` for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING + + + This error indicates a kernel launch that uses an incompatible texturing mode. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED + + + This error indicates that a call to :py:obj:`~.cuCtxEnablePeerAccess()` is trying to re-enable peer access to a context which has already had peer access to it enabled. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PEER_ACCESS_NOT_ENABLED + + + This error indicates that :py:obj:`~.cuCtxDisablePeerAccess()` is trying to disable peer access which has not been enabled yet via :py:obj:`~.cuCtxEnablePeerAccess()`. + + + 
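Because :py:obj:`~.CUDA_ERROR_NOT_READY` reports status rather than failure, polling code has to treat it separately from real errors; a minimal sketch (assuming an existing ``stream``):

.. code-block:: python

    import time
    from cuda.bindings import driver

    def wait_polling(stream, interval=0.001):
        # Spin on cuStreamQuery: CUDA_ERROR_NOT_READY means work is
        # still pending; anything other than CUDA_SUCCESS is a real error.
        while True:
            err, = driver.cuStreamQuery(stream)
            if err == driver.CUresult.CUDA_SUCCESS:
                return
            if err != driver.CUresult.CUDA_ERROR_NOT_READY:
                raise RuntimeError(f"cuStreamQuery failed: {err}")
            time.sleep(interval)

.. 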
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE + + + This error indicates that the primary context for the specified device has already been initialized. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CONTEXT_IS_DESTROYED + + + This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ASSERT + + + A device-side assert triggered during kernel execution. The context cannot be used anymore, and must be destroyed. All existing device memory allocations from this context are invalid and must be reconstructed if the program is to continue using CUDA. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_TOO_MANY_PEERS + + + This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cuCtxEnablePeerAccess()`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED + + + This error indicates that the memory range passed to :py:obj:`~.cuMemHostRegister()` has already been registered. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED + + + This error indicates that the pointer passed to :py:obj:`~.cuMemHostUnregister()` does not correspond to any currently registered memory region. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_HARDWARE_STACK_ERROR + + + While executing a kernel, the device encountered a stack error. This can be due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ILLEGAL_INSTRUCTION + + + While executing a kernel, the device encountered an illegal instruction. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MISALIGNED_ADDRESS + + + While executing a kernel, the device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_ADDRESS_SPACE + + + While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_PC + + + While executing a kernel, the device program counter wrapped its address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. 
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_FAILED + + + An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE + + + This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cuLaunchCooperativeKernel` or :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_PERMITTED + + + This error indicates that the attempted operation is not permitted. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_SUPPORTED + + + This error indicates that the attempted operation is not supported on the current system or device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SYSTEM_NOT_READY + + + This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH + + + This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE + + + This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_CONNECTION_FAILED + + + This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_RPC_FAILURE + + + This error indicates that the remote procedure call between the MPS server and the MPS client failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_SERVER_NOT_READY + + + This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_MAX_CLIENTS_REACHED + + + This error indicates that the hardware resources required to create an MPS client have been exhausted. + + + 
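One way to avoid :py:obj:`~.CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE` is to bound the grid with the occupancy API before a cooperative launch; a minimal sketch (the loaded ``kernel`` and device handle ``dev`` are placeholders, and error checking is elided):

.. code-block:: python

    from cuda.bindings import driver

    block_size = 256
    # Blocks that can be resident per SM for this kernel at this block
    # size, with 0 bytes of dynamic shared memory.
    err, blocks_per_sm = driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(
        kernel, block_size, 0)
    err, sm_count = driver.cuDeviceGetAttribute(
        driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev)

    # Upper bound on the grid size for cuLaunchCooperativeKernel.
    max_coop_blocks = blocks_per_sm * sm_count

.. 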
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED + + + This error indicates that the hardware resources required to support device connections have been exhausted. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_CLIENT_TERMINATED + + + This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CDP_NOT_SUPPORTED + + + This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CDP_VERSION_MISMATCH + + + This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED + + + This error indicates that the operation is not permitted when the stream is capturing. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_INVALIDATED + + + This error indicates that the current capture sequence on the stream has been invalidated due to a previous error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_MERGE + + + This error indicates that the operation would have resulted in a merge of two independent capture sequences. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNMATCHED + + + This error indicates that the capture was not initiated in this stream. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNJOINED + + + This error indicates that the capture sequence contains a fork that was not joined to the primary stream. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_ISOLATION + + + This error indicates that a dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + + + This error indicates a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CAPTURED_EVENT + + + This error indicates that the operation is not permitted on an event which was last recorded in a capturing stream. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD + + + A stream capture sequence not initiated with the :py:obj:`~.CU_STREAM_CAPTURE_MODE_RELAXED` argument to :py:obj:`~.cuStreamBeginCapture` was passed to :py:obj:`~.cuStreamEndCapture` in a different thread. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_TIMEOUT + + + This error indicates that the timeout specified for the wait operation has lapsed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE + + + This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_EXTERNAL_DEVICE + + + This indicates that an async error has occurred in a device outside of CUDA. 
If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_CLUSTER_SIZE + + + Indicates a kernel launch error due to cluster misconfiguration. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_FUNCTION_NOT_LOADED + + + Indicates that a function handle is not loaded when calling an API that requires a loaded function. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_RESOURCE_TYPE + + + This error indicates one or more resources passed in are not valid resource types for the operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION + + + This error indicates one or more resources are insufficient or non-applicable for the operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNKNOWN + + + This indicates that an unknown internal error has occurred. + +.. autoclass:: cuda.bindings.driver.CUdevice_P2PAttribute + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK + + + A relative value indicating the performance of the link between two devices + + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED + + + P2P access is enabled + + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED + + + Atomic operation over the link supported + + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED + + + Accessing CUDA arrays over the link supported + +.. autoclass:: cuda.bindings.driver.CUresourceViewFormat + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_NONE + + + No resource view format (use underlying resource format) + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X8 + + + 1 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X8 + + + 2 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X8 + + + 4 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X8 + + + 1 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X8 + + + 2 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X8 + + + 4 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X16 + + + 1 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X16 + + + 2 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X16 + + + 4 channel unsigned 16-bit integers + + + 
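The :py:obj:`~.CUdevice_P2PAttribute` values above are read with :py:obj:`~.cuDeviceGetP2PAttribute`; a minimal sketch for the first two device ordinals (assumes at least two devices and a prior :py:obj:`~.cuInit` call, error checking elided):

.. code-block:: python

    from cuda.bindings import driver

    err, dev0 = driver.cuDeviceGet(0)
    err, dev1 = driver.cuDeviceGet(1)

    # Ask whether dev0 can access dev1 over a peer-to-peer link.
    err, supported = driver.cuDeviceGetP2PAttribute(
        driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED,
        dev0, dev1)
    print("P2P access supported:", bool(supported))

.. 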
autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X16 + + + 1 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X16 + + + 2 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X16 + + + 4 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X32 + + + 1 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X32 + + + 2 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X32 + + + 4 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X32 + + + 1 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X32 + + + 2 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X32 + + + 4 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X16 + + + 1 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X16 + + + 2 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X16 + + + 4 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X32 + + + 1 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X32 + + + 2 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X32 + + + 4 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC1 + + + Block compressed 1 + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC2 + + + Block compressed 2 + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC3 + + + Block compressed 3 + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC4 + + + Block compressed 4 unsigned + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC4 + + + Block compressed 4 signed + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC5 + + + Block compressed 5 unsigned + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC5 + + + Block compressed 5 signed + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC6H + + + Block compressed 6 unsigned half-float + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC6H + + + Block compressed 6 signed half-float + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC7 + + + Block compressed 7 + +.. autoclass:: cuda.bindings.driver.CUtensorMapDataType + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT8 + + + .. 
autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT16 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT64 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT64 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT16 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT64 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ + +.. autoclass:: cuda.bindings.driver.CUtensorMapInterleave + + .. autoattribute:: cuda.bindings.driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_16B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_32B + +.. autoclass:: cuda.bindings.driver.CUtensorMapSwizzle + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_32B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_64B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_128B + +.. autoclass:: cuda.bindings.driver.CUtensorMapL2promotion + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_64B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_128B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_256B + +.. autoclass:: cuda.bindings.driver.CUtensorMapFloatOOBfill + + .. autoattribute:: cuda.bindings.driver.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA + +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS + + .. autoattribute:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE + + + No access, meaning the device cannot access this memory at all, and thus must be staged through accessible memory in order to complete certain operations + + + .. autoattribute:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ + + + Read-only access, meaning writes to this memory are considered invalid accesses and thus return an error in that case. + + + .. autoattribute:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE + + + Read-write access, the device has full read-write access to the memory + +.. 
autoclass:: cuda.bindings.driver.CUexternalMemoryHandleType + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + + + Handle is an opaque, globally shared handle + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP + + + Handle is a D3D12 heap object + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + + + Handle is a D3D12 committed resource + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + + + Handle is a shared NT handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + + + Handle is a globally shared handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF + + + Handle is an NvSciBuf object + +.. autoclass:: cuda.bindings.driver.CUexternalSemaphoreHandleType + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + + + Handle is an opaque, globally shared handle + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE + + + Handle is a shared NT handle referencing a D3D12 fence object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE + + + Handle is a shared NT handle referencing a D3D11 fence object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + + + Opaque handle to NvSciSync Object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX + + + Handle is a shared NT handle referencing a D3D11 keyed mutex object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + + + Handle is a globally shared handle referencing a D3D11 keyed mutex object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD + + + Handle is an opaque file descriptor referencing a timeline semaphore + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + + + Handle is an opaque shared NT handle referencing a timeline semaphore + +.. autoclass:: cuda.bindings.driver.CUmemAllocationHandleType + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_NONE + + + Does not allow any export mechanism. + + + .. 
autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR + + + Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32 + + + Allows a Win32 NT handle to be used for exporting. (HANDLE) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32_KMT + + + Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_FABRIC + + + Allows a fabric handle to be used for exporting. (CUmemFabricHandle) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUmemAccess_flags + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_NONE + + + Default, make the address range not accessible + + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READ + + + Make the address range read accessible + + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE + + + Make the address range read-write accessible + + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_MAX + +.. autoclass:: cuda.bindings.driver.CUmemLocationType + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_INVALID + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE + + + Location is a device location, thus id is a device ordinal + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST + + + Location is host, id is ignored + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA + + + Location is a host NUMA node, thus id is a host NUMA node id + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT + + + Location is a host NUMA node of the current thread, id is ignored + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUmemAllocationType + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_INVALID + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED + + + This allocation type is 'pinned', i.e. it cannot migrate from its current location while the application is actively using it + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUmemAllocationGranularity_flags + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM + + + Minimum required granularity for allocation + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED + + + Recommended granularity for allocation for best performance + +.. autoclass:: cuda.bindings.driver.CUmemRangeHandleType + + .. autoattribute:: cuda.bindings.driver.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD + + + .. autoattribute:: cuda.bindings.driver.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUarraySparseSubresourceType + + 
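The allocation-type, location, access-flag, and granularity enumerations above come together in the virtual memory management flow; a compressed sketch for device ordinal 0 (an active context is assumed and error checking is elided):

.. code-block:: python

    from cuda.bindings import driver

    prop = driver.CUmemAllocationProp()
    prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0

    # Round the request up to the minimum granularity; one granule here.
    err, gran = driver.cuMemGetAllocationGranularity(
        prop, driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
    size = gran

    err, handle = driver.cuMemCreate(size, prop, 0)
    err, ptr = driver.cuMemAddressReserve(size, 0, 0, 0)
    err, = driver.cuMemMap(ptr, size, 0, handle, 0)

    # Grant the device read-write access to the mapped range.
    desc = driver.CUmemAccessDesc()
    desc.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    desc.location.id = 0
    desc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
    err, = driver.cuMemSetAccess(ptr, size, [desc], 1)

.. 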
autoattribute:: cuda.bindings.driver.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL + + + .. autoattribute:: cuda.bindings.driver.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL + +.. autoclass:: cuda.bindings.driver.CUmemOperationType + + .. autoattribute:: cuda.bindings.driver.CUmemOperationType.CU_MEM_OPERATION_TYPE_MAP + + + .. autoattribute:: cuda.bindings.driver.CUmemOperationType.CU_MEM_OPERATION_TYPE_UNMAP + +.. autoclass:: cuda.bindings.driver.CUmemHandleType + + .. autoattribute:: cuda.bindings.driver.CUmemHandleType.CU_MEM_HANDLE_TYPE_GENERIC + +.. autoclass:: cuda.bindings.driver.CUmemAllocationCompType + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_NONE + + + Allocating non-compressible memory + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_GENERIC + + + Allocating compressible memory + +.. autoclass:: cuda.bindings.driver.CUmulticastGranularity_flags + + .. autoattribute:: cuda.bindings.driver.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_MINIMUM + + + Minimum required granularity + + + .. autoattribute:: cuda.bindings.driver.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_RECOMMENDED + + + Recommended granularity for best performance + +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResult + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_SUCCESS + + + The update succeeded + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR + + + The update failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED + + + The update failed because the topology changed + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED + + + The update failed because a node type changed + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED + + + The update failed because the function of a kernel node changed (CUDA driver < 11.2) + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED + + + The update failed because the parameters changed in a way that is not supported + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED + + + The update failed because something about the node is not supported + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE + + + The update failed because the function of a kernel node changed in an unsupported way + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED + + + The update failed because the node attributes changed in a way that is not supported + +.. autoclass:: cuda.bindings.driver.CUmemPool_attribute + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES + + + (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in other streams as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) + + + 
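These pool attributes are read and written with :py:obj:`~.cuMemPoolGetAttribute` and :py:obj:`~.cuMemPoolSetAttribute`; for example, raising the release threshold of a device's default pool (a minimal sketch, error checking elided):

.. code-block:: python

    from cuda.bindings import driver

    err, dev = driver.cuDeviceGet(0)
    err, pool = driver.cuDeviceGetDefaultMemPool(dev)

    # Hold up to 64 MiB of freed memory in the pool instead of returning
    # it to the OS at the next synchronization point.
    threshold = driver.cuuint64_t(64 * 1024 * 1024)
    err, = driver.cuMemPoolSetAttribute(
        pool,
        driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
        threshold)

.. 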
autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC + + + (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES + + + (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync (default enabled). + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD + + + (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. + +.. autoclass:: cuda.bindings.driver.CUgraphMem_attribute + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs + + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. + + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. + +.. autoclass:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST + + + :py:obj:`~.cuFlushGPUDirectRDMAWrites()` and its CUDA Runtime API counterpart are supported on the device. + + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS + + + The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. + +.. 
autoclass:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering + + .. autoattribute:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE + + + The device does not natively support ordering of remote writes. :py:obj:`~.cuFlushGPUDirectRDMAWrites()` can be leveraged if supported. + + + .. autoattribute:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER + + + Natively, the device can consistently consume remote writes, although other CUDA devices may not. + + + .. autoattribute:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES + + + Any CUDA device in the system can consistently consume remote writes to this device. + +.. autoclass:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER + + + Blocks until remote writes are visible to the CUDA device context owning the data. + + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES + + + Blocks until remote writes are visible to all CUDA device contexts. + +.. autoclass:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesTarget + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesTarget.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX + + + Sets the target for :py:obj:`~.cuFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. + +.. autoclass:: cuda.bindings.driver.CUgraphDebugDot_flags + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE + + + Output all debug data as if every debug flag is enabled + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES + + + Use CUDA Runtime structures for output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS + + + Adds CUDA_KERNEL_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS + + + Adds CUDA_MEMCPY3D values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS + + + Adds CUDA_MEMSET_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS + + + Adds CUDA_HOST_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS + + + Adds CUevent handle from record and wait nodes to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS + + + Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS + + + Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES + + + Adds CUkernelNodeAttrValue values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES + + + Adds node handles and every kernel function handle to output + + + .. 
autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS + + + Adds memory alloc node parameters to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS + + + Adds memory free node parameters to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS + + + Adds batch mem op node parameters to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO + + + Adds edge numbering information + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS + + + Adds conditional node parameters to output + +.. autoclass:: cuda.bindings.driver.CUuserObject_flags + + .. autoattribute:: cuda.bindings.driver.CUuserObject_flags.CU_USER_OBJECT_NO_DESTRUCTOR_SYNC + + + Indicates the destructor execution is not synchronized by any CUDA handle. + +.. autoclass:: cuda.bindings.driver.CUuserObjectRetain_flags + + .. autoattribute:: cuda.bindings.driver.CUuserObjectRetain_flags.CU_GRAPH_USER_OBJECT_MOVE + + + Transfer references from the caller rather than creating new references. + +.. autoclass:: cuda.bindings.driver.CUgraphInstantiate_flags + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH + + + Automatically free memory allocated in a graph before relaunching. + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD + + + Automatically upload the graph after instantiation. Only supported by :py:obj:`~.cuGraphInstantiateWithParams`. The upload will be performed using the stream provided in `instantiateParams`. + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH + + + Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY + + + Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. + +.. autoclass:: cuda.bindings.driver.CUdeviceNumaConfig + + .. autoattribute:: cuda.bindings.driver.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NONE + + + The GPU is not a NUMA node + + + .. autoattribute:: cuda.bindings.driver.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NUMA_NODE + + + The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID + +.. autoclass:: cuda.bindings.driver.CUeglFrameType + + .. autoattribute:: cuda.bindings.driver.CUeglFrameType.CU_EGL_FRAME_TYPE_ARRAY + + + Frame type CUDA array + + + .. autoattribute:: cuda.bindings.driver.CUeglFrameType.CU_EGL_FRAME_TYPE_PITCH + + + Frame type pointer + +.. autoclass:: cuda.bindings.driver.CUeglResourceLocationFlags + + .. autoattribute:: cuda.bindings.driver.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_SYSMEM + + + Resource location sysmem + + + .. autoattribute:: cuda.bindings.driver.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_VIDMEM + + + Resource location vidmem + +.. autoclass:: cuda.bindings.driver.CUeglColorFormat + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR + + + Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGB + + + R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGR + + + R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ARGB + + + R/G/B/A four channels in one surface with BGRA byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGBA + + + R/G/B/A four channels in one surface with ABGR byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_L + + + single luminance channel in one surface. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_R + + + single color channel in one surface. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR + + + Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_422 + + + Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_422 + + + Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ABGR + + + R/G/B/A four channels in one surface with RGBA byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGRA + + + R/G/B/A four channels in one surface with ARGB byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_A + + + Alpha color format - one channel in one surface. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RG + + + R/G color format - two channels in one surface with GR byte ordering + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV + + + Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY_ER + + + Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_ER + + + Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_ER + + + Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU_ER + + + Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV_ER + + + Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER + + + Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER + + + Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RGGB + + + Bayer format - one channel in one surface with interleaved RGGB ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BGGR + + + Bayer format - one channel in one surface with interleaved BGGR ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GRBG + + + Bayer format - one channel in one surface with interleaved GRBG ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GBRG + + + Bayer format - one channel in one surface with interleaved GBRG ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_RGGB + + + Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_BGGR + + + Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GRBG + + + Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GBRG + + + Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RGGB + + + Bayer12 format - one channel in one surface with interleaved RGGB ordering. 
Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BGGR + + + Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GRBG + + + Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GBRG + + + Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_RGGB + + + Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_BGGR + + + Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GRBG + + + Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GBRG + + + Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_RGGB + + + Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_BGGR + + + Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GRBG + + + Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GBRG + + + Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR + + + Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BCCR + + + Bayer format - one channel in one surface with interleaved BCCR ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RCCB + + + Bayer format - one channel in one surface with interleaved RCCB ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CRBC + + + Bayer format - one channel in one surface with interleaved CRBC ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CBRC + + + Bayer format - one channel in one surface with interleaved CBRC ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_CCCC + + + Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BCCR + + + Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RCCB + + + Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CRBC + + + Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CBRC + + + Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CCCC + + + Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y + + + Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 + + + Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 + + + Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 + + + Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 + + + Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 + + + Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR + + + Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 + + + Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_ER + + + Extended Range Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_709_ER + + + Extended Range Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_709_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_709_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA + + + Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV + + + Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU + + + Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY + + + Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER + + + Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER + + + Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_MAX + +.. autoclass:: cuda.bindings.driver.CUdeviceptr_v2 +.. autoclass:: cuda.bindings.driver.CUdeviceptr +.. autoclass:: cuda.bindings.driver.CUdevice_v1 +.. autoclass:: cuda.bindings.driver.CUdevice +.. autoclass:: cuda.bindings.driver.CUcontext +.. autoclass:: cuda.bindings.driver.CUmodule +.. autoclass:: cuda.bindings.driver.CUfunction +.. autoclass:: cuda.bindings.driver.CUlibrary +.. autoclass:: cuda.bindings.driver.CUkernel +.. autoclass:: cuda.bindings.driver.CUarray +.. autoclass:: cuda.bindings.driver.CUmipmappedArray +.. autoclass:: cuda.bindings.driver.CUtexref +.. autoclass:: cuda.bindings.driver.CUsurfref +.. autoclass:: cuda.bindings.driver.CUevent +.. autoclass:: cuda.bindings.driver.CUstream +.. autoclass:: cuda.bindings.driver.CUgraphicsResource +.. autoclass:: cuda.bindings.driver.CUtexObject_v1 +.. autoclass:: cuda.bindings.driver.CUtexObject +.. autoclass:: cuda.bindings.driver.CUsurfObject_v1 +.. autoclass:: cuda.bindings.driver.CUsurfObject +.. autoclass:: cuda.bindings.driver.CUexternalMemory +.. autoclass:: cuda.bindings.driver.CUexternalSemaphore +.. autoclass:: cuda.bindings.driver.CUgraph +.. autoclass:: cuda.bindings.driver.CUgraphNode +.. autoclass:: cuda.bindings.driver.CUgraphExec +.. autoclass:: cuda.bindings.driver.CUmemoryPool +.. autoclass:: cuda.bindings.driver.CUuserObject +.. autoclass:: cuda.bindings.driver.CUgraphConditionalHandle +.. autoclass:: cuda.bindings.driver.CUgraphDeviceNode +.. 
autoclass:: cuda.bindings.driver.CUasyncCallbackHandle +.. autoclass:: cuda.bindings.driver.CUgreenCtx +.. autoclass:: cuda.bindings.driver.CUuuid +.. autoclass:: cuda.bindings.driver.CUmemFabricHandle_v1 +.. autoclass:: cuda.bindings.driver.CUmemFabricHandle +.. autoclass:: cuda.bindings.driver.CUipcEventHandle_v1 +.. autoclass:: cuda.bindings.driver.CUipcEventHandle +.. autoclass:: cuda.bindings.driver.CUipcMemHandle_v1 +.. autoclass:: cuda.bindings.driver.CUipcMemHandle +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpParams_v1 +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpParams +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUasyncNotificationInfo +.. autoclass:: cuda.bindings.driver.CUasyncCallback +.. autoclass:: cuda.bindings.driver.CUdevprop_v1 +.. autoclass:: cuda.bindings.driver.CUdevprop +.. autoclass:: cuda.bindings.driver.CUlinkState +.. autoclass:: cuda.bindings.driver.CUhostFn +.. autoclass:: cuda.bindings.driver.CUaccessPolicyWindow_v1 +.. autoclass:: cuda.bindings.driver.CUaccessPolicyWindow +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3 +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_CONDITIONAL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUgraphEdgeData +.. autoclass:: cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS +.. autoclass:: cuda.bindings.driver.CUlaunchMemSyncDomainMap +.. autoclass:: cuda.bindings.driver.CUlaunchAttributeValue +.. autoclass:: cuda.bindings.driver.CUlaunchAttribute +.. autoclass:: cuda.bindings.driver.CUlaunchConfig +.. autoclass:: cuda.bindings.driver.CUkernelNodeAttrID +.. autoclass:: cuda.bindings.driver.CUkernelNodeAttrValue_v1 +.. autoclass:: cuda.bindings.driver.CUkernelNodeAttrValue +.. autoclass:: cuda.bindings.driver.CUstreamAttrID +.. autoclass:: cuda.bindings.driver.CUstreamAttrValue_v1 +.. autoclass:: cuda.bindings.driver.CUstreamAttrValue +.. autoclass:: cuda.bindings.driver.CUexecAffinitySmCount_v1 +.. autoclass:: cuda.bindings.driver.CUexecAffinitySmCount +.. autoclass:: cuda.bindings.driver.CUexecAffinityParam_v1 +.. autoclass:: cuda.bindings.driver.CUexecAffinityParam +.. autoclass:: cuda.bindings.driver.CUctxCigParam +.. autoclass:: cuda.bindings.driver.CUctxCreateParams +.. autoclass:: cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable +.. autoclass:: cuda.bindings.driver.CUstreamCallback +.. autoclass:: cuda.bindings.driver.CUoccupancyB2DSize +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY2D_v2 +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY2D +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_v2 +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_PEER_v1 +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_PEER +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS +.. 
autoclass:: cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_v2 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_v2 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_v1 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_TEXTURE_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_TEXTURE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC +.. autoclass:: cuda.bindings.driver.CUtensorMap +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS +.. autoclass:: cuda.bindings.driver.CUDA_LAUNCH_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_LAUNCH_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUmemGenericAllocationHandle_v1 +.. autoclass:: cuda.bindings.driver.CUmemGenericAllocationHandle +.. autoclass:: cuda.bindings.driver.CUarrayMapInfo_v1 +.. autoclass:: cuda.bindings.driver.CUarrayMapInfo +.. autoclass:: cuda.bindings.driver.CUmemLocation_v1 +.. autoclass:: cuda.bindings.driver.CUmemLocation +.. autoclass:: cuda.bindings.driver.CUmemAllocationProp_v1 +.. autoclass:: cuda.bindings.driver.CUmemAllocationProp +.. autoclass:: cuda.bindings.driver.CUmulticastObjectProp_v1 +.. autoclass:: cuda.bindings.driver.CUmulticastObjectProp +.. autoclass:: cuda.bindings.driver.CUmemAccessDesc_v1 +.. autoclass:: cuda.bindings.driver.CUmemAccessDesc +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResultInfo_v1 +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResultInfo +.. autoclass:: cuda.bindings.driver.CUmemPoolProps_v1 +.. autoclass:: cuda.bindings.driver.CUmemPoolProps +.. autoclass:: cuda.bindings.driver.CUmemPoolPtrExportData_v1 +.. 
autoclass:: cuda.bindings.driver.CUmemPoolPtrExportData +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUgraphNodeParams +.. autoclass:: cuda.bindings.driver.CUeglFrame_v1 +.. autoclass:: cuda.bindings.driver.CUeglFrame +.. autoclass:: cuda.bindings.driver.CUeglStreamConnection +.. autoattribute:: cuda.bindings.driver.CUDA_VERSION + + CUDA API version number + +.. autoattribute:: cuda.bindings.driver.CU_UUID_HAS_BEEN_DEFINED + + CUDA UUID types + +.. autoattribute:: cuda.bindings.driver.CU_IPC_HANDLE_SIZE + + CUDA IPC handle size + +.. autoattribute:: cuda.bindings.driver.CU_STREAM_LEGACY + + Legacy stream handle + + + + Stream handle that can be passed as a CUstream to use an implicit stream with legacy synchronization behavior. + + + + See details of the stream synchronization behavior. + +.. autoattribute:: cuda.bindings.driver.CU_STREAM_PER_THREAD + + Per-thread stream handle + + + + Stream handle that can be passed as a CUstream to use an implicit stream with per-thread synchronization behavior. + + + + See details of the stream synchronization behavior. + +.. autoattribute:: cuda.bindings.driver.CU_COMPUTE_ACCELERATED_TARGET_BASE +.. autoattribute:: cuda.bindings.driver.CUDA_CB +.. autoattribute:: cuda.bindings.driver.CU_GRAPH_COND_ASSIGN_DEFAULT + + Conditional node handle flags. The default value is applied when the graph is launched. + +.. autoattribute:: cuda.bindings.driver.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT + + This port activates when the kernel has finished executing. + +.. autoattribute:: cuda.bindings.driver.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC + + This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC`. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT`. + +.. autoattribute:: cuda.bindings.driver.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER + + This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT`. + +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_PRIORITY +..
autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTALLOC_PORTABLE + + If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostAlloc()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTALLOC_DEVICEMAP + + If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostAlloc()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTALLOC_WRITECOMBINED + + If set, host memory is allocated as write-combined - fast to write, faster to DMA, slow to read except via the SSE4 streaming load instruction (MOVNTDQA). Flag for :py:obj:`~.cuMemHostAlloc()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_PORTABLE + + If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostRegister()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_DEVICEMAP + + If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostRegister()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_IOMEMORY + + If set, the passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device. On Windows the flag is a no-op. On Linux that memory is marked as non-cache-coherent for the GPU and is expected to be physically contiguous. It may return :py:obj:`~.CUDA_ERROR_NOT_PERMITTED` if run as an unprivileged user, or :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` on older Linux kernel versions. On all other platforms, it is not supported and :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` is returned. Flag for :py:obj:`~.cuMemHostRegister()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_READ_ONLY + + If set, the passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without :py:obj:`~.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES`, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED`. Using this flag with a current context associated with a device that does not have this attribute set will cause :py:obj:`~.cuMemHostRegister` to error with :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED`. + +.. autoattribute:: cuda.bindings.driver.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL + + Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers. + +.. autoattribute:: cuda.bindings.driver.CU_TENSOR_MAP_NUM_QWORDS + + Size of tensor map descriptor + +.. autoattribute:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_DEDICATED + + Indicates that the external memory object is a dedicated resource. + +..
autoattribute:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC + + When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + + When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS` contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.driver.CUDA_NVSCISYNC_ATTR_SIGNAL + + When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs a signaler-specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.driver.CUDA_NVSCISYNC_ATTR_WAIT + + When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs a waiter-specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.driver.CU_MEM_CREATE_USAGE_TILE_POOL + + If set, this flag indicates that the memory will be used as a tile pool. + +.. autoattribute:: cuda.bindings.driver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC + + If set, each kernel launched as part of :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. + +.. autoattribute:: cuda.bindings.driver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC + + If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_LAYERED + + If set, the CUDA array is a collection of layers, where each layer is either a 1D or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number of layers, not the depth of a 3D array. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_2DARRAY + + Deprecated, use CUDA_ARRAY3D_LAYERED. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_SURFACE_LDST + + This flag must be set in order to bind a surface reference to the CUDA array. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_CUBEMAP + + If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The width of such a CUDA array must be equal to its height, and Depth must be six. If the :py:obj:`~.CUDA_ARRAY3D_LAYERED` flag is also set, then the CUDA array is a collection of cubemaps and Depth must be a multiple of six. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_TEXTURE_GATHER + + This flag must be set in order to perform texture gather operations on a CUDA array. + +..
autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_DEPTH_TEXTURE + + If set, this flag indicates that the CUDA array is a DEPTH_TEXTURE. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_COLOR_ATTACHMENT + + This flag indicates that the CUDA array may be bound as a color target in an external graphics API. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_SPARSE + + If set, this flag indicates that the CUDA array or CUDA mipmapped array is a sparse CUDA array or CUDA mipmapped array, respectively. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_DEFERRED_MAPPING + + If set, this flag indicates that the CUDA array or CUDA mipmapped array will allow deferred memory mapping. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE + + This flag indicates that the CUDA array will be used for hardware accelerated video encode/decode operations. + +.. autoattribute:: cuda.bindings.driver.CU_TRSA_OVERRIDE_FORMAT + + Override the texref format with a format inferred from the array. Flag for :py:obj:`~.cuTexRefSetArray()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_READ_AS_INTEGER + + Read the texture as integers rather than promoting the values to floats in the range [0,1]. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_NORMALIZED_COORDINATES + + Use normalized texture coordinates in the range [0,1) instead of [0,dim). Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_SRGB + + Perform sRGB->linear conversion during texture read. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION + + Disable any trilinear filtering optimizations. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_SEAMLESS_CUBEMAP + + Enable seamless cube map filtering. Flag for :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_END_AS_INT + + C++ compile-time constant for CU_LAUNCH_PARAM_END + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_END + + End of array terminator for the `extra` parameter to :py:obj:`~.cuLaunchKernel` + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT + + C++ compile-time constant for CU_LAUNCH_PARAM_BUFFER_POINTER + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_POINTER + + Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a buffer containing all kernel parameters used for launching kernel `f`. This buffer needs to honor all alignment/padding requirements of the individual parameters. If :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not also specified in the `extra` array, then :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` will have no effect. + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT + + C++ compile-time constant for CU_LAUNCH_PARAM_BUFFER_SIZE + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_SIZE + + Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a size_t which contains the size of the buffer specified with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER`.
It is required that :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` also be specified in the `extra` array if the value associated with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not zero. + +.. autoattribute:: cuda.bindings.driver.CU_PARAM_TR_DEFAULT + + For texture references loaded into the module, use the default texunit from the texture reference. + +.. autoattribute:: cuda.bindings.driver.CU_DEVICE_CPU + + Device that represents the CPU + +.. autoattribute:: cuda.bindings.driver.CU_DEVICE_INVALID + + Device that represents an invalid device + +.. autoattribute:: cuda.bindings.driver.MAX_PLANES + + Maximum number of planes per frame + +.. autoattribute:: cuda.bindings.driver.CUDA_EGL_INFINITE_TIMEOUT + + Indicates that the timeout for :py:obj:`~.cuEGLStreamConsumerAcquireFrame` is infinite. + + +Error Handling +-------------- + +This section describes the error handling functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGetErrorString +.. autofunction:: cuda.bindings.driver.cuGetErrorName + +Initialization +-------------- + +This section describes the initialization functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuInit + +Version Management +------------------ + +This section describes the version management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuDriverGetVersion + +Device Management +----------------- + +This section describes the device management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuDeviceGet +.. autofunction:: cuda.bindings.driver.cuDeviceGetCount +.. autofunction:: cuda.bindings.driver.cuDeviceGetName +.. autofunction:: cuda.bindings.driver.cuDeviceGetUuid +.. autofunction:: cuda.bindings.driver.cuDeviceGetUuid_v2 +.. autofunction:: cuda.bindings.driver.cuDeviceGetLuid +.. autofunction:: cuda.bindings.driver.cuDeviceTotalMem +.. autofunction:: cuda.bindings.driver.cuDeviceGetTexture1DLinearMaxWidth +.. autofunction:: cuda.bindings.driver.cuDeviceGetAttribute +.. autofunction:: cuda.bindings.driver.cuDeviceGetNvSciSyncAttributes +.. autofunction:: cuda.bindings.driver.cuDeviceSetMemPool +.. autofunction:: cuda.bindings.driver.cuDeviceGetMemPool +.. autofunction:: cuda.bindings.driver.cuDeviceGetDefaultMemPool +.. autofunction:: cuda.bindings.driver.cuDeviceGetExecAffinitySupport +.. autofunction:: cuda.bindings.driver.cuFlushGPUDirectRDMAWrites + +Primary Context Management +-------------------------- + +This section describes the primary context management functions of the low-level CUDA driver application programming interface. + + + +The primary context is unique per device and shared with the CUDA runtime API. These functions allow integration with other libraries using CUDA. + +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxRetain +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxRelease +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxSetFlags +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxGetState +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxReset + +Context Management +------------------ + +This section describes the context management functions of the low-level CUDA driver application programming interface. + + + +Please note that some functions are described in the Primary Context Management section.
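+
+Taken together, the error-handling, initialization, device, and primary-context entries above share one calling convention in these bindings: each call returns a :py:obj:`~.CUresult` status first, followed by any out-parameters. The snippet below is a minimal illustrative sketch of that pattern, not a prescribed recipe; it assumes a single visible device.
+
+.. code-block:: python
+
+   from cuda.bindings import driver
+
+   def check(err):
+       # cuGetErrorName returns the status of the lookup itself plus the
+       # name of the queried error code.
+       if err != driver.CUresult.CUDA_SUCCESS:
+           _, name = driver.cuGetErrorName(err)
+           raise RuntimeError(f"CUDA driver error: {name}")
+
+   err, = driver.cuInit(0)                    # must precede other driver calls
+   check(err)
+   err, version = driver.cuDriverGetVersion()
+   check(err)
+   err, dev = driver.cuDeviceGet(0)           # first enumerated device
+   check(err)
+   # Retain the device's primary context (shared with the runtime API)
+   # instead of creating a fresh context.
+   err, ctx = driver.cuDevicePrimaryCtxRetain(dev)
+   check(err)
+   err, = driver.cuCtxSetCurrent(ctx)
+   check(err)
+   # ... issue work against the current context ...
+   err, = driver.cuDevicePrimaryCtxRelease(dev)
+   check(err)
+
+..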
autofunction:: cuda.bindings.driver.cuCtxCreate +.. autofunction:: cuda.bindings.driver.cuCtxCreate_v3 +.. autofunction:: cuda.bindings.driver.cuCtxCreate_v4 +.. autofunction:: cuda.bindings.driver.cuCtxDestroy +.. autofunction:: cuda.bindings.driver.cuCtxPushCurrent +.. autofunction:: cuda.bindings.driver.cuCtxPopCurrent +.. autofunction:: cuda.bindings.driver.cuCtxSetCurrent +.. autofunction:: cuda.bindings.driver.cuCtxGetCurrent +.. autofunction:: cuda.bindings.driver.cuCtxGetDevice +.. autofunction:: cuda.bindings.driver.cuCtxGetFlags +.. autofunction:: cuda.bindings.driver.cuCtxSetFlags +.. autofunction:: cuda.bindings.driver.cuCtxGetId +.. autofunction:: cuda.bindings.driver.cuCtxSynchronize +.. autofunction:: cuda.bindings.driver.cuCtxSetLimit +.. autofunction:: cuda.bindings.driver.cuCtxGetLimit +.. autofunction:: cuda.bindings.driver.cuCtxGetCacheConfig +.. autofunction:: cuda.bindings.driver.cuCtxSetCacheConfig +.. autofunction:: cuda.bindings.driver.cuCtxGetApiVersion +.. autofunction:: cuda.bindings.driver.cuCtxGetStreamPriorityRange +.. autofunction:: cuda.bindings.driver.cuCtxResetPersistingL2Cache +.. autofunction:: cuda.bindings.driver.cuCtxGetExecAffinity +.. autofunction:: cuda.bindings.driver.cuCtxRecordEvent +.. autofunction:: cuda.bindings.driver.cuCtxWaitEvent + +Module Management +----------------- + +This section describes the module management functions of the low-level CUDA driver application programming interface. + +.. autoclass:: cuda.bindings.driver.CUmoduleLoadingMode + + .. autoattribute:: cuda.bindings.driver.CUmoduleLoadingMode.CU_MODULE_EAGER_LOADING + + + Lazy Kernel Loading is not enabled + + + .. autoattribute:: cuda.bindings.driver.CUmoduleLoadingMode.CU_MODULE_LAZY_LOADING + + + Lazy Kernel Loading is enabled + +.. autofunction:: cuda.bindings.driver.cuModuleLoad +.. autofunction:: cuda.bindings.driver.cuModuleLoadData +.. autofunction:: cuda.bindings.driver.cuModuleLoadDataEx +.. autofunction:: cuda.bindings.driver.cuModuleLoadFatBinary +.. autofunction:: cuda.bindings.driver.cuModuleUnload +.. autofunction:: cuda.bindings.driver.cuModuleGetLoadingMode +.. autofunction:: cuda.bindings.driver.cuModuleGetFunction +.. autofunction:: cuda.bindings.driver.cuModuleGetFunctionCount +.. autofunction:: cuda.bindings.driver.cuModuleEnumerateFunctions +.. autofunction:: cuda.bindings.driver.cuModuleGetGlobal +.. autofunction:: cuda.bindings.driver.cuLinkCreate +.. autofunction:: cuda.bindings.driver.cuLinkAddData +.. autofunction:: cuda.bindings.driver.cuLinkAddFile +.. autofunction:: cuda.bindings.driver.cuLinkComplete +.. autofunction:: cuda.bindings.driver.cuLinkDestroy + +Library Management +------------------ + +This section describes the library management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuLibraryLoadData +.. autofunction:: cuda.bindings.driver.cuLibraryLoadFromFile +.. autofunction:: cuda.bindings.driver.cuLibraryUnload +.. autofunction:: cuda.bindings.driver.cuLibraryGetKernel +.. autofunction:: cuda.bindings.driver.cuLibraryGetKernelCount +.. autofunction:: cuda.bindings.driver.cuLibraryEnumerateKernels +.. autofunction:: cuda.bindings.driver.cuLibraryGetModule +.. autofunction:: cuda.bindings.driver.cuKernelGetFunction +.. autofunction:: cuda.bindings.driver.cuKernelGetLibrary +.. autofunction:: cuda.bindings.driver.cuLibraryGetGlobal +.. autofunction:: cuda.bindings.driver.cuLibraryGetManaged +.. autofunction:: cuda.bindings.driver.cuLibraryGetUnifiedFunction +.. 
autofunction:: cuda.bindings.driver.cuKernelGetAttribute +.. autofunction:: cuda.bindings.driver.cuKernelSetAttribute +.. autofunction:: cuda.bindings.driver.cuKernelSetCacheConfig +.. autofunction:: cuda.bindings.driver.cuKernelGetName +.. autofunction:: cuda.bindings.driver.cuKernelGetParamInfo + +Memory Management +----------------- + +This section describes the memory management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuMemGetInfo +.. autofunction:: cuda.bindings.driver.cuMemAlloc +.. autofunction:: cuda.bindings.driver.cuMemAllocPitch +.. autofunction:: cuda.bindings.driver.cuMemFree +.. autofunction:: cuda.bindings.driver.cuMemGetAddressRange +.. autofunction:: cuda.bindings.driver.cuMemAllocHost +.. autofunction:: cuda.bindings.driver.cuMemFreeHost +.. autofunction:: cuda.bindings.driver.cuMemHostAlloc +.. autofunction:: cuda.bindings.driver.cuMemHostGetDevicePointer +.. autofunction:: cuda.bindings.driver.cuMemHostGetFlags +.. autofunction:: cuda.bindings.driver.cuMemAllocManaged +.. autofunction:: cuda.bindings.driver.cuDeviceRegisterAsyncNotification +.. autofunction:: cuda.bindings.driver.cuDeviceUnregisterAsyncNotification +.. autofunction:: cuda.bindings.driver.cuDeviceGetByPCIBusId +.. autofunction:: cuda.bindings.driver.cuDeviceGetPCIBusId +.. autofunction:: cuda.bindings.driver.cuIpcGetEventHandle +.. autofunction:: cuda.bindings.driver.cuIpcOpenEventHandle +.. autofunction:: cuda.bindings.driver.cuIpcGetMemHandle +.. autofunction:: cuda.bindings.driver.cuIpcOpenMemHandle +.. autofunction:: cuda.bindings.driver.cuIpcCloseMemHandle +.. autofunction:: cuda.bindings.driver.cuMemHostRegister +.. autofunction:: cuda.bindings.driver.cuMemHostUnregister +.. autofunction:: cuda.bindings.driver.cuMemcpy +.. autofunction:: cuda.bindings.driver.cuMemcpyPeer +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoD +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoH +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoD +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoA +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoD +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoA +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoH +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoA +.. autofunction:: cuda.bindings.driver.cuMemcpy2D +.. autofunction:: cuda.bindings.driver.cuMemcpy2DUnaligned +.. autofunction:: cuda.bindings.driver.cuMemcpy3D +.. autofunction:: cuda.bindings.driver.cuMemcpy3DPeer +.. autofunction:: cuda.bindings.driver.cuMemcpyAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyPeerAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoDAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoHAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoDAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoAAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoHAsync +.. autofunction:: cuda.bindings.driver.cuMemcpy2DAsync +.. autofunction:: cuda.bindings.driver.cuMemcpy3DAsync +.. autofunction:: cuda.bindings.driver.cuMemcpy3DPeerAsync +.. autofunction:: cuda.bindings.driver.cuMemsetD8 +.. autofunction:: cuda.bindings.driver.cuMemsetD16 +.. autofunction:: cuda.bindings.driver.cuMemsetD32 +.. autofunction:: cuda.bindings.driver.cuMemsetD2D8 +.. autofunction:: cuda.bindings.driver.cuMemsetD2D16 +.. autofunction:: cuda.bindings.driver.cuMemsetD2D32 +.. autofunction:: cuda.bindings.driver.cuMemsetD8Async +.. autofunction:: cuda.bindings.driver.cuMemsetD16Async +.. 
autofunction:: cuda.bindings.driver.cuMemsetD32Async +.. autofunction:: cuda.bindings.driver.cuMemsetD2D8Async +.. autofunction:: cuda.bindings.driver.cuMemsetD2D16Async +.. autofunction:: cuda.bindings.driver.cuMemsetD2D32Async +.. autofunction:: cuda.bindings.driver.cuArrayCreate +.. autofunction:: cuda.bindings.driver.cuArrayGetDescriptor +.. autofunction:: cuda.bindings.driver.cuArrayGetSparseProperties +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayGetSparseProperties +.. autofunction:: cuda.bindings.driver.cuArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.driver.cuArrayGetPlane +.. autofunction:: cuda.bindings.driver.cuArrayDestroy +.. autofunction:: cuda.bindings.driver.cuArray3DCreate +.. autofunction:: cuda.bindings.driver.cuArray3DGetDescriptor +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayCreate +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayGetLevel +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayDestroy +.. autofunction:: cuda.bindings.driver.cuMemGetHandleForAddressRange + +Virtual Memory Management +------------------------- + +This section describes the virtual memory management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuMemAddressReserve +.. autofunction:: cuda.bindings.driver.cuMemAddressFree +.. autofunction:: cuda.bindings.driver.cuMemCreate +.. autofunction:: cuda.bindings.driver.cuMemRelease +.. autofunction:: cuda.bindings.driver.cuMemMap +.. autofunction:: cuda.bindings.driver.cuMemMapArrayAsync +.. autofunction:: cuda.bindings.driver.cuMemUnmap +.. autofunction:: cuda.bindings.driver.cuMemSetAccess +.. autofunction:: cuda.bindings.driver.cuMemGetAccess +.. autofunction:: cuda.bindings.driver.cuMemExportToShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemImportFromShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemGetAllocationGranularity +.. autofunction:: cuda.bindings.driver.cuMemGetAllocationPropertiesFromHandle +.. autofunction:: cuda.bindings.driver.cuMemRetainAllocationHandle + +Stream Ordered Memory Allocator +------------------------------- + +This section describes the stream ordered memory allocator exposed by the low-level CUDA driver application programming interface. + + + + + +**Overview** + + + +The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use-before-allocation or use-after-free error results in undefined behavior. + +The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + + + + + +**Supported Platforms** + + + +Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED. + +.. autofunction:: cuda.bindings.driver.cuMemFreeAsync +.. autofunction:: cuda.bindings.driver.cuMemAllocAsync
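+
+The two entries above are the core of the allocator; the remaining entries below manage the pools it draws from. The following is a minimal illustrative sketch of stream-ordered allocation (error checks omitted), assuming initialization has been performed as in the earlier sections, that a context is current, and that ``dev`` is the active device.
+
+.. code-block:: python
+
+   from cuda.bindings import driver
+
+   attr = driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED
+   err, supported = driver.cuDeviceGetAttribute(attr, dev)
+   if supported:
+       err, stream = driver.cuStreamCreate(0)
+       # Allocation and free are enqueued on `stream`; every access to
+       # dptr must fall between them in stream order.
+       err, dptr = driver.cuMemAllocAsync(1 << 20, stream)  # 1 MiB
+       # ... enqueue kernels or copies that use dptr on `stream` ...
+       err, = driver.cuMemFreeAsync(dptr, stream)
+       err, = driver.cuStreamSynchronize(stream)
+       err, = driver.cuStreamDestroy(stream)
+
+..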
autofunction:: cuda.bindings.driver.cuMemPoolTrimTo +.. autofunction:: cuda.bindings.driver.cuMemPoolSetAttribute +.. autofunction:: cuda.bindings.driver.cuMemPoolGetAttribute +.. autofunction:: cuda.bindings.driver.cuMemPoolSetAccess +.. autofunction:: cuda.bindings.driver.cuMemPoolGetAccess +.. autofunction:: cuda.bindings.driver.cuMemPoolCreate +.. autofunction:: cuda.bindings.driver.cuMemPoolDestroy +.. autofunction:: cuda.bindings.driver.cuMemAllocFromPoolAsync +.. autofunction:: cuda.bindings.driver.cuMemPoolExportToShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemPoolImportFromShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemPoolExportPointer +.. autofunction:: cuda.bindings.driver.cuMemPoolImportPointer + +Multicast Object Management +--------------------------- + +This section describes the CUDA multicast object operations exposed by the low-level CUDA driver application programming interface. + + + + + +**Overview** + + + +A multicast object created via cuMulticastCreate enables certain memory operations to be broadcast to a team of devices. Devices can be added to a multicast object via cuMulticastAddDevice. Memory can be bound on each participating device via either cuMulticastBindMem or cuMulticastBindAddr. Multicast objects can be mapped into a device's virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess). + + + + + +**Supported Platforms** + + + +Support for multicast on a specific device can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED. + +.. autofunction:: cuda.bindings.driver.cuMulticastCreate +.. autofunction:: cuda.bindings.driver.cuMulticastAddDevice +.. autofunction:: cuda.bindings.driver.cuMulticastBindMem +.. autofunction:: cuda.bindings.driver.cuMulticastBindAddr +.. autofunction:: cuda.bindings.driver.cuMulticastUnbind +.. autofunction:: cuda.bindings.driver.cuMulticastGetGranularity + +Unified Addressing +------------------ + +This section describes the unified addressing functions of the low-level CUDA driver application programming interface. + + + + + +**Overview** + + + +CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). + + + + + +**Supported Platforms** + + + +Whether or not a device supports unified addressing may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING. + +Unified addressing is automatically enabled in 64-bit processes. + + + + + +**Looking Up Information from Pointer Values** + + + +It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cuPointerGetAttribute(). + +Since pointers are unique, it is not necessary to specify information about the pointers specified to the various copy functions in the CUDA API. The function cuMemcpy() may be used to perform a copy between two pointers, ignoring whether they point to host or device memory (making cuMemcpyHtoD(), cuMemcpyDtoD(), and cuMemcpyDtoH() unnecessary for devices supporting unified addressing).
For multidimensional copies, the memory type CU_MEMORYTYPE_UNIFIED may be used to specify that the CUDA driver should infer the location of the pointer from its value. + + + + + +**Automatic Mapping of Host Allocated Host Memory** + + + +All host memory allocated in all contexts using cuMemAllocHost() and cuMemHostAlloc() is always directly accessible from all contexts on all devices that support unified addressing. This is the case regardless of whether or not the flags CU_MEMHOSTALLOC_PORTABLE and CU_MEMHOSTALLOC_DEVICEMAP are specified. + +The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host, so it is not necessary to call cuMemHostGetDevicePointer() to get the device pointer for these allocations. + +Note that this is not the case for memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below. + + + + + +**Automatic Registration of Peer Memory** + + + +Upon enabling direct access from a context that supports unified addressing to another peer context that supports unified addressing using cuCtxEnablePeerAccess(), all memory allocated in the peer context using cuMemAlloc() and cuMemAllocPitch() will immediately be accessible by the current context. The device pointer value through which any peer memory may be accessed in the current context is the same pointer value through which that memory may be accessed in the peer context. + + + + + +**Exceptions, Disjoint Addressing** + + + +Not all memory may be accessed on devices through the same pointer value through which it is accessed on the host. These exceptions are host memory registered using cuMemHostRegister() and host memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all contexts that support unified addressing. + +This device address may be queried using cuMemHostGetDevicePointer() when a context using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory through cuMemcpy() and similar functions using the CU_MEMORYTYPE_UNIFIED memory type. + +.. autofunction:: cuda.bindings.driver.cuPointerGetAttribute +.. autofunction:: cuda.bindings.driver.cuMemPrefetchAsync +.. autofunction:: cuda.bindings.driver.cuMemPrefetchAsync_v2 +.. autofunction:: cuda.bindings.driver.cuMemAdvise +.. autofunction:: cuda.bindings.driver.cuMemAdvise_v2 +.. autofunction:: cuda.bindings.driver.cuMemRangeGetAttribute +.. autofunction:: cuda.bindings.driver.cuMemRangeGetAttributes +.. autofunction:: cuda.bindings.driver.cuPointerSetAttribute +.. autofunction:: cuda.bindings.driver.cuPointerGetAttributes + +Stream Management +----------------- + +This section describes the stream management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuStreamCreate +.. autofunction:: cuda.bindings.driver.cuStreamCreateWithPriority +.. autofunction:: cuda.bindings.driver.cuStreamGetPriority +.. autofunction:: cuda.bindings.driver.cuStreamGetFlags +.. autofunction:: cuda.bindings.driver.cuStreamGetId +.. autofunction:: cuda.bindings.driver.cuStreamGetCtx +.. autofunction:: cuda.bindings.driver.cuStreamGetCtx_v2 +.. 
autofunction:: cuda.bindings.driver.cuStreamWaitEvent +.. autofunction:: cuda.bindings.driver.cuStreamAddCallback +.. autofunction:: cuda.bindings.driver.cuStreamBeginCapture +.. autofunction:: cuda.bindings.driver.cuStreamBeginCaptureToGraph +.. autofunction:: cuda.bindings.driver.cuThreadExchangeStreamCaptureMode +.. autofunction:: cuda.bindings.driver.cuStreamEndCapture +.. autofunction:: cuda.bindings.driver.cuStreamIsCapturing +.. autofunction:: cuda.bindings.driver.cuStreamGetCaptureInfo +.. autofunction:: cuda.bindings.driver.cuStreamGetCaptureInfo_v3 +.. autofunction:: cuda.bindings.driver.cuStreamUpdateCaptureDependencies +.. autofunction:: cuda.bindings.driver.cuStreamUpdateCaptureDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuStreamAttachMemAsync +.. autofunction:: cuda.bindings.driver.cuStreamQuery +.. autofunction:: cuda.bindings.driver.cuStreamSynchronize +.. autofunction:: cuda.bindings.driver.cuStreamDestroy +.. autofunction:: cuda.bindings.driver.cuStreamCopyAttributes +.. autofunction:: cuda.bindings.driver.cuStreamGetAttribute +.. autofunction:: cuda.bindings.driver.cuStreamSetAttribute + +Event Management +---------------- + +This section describes the event management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuEventCreate +.. autofunction:: cuda.bindings.driver.cuEventRecord +.. autofunction:: cuda.bindings.driver.cuEventRecordWithFlags +.. autofunction:: cuda.bindings.driver.cuEventQuery +.. autofunction:: cuda.bindings.driver.cuEventSynchronize +.. autofunction:: cuda.bindings.driver.cuEventDestroy +.. autofunction:: cuda.bindings.driver.cuEventElapsedTime + +External Resource Interoperability +---------------------------------- + +This section describes the external resource interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuImportExternalMemory +.. autofunction:: cuda.bindings.driver.cuExternalMemoryGetMappedBuffer +.. autofunction:: cuda.bindings.driver.cuExternalMemoryGetMappedMipmappedArray +.. autofunction:: cuda.bindings.driver.cuDestroyExternalMemory +.. autofunction:: cuda.bindings.driver.cuImportExternalSemaphore +.. autofunction:: cuda.bindings.driver.cuSignalExternalSemaphoresAsync +.. autofunction:: cuda.bindings.driver.cuWaitExternalSemaphoresAsync +.. autofunction:: cuda.bindings.driver.cuDestroyExternalSemaphore + +Stream Memory Operations +------------------------ + +This section describes the stream memory operations of the low-level CUDA driver application programming interface. + + + +Support for the CU_STREAM_WAIT_VALUE_NOR flag can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. + + + +Support for the cuStreamWriteValue64() and cuStreamWaitValue64() functions, as well as for the CU_STREAM_MEM_OP_WAIT_VALUE_64 and CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + + + +Support for both CU_STREAM_WAIT_VALUE_FLUSH and CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform hardware features and can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES. + + + +Note that all memory pointers passed as parameters to these operations are device pointers. Where necessary, a device pointer should be obtained, for example with cuMemHostGetDevicePointer(). + + + +None of the operations accepts pointers to managed memory buffers (cuMemAllocManaged).
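+
+As a usage illustration (a minimal sketch, not part of the generated reference; it assumes device 0 exists and elides error checking), these capability attributes can be queried through the Python bindings before any wait or write operations are issued:
+
+.. code-block:: python
+
+    from cuda.bindings import driver
+
+    driver.cuInit(0)
+    err, dev = driver.cuDeviceGet(0)
+    # 64-bit wait/write support (cuStreamWriteValue64 / cuStreamWaitValue64).
+    err, has_64bit_ops = driver.cuDeviceGetAttribute(
+        driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS, dev)
+    # Support for the CU_STREAM_WAIT_VALUE_NOR flag, per the note above.
+    err, has_nor = driver.cuDeviceGetAttribute(
+        driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2, dev)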
+ + + +Warning: Improper use of these APIs may deadlock the application. Synchronization ordering established through these APIs is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by these APIs should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. + +.. autofunction:: cuda.bindings.driver.cuStreamWaitValue32 +.. autofunction:: cuda.bindings.driver.cuStreamWaitValue64 +.. autofunction:: cuda.bindings.driver.cuStreamWriteValue32 +.. autofunction:: cuda.bindings.driver.cuStreamWriteValue64 +.. autofunction:: cuda.bindings.driver.cuStreamBatchMemOp + +Execution Control +----------------- + +This section describes the execution control functions of the low-level CUDA driver application programming interface. + +.. autoclass:: cuda.bindings.driver.CUfunctionLoadingState + + .. autoattribute:: cuda.bindings.driver.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_UNLOADED + + + .. autoattribute:: cuda.bindings.driver.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_LOADED + + + .. autoattribute:: cuda.bindings.driver.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_MAX + +.. autofunction:: cuda.bindings.driver.cuFuncGetAttribute +.. autofunction:: cuda.bindings.driver.cuFuncSetAttribute +.. autofunction:: cuda.bindings.driver.cuFuncSetCacheConfig +.. autofunction:: cuda.bindings.driver.cuFuncGetModule +.. autofunction:: cuda.bindings.driver.cuFuncGetName +.. autofunction:: cuda.bindings.driver.cuFuncGetParamInfo +.. autofunction:: cuda.bindings.driver.cuFuncIsLoaded +.. autofunction:: cuda.bindings.driver.cuFuncLoad +.. autofunction:: cuda.bindings.driver.cuLaunchKernel +.. autofunction:: cuda.bindings.driver.cuLaunchKernelEx +.. autofunction:: cuda.bindings.driver.cuLaunchCooperativeKernel +.. autofunction:: cuda.bindings.driver.cuLaunchCooperativeKernelMultiDevice +.. autofunction:: cuda.bindings.driver.cuLaunchHostFunc + +Graph Management +---------------- + +This section describes the graph management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGraphCreate +.. autofunction:: cuda.bindings.driver.cuGraphAddKernelNode +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemcpyNode +.. autofunction:: cuda.bindings.driver.cuGraphMemcpyNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphMemcpyNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemsetNode +.. autofunction:: cuda.bindings.driver.cuGraphMemsetNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphMemsetNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddHostNode +.. autofunction:: cuda.bindings.driver.cuGraphHostNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphHostNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddChildGraphNode +.. autofunction:: cuda.bindings.driver.cuGraphChildGraphNodeGetGraph +.. autofunction:: cuda.bindings.driver.cuGraphAddEmptyNode +.. autofunction:: cuda.bindings.driver.cuGraphAddEventRecordNode +.. autofunction:: cuda.bindings.driver.cuGraphEventRecordNodeGetEvent +.. autofunction:: cuda.bindings.driver.cuGraphEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphAddEventWaitNode +.. autofunction:: cuda.bindings.driver.cuGraphEventWaitNodeGetEvent +.. 
autofunction:: cuda.bindings.driver.cuGraphEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphAddExternalSemaphoresSignalNode +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresSignalNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddExternalSemaphoresWaitNode +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresWaitNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresWaitNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddBatchMemOpNode +.. autofunction:: cuda.bindings.driver.cuGraphBatchMemOpNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphBatchMemOpNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecBatchMemOpNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemAllocNode +.. autofunction:: cuda.bindings.driver.cuGraphMemAllocNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemFreeNode +.. autofunction:: cuda.bindings.driver.cuGraphMemFreeNodeGetParams +.. autofunction:: cuda.bindings.driver.cuDeviceGraphMemTrim +.. autofunction:: cuda.bindings.driver.cuDeviceGetGraphMemAttribute +.. autofunction:: cuda.bindings.driver.cuDeviceSetGraphMemAttribute +.. autofunction:: cuda.bindings.driver.cuGraphClone +.. autofunction:: cuda.bindings.driver.cuGraphNodeFindInClone +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetType +.. autofunction:: cuda.bindings.driver.cuGraphGetNodes +.. autofunction:: cuda.bindings.driver.cuGraphGetRootNodes +.. autofunction:: cuda.bindings.driver.cuGraphGetEdges +.. autofunction:: cuda.bindings.driver.cuGraphGetEdges_v2 +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependencies +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependentNodes +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependentNodes_v2 +.. autofunction:: cuda.bindings.driver.cuGraphAddDependencies +.. autofunction:: cuda.bindings.driver.cuGraphAddDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuGraphRemoveDependencies +.. autofunction:: cuda.bindings.driver.cuGraphRemoveDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuGraphDestroyNode +.. autofunction:: cuda.bindings.driver.cuGraphInstantiate +.. autofunction:: cuda.bindings.driver.cuGraphInstantiateWithParams +.. autofunction:: cuda.bindings.driver.cuGraphExecGetFlags +.. autofunction:: cuda.bindings.driver.cuGraphExecKernelNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecMemcpyNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecMemsetNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecHostNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecChildGraphNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphExecEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphExecExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecExternalSemaphoresWaitNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphNodeSetEnabled +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetEnabled +.. autofunction:: cuda.bindings.driver.cuGraphUpload +.. autofunction:: cuda.bindings.driver.cuGraphLaunch +.. autofunction:: cuda.bindings.driver.cuGraphExecDestroy +.. autofunction:: cuda.bindings.driver.cuGraphDestroy +.. 
autofunction:: cuda.bindings.driver.cuGraphExecUpdate +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeCopyAttributes +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeGetAttribute +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeSetAttribute +.. autofunction:: cuda.bindings.driver.cuGraphDebugDotPrint +.. autofunction:: cuda.bindings.driver.cuUserObjectCreate +.. autofunction:: cuda.bindings.driver.cuUserObjectRetain +.. autofunction:: cuda.bindings.driver.cuUserObjectRelease +.. autofunction:: cuda.bindings.driver.cuGraphRetainUserObject +.. autofunction:: cuda.bindings.driver.cuGraphReleaseUserObject +.. autofunction:: cuda.bindings.driver.cuGraphAddNode +.. autofunction:: cuda.bindings.driver.cuGraphAddNode_v2 +.. autofunction:: cuda.bindings.driver.cuGraphNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphConditionalHandleCreate + +Occupancy +--------- + +This section describes the occupancy calculation functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxActiveBlocksPerMultiprocessor +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxPotentialBlockSize +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxPotentialBlockSizeWithFlags +.. autofunction:: cuda.bindings.driver.cuOccupancyAvailableDynamicSMemPerBlock +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxPotentialClusterSize +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxActiveClusters + +Texture Object Management +------------------------- + +This section describes the texture object management functions of the low-level CUDA driver application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. + +.. autofunction:: cuda.bindings.driver.cuTexObjectCreate +.. autofunction:: cuda.bindings.driver.cuTexObjectDestroy +.. autofunction:: cuda.bindings.driver.cuTexObjectGetResourceDesc +.. autofunction:: cuda.bindings.driver.cuTexObjectGetTextureDesc +.. autofunction:: cuda.bindings.driver.cuTexObjectGetResourceViewDesc + +Surface Object Management +------------------------- + +This section describes the surface object management functions of the low-level CUDA driver application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. + +.. autofunction:: cuda.bindings.driver.cuSurfObjectCreate +.. autofunction:: cuda.bindings.driver.cuSurfObjectDestroy +.. autofunction:: cuda.bindings.driver.cuSurfObjectGetResourceDesc + +Tensor Map Object Management +---------------------------- + +This section describes the tensor map object management functions of the low-level CUDA driver application programming interface. The tensor core API is only supported on devices of compute capability 9.0 or higher. + +.. autofunction:: cuda.bindings.driver.cuTensorMapEncodeTiled +.. autofunction:: cuda.bindings.driver.cuTensorMapEncodeIm2col +.. autofunction:: cuda.bindings.driver.cuTensorMapReplaceAddress + +Peer Context Memory Access +-------------------------- + +This section describes the direct peer context memory access functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuDeviceCanAccessPeer +.. autofunction:: cuda.bindings.driver.cuCtxEnablePeerAccess +.. 
autofunction:: cuda.bindings.driver.cuCtxDisablePeerAccess +.. autofunction:: cuda.bindings.driver.cuDeviceGetP2PAttribute + +Graphics Interoperability +------------------------- + +This section describes the graphics interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGraphicsUnregisterResource +.. autofunction:: cuda.bindings.driver.cuGraphicsSubResourceGetMappedArray +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceGetMappedMipmappedArray +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceGetMappedPointer +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceSetMapFlags +.. autofunction:: cuda.bindings.driver.cuGraphicsMapResources +.. autofunction:: cuda.bindings.driver.cuGraphicsUnmapResources + +Driver Entry Point Access +------------------------- + +This section describes the driver entry point access functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGetProcAddress + +Coredump Attributes Control API +------------------------------- + +This section describes the coredump attribute control functions of the low-level CUDA driver application programming interface. + +.. autoclass:: cuda.bindings.driver.CUcoredumpSettings + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_ENABLE_ON_EXCEPTION + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_TRIGGER_HOST + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_LIGHTWEIGHT + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_ENABLE_USER_TRIGGER + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_FILE + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_PIPE + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_GENERATION_FLAGS + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_MAX + +.. autoclass:: cuda.bindings.driver.CUCoredumpGenerationFlags + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_DEFAULT_FLAGS + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_GLOBAL_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_SHARED_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_LOCAL_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_ABORT + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_CONSTBANK_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_LIGHTWEIGHT_FLAGS + +.. autofunction:: cuda.bindings.driver.cuCoredumpGetAttribute +.. autofunction:: cuda.bindings.driver.cuCoredumpGetAttributeGlobal +.. autofunction:: cuda.bindings.driver.cuCoredumpSetAttribute +.. autofunction:: cuda.bindings.driver.cuCoredumpSetAttributeGlobal + +Green Contexts +-------------- + +This section describes the APIs for creation and manipulation of green contexts in the CUDA driver. Green contexts are a lightweight alternative to traditional contexts, with the ability to pass in a set of resources that they should be initialized with. 
This allows the developer to represent distinct spatial partitions of the GPU, provision resources for them, and target them via the same programming model that CUDA exposes (streams, kernel launches, etc.). + + + +There are four main steps to using this new set of APIs. + +- (1) Start with an initial set of resources, for example via cuDeviceGetDevResource. Only SM type is supported today. + + + + + + + +- (2) Partition this set of resources by providing them as input to a partition API, for example: cuDevSmResourceSplitByCount. + + + + + + + +- (3) Finalize the specification of resources by creating a descriptor via cuDevResourceGenerateDesc. + + + + + + + +- (4) Provision the resources and create a green context via cuGreenCtxCreate. + + + + + + + + + + + +For ``CU_DEV_RESOURCE_TYPE_SM``\ , the partitions created have minimum SM count requirements, often rounding up and aligning the minCount provided to cuDevSmResourceSplitByCount. The following is a guideline for each architecture and may be subject to change: + +- On Compute Architecture 6.X: The minimum count is 1 SM. + + + + + + + +- On Compute Architecture 7.X: The minimum count is 2 SMs and must be a multiple of 2. + + + + + + + +- On Compute Architecture 8.X: The minimum count is 4 SMs and must be a multiple of 2. + + + + + + + +- On Compute Architecture 9.0+: The minimum count is 8 SMs and must be a multiple of 8. + + + + + + + + + + + +In the future, flags can be provided to trade off functional and performance characteristics versus finer-grained SM partitions. + + + +Even if the green contexts have disjoint SM partitions, it is not guaranteed that the kernels launched in them will run concurrently or have forward progress guarantees. This is due to other resources (like HW connections, see CUDA_DEVICE_MAX_CONNECTIONS) that could cause a dependency. Additionally, in certain scenarios, it is possible for the workload to run on more SMs than was provisioned (but never less). The following are two scenarios which can exhibit this behavior: + +- On Volta+ MPS: When ``CUDA_MPS_ACTIVE_THREAD_PERCENTAGE`` is used, the set of SMs that are used for running kernels can be scaled up to the value of SMs used for the MPS client. + + + + + + + +- On Compute Architecture 9.x: When a module with dynamic parallelism (CDP) is loaded, all future kernels running under green contexts may use and share an additional set of 2 SMs. + +.. autoclass:: cuda.bindings.driver.CUdevSmResource_st +.. autoclass:: cuda.bindings.driver.CUdevResource_st +.. autoclass:: cuda.bindings.driver.CUdevSmResource +.. autoclass:: cuda.bindings.driver.CUdevResource +.. autoclass:: cuda.bindings.driver.CUgreenCtxCreate_flags + + .. autoattribute:: cuda.bindings.driver.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM + + + Required. Creates a default stream to use inside the green context + +.. autoclass:: cuda.bindings.driver.CUdevSmResourceSplit_flags + + .. autoattribute:: cuda.bindings.driver.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING + + + .. autoattribute:: cuda.bindings.driver.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE + +.. autoclass:: cuda.bindings.driver.CUdevResourceType + + .. autoattribute:: cuda.bindings.driver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_INVALID + + + .. autoattribute:: cuda.bindings.driver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM + + + Streaming multiprocessors related information + +.. autoclass:: cuda.bindings.driver.CUdevResourceDesc +.. 
autoclass:: cuda.bindings.driver.CUdevSmResource +.. autofunction:: cuda.bindings.driver._CONCAT_OUTER +.. autofunction:: cuda.bindings.driver.cuGreenCtxCreate +.. autofunction:: cuda.bindings.driver.cuGreenCtxDestroy +.. autofunction:: cuda.bindings.driver.cuCtxFromGreenCtx +.. autofunction:: cuda.bindings.driver.cuDeviceGetDevResource +.. autofunction:: cuda.bindings.driver.cuCtxGetDevResource +.. autofunction:: cuda.bindings.driver.cuGreenCtxGetDevResource +.. autofunction:: cuda.bindings.driver.cuDevSmResourceSplitByCount +.. autofunction:: cuda.bindings.driver.cuDevResourceGenerateDesc +.. autofunction:: cuda.bindings.driver.cuGreenCtxRecordEvent +.. autofunction:: cuda.bindings.driver.cuGreenCtxWaitEvent +.. autofunction:: cuda.bindings.driver.cuStreamGetGreenCtx +.. autofunction:: cuda.bindings.driver.cuGreenCtxStreamCreate +.. autoattribute:: cuda.bindings.driver.RESOURCE_ABI_VERSION +.. autoattribute:: cuda.bindings.driver.RESOURCE_ABI_EXTERNAL_BYTES +.. autoattribute:: cuda.bindings.driver._CONCAT_INNER +.. autoattribute:: cuda.bindings.driver._CONCAT_OUTER + +EGL Interoperability +-------------------- + +This section describes the EGL interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGraphicsEGLRegisterImage +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerConnect +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerConnectWithFlags +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerDisconnect +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerAcquireFrame +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerReleaseFrame +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerConnect +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerDisconnect +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerPresentFrame +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerReturnFrame +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceGetMappedEglFrame +.. autofunction:: cuda.bindings.driver.cuEventCreateFromEGLSync + +OpenGL Interoperability +----------------------- + +This section describes the OpenGL interoperability functions of the low-level CUDA driver application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability. + +.. autoclass:: cuda.bindings.driver.CUGLDeviceList + + .. autoattribute:: cuda.bindings.driver.CUGLDeviceList.CU_GL_DEVICE_LIST_ALL + + + The CUDA devices for all GPUs used by the current OpenGL context + + + .. autoattribute:: cuda.bindings.driver.CUGLDeviceList.CU_GL_DEVICE_LIST_CURRENT_FRAME + + + The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame + + + .. autoattribute:: cuda.bindings.driver.CUGLDeviceList.CU_GL_DEVICE_LIST_NEXT_FRAME + + + The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame + +.. autofunction:: cuda.bindings.driver.cuGraphicsGLRegisterBuffer +.. autofunction:: cuda.bindings.driver.cuGraphicsGLRegisterImage +.. autofunction:: cuda.bindings.driver.cuGLGetDevices + +Profiler Control +---------------- + +This section describes the profiler control functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuProfilerStart +.. 
autofunction:: cuda.bindings.driver.cuProfilerStop + +VDPAU Interoperability +---------------------- + +This section describes the VDPAU interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuVDPAUGetDevice +.. autofunction:: cuda.bindings.driver.cuVDPAUCtxCreate +.. autofunction:: cuda.bindings.driver.cuGraphicsVDPAURegisterVideoSurface +.. autofunction:: cuda.bindings.driver.cuGraphicsVDPAURegisterOutputSurface diff --git a/docs/_sources/module/nvrtc.rst.txt b/docs/_sources/module/nvrtc.rst.txt index cc7d31be..2a1297c1 100644 --- a/docs/_sources/module/nvrtc.rst.txt +++ b/docs/_sources/module/nvrtc.rst.txt @@ -7,80 +7,80 @@ Error Handling NVRTC defines the following enumeration type and function for API call error handling. -.. autoclass:: cuda.nvrtc.nvrtcResult +.. autoclass:: cuda.bindings.nvrtc.nvrtcResult - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_SUCCESS + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_SUCCESS - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_OUT_OF_MEMORY + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_OUT_OF_MEMORY - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_PROGRAM_CREATION_FAILURE + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_PROGRAM_CREATION_FAILURE - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_INPUT + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_INPUT - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_PROGRAM + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_PROGRAM - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_OPTION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_OPTION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_COMPILATION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_COMPILATION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_BUILTIN_OPERATION_FAILURE + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_BUILTIN_OPERATION_FAILURE - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INTERNAL_ERROR + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INTERNAL_ERROR - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_TIME_FILE_WRITE_FAILED + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_TIME_FILE_WRITE_FAILED -.. autofunction:: cuda.nvrtc.nvrtcGetErrorString +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetErrorString General Information Query ------------------------- NVRTC defines the following function for general information query. -.. autofunction:: cuda.nvrtc.nvrtcVersion -.. autofunction:: cuda.nvrtc.nvrtcGetNumSupportedArchs -.. autofunction:: cuda.nvrtc.nvrtcGetSupportedArchs +.. autofunction:: cuda.bindings.nvrtc.nvrtcVersion +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetNumSupportedArchs +.. 
autofunction:: cuda.bindings.nvrtc.nvrtcGetSupportedArchs Compilation ----------- NVRTC defines the following type and functions for actual compilation. -.. autoclass:: cuda.nvrtc.nvrtcProgram -.. autofunction:: cuda.nvrtc.nvrtcCreateProgram -.. autofunction:: cuda.nvrtc.nvrtcDestroyProgram -.. autofunction:: cuda.nvrtc.nvrtcCompileProgram -.. autofunction:: cuda.nvrtc.nvrtcGetPTXSize -.. autofunction:: cuda.nvrtc.nvrtcGetPTX -.. autofunction:: cuda.nvrtc.nvrtcGetCUBINSize -.. autofunction:: cuda.nvrtc.nvrtcGetCUBIN -.. autofunction:: cuda.nvrtc.nvrtcGetNVVMSize -.. autofunction:: cuda.nvrtc.nvrtcGetNVVM -.. autofunction:: cuda.nvrtc.nvrtcGetLTOIRSize -.. autofunction:: cuda.nvrtc.nvrtcGetLTOIR -.. autofunction:: cuda.nvrtc.nvrtcGetOptiXIRSize -.. autofunction:: cuda.nvrtc.nvrtcGetOptiXIR -.. autofunction:: cuda.nvrtc.nvrtcGetProgramLogSize -.. autofunction:: cuda.nvrtc.nvrtcGetProgramLog -.. autofunction:: cuda.nvrtc.nvrtcAddNameExpression -.. autofunction:: cuda.nvrtc.nvrtcGetLoweredName +.. autoclass:: cuda.bindings.nvrtc.nvrtcProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcCreateProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcDestroyProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcCompileProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetPTXSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetPTX +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetCUBINSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetCUBIN +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetNVVMSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetNVVM +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetLTOIRSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetLTOIR +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetOptiXIRSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetOptiXIR +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetProgramLogSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetProgramLog +.. autofunction:: cuda.bindings.nvrtc.nvrtcAddNameExpression +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetLoweredName Supported Compile Options ------------------------- diff --git a/docs/_sources/module/runtime.rst.txt b/docs/_sources/module/runtime.rst.txt new file mode 100644 index 00000000..55687b68 --- /dev/null +++ b/docs/_sources/module/runtime.rst.txt @@ -0,0 +1,5274 @@ +------- +runtime +------- + +Profiler Control +---------------- + +This section describes the profiler control functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaProfilerStart +.. autofunction:: cuda.bindings.runtime.cudaProfilerStop + +Device Management +----------------- + +This section describes the device management functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaDeviceReset +.. autofunction:: cuda.bindings.runtime.cudaDeviceSynchronize +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetLimit +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetLimit +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetTexture1DLinearMaxWidth +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetCacheConfig +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetStreamPriorityRange +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetCacheConfig +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetByPCIBusId +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetPCIBusId +.. autofunction:: cuda.bindings.runtime.cudaIpcGetEventHandle +.. autofunction:: cuda.bindings.runtime.cudaIpcOpenEventHandle +.. 
autofunction:: cuda.bindings.runtime.cudaIpcGetMemHandle +.. autofunction:: cuda.bindings.runtime.cudaIpcOpenMemHandle +.. autofunction:: cuda.bindings.runtime.cudaIpcCloseMemHandle +.. autofunction:: cuda.bindings.runtime.cudaDeviceFlushGPUDirectRDMAWrites +.. autofunction:: cuda.bindings.runtime.cudaDeviceRegisterAsyncNotification +.. autofunction:: cuda.bindings.runtime.cudaDeviceUnregisterAsyncNotification +.. autofunction:: cuda.bindings.runtime.cudaGetDeviceCount +.. autofunction:: cuda.bindings.runtime.cudaGetDeviceProperties +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetDefaultMemPool +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetMemPool +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetMemPool +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetNvSciSyncAttributes +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetP2PAttribute +.. autofunction:: cuda.bindings.runtime.cudaChooseDevice +.. autofunction:: cuda.bindings.runtime.cudaInitDevice +.. autofunction:: cuda.bindings.runtime.cudaSetDevice +.. autofunction:: cuda.bindings.runtime.cudaGetDevice +.. autofunction:: cuda.bindings.runtime.cudaSetDeviceFlags +.. autofunction:: cuda.bindings.runtime.cudaGetDeviceFlags + +Error Handling +-------------- + +This section describes the error handling functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGetLastError +.. autofunction:: cuda.bindings.runtime.cudaPeekAtLastError +.. autofunction:: cuda.bindings.runtime.cudaGetErrorName +.. autofunction:: cuda.bindings.runtime.cudaGetErrorString + +Stream Management +----------------- + +This section describes the stream management functions of the CUDA runtime application programming interface. + +.. autoclass:: cuda.bindings.runtime.cudaStreamCallback_t +.. autofunction:: cuda.bindings.runtime.cudaStreamCreate +.. autofunction:: cuda.bindings.runtime.cudaStreamCreateWithFlags +.. autofunction:: cuda.bindings.runtime.cudaStreamCreateWithPriority +.. autofunction:: cuda.bindings.runtime.cudaStreamGetPriority +.. autofunction:: cuda.bindings.runtime.cudaStreamGetFlags +.. autofunction:: cuda.bindings.runtime.cudaStreamGetId +.. autofunction:: cuda.bindings.runtime.cudaCtxResetPersistingL2Cache +.. autofunction:: cuda.bindings.runtime.cudaStreamCopyAttributes +.. autofunction:: cuda.bindings.runtime.cudaStreamGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaStreamSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaStreamDestroy +.. autofunction:: cuda.bindings.runtime.cudaStreamWaitEvent +.. autofunction:: cuda.bindings.runtime.cudaStreamAddCallback +.. autofunction:: cuda.bindings.runtime.cudaStreamSynchronize +.. autofunction:: cuda.bindings.runtime.cudaStreamQuery +.. autofunction:: cuda.bindings.runtime.cudaStreamAttachMemAsync +.. autofunction:: cuda.bindings.runtime.cudaStreamBeginCapture +.. autofunction:: cuda.bindings.runtime.cudaStreamBeginCaptureToGraph +.. autofunction:: cuda.bindings.runtime.cudaThreadExchangeStreamCaptureMode +.. autofunction:: cuda.bindings.runtime.cudaStreamEndCapture +.. autofunction:: cuda.bindings.runtime.cudaStreamIsCapturing +.. autofunction:: cuda.bindings.runtime.cudaStreamGetCaptureInfo +.. autofunction:: cuda.bindings.runtime.cudaStreamGetCaptureInfo_v3 +.. autofunction:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies +.. 
autofunction:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies_v2 + +Event Management +---------------- + +This section describes the event management functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaEventCreate +.. autofunction:: cuda.bindings.runtime.cudaEventCreateWithFlags +.. autofunction:: cuda.bindings.runtime.cudaEventRecord +.. autofunction:: cuda.bindings.runtime.cudaEventRecordWithFlags +.. autofunction:: cuda.bindings.runtime.cudaEventQuery +.. autofunction:: cuda.bindings.runtime.cudaEventSynchronize +.. autofunction:: cuda.bindings.runtime.cudaEventDestroy +.. autofunction:: cuda.bindings.runtime.cudaEventElapsedTime + +External Resource Interoperability +---------------------------------- + +This section describes the external resource interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaImportExternalMemory +.. autofunction:: cuda.bindings.runtime.cudaExternalMemoryGetMappedBuffer +.. autofunction:: cuda.bindings.runtime.cudaExternalMemoryGetMappedMipmappedArray +.. autofunction:: cuda.bindings.runtime.cudaDestroyExternalMemory +.. autofunction:: cuda.bindings.runtime.cudaImportExternalSemaphore +.. autofunction:: cuda.bindings.runtime.cudaSignalExternalSemaphoresAsync +.. autofunction:: cuda.bindings.runtime.cudaWaitExternalSemaphoresAsync +.. autofunction:: cuda.bindings.runtime.cudaDestroyExternalSemaphore + +Execution Control +----------------- + +This section describes the execution control functions of the CUDA runtime application programming interface. + + + +Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. + +.. autofunction:: cuda.bindings.runtime.cudaFuncSetCacheConfig +.. autofunction:: cuda.bindings.runtime.cudaFuncGetAttributes +.. autofunction:: cuda.bindings.runtime.cudaFuncSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaLaunchHostFunc + +Occupancy +--------- + +This section describes the occupancy calculation functions of the CUDA runtime application programming interface. + + + +Besides the occupancy calculator functions (cudaOccupancyMaxActiveBlocksPerMultiprocessor and cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags), there are also C++ only occupancy-based launch configuration functions documented in the C++ API Routines module. + + + +See cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), and cudaOccupancyAvailableDynamicSMemPerBlock (C++ API). + +.. autofunction:: cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessor +.. autofunction:: cuda.bindings.runtime.cudaOccupancyAvailableDynamicSMemPerBlock +.. autofunction:: cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + +Memory Management +----------------- + +This section describes the memory management functions of the CUDA runtime application programming interface. + + + +Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. + +.. autofunction:: cuda.bindings.runtime.cudaMallocManaged +.. autofunction:: cuda.bindings.runtime.cudaMalloc +.. autofunction:: cuda.bindings.runtime.cudaMallocHost +.. autofunction:: cuda.bindings.runtime.cudaMallocPitch +.. autofunction:: cuda.bindings.runtime.cudaMallocArray +.. 
autofunction:: cuda.bindings.runtime.cudaFree +.. autofunction:: cuda.bindings.runtime.cudaFreeHost +.. autofunction:: cuda.bindings.runtime.cudaFreeArray +.. autofunction:: cuda.bindings.runtime.cudaFreeMipmappedArray +.. autofunction:: cuda.bindings.runtime.cudaHostAlloc +.. autofunction:: cuda.bindings.runtime.cudaHostRegister +.. autofunction:: cuda.bindings.runtime.cudaHostUnregister +.. autofunction:: cuda.bindings.runtime.cudaHostGetDevicePointer +.. autofunction:: cuda.bindings.runtime.cudaHostGetFlags +.. autofunction:: cuda.bindings.runtime.cudaMalloc3D +.. autofunction:: cuda.bindings.runtime.cudaMalloc3DArray +.. autofunction:: cuda.bindings.runtime.cudaMallocMipmappedArray +.. autofunction:: cuda.bindings.runtime.cudaGetMipmappedArrayLevel +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3D +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3DPeer +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3DPeerAsync +.. autofunction:: cuda.bindings.runtime.cudaMemGetInfo +.. autofunction:: cuda.bindings.runtime.cudaArrayGetInfo +.. autofunction:: cuda.bindings.runtime.cudaArrayGetPlane +.. autofunction:: cuda.bindings.runtime.cudaArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.runtime.cudaMipmappedArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.runtime.cudaArrayGetSparseProperties +.. autofunction:: cuda.bindings.runtime.cudaMipmappedArrayGetSparseProperties +.. autofunction:: cuda.bindings.runtime.cudaMemcpy +.. autofunction:: cuda.bindings.runtime.cudaMemcpyPeer +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2D +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DToArray +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DFromArray +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DArrayToArray +.. autofunction:: cuda.bindings.runtime.cudaMemcpyAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpyPeerAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DToArrayAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DFromArrayAsync +.. autofunction:: cuda.bindings.runtime.cudaMemset +.. autofunction:: cuda.bindings.runtime.cudaMemset2D +.. autofunction:: cuda.bindings.runtime.cudaMemset3D +.. autofunction:: cuda.bindings.runtime.cudaMemsetAsync +.. autofunction:: cuda.bindings.runtime.cudaMemset2DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemset3DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPrefetchAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPrefetchAsync_v2 +.. autofunction:: cuda.bindings.runtime.cudaMemAdvise +.. autofunction:: cuda.bindings.runtime.cudaMemAdvise_v2 +.. autofunction:: cuda.bindings.runtime.cudaMemRangeGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaMemRangeGetAttributes +.. autofunction:: cuda.bindings.runtime.make_cudaPitchedPtr +.. autofunction:: cuda.bindings.runtime.make_cudaPos +.. autofunction:: cuda.bindings.runtime.make_cudaExtent + +Stream Ordered Memory Allocator +------------------------------- + +**Overview** + + + +The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use-before-allocation or use-after-free error will cause undefined behavior.
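+
+To make this stream-ordering contract concrete, the following is a minimal illustrative sketch using the Python runtime bindings (not part of the generated reference; it assumes a device that supports memory pools, and error checking is elided):
+
+.. code-block:: python
+
+    from cuda.bindings import runtime
+
+    err, stream = runtime.cudaStreamCreate()
+    # The allocation becomes usable in stream order; any access to dptr
+    # must be ordered after the allocation and before the free.
+    err, dptr = runtime.cudaMallocAsync(1 << 20, stream)
+    # ... enqueue work on `stream` that reads or writes dptr here ...
+    err, = runtime.cudaFreeAsync(dptr, stream)
+    err, = runtime.cudaStreamSynchronize(stream)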
+ +The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + + + + + +**Supported Platforms** + + + +Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cudaDeviceGetAttribute() with the device attribute cudaDevAttrMemoryPoolsSupported. + +.. autofunction:: cuda.bindings.runtime.cudaMallocAsync +.. autofunction:: cuda.bindings.runtime.cudaFreeAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPoolTrimTo +.. autofunction:: cuda.bindings.runtime.cudaMemPoolSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaMemPoolGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaMemPoolSetAccess +.. autofunction:: cuda.bindings.runtime.cudaMemPoolGetAccess +.. autofunction:: cuda.bindings.runtime.cudaMemPoolCreate +.. autofunction:: cuda.bindings.runtime.cudaMemPoolDestroy +.. autofunction:: cuda.bindings.runtime.cudaMallocFromPoolAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPoolExportToShareableHandle +.. autofunction:: cuda.bindings.runtime.cudaMemPoolImportFromShareableHandle +.. autofunction:: cuda.bindings.runtime.cudaMemPoolExportPointer +.. autofunction:: cuda.bindings.runtime.cudaMemPoolImportPointer + +Unified Addressing +------------------ + +This section describes the unified addressing functions of the CUDA runtime application programming interface. + + + + + +**Overview** + + + +CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). + + + + + +**Supported Platforms** + + + +Whether or not a device supports unified addressing may be queried by calling cudaGetDeviceProperties() with the device property cudaDeviceProp::unifiedAddressing. + +Unified addressing is automatically enabled in 64-bit processes. + + + + + +**Looking Up Information from Pointer Values** + + + +It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cudaPointerGetAttributes(). + +Since pointers are unique, it is not necessary to specify information about the pointers specified to cudaMemcpy() and other copy functions. The copy direction cudaMemcpyDefault may be used to specify that the CUDA runtime should infer the location of the pointer from its value. + + + + + +**Automatic Mapping of Host Allocated Host Memory** + + + +All host memory allocated through all devices using cudaMallocHost() and cudaHostAlloc() is always directly accessible from all devices that support unified addressing. This is the case regardless of whether or not the flags cudaHostAllocPortable and cudaHostAllocMapped are specified.
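+
+A minimal sketch of this property (illustrative only, not part of the generated reference; it assumes a 64-bit process on a platform with unified addressing, and error checking is elided): cudaPointerGetAttributes() can be used to observe that a cudaMallocHost() allocation carries matching host and device pointer values.
+
+.. code-block:: python
+
+    from cuda.bindings import runtime
+
+    err, hptr = runtime.cudaMallocHost(256)
+    err, attrs = runtime.cudaPointerGetAttributes(hptr)
+    # Under unified addressing, attrs.hostPointer and attrs.devicePointer
+    # report the same value for this allocation.
+    print(attrs.type, attrs.hostPointer, attrs.devicePointer)
+    err, = runtime.cudaFreeHost(hptr)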
+ +The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host. It is not necessary to call cudaHostGetDevicePointer() to get the device pointer for these allocations. + + + +Note that this is not the case for memory allocated using the flag cudaHostAllocWriteCombined, as discussed below. + + + + + +**Direct Access of Peer Memory** + + + +Upon enabling direct access from a device that supports unified addressing to another peer device that supports unified addressing using cudaDeviceEnablePeerAccess(), all memory allocated in the peer device using cudaMalloc() and cudaMallocPitch() will immediately be accessible by the current device. The device pointer value through which any peer's memory may be accessed in the current device is the same pointer value through which that memory may be accessed from the peer device. + + + + + +**Exceptions, Disjoint Addressing** + + + +Not all memory may be accessed on devices through the same pointer value through which it is accessed on the host. These exceptions are host memory registered using cudaHostRegister() and host memory allocated using the flag cudaHostAllocWriteCombined. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all devices that support unified addressing. + + + +This device address may be queried using cudaHostGetDevicePointer() when a device using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory in cudaMemcpy() and similar functions using the cudaMemcpyDefault memory direction. + +.. autofunction:: cuda.bindings.runtime.cudaPointerGetAttributes + +Peer Device Memory Access +------------------------- + +This section describes the peer device memory access functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaDeviceCanAccessPeer +.. autofunction:: cuda.bindings.runtime.cudaDeviceEnablePeerAccess +.. autofunction:: cuda.bindings.runtime.cudaDeviceDisablePeerAccess + +OpenGL Interoperability +----------------------- + +This section describes the OpenGL interoperability functions of the CUDA runtime application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability. + +.. autoclass:: cuda.bindings.runtime.cudaGLDeviceList + + .. autoattribute:: cuda.bindings.runtime.cudaGLDeviceList.cudaGLDeviceListAll + + + The CUDA devices for all GPUs used by the current OpenGL context + + + .. autoattribute:: cuda.bindings.runtime.cudaGLDeviceList.cudaGLDeviceListCurrentFrame + + + The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame + + + .. autoattribute:: cuda.bindings.runtime.cudaGLDeviceList.cudaGLDeviceListNextFrame + + + The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame + +.. autofunction:: cuda.bindings.runtime.cudaGLGetDevices +.. autofunction:: cuda.bindings.runtime.cudaGraphicsGLRegisterImage +.. 
autofunction:: cuda.bindings.runtime.cudaGraphicsGLRegisterBuffer + +Direct3D 9 Interoperability +--------------------------- + + + + +Direct3D 10 Interoperability +---------------------------- + + + + +Direct3D 11 Interoperability +---------------------------- + + + + +VDPAU Interoperability +---------------------- + +This section describes the VDPAU interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaVDPAUGetDevice +.. autofunction:: cuda.bindings.runtime.cudaVDPAUSetVDPAUDevice +.. autofunction:: cuda.bindings.runtime.cudaGraphicsVDPAURegisterVideoSurface +.. autofunction:: cuda.bindings.runtime.cudaGraphicsVDPAURegisterOutputSurface + +EGL Interoperability +-------------------- + +This section describes the EGL interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGraphicsEGLRegisterImage +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerConnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerConnectWithFlags +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerDisconnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerAcquireFrame +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerReleaseFrame +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerConnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerDisconnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerPresentFrame +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerReturnFrame +.. autofunction:: cuda.bindings.runtime.cudaGraphicsResourceGetMappedEglFrame +.. autofunction:: cuda.bindings.runtime.cudaEventCreateFromEGLSync + +Graphics Interoperability +------------------------- + +This section describes the graphics interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGraphicsUnregisterResource +.. autofunction:: cuda.bindings.runtime.cudaGraphicsResourceSetMapFlags +.. autofunction:: cuda.bindings.runtime.cudaGraphicsMapResources +.. autofunction:: cuda.bindings.runtime.cudaGraphicsUnmapResources +.. autofunction:: cuda.bindings.runtime.cudaGraphicsResourceGetMappedPointer +.. autofunction:: cuda.bindings.runtime.cudaGraphicsSubResourceGetMappedArray +.. autofunction:: cuda.bindings.runtime.cudaGraphicsResourceGetMappedMipmappedArray + +Texture Object Management +------------------------- + +This section describes the low-level texture object management functions of the CUDA runtime application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. + +.. autofunction:: cuda.bindings.runtime.cudaGetChannelDesc +.. autofunction:: cuda.bindings.runtime.cudaCreateChannelDesc +.. autofunction:: cuda.bindings.runtime.cudaCreateTextureObject +.. autofunction:: cuda.bindings.runtime.cudaDestroyTextureObject +.. autofunction:: cuda.bindings.runtime.cudaGetTextureObjectResourceDesc +.. autofunction:: cuda.bindings.runtime.cudaGetTextureObjectTextureDesc +.. autofunction:: cuda.bindings.runtime.cudaGetTextureObjectResourceViewDesc + +Surface Object Management +------------------------- + +This section describes the low-level surface object management functions of the CUDA runtime application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. + +.. 
autofunction:: cuda.bindings.runtime.cudaCreateSurfaceObject +.. autofunction:: cuda.bindings.runtime.cudaDestroySurfaceObject +.. autofunction:: cuda.bindings.runtime.cudaGetSurfaceObjectResourceDesc + +Version Management +------------------ + + + +.. autofunction:: cuda.bindings.runtime.cudaDriverGetVersion +.. autofunction:: cuda.bindings.runtime.cudaRuntimeGetVersion +.. autofunction:: cuda.bindings.runtime.getLocalRuntimeVersion + +Graph Management +---------------- + +This section describes the graph management functions of CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGraphCreate +.. autofunction:: cuda.bindings.runtime.cudaGraphAddKernelNode +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeCopyAttributes +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemcpyNode +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemcpyNode1D +.. autofunction:: cuda.bindings.runtime.cudaGraphMemcpyNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams1D +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemsetNode +.. autofunction:: cuda.bindings.runtime.cudaGraphMemsetNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphMemsetNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddHostNode +.. autofunction:: cuda.bindings.runtime.cudaGraphHostNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphHostNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddChildGraphNode +.. autofunction:: cuda.bindings.runtime.cudaGraphChildGraphNodeGetGraph +.. autofunction:: cuda.bindings.runtime.cudaGraphAddEmptyNode +.. autofunction:: cuda.bindings.runtime.cudaGraphAddEventRecordNode +.. autofunction:: cuda.bindings.runtime.cudaGraphEventRecordNodeGetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphAddEventWaitNode +.. autofunction:: cuda.bindings.runtime.cudaGraphEventWaitNodeGetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphAddExternalSemaphoresSignalNode +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresSignalNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddExternalSemaphoresWaitNode +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresWaitNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresWaitNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemAllocNode +.. autofunction:: cuda.bindings.runtime.cudaGraphMemAllocNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemFreeNode +.. autofunction:: cuda.bindings.runtime.cudaGraphMemFreeNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaDeviceGraphMemTrim +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetGraphMemAttribute +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetGraphMemAttribute +.. autofunction:: cuda.bindings.runtime.cudaGraphClone +.. 
autofunction:: cuda.bindings.runtime.cudaGraphNodeFindInClone +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetType +.. autofunction:: cuda.bindings.runtime.cudaGraphGetNodes +.. autofunction:: cuda.bindings.runtime.cudaGraphGetRootNodes +.. autofunction:: cuda.bindings.runtime.cudaGraphGetEdges +.. autofunction:: cuda.bindings.runtime.cudaGraphGetEdges_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependencies +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependencies_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependentNodes +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependentNodes_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphAddDependencies +.. autofunction:: cuda.bindings.runtime.cudaGraphAddDependencies_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphRemoveDependencies +.. autofunction:: cuda.bindings.runtime.cudaGraphRemoveDependencies_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphDestroyNode +.. autofunction:: cuda.bindings.runtime.cudaGraphInstantiate +.. autofunction:: cuda.bindings.runtime.cudaGraphInstantiateWithFlags +.. autofunction:: cuda.bindings.runtime.cudaGraphInstantiateWithParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecGetFlags +.. autofunction:: cuda.bindings.runtime.cudaGraphExecKernelNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecMemcpyNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecMemcpyNodeSetParams1D +.. autofunction:: cuda.bindings.runtime.cudaGraphExecMemsetNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecHostNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecChildGraphNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphExecEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphExecExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecExternalSemaphoresWaitNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeSetEnabled +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetEnabled +.. autofunction:: cuda.bindings.runtime.cudaGraphExecUpdate +.. autofunction:: cuda.bindings.runtime.cudaGraphUpload +.. autofunction:: cuda.bindings.runtime.cudaGraphLaunch +.. autofunction:: cuda.bindings.runtime.cudaGraphExecDestroy +.. autofunction:: cuda.bindings.runtime.cudaGraphDestroy +.. autofunction:: cuda.bindings.runtime.cudaGraphDebugDotPrint +.. autofunction:: cuda.bindings.runtime.cudaUserObjectCreate +.. autofunction:: cuda.bindings.runtime.cudaUserObjectRetain +.. autofunction:: cuda.bindings.runtime.cudaUserObjectRelease +.. autofunction:: cuda.bindings.runtime.cudaGraphRetainUserObject +.. autofunction:: cuda.bindings.runtime.cudaGraphReleaseUserObject +.. autofunction:: cuda.bindings.runtime.cudaGraphAddNode +.. autofunction:: cuda.bindings.runtime.cudaGraphAddNode_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphConditionalHandleCreate + +Driver Entry Point Access +------------------------- + +This section describes the driver entry point access functions of CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGetDriverEntryPoint +.. 
autofunction:: cuda.bindings.runtime.cudaGetDriverEntryPointByVersion + +C++ API Routines +---------------- +C++-style interface built on top of CUDA runtime API. +impl_private + + + + + + + +This section describes the C++ high level API functions of the CUDA runtime application programming interface. To use these functions, your application needs to be compiled with the ``nvcc``\ compiler. + + +Interactions with the CUDA Driver API +------------------------------------- + +This section describes the interactions between the CUDA Driver API and the CUDA Runtime API + + + + + +**Primary Contexts** + + + +There exists a one to one relationship between CUDA devices in the CUDA Runtime API and ::CUcontext s in the CUDA Driver API within a process. The specific context which the CUDA Runtime API uses for a device is called the device's primary context. From the perspective of the CUDA Runtime API, a device and its primary context are synonymous. + + + + + +**Initialization and Tear-Down** + + + +CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current to to the calling host thread. + +The function cudaInitDevice() ensures that the primary context is initialized for the requested device but does not make it current to the calling thread. + +The function cudaSetDevice() initializes the primary context for the specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent(). + +The CUDA Runtime API will automatically initialize the primary context for a device at the first CUDA Runtime API call which requires an active context. If no ::CUcontext is current to the calling thread when a CUDA Runtime API call which requires an active context is made, then the primary context for a device will be selected, made current to the calling thread, and initialized. + +The context which the CUDA Runtime API initializes will be initialized using the parameters specified by the CUDA Runtime API functions cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), ::cudaD3D10SetDirect3DDevice(), ::cudaD3D11SetDirect3DDevice(), cudaGLSetGLDevice(), and cudaVDPAUSetVDPAUDevice(). Note that these functions will fail with cudaErrorSetOnActiveProcess if they are called when the primary context for the specified device has already been initialized. (or if the current device has already been initialized, in the case of cudaSetDeviceFlags()). + +Primary contexts will remain active until they are explicitly deinitialized using cudaDeviceReset(). The function cudaDeviceReset() will deinitialize the primary context for the calling thread's current device immediately. The context will remain current to all of the threads that it was current to. The next CUDA Runtime API call on any thread which requires an active context will trigger the reinitialization of that device's primary context. + +Note that primary contexts are shared resources. It is recommended that the primary context not be reset except just before exit or to recover from an unspecified launch failure. + + + + + +**Context Interoperability** + + + +Note that the use of multiple ::CUcontext s per device within a single process will substantially degrade performance and is strongly discouraged. Instead, it is highly recommended that the implicit one-to-one device-to-context mapping for the process provided by the CUDA Runtime API be used. 
+ +If a non-primary ::CUcontext created by the CUDA Driver API is current to a thread then the CUDA Runtime API calls to that thread will operate on that ::CUcontext, with some exceptions listed below. Interoperability between data types is discussed in the following sections. + +The function cudaPointerGetAttributes() will return the error cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a non-primary context. The function cudaDeviceEnablePeerAccess() and the rest of the peer access API may not be called when a non-primary ::CUcontext is current. + + To use the pointer query and peer access APIs with a context created using the CUDA Driver API, it is necessary that the CUDA Driver API be used to access these features. + +All CUDA Runtime API state (e.g, global variables' addresses and values) travels with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one thread to another then all CUDA Runtime API state will move to that thread as well. + +Please note that attaching to legacy contexts (those with a version of 3010 as returned by ::cuCtxGetApiVersion()) is not possible. The CUDA Runtime will return cudaErrorIncompatibleDriverContext in such cases. + + + + + +**Interactions between CUstream and cudaStream_t** + + + +The types ::CUstream and cudaStream_t are identical and may be used interchangeably. + + + + + +**Interactions between CUevent and cudaEvent_t** + + + +The types ::CUevent and cudaEvent_t are identical and may be used interchangeably. + + + + + +**Interactions between CUarray and cudaArray_t** + + + +The types ::CUarray and struct ::cudaArray * represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *. + +In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray . + + + + + +**Interactions between CUgraphicsResource and cudaGraphicsResource_t** + + + +The types ::CUgraphicsResource and cudaGraphicsResource_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource to a cudaGraphicsResource_t. + +In order to use a cudaGraphicsResource_t in a CUDA Driver API function which takes a ::CUgraphicsResource, it is necessary to explicitly cast the cudaGraphicsResource_t to a ::CUgraphicsResource. + + + + + +**Interactions between CUtexObject and cudaTextureObject_t** + + + +The types ::CUtexObject and cudaTextureObject_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUtexObject in a CUDA Runtime API function which takes a cudaTextureObject_t, it is necessary to explicitly cast the ::CUtexObject to a cudaTextureObject_t. + +In order to use a cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, it is necessary to explicitly cast the cudaTextureObject_t to a ::CUtexObject. 
+ + + + + +**Interactions between CUsurfObject and cudaSurfaceObject_t** + + + +The types ::CUsurfObject and cudaSurfaceObject_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a cudaSurfaceObject_t, it is necessary to explicitly cast the ::CUsurfObject to a cudaSurfaceObject_t. + +In order to use a cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, it is necessary to explicitly cast the cudaSurfaceObject_t to a ::CUsurfObject. + + + + + +**Interactions between CUfunction and cudaFunction_t** + + + +The types ::CUfunction and cudaFunction_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, it is necessary to explicitly cast the cudaFunction_t to a ::CUfunction. + +.. autofunction:: cuda.bindings.runtime.cudaGetKernel + +Data types used by CUDA Runtime +------------------------------- + + + +.. autoclass:: cuda.bindings.runtime.cudaEglPlaneDesc_st +.. autoclass:: cuda.bindings.runtime.cudaEglFrame_st +.. autoclass:: cuda.bindings.runtime.cudaChannelFormatDesc +.. autoclass:: cuda.bindings.runtime.cudaArraySparseProperties +.. autoclass:: cuda.bindings.runtime.cudaArrayMemoryRequirements +.. autoclass:: cuda.bindings.runtime.cudaPitchedPtr +.. autoclass:: cuda.bindings.runtime.cudaExtent +.. autoclass:: cuda.bindings.runtime.cudaPos +.. autoclass:: cuda.bindings.runtime.cudaMemcpy3DParms +.. autoclass:: cuda.bindings.runtime.cudaMemcpyNodeParams +.. autoclass:: cuda.bindings.runtime.cudaMemcpy3DPeerParms +.. autoclass:: cuda.bindings.runtime.cudaMemsetParams +.. autoclass:: cuda.bindings.runtime.cudaMemsetParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaAccessPolicyWindow +.. autoclass:: cuda.bindings.runtime.cudaHostNodeParams +.. autoclass:: cuda.bindings.runtime.cudaHostNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaResourceDesc +.. autoclass:: cuda.bindings.runtime.cudaResourceViewDesc +.. autoclass:: cuda.bindings.runtime.cudaPointerAttributes +.. autoclass:: cuda.bindings.runtime.cudaFuncAttributes +.. autoclass:: cuda.bindings.runtime.cudaMemLocation +.. autoclass:: cuda.bindings.runtime.cudaMemAccessDesc +.. autoclass:: cuda.bindings.runtime.cudaMemPoolProps +.. autoclass:: cuda.bindings.runtime.cudaMemPoolPtrExportData +.. autoclass:: cuda.bindings.runtime.cudaMemAllocNodeParams +.. autoclass:: cuda.bindings.runtime.cudaMemAllocNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaMemFreeNodeParams +.. autoclass:: cuda.bindings.runtime.CUuuid_st +.. autoclass:: cuda.bindings.runtime.cudaDeviceProp +.. autoclass:: cuda.bindings.runtime.cudaIpcEventHandle_st +.. autoclass:: cuda.bindings.runtime.cudaIpcMemHandle_st +.. autoclass:: cuda.bindings.runtime.cudaMemFabricHandle_st +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryHandleDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryBufferDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryMipmappedArrayDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreHandleDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreSignalParams +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreWaitParams +.. autoclass:: cuda.bindings.runtime.cudaKernelNodeParams +.. autoclass:: cuda.bindings.runtime.cudaKernelNodeParamsV2 +.. 
autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParams +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParams +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaConditionalNodeParams +.. autoclass:: cuda.bindings.runtime.cudaChildGraphNodeParams +.. autoclass:: cuda.bindings.runtime.cudaEventRecordNodeParams +.. autoclass:: cuda.bindings.runtime.cudaEventWaitNodeParams +.. autoclass:: cuda.bindings.runtime.cudaGraphNodeParams +.. autoclass:: cuda.bindings.runtime.cudaGraphEdgeData_st +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateParams_st +.. autoclass:: cuda.bindings.runtime.cudaGraphExecUpdateResultInfo_st +.. autoclass:: cuda.bindings.runtime.cudaGraphKernelNodeUpdate +.. autoclass:: cuda.bindings.runtime.cudaLaunchMemSyncDomainMap_st +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttributeValue +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttribute_st +.. autoclass:: cuda.bindings.runtime.cudaAsyncNotificationInfo +.. autoclass:: cuda.bindings.runtime.cudaTextureDesc +.. autoclass:: cuda.bindings.runtime.cudaEglFrameType + + .. autoattribute:: cuda.bindings.runtime.cudaEglFrameType.cudaEglFrameTypeArray + + + Frame type CUDA array + + + .. autoattribute:: cuda.bindings.runtime.cudaEglFrameType.cudaEglFrameTypePitch + + + Frame type CUDA pointer + +.. autoclass:: cuda.bindings.runtime.cudaEglResourceLocationFlags + + .. autoattribute:: cuda.bindings.runtime.cudaEglResourceLocationFlags.cudaEglResourceLocationSysmem + + + Resource location sysmem + + + .. autoattribute:: cuda.bindings.runtime.cudaEglResourceLocationFlags.cudaEglResourceLocationVidmem + + + Resource location vidmem + +.. autoclass:: cuda.bindings.runtime.cudaEglColorFormat + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422Planar + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar + + + Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatARGB + + + R/G/B/A four channels in one surface with BGRA byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatRGBA + + + R/G/B/A four channels in one surface with ABGR byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatL + + + single luminance channel in one surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatR + + + single color channel in one surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444Planar + + + Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUYV422 + + + Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatUYVY422 + + + Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatABGR + + + R/G/B/A four channels in one surface with RGBA byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBGRA + + + R/G/B/A four channels in one surface with ARGB byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatA + + + Alpha color format - one channel in one surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatRG + + + R/G color format - two channels in one surface with GR byte ordering + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatAYUV + + + Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatVYUY_ER + + + Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatUYVY_ER + + + Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUYV_ER + + + Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVYU_ER + + + Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUVA_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatAYUV_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444Planar_ER + + + Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422Planar_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444Planar_ER + + + Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422Planar_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerRGGB + + + Bayer format - one channel in one surface with interleaved RGGB ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerBGGR + + + Bayer format - one channel in one surface with interleaved BGGR ordering. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerGRBG + + + Bayer format - one channel in one surface with interleaved GRBG ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerGBRG + + + Bayer format - one channel in one surface with interleaved GBRG ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10RGGB + + + Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10BGGR + + + Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10GRBG + + + Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10GBRG + + + Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12RGGB + + + Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12BGGR + + + Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12GRBG + + + Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12GBRG + + + Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14RGGB + + + Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14BGGR + + + Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14GRBG + + + Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14GBRG + + + Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20RGGB + + + Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20BGGR + + + Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20GRBG + + + Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20GBRG + + + Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444Planar + + + Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422Planar + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspRGGB + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspBGGR + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspGRBG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspGBRG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerBCCR + + + Bayer format - one channel in one surface with interleaved BCCR ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerRCCB + + + Bayer format - one channel in one surface with interleaved RCCB ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerCRBC + + + Bayer format - one channel in one surface with interleaved CRBC ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerCBRC + + + Bayer format - one channel in one surface with interleaved CBRC ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10CCCC + + + Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12BCCR + + + Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12RCCB + + + Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12CRBC + + + Bayer12 format - one channel in one surface with interleaved CRBC ordering. 
Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12CBRC + + + Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12CCCC + + + Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY + + + Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_2020 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_2020 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_2020 + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_2020 + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_709 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_709 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_709 + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_709 + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_2020 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_2020 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_709 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY_ER + + + Extended Range Color format for single Y plane. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY_709_ER + + + Extended Range Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10_709_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12_709_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUVA + + + Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVYU + + + Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatVYUY + + + Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + +.. autoclass:: cuda.bindings.runtime.cudaError_t + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaSuccess + + + The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`). + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidValue + + + This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMemoryAllocation + + + The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInitializationError + + + The API call failed because the CUDA driver and runtime could not be initialized. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCudartUnloading + + + This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shut down, at a point in time after CUDA driver has been unloaded. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerDisabled + + + This indicates profiler is not initialized for this run. This can happen when the application is running with external profiling tools like visual profiler. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerNotInitialized + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerAlreadyStarted + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerAlreadyStopped + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidConfiguration + + + This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See :py:obj:`~.cudaDeviceProp` for more device limitations. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidPitchValue + + + This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidSymbol + + + This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidHostPointer + + + This indicates that at least one host pointer passed to the API call is not a valid host pointer. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidDevicePointer + + + This indicates that at least one device pointer passed to the API call is not a valid device pointer. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidTexture + + + This indicates that the texture passed to the API call is not a valid texture. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidTextureBinding + + + This indicates that the texture binding is not valid. This occurs if you call :py:obj:`~.cudaGetTextureAlignmentOffset()` with an unbound texture. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidChannelDescriptor + + + This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by :py:obj:`~.cudaChannelFormatKind`, or if one of the dimensions is invalid. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidMemcpyDirection + + + This indicates that the direction of the memcpy passed to the API call is not one of the types specified by :py:obj:`~.cudaMemcpyKind`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAddressOfConstant + + + This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTextureFetchFailed + + + This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTextureNotBound + + + This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSynchronizationError + + + This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidFilterSetting + + + This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidNormSetting + + + This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMixedDeviceExecution + + + Mixing of device and device emulation code was not allowed. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotYetImplemented + + + This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMemoryValueTooLarge + + + This indicated that an emulated device pointer exceeded the 32-bit address range. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStubLibrary + + + This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in CUDA API returning this error. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInsufficientDriver + + + This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCallRequiresNewerDriver + + + This indicates that the API call requires a newer CUDA driver than the one currently installed. Users should install an updated NVIDIA CUDA driver to allow the API call to succeed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidSurface + + + This indicates that the surface passed to the API call is not a valid surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDuplicateVariableName + + + This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDuplicateTextureName + + + This indicates that multiple textures (across separate CUDA source files in the application) share the same string name. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDuplicateSurfaceName + + + This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDevicesUnavailable + + + This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of :py:obj:`~.cudaComputeModeProhibited`, :py:obj:`~.cudaComputeModeExclusiveProcess`, or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIncompatibleDriverContext + + + This indicates that the current context is not compatible with this the CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see :py:obj:`~.Interactions`with the CUDA Driver API" for more information. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMissingConfiguration + + + The device function being invoked (usually via :py:obj:`~.cudaLaunchKernel()`) was not previously configured via the :py:obj:`~.cudaConfigureCall()` function. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPriorLaunchFailure + + + This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchMaxDepthExceeded + + + This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchFileScopedTex + + + This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object API's. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchFileScopedSurf + + + This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object API's. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSyncDepthExceeded + + + This error indicates that a call to :py:obj:`~.cudaDeviceSynchronize` made from the device runtime failed because the call was made at grid depth greater than than either the default (2 levels of grids) or user specified device limit :py:obj:`~.cudaLimitDevRuntimeSyncDepth`. 
To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which :py:obj:`~.cudaDeviceSynchronize` will be called must be specified with the :py:obj:`~.cudaLimitDevRuntimeSyncDepth` limit to the :py:obj:`~.cudaDeviceSetLimit` api before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. Note that :py:obj:`~.cudaDeviceSynchronize` made from device runtime is only supported on devices of compute capability < 9.0. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchPendingCountExceeded + + + This error indicates that a device runtime grid launch failed because the launch would exceed the limit :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount`. For this launch to proceed successfully, :py:obj:`~.cudaDeviceSetLimit` must be called to set the :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount` to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidDeviceFunction + + + The requested device function does not exist or is not compiled for the proper device architecture. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNoDevice + + + This indicates that no CUDA-capable devices were detected by the installed CUDA driver. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidDevice + + + This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDeviceNotLicensed + + + This indicates that the device doesn't have a valid Grid License. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSoftwareValidityNotEstablished + + + By default, the CUDA runtime may perform a minimal set of self-tests, as well as CUDA driver tests, to establish the validity of both. Introduced in CUDA 11.2, this error return indicates that at least one of these tests has failed and the validity of either the runtime or the driver could not be established. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStartupFailure + + + This indicates an internal startup failure in the CUDA runtime. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidKernelImage + + + This indicates that the device kernel image is invalid. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDeviceUninitialized + + + This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMapBufferObjectFailed + + + This indicates that the buffer object could not be mapped. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnmapBufferObjectFailed + + + This indicates that the buffer object could not be unmapped. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorArrayIsMapped + + + This indicates that the specified array is currently mapped and thus cannot be destroyed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAlreadyMapped + + + This indicates that the resource is already mapped. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNoKernelImageForDevice + + + This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAlreadyAcquired + + + This indicates that a resource has already been acquired. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotMapped + + + This indicates that a resource is not mapped. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotMappedAsArray + + + This indicates that a mapped resource is not available for access as an array. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotMappedAsPointer + + + This indicates that a mapped resource is not available for access as a pointer. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorECCUncorrectable + + + This indicates that an uncorrectable ECC error was detected during execution. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedLimit + + + This indicates that the :py:obj:`~.cudaLimit` passed to the API call is not supported by the active device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDeviceAlreadyInUse + + + This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPeerAccessUnsupported + + + This error indicates that P2P access is not supported across the given devices. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidPtx + + + A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidGraphicsContext + + + This indicates an error with the OpenGL or DirectX context. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNvlinkUncorrectable + + + This indicates that an uncorrectable NVLink error was detected during the execution. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorJitCompilerNotFound + + + This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedPtxVersion + + + This indicates that the provided PTX was compiled with an unsupported toolchain. The most common reason for this, is the PTX was generated by a compiler newer than what is supported by the CUDA driver and PTX JIT compiler. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorJitCompilationDisabled + + + This indicates that the JIT compilation was disabled. 
The JIT compilation compiles PTX. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedExecAffinity + + + This indicates that the provided execution affinity is not supported by the device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedDevSideSync + + + This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidSource + + + This indicates that the device kernel source is invalid. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorFileNotFound + + + This indicates that the file specified was not found. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSharedObjectSymbolNotFound + + + This indicates that a link to a shared object failed to resolve. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSharedObjectInitFailed + + + This indicates that initialization of a shared object failed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorOperatingSystem + + + This error indicates that an OS call failed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidResourceHandle + + + This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like :py:obj:`~.cudaStream_t` and :py:obj:`~.cudaEvent_t`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIllegalState + + + This indicates that a resource required by the API call is not in a valid state to perform the requested operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLossyQuery + + + This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or the omission of optional return arguments. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSymbolNotFound + + + This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotReady + + + This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than :py:obj:`~.cudaSuccess` (which indicates completion). Calls that may return this value include :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIllegalAddress + + + The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchOutOfResources + + + This indicates that a launch did not occur because it did not have appropriate resources.
Although this error is similar to :py:obj:`~.cudaErrorInvalidConfiguration`, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchTimeout + + + This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property :py:obj:`~.kernelExecTimeoutEnabled` for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchIncompatibleTexturing + + + This error indicates a kernel launch that uses an incompatible texturing mode. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPeerAccessAlreadyEnabled + + + This error indicates that a call to :py:obj:`~.cudaDeviceEnablePeerAccess()` is trying to re-enable peer addressing from a context which has already had peer addressing enabled. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPeerAccessNotEnabled + + + This error indicates that :py:obj:`~.cudaDeviceDisablePeerAccess()` is trying to disable peer addressing which has not been enabled yet via :py:obj:`~.cudaDeviceEnablePeerAccess()`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSetOnActiveProcess + + + This indicates that the user has called :py:obj:`~.cudaSetValidDevices()`, :py:obj:`~.cudaSetDeviceFlags()`, :py:obj:`~.cudaD3D9SetDirect3DDevice()`, :py:obj:`~.cudaD3D10SetDirect3DDevice`, :py:obj:`~.cudaD3D11SetDirect3DDevice()`, or :py:obj:`~.cudaVDPAUSetVDPAUDevice()` after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing :py:obj:`~.CUcontext` active on the host thread. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorContextIsDestroyed + + + This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAssert + + + An assert triggered in device code during kernel execution. The device cannot be used again. All existing allocations are invalid. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTooManyPeers + + + This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cudaEnablePeerAccess()`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorHostMemoryAlreadyRegistered + + + This error indicates that the memory range passed to :py:obj:`~.cudaHostRegister()` has already been registered. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorHostMemoryNotRegistered + + + This error indicates that the pointer passed to :py:obj:`~.cudaHostUnregister()` does not correspond to any currently registered memory region. + + +
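
A short sketch (assuming two visible, peer-capable devices) of the common pattern of treating :py:obj:`~.cudaErrorPeerAccessAlreadyEnabled` as benign when enabling peer access:

.. code-block:: python

    # Sketch: enable access from device 0 to device 1; the already-enabled
    # error documented above is treated as success.
    from cuda.bindings import runtime

    err, = runtime.cudaSetDevice(0)
    err, = runtime.cudaDeviceEnablePeerAccess(1, 0)  # peer device 1, flags 0
    if err not in (runtime.cudaError_t.cudaSuccess,
                   runtime.cudaError_t.cudaErrorPeerAccessAlreadyEnabled):
        raise RuntimeError(f"enabling peer access failed: {err}")

..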
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorHardwareStackError + + + Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIllegalInstruction + + + The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMisalignedAddress + + + The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidAddressSpace + + + While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidPc + + + The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchFailure + + + An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCooperativeLaunchTooLarge + + + This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cudaLaunchCooperativeKernel` or :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.cudaDevAttrMultiProcessorCount`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotPermitted + + + This error indicates the attempted operation is not permitted. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotSupported + + + This error indicates the attempted operation is not supported on the current system or device. + + + ..
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSystemNotReady + + + This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSystemDriverMismatch + + + This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCompatNotSupportedOnDevice + + + This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsConnectionFailed + + + This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsRpcFailure + + + This error indicates that the remote procedure call between the MPS server and the MPS client failed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsServerNotReady + + + This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsMaxClientsReached + + + This error indicates that the hardware resources required to create an MPS client have been exhausted. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsMaxConnectionsReached + + + This error indicates that the hardware resources required to support device connections have been exhausted. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsClientTerminated + + + This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCdpNotSupported + + + This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCdpVersionMismatch + + + This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureUnsupported + + + The operation is not permitted when the stream is capturing. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureInvalidated + + + The current capture sequence on the stream has been invalidated due to a previous error. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureMerge + + + The operation would have resulted in a merge of two independent capture sequences. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureUnmatched + + + The capture was not initiated in this stream. + + +
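
The stream-capture errors listed here and below are reported through the capture APIs; a minimal sketch, assuming a device is present and no conflicting capture is active:

.. code-block:: python

    # Sketch: a capture sequence whose result is checked against the
    # stream-capture errors documented in this enum.
    from cuda.bindings import runtime

    err, stream = runtime.cudaStreamCreate()
    err, = runtime.cudaStreamBeginCapture(
        stream, runtime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
    # ... enqueue asynchronous work on `stream` here ...
    err, graph = runtime.cudaStreamEndCapture(stream)
    if err == runtime.cudaError_t.cudaErrorStreamCaptureInvalidated:
        print("capture sequence was invalidated by an earlier error")

..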
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureUnjoined + + + The capture sequence contains a fork that was not joined to the primary stream. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureIsolation + + + A dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureImplicit + + + The operation would have resulted in a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCapturedEvent + + + The operation is not permitted on an event which was last recorded in a capturing stream. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureWrongThread + + + A stream capture sequence not initiated with the :py:obj:`~.cudaStreamCaptureModeRelaxed` argument to :py:obj:`~.cudaStreamBeginCapture` was passed to :py:obj:`~.cudaStreamEndCapture` in a different thread. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTimeout + + + This indicates that the wait operation has timed out. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorGraphExecUpdateFailure + + + This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorExternalDevice + + + This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidClusterSize + + + This indicates that a kernel launch error has occurred due to cluster misconfiguration. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorFunctionNotLoaded + + + Indicates that a function handle is not loaded when calling an API that requires a loaded function. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidResourceType + + + This error indicates one or more resources passed in are not valid resource types for the operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidResourceConfiguration + + + This error indicates one or more resources are insufficient or non-applicable for the operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnknown + + + This indicates that an unknown internal error has occurred. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorApiFailureBase + +.. autoclass:: cuda.bindings.runtime.cudaChannelFormatKind + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSigned + + + Signed channel format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsigned + + + Unsigned channel format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat + + + Float channel format + + + ..
autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindNone + + + No channel format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindNV12 + + + Unsigned 8-bit integers, planar 4:2:0 YUV format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X1 + + + 1 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X2 + + + 2 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X4 + + + 4 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X1 + + + 1 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X2 + + + 2 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X4 + + + 4 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X1 + + + 1 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X2 + + + 2 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X4 + + + 4 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X1 + + + 1 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X2 + + + 2 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X4 + + + 4 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1 + + + 4 channel unsigned normalized block-compressed (BC1 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB + + + 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2 + + + 4 channel unsigned normalized block-compressed (BC2 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB + + + 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3 + + + 4 channel unsigned normalized block-compressed (BC3 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB + + + 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding + + + .. 
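
These kinds parameterize channel descriptors; a sketch, assuming the :py:obj:`~.cudaCreateChannelDesc` binding, that builds a four-component 32-bit float descriptor:

.. code-block:: python

    # Sketch: a float4 channel descriptor built from the
    # cudaChannelFormatKind values listed above.
    from cuda.bindings import runtime

    err, desc = runtime.cudaCreateChannelDesc(
        32, 32, 32, 32,
        runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat)

..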
autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4 + + + 1 channel unsigned normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4 + + + 1 channel signed normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5 + + + 2 channel unsigned normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5 + + + 2 channel signed normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H + + + 3 channel unsigned half-float block-compressed (BC6H compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H + + + 3 channel signed half-float block-compressed (BC6H compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7 + + + 4 channel unsigned normalized block-compressed (BC7 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB + + + 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding + +.. autoclass:: cuda.bindings.runtime.cudaMemoryType + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeUnregistered + + + Unregistered memory + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeHost + + + Host memory + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeDevice + + + Device memory + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeManaged + + + Managed memory + +.. autoclass:: cuda.bindings.runtime.cudaMemcpyKind + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyHostToHost + + + Host -> Host + + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice + + + Host -> Device + + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost + + + Device -> Host + + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice + + + Device -> Device + + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyDefault + + + Direction of the transfer is inferred from the pointer values. Requires unified virtual addressing + +.. autoclass:: cuda.bindings.runtime.cudaAccessProperty + + .. autoattribute:: cuda.bindings.runtime.cudaAccessProperty.cudaAccessPropertyNormal + + + Normal cache persistence. + + + .. autoattribute:: cuda.bindings.runtime.cudaAccessProperty.cudaAccessPropertyStreaming + + + Streaming access is less likely to persist in cache. + + + .. autoattribute:: cuda.bindings.runtime.cudaAccessProperty.cudaAccessPropertyPersisting + + + Persisting access is more likely to persist in cache. + +.. autoclass:: cuda.bindings.runtime.cudaStreamCaptureStatus + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone + + + Stream is not capturing + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive + + + Stream is actively capturing + + +
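
A round-trip sketch (assuming NumPy is available) exercising the :py:obj:`~.cudaMemcpyKind` directions documented above:

.. code-block:: python

    # Sketch: copy a host buffer to device memory and back using the
    # cudaMemcpyKind directions documented above.
    import numpy as np
    from cuda.bindings import runtime

    host = np.arange(8, dtype=np.float32)
    err, dptr = runtime.cudaMalloc(host.nbytes)
    err, = runtime.cudaMemcpy(
        dptr, host.ctypes.data, host.nbytes,
        runtime.cudaMemcpyKind.cudaMemcpyHostToDevice)
    out = np.empty_like(host)
    err, = runtime.cudaMemcpy(
        out.ctypes.data, dptr, host.nbytes,
        runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    err, = runtime.cudaFree(dptr)

..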
autoattribute:: cuda.bindings.runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusInvalidated + + + Stream is part of a capture sequence that has been invalidated, but not terminated + +.. autoclass:: cuda.bindings.runtime.cudaStreamCaptureMode + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed + +.. autoclass:: cuda.bindings.runtime.cudaSynchronizationPolicy + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicyAuto + + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicySpin + + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicyYield + + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicyBlockingSync + +.. autoclass:: cuda.bindings.runtime.cudaClusterSchedulingPolicy + + .. autoattribute:: cuda.bindings.runtime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyDefault + + + the default policy + + + .. autoattribute:: cuda.bindings.runtime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicySpread + + + spread the blocks within a cluster to the SMs + + + .. autoattribute:: cuda.bindings.runtime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyLoadBalancing + + + allow the hardware to load-balance the blocks in a cluster to the SMs + +.. autoclass:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags + + .. autoattribute:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamAddCaptureDependencies + + + Add new nodes to the dependency set + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamSetCaptureDependencies + + + Replace the dependency set with the new nodes + +.. autoclass:: cuda.bindings.runtime.cudaUserObjectFlags + + .. autoattribute:: cuda.bindings.runtime.cudaUserObjectFlags.cudaUserObjectNoDestructorSync + + + Indicates the destructor execution is not synchronized by any CUDA handle. + +.. autoclass:: cuda.bindings.runtime.cudaUserObjectRetainFlags + + .. autoattribute:: cuda.bindings.runtime.cudaUserObjectRetainFlags.cudaGraphUserObjectMove + + + Transfer references from the caller rather than creating new references. + +.. autoclass:: cuda.bindings.runtime.cudaGraphicsRegisterFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone + + + Default + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly + + + CUDA will not write to this resource + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard + + + CUDA will only write to and will not read from this resource + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsSurfaceLoadStore + + + CUDA will bind this resource to a surface reference + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsTextureGather + + + CUDA will perform texture gather operations on this resource + +.. autoclass:: cuda.bindings.runtime.cudaGraphicsMapFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsNone + + + Default; Assume resource can be read/written + + + .. 
autoattribute:: cuda.bindings.runtime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsReadOnly + + + CUDA will not write to this resource + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsWriteDiscard + + + CUDA will only write to and will not read from this resource + +.. autoclass:: cuda.bindings.runtime.cudaGraphicsCubeFace + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveX + + + Positive X face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeX + + + Negative X face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveY + + + Positive Y face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeY + + + Negative Y face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveZ + + + Positive Z face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeZ + + + Negative Z face of cubemap + +.. autoclass:: cuda.bindings.runtime.cudaResourceType + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypeArray + + + Array resource + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypeMipmappedArray + + + Mipmapped array resource + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypeLinear + + + Linear resource + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypePitch2D + + + Pitch 2D resource + +.. autoclass:: cuda.bindings.runtime.cudaResourceViewFormat + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatNone + + + No resource view format (use underlying resource format) + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar1 + + + 1 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar2 + + + 2 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar4 + + + 4 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedChar1 + + + 1 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedChar2 + + + 2 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedChar4 + + + 4 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort1 + + + 1 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort2 + + + 2 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort4 + + + 4 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedShort1 + + + 1 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedShort2 + + + 2 channel signed 16-bit integers + + + .. 
autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedShort4 + + + 4 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt1 + + + 1 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt2 + + + 2 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt4 + + + 4 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedInt1 + + + 1 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedInt2 + + + 2 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedInt4 + + + 4 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatHalf1 + + + 1 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatHalf2 + + + 2 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatHalf4 + + + 4 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatFloat1 + + + 1 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatFloat2 + + + 2 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatFloat4 + + + 4 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed1 + + + Block compressed 1 + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed2 + + + Block compressed 2 + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed3 + + + Block compressed 3 + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed4 + + + Block compressed 4 unsigned + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed4 + + + Block compressed 4 signed + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed5 + + + Block compressed 5 unsigned + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed5 + + + Block compressed 5 signed + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed6H + + + Block compressed 6 unsigned half-float + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed6H + + + Block compressed 6 signed half-float + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed7 + + + Block compressed 7 + +.. autoclass:: cuda.bindings.runtime.cudaFuncAttribute + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize + + + Maximum dynamic shared memory size + + + .. 
autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributePreferredSharedMemoryCarveout + + + Preferred shared memory-L1 cache split + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeClusterDimMustBeSet + + + Indicator to enforce valid cluster dimension specification on kernel launch + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterWidth + + + Required cluster width + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterHeight + + + Required cluster height + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterDepth + + + Required cluster depth + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeNonPortableClusterSizeAllowed + + + Whether non-portable cluster scheduling policy is supported + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeClusterSchedulingPolicyPreference + + + Required cluster scheduling policy preference + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeMax + +.. autoclass:: cuda.bindings.runtime.cudaFuncCache + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferNone + + + Default function cache configuration, no preference + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferShared + + + Prefer larger shared memory and smaller L1 cache + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferL1 + + + Prefer larger L1 cache and smaller shared memory + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferEqual + + + Prefer equal size L1 cache and shared memory + +.. autoclass:: cuda.bindings.runtime.cudaSharedMemConfig + + .. autoattribute:: cuda.bindings.runtime.cudaSharedMemConfig.cudaSharedMemBankSizeDefault + + + .. autoattribute:: cuda.bindings.runtime.cudaSharedMemConfig.cudaSharedMemBankSizeFourByte + + + .. autoattribute:: cuda.bindings.runtime.cudaSharedMemConfig.cudaSharedMemBankSizeEightByte + +.. autoclass:: cuda.bindings.runtime.cudaSharedCarveout + + .. autoattribute:: cuda.bindings.runtime.cudaSharedCarveout.cudaSharedmemCarveoutDefault + + + No preference for shared memory or L1 (default) + + + .. autoattribute:: cuda.bindings.runtime.cudaSharedCarveout.cudaSharedmemCarveoutMaxShared + + + Prefer maximum available shared memory, minimum L1 cache + + + .. autoattribute:: cuda.bindings.runtime.cudaSharedCarveout.cudaSharedmemCarveoutMaxL1 + + + Prefer maximum available L1 cache, minimum shared memory + +.. autoclass:: cuda.bindings.runtime.cudaComputeMode + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeDefault + + + Default compute mode (Multiple threads can use :py:obj:`~.cudaSetDevice()` with this device) + + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeExclusive + + + Compute-exclusive-thread mode (Only one thread in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) + + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeProhibited + + + Compute-prohibited mode (No threads can use :py:obj:`~.cudaSetDevice()` with this device) + + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeExclusiveProcess + + + Compute-exclusive-process mode (Many threads in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) + +.. 
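
A sketch of applying the :py:obj:`~.cudaFuncCache` preferences above to the current device, assuming the :py:obj:`~.cudaDeviceSetCacheConfig` and :py:obj:`~.cudaDeviceGetCacheConfig` bindings; the preference is a hint that the runtime may ignore:

.. code-block:: python

    # Sketch: prefer a larger shared-memory carveout on the current device.
    from cuda.bindings import runtime

    err, = runtime.cudaDeviceSetCacheConfig(
        runtime.cudaFuncCache.cudaFuncCachePreferShared)
    err, config = runtime.cudaDeviceGetCacheConfig()
    print(config)  # the effective configuration may differ on some devices

..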
autoclass:: cuda.bindings.runtime.cudaLimit + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitStackSize + + + GPU thread stack size + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitPrintfFifoSize + + + GPU printf FIFO size + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitMallocHeapSize + + + GPU malloc heap size + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitDevRuntimeSyncDepth + + + GPU device runtime synchronize depth + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitDevRuntimePendingLaunchCount + + + GPU device runtime pending launch count + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitMaxL2FetchGranularity + + + A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitPersistingL2CacheSize + + + A size in bytes for L2 persisting lines cache size + +.. autoclass:: cuda.bindings.runtime.cudaMemoryAdvise + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseSetReadMostly + + + Data will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseUnsetReadMostly + + + Undo the effect of :py:obj:`~.cudaMemAdviseSetReadMostly` + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseSetPreferredLocation + + + Set the preferred location for the data as the specified device + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseUnsetPreferredLocation + + + Clear the preferred location for the data + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseSetAccessedBy + + + Data will be accessed by the specified device, so prevent page faults as much as possible + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseUnsetAccessedBy + + + Let the Unified Memory subsystem decide on the page faulting policy for the specified device + +.. autoclass:: cuda.bindings.runtime.cudaMemRangeAttribute + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly + + + Whether the range will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocation + + + The preferred location of the range + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeAccessedBy + + + Memory range has :py:obj:`~.cudaMemAdviseSetAccessedBy` set for specified device + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocation + + + The last location to which the range was prefetched + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationType + + + The preferred location type of the range + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationId + + + The preferred location id of the range + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationType + + + The last location type to which the range was prefetched + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationId + + + The last location id to which the range was prefetched + +.. autoclass:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions + + ..
autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost + + + :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` and its CUDA Driver API counterpart are supported on the device. + + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionMemOps + + + The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the CUDA device. + +.. autoclass:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering + + .. autoattribute:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingNone + + + The device does not natively support ordering of GPUDirect RDMA writes. :py:obj:`~.cudaFlushGPUDirectRDMAWrites()` can be leveraged if supported. + + + .. autoattribute:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingOwner + + + Natively, the device can consistently consume GPUDirect RDMA writes, although other CUDA devices may not. + + + .. autoattribute:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingAllDevices + + + Any CUDA device in the system can consistently consume GPUDirect RDMA writes to this device. + +.. autoclass:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner + + + Blocks until remote writes are visible to the CUDA device context owning the data. + + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToAllDevices + + + Blocks until remote writes are visible to all CUDA device contexts. + +.. autoclass:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesTarget + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice + + + Sets the target for :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. + +.. autoclass:: cuda.bindings.runtime.cudaDeviceAttr + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock + + + Maximum number of threads per block + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlockDimX + + + Maximum block dimension X + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlockDimY + + + Maximum block dimension Y + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlockDimZ + + + Maximum block dimension Z + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxGridDimX + + + Maximum grid dimension X + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxGridDimY + + + Maximum grid dimension Y + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxGridDimZ + + + Maximum grid dimension Z + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock + + + Maximum shared memory available per block in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTotalConstantMemory + + + Memory available on device for constant variables in a CUDA C kernel in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrWarpSize + + + Warp size in threads + + + .. 
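
Attributes from this enum are queried one at a time; a minimal sketch, assuming the :py:obj:`~.cudaDeviceGetAttribute` binding, for device 0:

.. code-block:: python

    # Sketch: query two of the attributes documented here for device 0.
    from cuda.bindings import runtime

    attr = runtime.cudaDeviceAttr
    err, max_threads = runtime.cudaDeviceGetAttribute(
        attr.cudaDevAttrMaxThreadsPerBlock, 0)
    err, warp_size = runtime.cudaDeviceGetAttribute(
        attr.cudaDevAttrWarpSize, 0)
    print(max_threads, warp_size)

..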
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxPitch + + + Maximum pitch in bytes allowed by memory copies + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock + + + Maximum number of 32-bit registers available per block + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrClockRate + + + Peak clock frequency in kilohertz + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTextureAlignment + + + Alignment requirement for textures + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGpuOverlap + + + Device can possibly copy memory and execute a kernel concurrently + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMultiProcessorCount + + + Number of multiprocessors on device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrKernelExecTimeout + + + Specifies whether there is a run time limit on kernels + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrIntegrated + + + Device is integrated with host memory + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCanMapHostMemory + + + Device can map host memory into CUDA address space + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputeMode + + + Compute mode (See :py:obj:`~.cudaComputeMode` for details) + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth + + + Maximum 1D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth + + + Maximum 2D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight + + + Maximum 2D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth + + + Maximum 3D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight + + + Maximum 3D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth + + + Maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth + + + Maximum 2D layered texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight + + + Maximum 2D layered texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers + + + Maximum layers in a 2D layered texture + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrSurfaceAlignment + + + Alignment requirement for surfaces + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrConcurrentKernels + + + Device can possibly execute multiple kernels concurrently + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrEccEnabled + + + Device has ECC support enabled + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPciBusId + + + PCI bus ID of the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPciDeviceId + + + PCI device ID of the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTccDriver + + + Device is using TCC driver model + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemoryClockRate + + + Peak memory clock frequency in kilohertz + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth + + + Global memory bus width in bits + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrL2CacheSize + + + Size of L2 cache in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor + + + Maximum resident threads per multiprocessor + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrAsyncEngineCount + + + Number of asynchronous engines + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrUnifiedAddressing + + + Device shares a unified address space with the host + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth + + + Maximum 1D layered texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers + + + Maximum layers in a 1D layered texture + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth + + + Maximum 2D texture width if cudaArrayTextureGather is set + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight + + + Maximum 2D texture height if cudaArrayTextureGather is set + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt + + + Alternate maximum 3D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt + + + Alternate maximum 3D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt + + + Alternate maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPciDomainId + + + PCI domain ID of the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTexturePitchAlignment + + + Pitch alignment requirement for textures + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth + + + Maximum cubemap texture width/height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth + + + Maximum cubemap layered texture width/height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers + + + Maximum layers in a cubemap layered texture + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth + + + Maximum 1D surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth + + + Maximum 2D surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight + + + Maximum 2D surface height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth + + + Maximum 3D surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight + + + Maximum 3D surface height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth + + + Maximum 3D surface depth + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth + + + Maximum 1D layered surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers + + + Maximum layers in a 1D layered surface + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth + + + Maximum 2D layered surface width + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight + + + Maximum 2D layered surface height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers + + + Maximum layers in a 2D layered surface + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth + + + Maximum cubemap surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth + + + Maximum cubemap layered surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers + + + Maximum layers in a cubemap layered surface + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth + + + Maximum 1D linear texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth + + + Maximum 2D linear texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight + + + Maximum 2D linear texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch + + + Maximum 2D linear texture pitch in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth + + + Maximum mipmapped 2D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight + + + Maximum mipmapped 2D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor + + + Major compute capability version number + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor + + + Minor compute capability version number + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth + + + Maximum mipmapped 1D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported + + + Device supports stream priorities + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported + + + Device supports caching globals in L1 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported + + + Device supports caching locals in L1 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor + + + Maximum shared memory available per multiprocessor in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor + + + Maximum number of 32-bit registers available per multiprocessor + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrManagedMemory + + + Device can allocate managed memory on this system + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard + + + Device is on a multi-GPU board + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID + + + Unique identifier for a group of devices on the same multi-GPU board + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported + + + Link between the device and the host supports native atomic operations + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio + + + Ratio of single precision performance (in floating-point operations per second) to double precision performance + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPageableMemoryAccess + + + Device supports coherently accessing pageable memory without calling cudaHostRegister on it + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess + + + Device can coherently access managed memory concurrently with the CPU + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputePreemptionSupported + + + Device supports Compute Preemption + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem + + + Device can access host registered memory at the same virtual address as the CPU + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved92 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved93 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved94 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCooperativeLaunch + + + Device supports launching cooperative kernels via :py:obj:`~.cudaLaunchCooperativeKernel` + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch + + + Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin + + + The maximum optin shared memory per block. This value may vary by chip. See :py:obj:`~.cudaFuncSetAttribute` + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites + + + Device supports flushing of outstanding remote writes. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostRegisterSupported + + + Device supports host memory registration via :py:obj:`~.cudaHostRegister`. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables + + + Device accesses pageable memory via the host's page tables. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost + + + Host can directly access managed memory on the device without migration. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlocksPerMultiprocessor + + + Maximum number of blocks per multiprocessor + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxPersistingL2CacheSize + + + Maximum L2 persisting lines capacity setting in bytes. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxAccessPolicyWindowSize + + + Maximum value of :py:obj:`~.cudaAccessPolicyWindow.num_bytes`. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReservedSharedMemoryPerBlock + + + Shared memory reserved by CUDA driver per block in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrSparseCudaArraySupported + + + Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostRegisterReadOnlySupported + + + Device supports using the :py:obj:`~.cudaHostRegister` flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTimelineSemaphoreInteropSupported + + + External timeline semaphore interop is supported on the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTimelineSemaphoreInteropSupported + + + Deprecated, External timeline semaphore interop is supported on the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported + + + Device supports using the :py:obj:`~.cudaMallocAsync` and :py:obj:`~.cudaMemPool` family of APIs + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMASupported + + + Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions + + + The returned attribute shall be interpreted as a bitmask, where the individual bits are listed in the :py:obj:`~.cudaFlushGPUDirectRDMAWritesOptions` enum + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAWritesOrdering + + + GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.cudaGPUDirectRDMAWritesOrdering` for the numerical values returned here. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemoryPoolSupportedHandleTypes + + + Handle types supported with mempool based IPC + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrClusterLaunch + + + Indicates device supports cluster launch + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrDeferredMappingCudaArraySupported + + + Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved122 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved123 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved124 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrIpcEventSupport + + + Device supports IPC Events. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemSyncDomainCount + + + Number of memory synchronization domains the device supports. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved127 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved128 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved129 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrNumaConfig + + + NUMA configuration of a device: value is of type :py:obj:`~.cudaDeviceNumaConfig` enum + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrNumaId + + + NUMA node ID of the GPU memory + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved132 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMpsEnabled + + + Contexts created on this device will be shared via MPS + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostNumaId + + + NUMA ID of the host node closest to the device. Returns -1 when the system does not support NUMA. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrD3D12CigSupported + + + Device supports CIG with D3D12. + + + ..
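
The attributes above are read at run time with `cudaDeviceGetAttribute`. A minimal sketch (it assumes a device 0 is present; calls in these bindings return an error code followed by the result):

```python
from cuda.bindings import runtime

# Query the compute capability of device 0; each binding call returns
# a (cudaError_t, value) tuple.
err, major = runtime.cudaDeviceGetAttribute(
    runtime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, 0)
assert err == runtime.cudaError_t.cudaSuccess
err, minor = runtime.cudaDeviceGetAttribute(
    runtime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, 0)
assert err == runtime.cudaError_t.cudaSuccess
print(f"Device 0 compute capability: {major}.{minor}")
```

..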
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMax + +.. autoclass:: cuda.bindings.runtime.cudaMemPoolAttr + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies + + + (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in other streams as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic + + + (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies + + + (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync (default enabled). + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold + + + (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent + + + (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrReservedMemHigh + + + (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent + + + (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrUsedMemHigh + + + (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. + +.. autoclass:: cuda.bindings.runtime.cudaMemLocationType + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeInvalid + + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeDevice + + + Location is a device location, thus id is a device ordinal + + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeHost + + + Location is host, id is ignored + + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeHostNuma + + + Location is a host NUMA node, thus id is a host NUMA node id + + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeHostNumaCurrent + + + Location is the host NUMA node closest to the current thread's CPU, id is ignored + +.. autoclass:: cuda.bindings.runtime.cudaMemAccessFlags + + .. autoattribute:: cuda.bindings.runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtNone + + + Default, make the address range not accessible + + + ..
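
As a hedged usage sketch for the pool attributes above (it assumes device 0 and borrows the 64-bit `cuuint64_t` wrapper from the driver bindings for the attribute value, as the attribute is documented with value type cuuint64_t):

```python
from cuda.bindings import driver, runtime

# Raise the release threshold of device 0's default memory pool so that
# freed memory is cached in the pool rather than returned to the OS at
# every synchronization point.
err, pool = runtime.cudaDeviceGetDefaultMemPool(0)
assert err == runtime.cudaError_t.cudaSuccess
err, = runtime.cudaMemPoolSetAttribute(
    pool,
    runtime.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold,
    driver.cuuint64_t(64 * 1024 * 1024),  # hold on to up to 64 MiB
)
assert err == runtime.cudaError_t.cudaSuccess
```

..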
autoattribute:: cuda.bindings.runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtRead + + + Make the address range read accessible + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite + + + Make the address range read-write accessible + +.. autoclass:: cuda.bindings.runtime.cudaMemAllocationType + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationType.cudaMemAllocationTypeInvalid + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationType.cudaMemAllocationTypePinned + + + This allocation type is 'pinned', i.e. cannot migrate from its current location while the application is actively using it + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationType.cudaMemAllocationTypeMax + +.. autoclass:: cuda.bindings.runtime.cudaMemAllocationHandleType + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeNone + + + Does not allow any export mechanism. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor + + + Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeWin32 + + + Allows a Win32 NT handle to be used for exporting. (HANDLE) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeWin32Kmt + + + Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeFabric + + + Allows a fabric handle to be used for exporting. (cudaMemFabricHandle_t) + +.. autoclass:: cuda.bindings.runtime.cudaGraphMemAttributeType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemCurrent + + + (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh + + + (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemCurrent + + + (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemHigh + + + (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. + +.. autoclass:: cuda.bindings.runtime.cudaDeviceP2PAttr + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrPerformanceRank + + + A relative value indicating the performance of the link between two devices + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported + + + Peer access is enabled + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrNativeAtomicSupported + + + Native atomic operation over the link supported + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrCudaArrayAccessSupported + + + Accessing CUDA arrays over the link supported + +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryHandleType + + ..
autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32Kmt + + + Handle is an opaque, globally shared handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Heap + + + Handle is a D3D12 heap object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Resource + + + Handle is a D3D12 committed resource + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11Resource + + + Handle is a shared NT handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11ResourceKmt + + + Handle is a globally shared handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeNvSciBuf + + + Handle is an NvSciBuf object + +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + + + Handle is an opaque, globally shared handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D12Fence + + + Handle is a shared NT handle referencing a D3D12 fence object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D11Fence + + + Handle is a shared NT handle referencing a D3D11 fence object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeNvSciSync + + + Opaque handle to NvSciSync Object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutex + + + Handle is a shared NT handle referencing a D3D11 keyed mutex object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutexKmt + + + Handle is a shared KMT handle referencing a D3D11 keyed mutex object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd + + + Handle is an opaque file descriptor referencing a timeline semaphore + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 + + + Handle is an opaque shared NT handle referencing a timeline semaphore + +.. autoclass:: cuda.bindings.runtime.cudaCGScope + + .. autoattribute:: cuda.bindings.runtime.cudaCGScope.cudaCGScopeInvalid + + + Invalid cooperative group scope + + + .. autoattribute:: cuda.bindings.runtime.cudaCGScope.cudaCGScopeGrid + + + Scope represented by a grid_group + + + ..
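
For the semaphore handle types documented above, importing follows one pattern regardless of type: fill a handle descriptor and call `cudaImportExternalSemaphore`. A hedged sketch for the opaque-fd case (`acquire_semaphore_fd` is a hypothetical stand-in for whatever API actually exports the descriptor, e.g. Vulkan, and the descriptor's field layout is assumed to mirror the C struct):

```python
from cuda.bindings import runtime

fd = acquire_semaphore_fd()  # hypothetical; obtained from the exporting API

desc = runtime.cudaExternalSemaphoreHandleDesc()
desc.type = runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd
desc.handle.fd = fd  # union member selected by desc.type, as in the C API
err, ext_sem = runtime.cudaImportExternalSemaphore(desc)
assert err == runtime.cudaError_t.cudaSuccess
```

..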
autoattribute:: cuda.bindings.runtime.cudaCGScope.cudaCGScopeMultiGrid + + + Scope represented by a multi_grid_group + +.. autoclass:: cuda.bindings.runtime.cudaGraphConditionalHandleFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphConditionalHandleFlags.cudaGraphCondAssignDefault + + + Apply default handle value when graph is launched. + +.. autoclass:: cuda.bindings.runtime.cudaGraphConditionalNodeType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphConditionalNodeType.cudaGraphCondTypeIf + + + Conditional 'if' Node. Body executed once if condition value is non-zero. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphConditionalNodeType.cudaGraphCondTypeWhile + + + Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. + +.. autoclass:: cuda.bindings.runtime.cudaGraphNodeType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeKernel + + + GPU kernel node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemcpy + + + Memcpy node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemset + + + Memset node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeHost + + + Host (executable) node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeGraph + + + Node which executes an embedded graph + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeEmpty + + + Empty (no-op) node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeWaitEvent + + + External event wait node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeEventRecord + + + External event record node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreSignal + + + External semaphore signal node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreWait + + + External semaphore wait node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemAlloc + + + Memory allocation node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemFree + + + Memory free node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeConditional + + + Conditional node. May be used to implement a conditional execution path or loop + + inside of a graph. The graph(s) contained within the body of the conditional node + + can be selectively executed or iterated upon based on the value of a conditional + + variable. + + + + Handles must be created in advance of creating the node + + using :py:obj:`~.cudaGraphConditionalHandleCreate`. + + + + The following restrictions apply to graphs which contain conditional nodes: + + The graph cannot be used in a child node. + + Only one instantiation of the graph may exist at any point in time. + + The graph cannot be cloned. + + + + To set the control value, supply a default value when creating the handle and/or + + call :py:obj:`~.cudaGraphSetConditional` from device code. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeCount + +.. autoclass:: cuda.bindings.runtime.cudaGraphDependencyType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDependencyType.cudaGraphDependencyTypeDefault + + + This is an ordinary dependency. + + + ..
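
The graph node types above are most often created implicitly by capturing stream work. A hedged sketch of the capture/instantiate/launch round trip (it assumes the tuple-returning call convention of these bindings and the CUDA 12-style `cudaGraphInstantiate(graph, flags)` signature):

```python
from cuda.bindings import runtime

err, stream = runtime.cudaStreamCreate()
assert err == runtime.cudaError_t.cudaSuccess

# Work submitted between Begin/EndCapture becomes graph nodes (kernel,
# memcpy, memset, ... as enumerated by cudaGraphNodeType).
err, = runtime.cudaStreamBeginCapture(
    stream, runtime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
assert err == runtime.cudaError_t.cudaSuccess
# ... enqueue asynchronous work on `stream` here ...
err, graph = runtime.cudaStreamEndCapture(stream)
assert err == runtime.cudaError_t.cudaSuccess

# Instantiate once, then launch the executable graph as often as needed.
err, graph_exec = runtime.cudaGraphInstantiate(graph, 0)
assert err == runtime.cudaError_t.cudaSuccess
err, = runtime.cudaGraphLaunch(graph_exec, stream)
assert err == runtime.cudaError_t.cudaSuccess
err, = runtime.cudaStreamSynchronize(stream)
assert err == runtime.cudaError_t.cudaSuccess
```

..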
autoattribute:: cuda.bindings.runtime.cudaGraphDependencyType.cudaGraphDependencyTypeProgrammatic + + + This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.cudaGraphKernelNodePortProgrammatic` or :py:obj:`~.cudaGraphKernelNodePortLaunchCompletion` outgoing port. + +.. autoclass:: cuda.bindings.runtime.cudaGraphExecUpdateResult + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess + + + The update succeeded + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateError + + + The update failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorTopologyChanged + + + The update failed because the topology changed + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNodeTypeChanged + + + The update failed because a node type changed + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorFunctionChanged + + + The update failed because the function of a kernel node changed (CUDA driver < 11.2) + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorParametersChanged + + + The update failed because the parameters changed in a way that is not supported + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNotSupported + + + The update failed because something about the node is not supported + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorUnsupportedFunctionChange + + + The update failed because the function of a kernel node changed in an unsupported way + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorAttributesChanged + + + The update failed because the node attributes changed in a way that is not supported + +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateResult + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess + + + Instantiation succeeded + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateError + + + Instantiation failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateInvalidStructure + + + Instantiation failed due to invalid structure, such as cycles + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateNodeOperationNotSupported + + + Instantiation for device launch failed because the graph contained an unsupported operation + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateMultipleDevicesNotSupported + + + Instantiation for device launch failed due to the nodes belonging to different contexts + +.. autoclass:: cuda.bindings.runtime.cudaGraphKernelNodeField + + .. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldInvalid + + + Invalid field + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldGridDim + + + Grid dimension update + + + .. 
autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldParam + + + Kernel parameter update + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldEnabled + + + Node enable/disable + +.. autoclass:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags.cudaEnableDefault + + + Default search mode for driver symbols. + + + .. autoattribute:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags.cudaEnableLegacyStream + + + Search for legacy versions of driver symbols. + + + .. autoattribute:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags.cudaEnablePerThreadDefaultStream + + + Search for per-thread versions of driver symbols. + +.. autoclass:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult + + .. autoattribute:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess + + + Search for symbol found a match + + + .. autoattribute:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSymbolNotFound + + + Search for symbol was not found + + + .. autoattribute:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointVersionNotSufficent + + + Search for symbol was found but the version wasn't high enough + +.. autoclass:: cuda.bindings.runtime.cudaGraphDebugDotFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose + + + Output all debug data as if every debug flag is enabled + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams + + + Adds :py:obj:`~.cudaKernelNodeParams` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemcpyNodeParams + + + Adds :py:obj:`~.cudaMemcpy3DParms` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemsetNodeParams + + + Adds :py:obj:`~.cudaMemsetParams` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHostNodeParams + + + Adds :py:obj:`~.cudaHostNodeParams` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsEventNodeParams + + + Adds cudaEvent_t handle from record and wait nodes to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasSignalNodeParams + + + Adds :py:obj:`~.cudaExternalSemaphoreSignalNodeParams` values to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasWaitNodeParams + + + Adds :py:obj:`~.cudaExternalSemaphoreWaitNodeParams` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeAttributes + + + Adds cudaKernelNodeAttrID values to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHandles + + + Adds node handles and every kernel function handle to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsConditionalNodeParams + + + Adds :py:obj:`~.cudaConditionalNodeParams` to output + +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch + + + Automatically free memory allocated in a graph before relaunching. + + + ..
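
The debug-dot flags above feed `cudaGraphDebugDotPrint`, which writes a DOT rendering of a graph for offline inspection. A small hedged sketch (it reuses a `graph` handle such as the one captured in the earlier sketch; the output path is passed as bytes, mirroring the C `char*` parameter):

```python
from cuda.bindings import runtime

# Emit everything the debug writer knows about each node.
flags = runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose.value
err, = runtime.cudaGraphDebugDotPrint(graph, b"graph_debug.dot", flags)
assert err == runtime.cudaError_t.cudaSuccess
```

..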
autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload + + + Automatically upload the graph after instantiation. Only supported by + + :py:obj:`~.cudaGraphInstantiateWithParams`. The upload will be performed using the + + stream provided in `instantiateParams`. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagDeviceLaunch + + + Instantiate the graph to be launchable from the device. This flag can only + + be used on platforms which support unified addressing. This flag cannot be + + used in conjunction with cudaGraphInstantiateFlagAutoFreeOnLaunch. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority + + + Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. + +.. autoclass:: cuda.bindings.runtime.cudaLaunchMemSyncDomain + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainDefault + + + Launch kernels in the default domain + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainRemote + + + Launch kernels in the remote domain + +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttributeID + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeIgnore + + + Ignored entry, for convenient composition + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.accessPolicyWindow`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeCooperative + + + Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.cooperative`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy + + + Valid for streams. See :py:obj:`~.cudaLaunchAttributeValue.syncPolicy`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension + + + Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterDim`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference + + + Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization + + + Valid for launches. Setting :py:obj:`~.cudaLaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent + + + Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.programmaticEvent` to record the event. Event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event programmatically in a future CUDA release.
A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cudaEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cudaEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set). + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributePriority + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.priority`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomainMap`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomain`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent + + + Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.launchCompletionEvent` to record the event. + + Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. + + A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set). + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode + + + Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. + + cudaLaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via :py:obj:`~.cudaLaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`.
+ + Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cudaGraphDestroyNode`. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cudaGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cudaGraphExecUpdate`. + + If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cudaGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout + + + Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.cudaLaunchAttributeValue.sharedMemCarveout` to a percentage between 0-100 sets the shared memory carveout preference, in percent of the total shared memory, for that kernel launch. This attribute takes precedence over :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout`. This is only a hint, and the driver can choose a different configuration if required for the launch. + +.. autoclass:: cuda.bindings.runtime.cudaDeviceNumaConfig + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceNumaConfig.cudaDeviceNumaConfigNone + + + The GPU is not a NUMA node + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode + + + The GPU is a NUMA node, cudaDevAttrNumaId contains its NUMA ID + +.. autoclass:: cuda.bindings.runtime.cudaAsyncNotificationType + + .. autoattribute:: cuda.bindings.runtime.cudaAsyncNotificationType.cudaAsyncNotificationTypeOverBudget + +.. autoclass:: cuda.bindings.runtime.cudaSurfaceBoundaryMode + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceBoundaryMode.cudaBoundaryModeZero + + + Zero boundary mode + + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceBoundaryMode.cudaBoundaryModeClamp + + + Clamp boundary mode + + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceBoundaryMode.cudaBoundaryModeTrap + + + Trap boundary mode + +.. autoclass:: cuda.bindings.runtime.cudaSurfaceFormatMode + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceFormatMode.cudaFormatModeForced + + + Forced format mode + + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceFormatMode.cudaFormatModeAuto + + + Auto format mode + +.. autoclass:: cuda.bindings.runtime.cudaTextureAddressMode + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeWrap + + + Wrapping address mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeClamp + + + Clamp to edge address mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeMirror + + + Mirror address mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeBorder + + + Border address mode + +.. autoclass:: cuda.bindings.runtime.cudaTextureFilterMode + + ..
autoattribute:: cuda.bindings.runtime.cudaTextureFilterMode.cudaFilterModePoint + + + Point filter mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureFilterMode.cudaFilterModeLinear + + + Linear filter mode + +.. autoclass:: cuda.bindings.runtime.cudaTextureReadMode + + .. autoattribute:: cuda.bindings.runtime.cudaTextureReadMode.cudaReadModeElementType + + + Read texture as specified element type + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureReadMode.cudaReadModeNormalizedFloat + + + Read texture as normalized float + +.. autoclass:: cuda.bindings.runtime.cudaEglPlaneDesc +.. autoclass:: cuda.bindings.runtime.cudaEglFrame +.. autoclass:: cuda.bindings.runtime.cudaEglStreamConnection +.. autoclass:: cuda.bindings.runtime.cudaArray_t +.. autoclass:: cuda.bindings.runtime.cudaArray_const_t +.. autoclass:: cuda.bindings.runtime.cudaMipmappedArray_t +.. autoclass:: cuda.bindings.runtime.cudaMipmappedArray_const_t +.. autoclass:: cuda.bindings.runtime.cudaHostFn_t +.. autoclass:: cuda.bindings.runtime.CUuuid +.. autoclass:: cuda.bindings.runtime.cudaUUID_t +.. autoclass:: cuda.bindings.runtime.cudaIpcEventHandle_t +.. autoclass:: cuda.bindings.runtime.cudaIpcMemHandle_t +.. autoclass:: cuda.bindings.runtime.cudaMemFabricHandle_t +.. autoclass:: cuda.bindings.runtime.cudaStream_t +.. autoclass:: cuda.bindings.runtime.cudaEvent_t +.. autoclass:: cuda.bindings.runtime.cudaGraphicsResource_t +.. autoclass:: cuda.bindings.runtime.cudaExternalMemory_t +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphore_t +.. autoclass:: cuda.bindings.runtime.cudaGraph_t +.. autoclass:: cuda.bindings.runtime.cudaGraphNode_t +.. autoclass:: cuda.bindings.runtime.cudaUserObject_t +.. autoclass:: cuda.bindings.runtime.cudaGraphConditionalHandle +.. autoclass:: cuda.bindings.runtime.cudaFunction_t +.. autoclass:: cuda.bindings.runtime.cudaKernel_t +.. autoclass:: cuda.bindings.runtime.cudaMemPool_t +.. autoclass:: cuda.bindings.runtime.cudaGraphEdgeData +.. autoclass:: cuda.bindings.runtime.cudaGraphExec_t +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateParams +.. autoclass:: cuda.bindings.runtime.cudaGraphExecUpdateResultInfo +.. autoclass:: cuda.bindings.runtime.cudaGraphDeviceNode_t +.. autoclass:: cuda.bindings.runtime.cudaLaunchMemSyncDomainMap +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttributeValue +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttribute +.. autoclass:: cuda.bindings.runtime.cudaAsyncCallbackHandle_t +.. autoclass:: cuda.bindings.runtime.cudaAsyncNotificationInfo_t +.. autoclass:: cuda.bindings.runtime.cudaAsyncCallback +.. autoclass:: cuda.bindings.runtime.cudaSurfaceObject_t +.. autoclass:: cuda.bindings.runtime.cudaTextureObject_t +.. autoattribute:: cuda.bindings.runtime.CUDA_EGL_MAX_PLANES + + Maximum number of planes per frame + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocDefault + + Default page-locked allocation flag + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocPortable + + Pinned memory accessible by all CUDA contexts + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocMapped + + Map allocation into device space + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocWriteCombined + + Write-combined memory + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterDefault + + Default host memory registration flag + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterPortable + + Pinned memory accessible by all CUDA contexts + +.. 
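
The host-allocation flags documented above combine as a bitmask passed to `cudaHostAlloc`. A minimal sketch:

```python
from cuda.bindings import runtime

nbytes = 1 << 20
# Page-locked host memory, visible to all CUDA contexts and mapped into
# the device address space.
err, host_ptr = runtime.cudaHostAlloc(
    nbytes, runtime.cudaHostAllocPortable | runtime.cudaHostAllocMapped)
assert err == runtime.cudaError_t.cudaSuccess
# ... use host_ptr as the source/target of asynchronous copies ...
err, = runtime.cudaFreeHost(host_ptr)
assert err == runtime.cudaError_t.cudaSuccess
```

..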
autoattribute:: cuda.bindings.runtime.cudaHostRegisterMapped + + Map registered memory into device space + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterIoMemory + + Memory-mapped I/O space + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterReadOnly + + Memory-mapped read-only + +.. autoattribute:: cuda.bindings.runtime.cudaPeerAccessDefault + + Default peer addressing enable flag + +.. autoattribute:: cuda.bindings.runtime.cudaStreamDefault + + Default stream flag + +.. autoattribute:: cuda.bindings.runtime.cudaStreamNonBlocking + + Stream does not synchronize with stream 0 (the NULL stream) + +.. autoattribute:: cuda.bindings.runtime.cudaStreamLegacy + + Legacy stream handle + + + + Stream handle that can be passed as a cudaStream_t to use an implicit stream with legacy synchronization behavior. + + + + See the details of the stream synchronization behavior. + +.. autoattribute:: cuda.bindings.runtime.cudaStreamPerThread + + Per-thread stream handle + + + + Stream handle that can be passed as a cudaStream_t to use an implicit stream with per-thread synchronization behavior. + + + + See the details of the stream synchronization behavior. + +.. autoattribute:: cuda.bindings.runtime.cudaEventDefault + + Default event flag + +.. autoattribute:: cuda.bindings.runtime.cudaEventBlockingSync + + Event uses blocking synchronization + +.. autoattribute:: cuda.bindings.runtime.cudaEventDisableTiming + + Event will not record timing data + +.. autoattribute:: cuda.bindings.runtime.cudaEventInterprocess + + Event is suitable for interprocess use. cudaEventDisableTiming must be set + +.. autoattribute:: cuda.bindings.runtime.cudaEventRecordDefault + + Default event record flag + +.. autoattribute:: cuda.bindings.runtime.cudaEventRecordExternal + + Event is captured in the graph as an external event node when performing stream capture + +.. autoattribute:: cuda.bindings.runtime.cudaEventWaitDefault + + Default event wait flag + +.. autoattribute:: cuda.bindings.runtime.cudaEventWaitExternal + + Event is captured in the graph as an external event node when performing stream capture + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleAuto + + Device flag - Automatic scheduling + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleSpin + + Device flag - Spin default scheduling + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleYield + + Device flag - Yield default scheduling + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleBlockingSync + + Device flag - Use blocking synchronization + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceBlockingSync + + Device flag - Use blocking synchronization [Deprecated] + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleMask + + Device schedule flags mask + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceMapHost + + Device flag - Support mapped pinned allocations + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceLmemResizeToMax + + Device flag - Keep local memory allocation after launch + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceSyncMemops + + Device flag - Ensure synchronous memory operations on this context will synchronize + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceMask + + Device flags mask + +.. autoattribute:: cuda.bindings.runtime.cudaArrayDefault + + Default CUDA array allocation flag + +.. autoattribute:: cuda.bindings.runtime.cudaArrayLayered + + Must be set in cudaMalloc3DArray to create a layered CUDA array + +..
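
Tying the event flags above back to the launch attributes documented earlier: events used with cudaLaunchAttributeProgrammaticEvent or cudaLaunchAttributeLaunchCompletionEvent must be created with timing disabled. A minimal sketch:

```python
from cuda.bindings import runtime

# Timing must be disabled for programmatic/launch-completion events.
err, event = runtime.cudaEventCreateWithFlags(runtime.cudaEventDisableTiming)
assert err == runtime.cudaError_t.cudaSuccess
# ... record/wait on the event ...
err, = runtime.cudaEventDestroy(event)
assert err == runtime.cudaError_t.cudaSuccess
```

..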
autoattribute:: cuda.bindings.runtime.cudaArraySurfaceLoadStore + + Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayCubemap + + Must be set in cudaMalloc3DArray to create a cubemap CUDA array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayTextureGather + + Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayColorAttachment + + Must be set in cudaExternalMemoryGetMappedMipmappedArray if the mipmapped array is used as a color target in a graphics API + +.. autoattribute:: cuda.bindings.runtime.cudaArraySparse + + Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA mipmapped array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayDeferredMapping + + Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a deferred mapping CUDA array or CUDA mipmapped array + +.. autoattribute:: cuda.bindings.runtime.cudaIpcMemLazyEnablePeerAccess + + Automatically enable peer access between remote devices as needed + +.. autoattribute:: cuda.bindings.runtime.cudaMemAttachGlobal + + Memory can be accessed by any stream on any device + +.. autoattribute:: cuda.bindings.runtime.cudaMemAttachHost + + Memory cannot be accessed by any stream on any device + +.. autoattribute:: cuda.bindings.runtime.cudaMemAttachSingle + + Memory can only be accessed by a single stream on the associated device + +.. autoattribute:: cuda.bindings.runtime.cudaOccupancyDefault + + Default behavior + +.. autoattribute:: cuda.bindings.runtime.cudaOccupancyDisableCachingOverride + + Assume global caching is enabled and cannot be automatically turned off + +.. autoattribute:: cuda.bindings.runtime.cudaCpuDeviceId + + Device id that represents the CPU + +.. autoattribute:: cuda.bindings.runtime.cudaInvalidDeviceId + + Device id that represents an invalid device + +.. autoattribute:: cuda.bindings.runtime.cudaInitDeviceFlagsAreValid + + Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice call + +.. autoattribute:: cuda.bindings.runtime.cudaCooperativeLaunchMultiDeviceNoPreSync + + If set, each kernel launched as part of :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. + +.. autoattribute:: cuda.bindings.runtime.cudaCooperativeLaunchMultiDeviceNoPostSync + + If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. + +.. autoattribute:: cuda.bindings.runtime.cudaArraySparsePropertiesSingleMipTail + + Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers + +.. autoattribute:: cuda.bindings.runtime.CUDART_CB +.. autoattribute:: cuda.bindings.runtime.CU_UUID_HAS_BEEN_DEFINED + + CUDA UUID types + +.. autoattribute:: cuda.bindings.runtime.CUDA_IPC_HANDLE_SIZE + + CUDA IPC Handle Size + +.. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryDedicated + + Indicates that the external memory object is a dedicated resource + +.. 
autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreSignalSkipNvSciBufMemSync + + When the flags parameter of :py:obj:`~.cudaExternalSemaphoreSignalParams` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreWaitSkipNvSciBufMemSync + + When the flags parameter of :py:obj:`~.cudaExternalSemaphoreWaitParams` contains this flag, it indicates that waiting an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.runtime.cudaNvSciSyncAttrSignal + + When the flags parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs signaler-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.runtime.cudaNvSciSyncAttrWait + + When the flags parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs waiter-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodePortDefault + + This port activates when the kernel has finished executing. + +.. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodePortProgrammatic + + This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.cudaGraphDependencyTypeProgrammatic`. See also :py:obj:`~.cudaLaunchAttributeProgrammaticEvent`. + +.. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodePortLaunchCompletion + + This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.cudaLaunchAttributeLaunchCompletionEvent`. + +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttrID +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributeAccessPolicyWindow +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributeSynchronizationPolicy +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributeMemSyncDomainMap +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributeMemSyncDomain +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributePriority +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttrValue +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttrID +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeAccessPolicyWindow +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeCooperative +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributePriority +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeClusterDimension +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeClusterSchedulingPolicyPreference +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeMemSyncDomainMap +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeMemSyncDomain +..
autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributePreferredSharedMemoryCarveout +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeDeviceUpdatableKernelNode +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttrValue +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType1D +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType2D +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType3D +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceTypeCubemap +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType1DLayered +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType2DLayered +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceTypeCubemapLayered +.. autoattribute:: cuda.bindings.runtime.cudaTextureType1D +.. autoattribute:: cuda.bindings.runtime.cudaTextureType2D +.. autoattribute:: cuda.bindings.runtime.cudaTextureType3D +.. autoattribute:: cuda.bindings.runtime.cudaTextureTypeCubemap +.. autoattribute:: cuda.bindings.runtime.cudaTextureType1DLayered +.. autoattribute:: cuda.bindings.runtime.cudaTextureType2DLayered +.. autoattribute:: cuda.bindings.runtime.cudaTextureTypeCubemapLayered diff --git a/docs/_sources/overview.md.txt b/docs/_sources/overview.md.txt index d7b068b3..155be761 100644 --- a/docs/_sources/overview.md.txt +++ b/docs/_sources/overview.md.txt @@ -49,7 +49,7 @@ Python package. In this example, you copy data from the host to device. You need [NumPy](https://numpy.org/doc/stable/contents.html) to store data on the host. ```{code-cell} python -from cuda import cuda, nvrtc +from cuda.bindings import driver, nvrtc import numpy as np ``` @@ -60,9 +60,9 @@ object model. ```{code-cell} python def _cudaGetErrorEnum(error): - if isinstance(error, cuda.CUresult): - err, name = cuda.cuGetErrorName(error) - return name if err == cuda.CUresult.CUDA_SUCCESS else "" + if isinstance(error, driver.CUresult): + err, name = driver.cuGetErrorName(error) + return name if err == driver.CUresult.CUDA_SUCCESS else "" elif isinstance(error, nvrtc.nvrtcResult): return nvrtc.nvrtcGetErrorString(error)[1] else: @@ -110,14 +110,14 @@ the program is compiled to target our local compute capability architecture with ```{code-cell} python # Initialize CUDA Driver API -checkCudaErrors(cuda.cuInit(0)) +checkCudaErrors(driver.cuInit(0)) # Retrieve handle for device 0 -cuDevice = checkCudaErrors(cuda.cuDeviceGet(0)) +cuDevice = checkCudaErrors(driver.cuDeviceGet(0)) # Derive target architecture for device 0 -major = checkCudaErrors(cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevice)) -minor = checkCudaErrors(cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevice)) +major = checkCudaErrors(driver.cuDeviceGetAttribute(driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevice)) +minor = checkCudaErrors(driver.cuDeviceGetAttribute(driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevice)) arch_arg = bytes(f'--gpu-architecture=compute_{major}{minor}', 'ascii') # Create program @@ -140,7 +140,7 @@ following code example, a handle for compute device 0 is passed to ```{code-cell} python # Create context -context = checkCudaErrors(cuda.cuCtxCreate(0, cuDevice)) +context = checkCudaErrors(driver.cuCtxCreate(0, cuDevice)) ``` With a CUDA context created on device 0, load the PTX generated earlier into a @@ -152,8 +152,8 @@ After loading into the module, extract a specific kernel with # Load PTX as module data and retrieve 
function ptx = np.char.array(ptx) # Note: Incompatible --gpu-architecture would be detected here -module = checkCudaErrors(cuda.cuModuleLoadData(ptx.ctypes.data)) -kernel = checkCudaErrors(cuda.cuModuleGetFunction(module, b"saxpy")) +module = checkCudaErrors(driver.cuModuleLoadData(ptx.ctypes.data)) +kernel = checkCudaErrors(driver.cuModuleGetFunction(module, b"saxpy")) ``` Next, get all your data prepared and transferred to the GPU. For increased @@ -185,16 +185,16 @@ Python doesn’t have a natural concept of pointers, yet `cuMemcpyHtoDAsync` exp XX. ```{code-cell} python -dXclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) -dYclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) -dOutclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) +dXclass = checkCudaErrors(driver.cuMemAlloc(bufferSize)) +dYclass = checkCudaErrors(driver.cuMemAlloc(bufferSize)) +dOutclass = checkCudaErrors(driver.cuMemAlloc(bufferSize)) -stream = checkCudaErrors(cuda.cuStreamCreate(0)) +stream = checkCudaErrors(driver.cuStreamCreate(0)) -checkCudaErrors(cuda.cuMemcpyHtoDAsync( +checkCudaErrors(driver.cuMemcpyHtoDAsync( dXclass, hX.ctypes.data, bufferSize, stream )) -checkCudaErrors(cuda.cuMemcpyHtoDAsync( +checkCudaErrors(driver.cuMemcpyHtoDAsync( dYclass, hY.ctypes.data, bufferSize, stream )) ``` @@ -223,7 +223,7 @@ args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64) Now the kernel can be launched: ```{code-cell} python -checkCudaErrors(cuda.cuLaunchKernel( +checkCudaErrors(driver.cuLaunchKernel( kernel, NUM_BLOCKS, # grid x dim 1, # grid y dim @@ -237,10 +237,10 @@ checkCudaErrors(cuda.cuLaunchKernel( 0, # extra (ignore) )) -checkCudaErrors(cuda.cuMemcpyDtoHAsync( +checkCudaErrors(driver.cuMemcpyDtoHAsync( hOut.ctypes.data, dOutclass, bufferSize, stream )) -checkCudaErrors(cuda.cuStreamSynchronize(stream)) +checkCudaErrors(driver.cuStreamSynchronize(stream)) ``` The `cuLaunchKernel` function takes the compiled module kernel and execution @@ -262,12 +262,12 @@ Perform verification of the data to ensure correctness and finish the code with memory clean up. 
```{code-cell} python -checkCudaErrors(cuda.cuStreamDestroy(stream)) -checkCudaErrors(cuda.cuMemFree(dXclass)) -checkCudaErrors(cuda.cuMemFree(dYclass)) -checkCudaErrors(cuda.cuMemFree(dOutclass)) -checkCudaErrors(cuda.cuModuleUnload(module)) -checkCudaErrors(cuda.cuCtxDestroy(context)) +checkCudaErrors(driver.cuStreamDestroy(stream)) +checkCudaErrors(driver.cuMemFree(dXclass)) +checkCudaErrors(driver.cuMemFree(dYclass)) +checkCudaErrors(driver.cuMemFree(dOutclass)) +checkCudaErrors(driver.cuModuleUnload(module)) +checkCudaErrors(driver.cuCtxDestroy(context)) ``` ## Performance diff --git a/docs/_sources/release.md.txt b/docs/_sources/release.md.txt index 03f9ab20..c3ae5a30 100644 --- a/docs/_sources/release.md.txt +++ b/docs/_sources/release.md.txt @@ -5,6 +5,7 @@ maxdepth: 3 --- + 12.6.1 12.6.0 12.5.0 12.4.0 @@ -13,6 +14,7 @@ maxdepth: 3 12.2.0 12.1.0 12.0.0 + 11.8.4 11.8.3 11.8.2 11.8.1 diff --git a/docs/_sources/release/11.8.4-notes.md.txt b/docs/_sources/release/11.8.4-notes.md.txt new file mode 100644 index 00000000..9cae2915 --- /dev/null +++ b/docs/_sources/release/11.8.4-notes.md.txt @@ -0,0 +1,32 @@ +# CUDA Python 11.8.4 Release notes + +Released on October 7, 2024 + +## Highlights +- Resolve [Issue #89](https://github.com/NVIDIA/cuda-python/issues/89): Fix getLocalRuntimeVersion searching for wrong libcudart version +- Resolve [Issue #90](https://github.com/NVIDIA/cuda-python/issues/90): Use new layout in preparation for cuda-python becoming a metapackage + +## Limitations + +### CUDA Functions Not Supported in this Release + +- Symbol APIs + - cudaGraphExecMemcpyNodeSetParamsFromSymbol + - cudaGraphExecMemcpyNodeSetParamsToSymbol + - cudaGraphAddMemcpyNodeToSymbol + - cudaGraphAddMemcpyNodeFromSymbol + - cudaGraphMemcpyNodeSetParamsToSymbol + - cudaGraphMemcpyNodeSetParamsFromSymbol + - cudaMemcpyToSymbol + - cudaMemcpyFromSymbol + - cudaMemcpyToSymbolAsync + - cudaMemcpyFromSymbolAsync + - cudaGetSymbolAddress + - cudaGetSymbolSize + - cudaGetFuncBySymbol +- Launch Options + - cudaLaunchKernel + - cudaLaunchCooperativeKernel + - cudaLaunchCooperativeKernelMultiDevice +- cudaSetValidDevices +- cudaVDPAUSetVDPAUDevice diff --git a/docs/_sources/release/12.6.1-notes.md.txt b/docs/_sources/release/12.6.1-notes.md.txt new file mode 100644 index 00000000..d5fe82ac --- /dev/null +++ b/docs/_sources/release/12.6.1-notes.md.txt @@ -0,0 +1,33 @@ +# CUDA Python 12.6.1 Release notes + +Released on October 7, 2024 + +## Highlights +- Resolve [Issue #90](https://github.com/NVIDIA/cuda-python/issues/90): Use new layout in preparation for cuda-python becoming a metapackage + +## Limitations + +### CUDA Functions Not Supported in this Release + +- Symbol APIs + - cudaGraphExecMemcpyNodeSetParamsFromSymbol + - cudaGraphExecMemcpyNodeSetParamsToSymbol + - cudaGraphAddMemcpyNodeToSymbol + - cudaGraphAddMemcpyNodeFromSymbol + - cudaGraphMemcpyNodeSetParamsToSymbol + - cudaGraphMemcpyNodeSetParamsFromSymbol + - cudaMemcpyToSymbol + - cudaMemcpyFromSymbol + - cudaMemcpyToSymbolAsync + - cudaMemcpyFromSymbolAsync + - cudaGetSymbolAddress + - cudaGetSymbolSize + - cudaGetFuncBySymbol +- Launch Options + - cudaLaunchKernel + - cudaLaunchCooperativeKernel + - cudaLaunchCooperativeKernelMultiDevice +- cudaSetValidDevices +- cudaVDPAUSetVDPAUDevice +- cudaFuncGetName +- cudaFuncGetParamInfo diff --git a/docs/_static/documentation_options.js index 1854a578..c63309f3 100644 --- a/docs/_static/documentation_options.js +++
diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js
index 1854a578..c63309f3 100644
--- a/docs/_static/documentation_options.js
+++ b/docs/_static/documentation_options.js
@@ -1,6 +1,6 @@
 var DOCUMENTATION_OPTIONS = {
     URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
-    VERSION: '12.6.0',
+    VERSION: '12.6.1',
     LANGUAGE: 'en',
     COLLAPSE_INDEX: false,
     BUILDER: 'html',

[The next hunks touch the generated Sphinx HTML pages — docs/api.html, docs/contribute.html, docs/genindex.html, docs/index.html, and adjacent pages whose diff headers were lost in extraction. The changes are mechanical: each page's title and sidebar string "CUDA Python 12.6.0 documentation" becomes "CUDA Python 12.6.1 documentation", the release-notes navigation gains entries for the new 11.8.4 and 12.6.1 notes alongside the existing Code of Conduct, Contributing, Release Notes, and CUDA Python API Reference links, and the general index's per-letter sections (_ and A through Z) are re-anchored accordingly. The surrounding markup did not survive extraction and is not reproduced here.]
  • diff --git a/docs/module/cuda.html b/docs/module/cuda.html deleted file mode 100644 index cc007ce5..00000000 --- a/docs/module/cuda.html +++ /dev/null @@ -1,39420 +0,0 @@ - - - - - - - - - - cuda - CUDA Python 12.6.0 documentation - - - - - - - - - - - - - - - - - Contents - - - - - - Menu - - - - - - - - Expand - - - - - - Light mode - - - - - - - - - - - - - - Dark mode - - - - - - - Auto light/dark mode - - - - - - - - - - - - - - - - - - - -
    -
    -
    - -
    - -
    -
    - -
    - -
    -
    - -
    -
    -
    - - - - - Back to top - -
    - -
    - -
    - -
    -
    -
    -

    cuda#

    -
    -

    Data types used by CUDA driver#

    -
    -
    -class cuda.cuda.CUuuid_st(void_ptr _ptr=0)#
    -
    -
    -bytes#
    -

    < CUDA definition of UUID

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUmemFabricHandle_st(void_ptr _ptr=0)#
    -

    Fabric handle - An opaque handle representing a memory allocation -that can be exported to processes in same or different nodes. For -IPC between processes on different nodes they must be connected via -the NVSwitch fabric.

    -
    -
    -data#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUipcEventHandle_st(void_ptr _ptr=0)#
    -

    CUDA IPC event handle

    -
    -
    -reserved#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUipcMemHandle_st(void_ptr _ptr=0)#
    -

    CUDA IPC mem handle

    -
    -
    -reserved#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUstreamBatchMemOpParams_union(void_ptr _ptr=0)#
    -

    Per-operation parameters for cuStreamBatchMemOp

    -
    -
    -operation#
    -
    -
    Type:
    -

    CUstreamBatchMemOpType

    -
    -
    -
    - -
    -
    -waitValue#
    -
    -
    Type:
    -

    CUstreamMemOpWaitValueParams_st

    -
    -
    -
    - -
    -
    -writeValue#
    -
    -
    Type:
    -

    CUstreamMemOpWriteValueParams_st

    -
    -
    -
    - -
    -
    -flushRemoteWrites#
    -
    -
    Type:
    -

    CUstreamMemOpFlushRemoteWritesParams_st

    -
    -
    -
    - -
    -
    -memoryBarrier#
    -
    -
    Type:
    -

    CUstreamMemOpMemoryBarrierParams_st

    -
    -
    -
    - -
    -
    -pad#
    -
    -
    Type:
    -

    List[cuuint64_t]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st(void_ptr _ptr=0)#
    -
    -
    -ctx#
    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -count#
    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -paramArray#
    -
    -
    Type:
    -

    CUstreamBatchMemOpParams

    -
    -
    -
    - -
    -
    -flags#
    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    -

    Batch memory operation node parameters

    -
    -
    -ctx#
    -

    Context to use for the operations.

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -count#
    -

    Number of operations in paramArray.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -paramArray#
    -

    Array of batch memory operations.

    -
    -
    Type:
    -

    CUstreamBatchMemOpParams

    -
    -
    -
    - -
    -
    -flags#
    -

    Flags to control the node.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUasyncNotificationInfo_st(void_ptr _ptr=0)#
    -

    Information passed to the user via the async notification callback

    -
    -
    -type#
    -
    -
    Type:
    -

    CUasyncNotificationType

    -
    -
    -
    - -
    -
    -info#
    -
    -
    Type:
    -

    anon_union2

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUdevprop_st(void_ptr _ptr=0)#
    -

    Legacy device properties

    -
    -
    -maxThreadsPerBlock#
    -

    Maximum number of threads per block

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -maxThreadsDim#
    -

    Maximum size of each dimension of a block

    -
    -
    Type:
    -

    List[int]

    -
    -
    -
    - -
    -
    -maxGridSize#
    -

    Maximum size of each dimension of a grid

    -
    -
    Type:
    -

    List[int]

    -
    -
    -
    - -
    -
    -sharedMemPerBlock#
    -

    Shared memory available per block in bytes

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -totalConstantMemory#
    -

    Constant memory available on device in bytes

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -SIMDWidth#
    -

    Warp size in threads

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -memPitch#
    -

    Maximum pitch in bytes allowed by memory copies

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -regsPerBlock#
    -

    32-bit registers available per block

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -clockRate#
    -

    Clock frequency in kilohertz

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -textureAlign#
    -

    Alignment requirement for textures

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUaccessPolicyWindow_st(void_ptr _ptr=0)#
    -

    Specifies an access policy for a window, a contiguous extent of -memory beginning at base_ptr and ending at base_ptr + num_bytes. -num_bytes is limited by -CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. Partition into -many segments and assign segments such that: sum of “hit segments” -/ window == approx. ratio. sum of “miss segments” / window == -approx 1-ratio. Segments and ratio specifications are fitted to the -capabilities of the architecture. Accesses in a hit segment apply -the hitProp access policy. Accesses in a miss segment apply the -missProp access policy.

    -
    -
    -base_ptr#
    -

    Starting address of the access policy window. CUDA driver may align -it.

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -num_bytes#
    -

    Size in bytes of the window policy. CUDA driver may restrict the -maximum size and alignment.

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -hitRatio#
    -

    hitRatio specifies percentage of lines assigned hitProp, rest are -assigned missProp.

    -
    -
    Type:
    -

    float

    -
    -
    -
    - -
    -
    -hitProp#
    -

    CUaccessProperty set for hit.

    -
    -
    Type:
    -

    CUaccessProperty

    -
    -
    -
    - -
    -
    -missProp#
    -

    CUaccessProperty set for miss. Must be either NORMAL or STREAMING

    -
    -
    Type:
    -

    CUaccessProperty

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_KERNEL_NODE_PARAMS_st(void_ptr _ptr=0)#
    -

    GPU kernel node parameters

    -
    -
    -func#
    -

    Kernel to launch

    -
    -
    Type:
    -

    CUfunction

    -
    -
    -
    - -
    -
    -gridDimX#
    -

    Width of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimY#
    -

    Height of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimZ#
    -

    Depth of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimX#
    -

    X dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimY#
    -

    Y dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimZ#
    -

    Z dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -sharedMemBytes#
    -

    Dynamic shared-memory size per thread block in bytes

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -kernelParams#
    -

    Array of pointers to kernel parameters

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -extra#
    -

    Extra options

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    -

    GPU kernel node parameters

    -
    -
    -func#
    -

    Kernel to launch

    -
    -
    Type:
    -

    CUfunction

    -
    -
    -
    - -
    -
    -gridDimX#
    -

    Width of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimY#
    -

    Height of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimZ#
    -

    Depth of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimX#
    -

    X dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimY#
    -

    Y dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimZ#
    -

    Z dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -sharedMemBytes#
    -

    Dynamic shared-memory size per thread block in bytes

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -kernelParams#
    -

    Array of pointers to kernel parameters

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -extra#
    -

    Extra options

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -kern#
    -

    Kernel to launch, will only be referenced if func is NULL

    -
    -
    Type:
    -

    CUkernel

    -
    -
    -
    - -
    -
    -ctx#
    -

    Context for the kernel task to run in. The value NULL will indicate -the current context should be used by the api. This field is -ignored if func is set.

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3_st(void_ptr _ptr=0)#
    -

    GPU kernel node parameters

    -
    -
    -func#
    -

    Kernel to launch

    -
    -
    Type:
    -

    CUfunction

    -
    -
    -
    - -
    -
    -gridDimX#
    -

    Width of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimY#
    -

    Height of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimZ#
    -

    Depth of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimX#
    -

    X dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimY#
    -

    Y dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimZ#
    -

    Z dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -sharedMemBytes#
    -

    Dynamic shared-memory size per thread block in bytes

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -kernelParams#
    -

    Array of pointers to kernel parameters

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -extra#
    -

    Extra options

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -kern#
    -

    Kernel to launch, will only be referenced if func is NULL

    -
    -
    Type:
    -

    CUkernel

    -
    -
    -
    - -
    -
    -ctx#
    -

    Context for the kernel task to run in. The value NULL will indicate -the current context should be used by the api. This field is -ignored if func is set.

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_MEMSET_NODE_PARAMS_st(void_ptr _ptr=0)#
    -

    Memset node parameters

    -
    -
    -dst#
    -

    Destination device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -pitch#
    -

    Pitch of destination device pointer. Unused if height is 1

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -value#
    -

    Value to be set

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -elementSize#
    -

    Size of each element in bytes. Must be 1, 2, or 4.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -width#
    -

    Width of the row in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -height#
    -

    Number of rows

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    -

    Memset node parameters

    -
    -
    -dst#
    -

    Destination device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -pitch#
    -

    Pitch of destination device pointer. Unused if height is 1

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -value#
    -

    Value to be set

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -elementSize#
    -

    Size of each element in bytes. Must be 1, 2, or 4.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -width#
    -

    Width of the row in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -height#
    -

    Number of rows

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -ctx#
    -

    Context on which to run the node

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_HOST_NODE_PARAMS_st(void_ptr _ptr=0)#
    -

    Host node parameters

    -
    -
    -fn#
    -

    The function to call when the node executes

    -
    -
    Type:
    -

    CUhostFn

    -
    -
    -
    - -
    -
    -userData#
    -

    Argument to pass to the function

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_HOST_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    -

    Host node parameters

    -
    -
    -fn#
    -

    The function to call when the node executes

    -
    -
    Type:
    -

    CUhostFn

    -
    -
    -
    - -
    -
    -userData#
    -

    Argument to pass to the function

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_CONDITIONAL_NODE_PARAMS(void_ptr _ptr=0)#
    -

    Conditional node parameters

    -
    -
    -handle#
    -

    Conditional node handle. Handles must be created in advance of -creating the node using cuGraphConditionalHandleCreate.

    -
    -
    Type:
    -

    CUgraphConditionalHandle

    -
    -
    -
    - -
    -
    -type#
    -

    Type of conditional node.

    -
    -
    Type:
    -

    CUgraphConditionalNodeType

    -
    -
    -
    - -
    -
    -size#
    -

    Size of graph output array. Must be 1.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -phGraph_out#
    -

    CUDA-owned array populated with conditional node child graphs -during creation of the node. Valid for the lifetime of the -conditional node. The contents of the graph(s) are subject to the -following constraints: - Allowed node types are kernel nodes, -empty nodes, child graphs, memsets, memcopies, and conditionals. -This applies recursively to child graphs and conditional bodies. -- All kernels, including kernels in nested conditionals or child -graphs at any level, must belong to the same CUDA context. -These graphs may be populated using graph node creation APIs or -cuStreamBeginCaptureToGraph.

    -
    -
    Type:
    -

    CUgraph

    -
    -
    -
    - -
    -
    -ctx#
    -

    Context on which to run the node. Must match context used to create -the handle and all body nodes.

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUgraphEdgeData_st(void_ptr _ptr=0)#
    -

    Optional annotation for edges in a CUDA graph. Note, all edges -implicitly have annotations and default to a zero-initialized value -if not specified. A zero-initialized struct indicates a standard -full serialization of two nodes with memory visibility.

    -
    -
    -from_port#
    -

    This indicates when the dependency is triggered from the upstream -node on the edge. The meaning is specfic to the node type. A value -of 0 in all cases means full completion of the upstream node, with -memory visibility to the downstream node or portion thereof -(indicated by to_port). Only kernel nodes define non-zero -ports. A kernel node can use the following output port types: -CU_GRAPH_KERNEL_NODE_PORT_DEFAULT, -CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC, or -CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -to_port#
    -

    This indicates what portion of the downstream node is dependent on -the upstream node or portion thereof (indicated by from_port). -The meaning is specific to the node type. A value of 0 in all cases -means the entirety of the downstream node is dependent on the -upstream work. Currently no node types define non-zero ports. -Accordingly, this field must be set to zero.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -type#
    -

    This should be populated with a value from CUgraphDependencyType. -(It is typed as char due to compiler-specific layout of bitfields.) -See CUgraphDependencyType.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -reserved#
    -

    These bytes are unused and must be zeroed. This ensures -compatibility if additional fields are added in the future.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS_st(void_ptr _ptr=0)#
    -

    Graph instantiation parameters

    -
    -
    -flags#
    -

    Instantiation flags

    -
    -
    Type:
    -

    cuuint64_t

    -
    -
    -
    - -
    -
    -hUploadStream#
    -

    Upload stream

    -
    -
    Type:
    -

    CUstream

    -
    -
    -
    - -
    -
    -hErrNode_out#
    -

    The node which caused instantiation to fail, if any

    -
    -
    Type:
    -

    CUgraphNode

    -
    -
    -
    - -
    -
    -result_out#
    -

    Whether instantiation was successful. If it failed, the reason why

    -
    -
    Type:
    -

    CUgraphInstantiateResult

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUlaunchMemSyncDomainMap_st(void_ptr _ptr=0)#
    -

    Memory Synchronization Domain map See ::cudaLaunchMemSyncDomain. -By default, kernels are launched in domain 0. Kernel launched with -CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a different domain ID. -User may also alter the domain ID with CUlaunchMemSyncDomainMap for -a specific stream / graph node / kernel launch. See -CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. Domain ID range is -available through CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.

    -
    -
    -default_#
    -

    The default domain ID to use for designated kernels

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -remote#
    -

    The remote domain ID to use for designated kernels

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUlaunchAttributeValue_union(void_ptr _ptr=0)#
    -

    Launch attributes union; used as value field of CUlaunchAttribute

    -
    -
    -pad#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -accessPolicyWindow#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.

    -
    -
    Type:
    -

    CUaccessPolicyWindow

    -
    -
    -
    - -
    -
    -cooperative#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero -indicates a cooperative kernel (see cuLaunchCooperativeKernel).

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -syncPolicy#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. -::CUsynchronizationPolicy for work queued up in this stream

    -
    -
    Type:
    -

    CUsynchronizationPolicy

    -
    -
    -
    - -
    -
    -clusterDim#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION -that represents the desired cluster dimensions for the kernel. -Opaque type with the following fields: - x - The X dimension of -the cluster, in blocks. Must be a divisor of the grid X dimension. -- y - The Y dimension of the cluster, in blocks. Must be a -divisor of the grid Y dimension. - z - The Z dimension of the -cluster, in blocks. Must be a divisor of the grid Z dimension.

    -
    -
    Type:
    -

    anon_struct1

    -
    -
    -
    - -
    -
    -clusterSchedulingPolicyPreference#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster -scheduling policy preference for the kernel.

    -
    -
    Type:
    -

    CUclusterSchedulingPolicy

    -
    -
    -
    - -
    -
    -programmaticStreamSerializationAllowed#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -programmaticEvent#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT -with the following fields: - CUevent event - Event to fire when -all blocks trigger it. - Event record flags, see -cuEventRecordWithFlags. Does not accept :CU_EVENT_RECORD_EXTERNAL. -- triggerAtBlockStart - If this is set to non-0, each block -launch will automatically trigger the event.

    -
    -
    Type:
    -

    anon_struct2

    -
    -
    -
    - -
    -
    -launchCompletionEvent#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT with the following -fields: - CUevent event - Event to fire when the last block -launches - int flags; - Event record flags, see -cuEventRecordWithFlags. Does not accept CU_EVENT_RECORD_EXTERNAL.

    -
    -
    Type:
    -

    anon_struct3

    -
    -
    -
    - -
    -
    -priority#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution -priority of the kernel.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -memSyncDomainMap#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. -See CUlaunchMemSyncDomainMap.

    -
    -
    Type:
    -

    CUlaunchMemSyncDomainMap

    -
    -
    -
    - -
    -
    -memSyncDomain#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. -See::CUlaunchMemSyncDomain

    -
    -
    Type:
    -

    CUlaunchMemSyncDomain

    -
    -
    -
    - -
    -
    -deviceUpdatableKernelNode#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE. with the -following fields: - int deviceUpdatable - Whether or not the -resulting kernel node should be device-updatable. - -CUgraphDeviceNode devNode - Returns a handle to pass to the -various device-side update functions.

    -
    -
    Type:
    -

    anon_struct4

    -
    -
    -
    - -
    -
    -sharedMemCarveout#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUlaunchAttribute_st(void_ptr _ptr=0)#
    -

    Launch attribute

    -
    -
    -id#
    -

    Attribute to set

    -
    -
    Type:
    -

    CUlaunchAttributeID

    -
    -
    -
    - -
    -
    -value#
    -

    Value of the attribute

    -
    -
    Type:
    -

    CUlaunchAttributeValue

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUlaunchConfig_st(void_ptr _ptr=0)#
    -

    CUDA extensible launch configuration

    -
    -
    -gridDimX#
    -

    Width of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimY#
    -

    Height of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimZ#
    -

    Depth of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimX#
    -

    X dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimY#
    -

    Y dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimZ#
    -

    Z dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -sharedMemBytes#
    -

    Dynamic shared-memory size per thread block in bytes

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -hStream#
    -

    Stream identifier

    -
    -
    Type:
    -

    CUstream

    -
    -
    -
    - -
    -
    -attrs#
    -

    List of attributes; nullable if CUlaunchConfig::numAttrs == 0

    -
    -
    Type:
    -

    CUlaunchAttribute

    -
    -
    -
    - -
    -
    -numAttrs#
    -

    Number of attributes populated in CUlaunchConfig::attrs

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUexecAffinitySmCount_st(void_ptr _ptr=0)#
    -

    Value for CU_EXEC_AFFINITY_TYPE_SM_COUNT

    -
    -
    -val#
    -

    The number of SMs the context is limited to use.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUexecAffinityParam_st(void_ptr _ptr=0)#
    -

    Execution Affinity Parameters

    -
    -
    -type#
    -
    -
    Type:
    -

    CUexecAffinityType

    -
    -
    -
    - -
    -
    -param#
    -
    -
    Type:
    -

    anon_union3

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUctxCigParam_st(void_ptr _ptr=0)#
    -

    CIG Context Create Params

    -
    -
    -sharedDataType#
    -
    -
    Type:
    -

    CUcigDataType

    -
    -
    -
    - -
    -
    -sharedData#
    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUctxCreateParams_st(void_ptr _ptr=0)#
    -

    Params for creating CUDA context Exactly one of execAffinityParams -and cigParams must be non-NULL.

    -
    -
    -execAffinityParams#
    -
    -
    Type:
    -

    CUexecAffinityParam

    -
    -
    -
    - -
    -
    -numExecAffinityParams#
    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -cigParams#
    -
    -
    Type:
    -

    CUctxCigParam

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable_st(void_ptr _ptr=0)#
    -
    -
    -functionTable#
    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -functionWindowSize#
    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dataTable#
    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -dataWindowSize#
    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_MEMCPY2D_st(void_ptr _ptr=0)#
    -

    2D memory copy parameters

    -
    -
    -srcXInBytes#
    -

    Source X in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcY#
    -

    Source Y

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcMemoryType#
    -

    Source memory type (host, device, array)

    -
    -
    Type:
    -

    CUmemorytype

    -
    -
    -
    - -
    -
    -srcHost#
    -

    Source host pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -srcDevice#
    -

    Source device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -srcArray#
    -

    Source array reference

    -
    -
    Type:
    -

    CUarray

    -
    -
    -
    - -
    -
    -srcPitch#
    -

    Source pitch (ignored when src is array)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstXInBytes#
    -

    Destination X in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstY#
    -

    Destination Y

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstMemoryType#
    -

    Destination memory type (host, device, array)

    -
    -
    Type:
    -

    CUmemorytype

    -
    -
    -
    - -
    -
    -dstHost#
    -

    Destination host pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -dstDevice#
    -

    Destination device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -dstArray#
    -

    Destination array reference

    -
    -
    Type:
    -

    CUarray

    -
    -
    -
    - -
    -
    -dstPitch#
    -

    Destination pitch (ignored when dst is array)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -WidthInBytes#
    -

    Width of 2D memory copy in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Height#
    -

    Height of 2D memory copy

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_MEMCPY3D_st(void_ptr _ptr=0)#
    -

    3D memory copy parameters

    -
    -
    -srcXInBytes#
    -

    Source X in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcY#
    -

    Source Y

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcZ#
    -

    Source Z

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcLOD#
    -

    Source LOD

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcMemoryType#
    -

    Source memory type (host, device, array)

    -
    -
    Type:
    -

    CUmemorytype

    -
    -
    -
    - -
    -
    -srcHost#
    -

    Source host pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -srcDevice#
    -

    Source device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -srcArray#
    -

    Source array reference

    -
    -
    Type:
    -

    CUarray

    -
    -
    -
    - -
    -
    -reserved0#
    -

    Must be NULL

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -srcPitch#
    -

    Source pitch (ignored when src is array)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcHeight#
    -

    Source height (ignored when src is array; may be 0 if Depth==1)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstXInBytes#
    -

    Destination X in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstY#
    -

    Destination Y

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstZ#
    -

    Destination Z

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstLOD#
    -

    Destination LOD

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstMemoryType#
    -

    Destination memory type (host, device, array)

    -
    -
    Type:
    -

    CUmemorytype

    -
    -
    -
    - -
    -
    -dstHost#
    -

    Destination host pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -dstDevice#
    -

    Destination device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -dstArray#
    -

    Destination array reference

    -
    -
    Type:
    -

    CUarray

    -
    -
    -
    - -
    -
    -reserved1#
    -

    Must be NULL

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -dstPitch#
    -

    Destination pitch (ignored when dst is array)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstHeight#
    -

    Destination height (ignored when dst is array; may be 0 if -Depth==1)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -WidthInBytes#
    -

    Width of 3D memory copy in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Height#
    -

    Height of 3D memory copy

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Depth#
    -

    Depth of 3D memory copy

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_MEMCPY3D_PEER_st(void_ptr _ptr=0)#
    -

    3D memory cross-context copy parameters

    -
    -
    -srcXInBytes#
    -

    Source X in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcY#
    -

    Source Y

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcZ#
    -

    Source Z

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcLOD#
    -

    Source LOD

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcMemoryType#
    -

    Source memory type (host, device, array)

    -
    -
    Type:
    -

    CUmemorytype

    -
    -
    -
    - -
    -
    -srcHost#
    -

    Source host pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -srcDevice#
    -

    Source device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -srcArray#
    -

    Source array reference

    -
    -
    Type:
    -

    CUarray

    -
    -
    -
    - -
    -
    -srcContext#
    -

    Source context (ignored with srcMemoryType is CU_MEMORYTYPE_ARRAY)

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -srcPitch#
    -

    Source pitch (ignored when src is array)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -srcHeight#
    -

    Source height (ignored when src is array; may be 0 if Depth==1)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstXInBytes#
    -

    Destination X in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstY#
    -

    Destination Y

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstZ#
    -

    Destination Z

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstLOD#
    -

    Destination LOD

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstMemoryType#
    -

    Destination memory type (host, device, array)

    -
    -
    Type:
    -

    CUmemorytype

    -
    -
    -
    - -
    -
    -dstHost#
    -

    Destination host pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -dstDevice#
    -

    Destination device pointer

    -
    -
    Type:
    -

    CUdeviceptr

    -
    -
    -
    - -
    -
    -dstArray#
    -

    Destination array reference

    -
    -
    Type:
    -

    CUarray

    -
    -
    -
    - -
    -
    -dstContext#
    -

    Destination context (ignored with dstMemoryType is -CU_MEMORYTYPE_ARRAY)

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -dstPitch#
    -

    Destination pitch (ignored when dst is array)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -dstHeight#
    -

    Destination height (ignored when dst is array; may be 0 if -Depth==1)

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -WidthInBytes#
    -

    Width of 3D memory copy in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Height#
    -

    Height of 3D memory copy

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Depth#
    -

    Depth of 3D memory copy

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_MEMCPY_NODE_PARAMS_st(void_ptr _ptr=0)#
    -

    Memcpy node parameters

    -
    -
    -flags#
    -

    Must be zero

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -reserved#
    -

    Must be zero

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -copyCtx#
    -

    Context on which to run the node

    -
    -
    Type:
    -

    CUcontext

    -
    -
    -
    - -
    -
    -copyParams#
    -

    Parameters for the memory copy

    -
    -
    Type:
    -

    CUDA_MEMCPY3D

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_ARRAY_DESCRIPTOR_st(void_ptr _ptr=0)#
    -

    Array descriptor

    -
    -
    -Width#
    -

    Width of array

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Height#
    -

    Height of array

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Format#
    -

    Array format

    -
    -
    Type:
    -

    CUarray_format

    -
    -
    -
    - -
    -
    -NumChannels#
    -

    Channels per array element

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_st(void_ptr _ptr=0)#
    -

    3D array descriptor

    -
    -
    -Width#
    -

    Width of 3D array

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Height#
    -

    Height of 3D array

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Depth#
    -

    Depth of 3D array

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -Format#
    -

    Array format

    -
    -
    Type:
    -

    CUarray_format

    -
    -
    -
    - -
    -
    -NumChannels#
    -

    Channels per array element

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -Flags#
    -

    Flags

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_st(void_ptr _ptr=0)#
    -

    CUDA array sparse properties

    -
    -
    -tileExtent#
    -
    -
    Type:
    -

    anon_struct5

    -
    -
    -
    - -
    -
    -miptailFirstLevel#
    -

    First mip level at which the mip tail begins.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -miptailSize#
    -

    Total size of the mip tail.

    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -flags#
    -

    Flags will either be zero or -CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_st(void_ptr _ptr=0)#
    -

    CUDA array memory requirements

    -
    -
    -size#
    -

    Total required memory size

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -alignment#
    -

    alignment requirement

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_RESOURCE_DESC_st(void_ptr _ptr=0)#
    -

    CUDA Resource descriptor

    -
    -
    -resType#
    -

    Resource type

    -
    -
    Type:
    -

    CUresourcetype

    -
    -
    -
    - -
    -
    -res#
    -
    -
    Type:
    -

    anon_union4

    -
    -
    -
    - -
    -
    -flags#
    -

    Flags (must be zero)

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_TEXTURE_DESC_st(void_ptr _ptr=0)#
    -

    Texture descriptor

    -
    -
    -addressMode#
    -

    Address modes

    -
    -
    Type:
    -

    List[CUaddress_mode]

    -
    -
    -
    - -
    -
    -filterMode#
    -

    Filter mode

    -
    -
    Type:
    -

    CUfilter_mode

    -
    -
    -
    - -
    -
    -flags#
    -

    Flags

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -maxAnisotropy#
    -

    Maximum anisotropy ratio

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -mipmapFilterMode#
    -

    Mipmap filter mode

    -
    -
    Type:
    -

    CUfilter_mode

    -
    -
    -
    - -
    -
    -mipmapLevelBias#
    -

    Mipmap level bias

    -
    -
    Type:
    -

    float

    -
    -
    -
    - -
    -
    -minMipmapLevelClamp#
    -

    Mipmap minimum level clamp

    -
    -
    Type:
    -

    float

    -
    -
    -
    - -
    -
    -maxMipmapLevelClamp#
    -

    Mipmap maximum level clamp

    -
    -
    Type:
    -

    float

    -
    -
    -
    - -
    -
    -borderColor#
    -

    Border Color

    -
    -
    Type:
    -

    List[float]

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_RESOURCE_VIEW_DESC_st(void_ptr _ptr=0)#
    -

    Resource view descriptor

    -
    -
    -format#
    -

    Resource view format

    -
    -
    Type:
    -

    CUresourceViewFormat

    -
    -
    -
    - -
    -
    -width#
    -

    Width of the resource view

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -height#
    -

    Height of the resource view

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -depth#
    -

    Depth of the resource view

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -firstMipmapLevel#
    -

    First defined mipmap level

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -lastMipmapLevel#
    -

    Last defined mipmap level

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -firstLayer#
    -

    First layer index

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -lastLayer#
    -

    Last layer index

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUtensorMap_st(void_ptr _ptr=0)#
    -

    Tensor map descriptor. Requires compiler support for aligning to 64 -bytes.

    -
    -
    -opaque#
    -
    -
    Type:
    -

    List[cuuint64_t]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st(void_ptr _ptr=0)#
    -

    GPU Direct v3 tokens

    -
    -
    -p2pToken#
    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -vaSpaceToken#
    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_LAUNCH_PARAMS_st(void_ptr _ptr=0)#
    -

    Kernel launch parameters

    -
    -
    -function#
    -

    Kernel to launch

    -
    -
    Type:
    -

    CUfunction

    -
    -
    -
    - -
    -
    -gridDimX#
    -

    Width of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimY#
    -

    Height of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -gridDimZ#
    -

    Depth of grid in blocks

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimX#
    -

    X dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimY#
    -

    Y dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -blockDimZ#
    -

    Z dimension of each thread block

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -sharedMemBytes#
    -

    Dynamic shared-memory size per thread block in bytes

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -hStream#
    -

    Stream identifier

    -
    -
    Type:
    -

    CUstream

    -
    -
    -
    - -
    -
    -kernelParams#
    -

    Array of pointers to kernel parameters

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st(void_ptr _ptr=0)#
    -

    External memory handle descriptor

    -
    -
    -type#
    -

    Type of the handle

    -
    -
    Type:
    -

    CUexternalMemoryHandleType

    -
    -
    -
    - -
    -
    -handle#
    -
    -
    Type:
    -

    anon_union5

    -
    -
    -
    - -
    -
    -size#
    -

    Size of the memory allocation

    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -flags#
    -

    Flags must either be zero or CUDA_EXTERNAL_MEMORY_DEDICATED

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st(void_ptr _ptr=0)#
    -

    External memory buffer descriptor

    -
    -
    -offset#
    -

    Offset into the memory object where the buffer’s base is

    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -size#
    -

    Size of the buffer

    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -flags#
    -

    Flags reserved for future use. Must be zero.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st(void_ptr _ptr=0)#
    -

    External memory mipmap descriptor

    -
    -
    -offset#
    -

    Offset into the memory object where the base level of the mipmap -chain is.

    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -arrayDesc#
    -

    Format, dimension and type of base level of the mipmap chain

    -
    -
    Type:
    -

    CUDA_ARRAY3D_DESCRIPTOR

    -
    -
    -
    - -
    -
    -numLevels#
    -

    Total number of levels in the mipmap chain

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st(void_ptr _ptr=0)

    External semaphore handle descriptor

    type : CUexternalSemaphoreHandleType
        Type of the handle
    handle : anon_union6
    flags : unsigned int
        Flags reserved for the future. Must be zero.
    reserved : List[unsigned int]
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st(void_ptr _ptr=0)

    External semaphore signal parameters

    params : anon_struct15
    flags : unsigned int
        Only when CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to signal a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which indicates that while signaling the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
    reserved : List[unsigned int]
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st(void_ptr _ptr=0)

    External semaphore wait parameters

    params : anon_struct18
    flags : unsigned int
        Only when CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which indicates that while waiting for the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
    reserved : List[unsigned int]
    getPtr()
        Get memory address of class instance

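A hedged sketch of how the two parameter structures above are used (not part of the original reference). It assumes `ext_sem` is a CUexternalSemaphore from cuImportExternalSemaphore and `stream` an existing CUstream; the handle import itself is omitted.

    # Signal and then wait on an imported external semaphore.
    from cuda import cuda

    def signal_then_wait(ext_sem, stream):
        sig = cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS()
        sig.flags = 0   # must be zero for non-NvSciSync semaphore types
        err, = cuda.cuSignalExternalSemaphoresAsync([ext_sem], [sig], 1, stream)
        assert err == cuda.CUresult.CUDA_SUCCESS

        wait = cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS()
        wait.flags = 0
        err, = cuda.cuWaitExternalSemaphoresAsync([ext_sem], [wait], 1, stream)
        assert err == cuda.CUresult.CUDA_SUCCESS
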
class cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st(void_ptr _ptr=0)

    Semaphore signal node parameters

    extSemArray : CUexternalSemaphore
        Array of external semaphore handles.
    paramsArray : CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS
        Array of external semaphore signal parameters.
    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st(void_ptr _ptr=0)

    Semaphore signal node parameters

    extSemArray : CUexternalSemaphore
        Array of external semaphore handles.
    paramsArray : CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS
        Array of external semaphore signal parameters.
    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st(void_ptr _ptr=0)

    Semaphore wait node parameters

    extSemArray : CUexternalSemaphore
        Array of external semaphore handles.
    paramsArray : CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS
        Array of external semaphore wait parameters.
    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st(void_ptr _ptr=0)

    Semaphore wait node parameters

    extSemArray : CUexternalSemaphore
        Array of external semaphore handles.
    paramsArray : CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS
        Array of external semaphore wait parameters.
    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUarrayMapInfo_st(void_ptr _ptr=0)

    Specifies the CUDA array or CUDA mipmapped array memory mapping information

    resourceType : CUresourcetype
        Resource type
    resource : anon_union9
    subresourceType : CUarraySparseSubresourceType
        Sparse subresource type
    subresource : anon_union10
    memOperationType : CUmemOperationType
        Memory operation type
    memHandleType : CUmemHandleType
        Memory handle type
    memHandle : anon_union11
    offset : unsigned long long
        Offset within the mip tail, or offset within the memory object, depending on the subresource type
    deviceBitMask : unsigned int
        Device ordinal bit mask
    flags : unsigned int
        Flags for future use; must be zero now.
    reserved : List[unsigned int]
        Reserved for future use; must be zero now.
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUmemLocation_st(void_ptr _ptr=0)

    Specifies a memory location.

    type : CUmemLocationType
        Specifies the location type, which modifies the meaning of id.
    id : int
        Identifier for the location; its meaning is determined by this location's CUmemLocationType.
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUmemAllocationProp_st(void_ptr _ptr=0)

    Specifies the allocation properties for an allocation.

    type : CUmemAllocationType
        Allocation type
    requestedHandleTypes : CUmemAllocationHandleType
        Requested CUmemAllocationHandleType
    location : CUmemLocation
        Location of allocation
    win32HandleMetaData : Any
        Windows-specific POBJECT_ATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure includes security attributes that define the scope over which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
    allocFlags : anon_struct21
    getPtr()
        Get memory address of class instance

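A short example (not part of the original reference): fill out a CUmemAllocationProp for a pinned device allocation on device 0 and query the minimum allocation granularity required by the virtual memory management APIs.

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)

    prop = cuda.CUmemAllocationProp()
    prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0   # device ordinal, per CUmemLocation_st above

    err, granularity = cuda.cuMemGetAllocationGranularity(
        prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
    print("minimum granularity:", granularity)
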
class cuda.cuda.CUmulticastObjectProp_st(void_ptr _ptr=0)

    Specifies the properties for a multicast object.

    numDevices : unsigned int
        The number of devices in the multicast team that will bind memory to this object
    size : size_t
        The maximum amount of memory that can be bound to this multicast object per device
    handleTypes : unsigned long long
        Bitmask of exportable handle types (see CUmemAllocationHandleType) for this object
    flags : unsigned long long
        Flags for future use, must be zero now
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUmemAccessDesc_st(void_ptr _ptr=0)

    Memory access descriptor

    location : CUmemLocation
        Location on which the request is to change its accessibility
    flags : CUmemAccess_flags
        CUmemProt accessibility flags to set on the request
    getPtr()
        Get memory address of class instance

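A minimal sketch (not part of the original reference) of the virtual memory management flow that consumes CUmemAllocationProp, CUmemLocation and CUmemAccessDesc: reserve an address range, create and map physical memory, then grant read/write access to device 0. It assumes cuInit and context creation have already run and that `size` is a multiple of the allocation granularity.

    from cuda import cuda

    size = 2 << 20   # assumed to be a multiple of the allocation granularity

    prop = cuda.CUmemAllocationProp()
    prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0

    err, handle = cuda.cuMemCreate(size, prop, 0)        # physical backing
    err, dptr = cuda.cuMemAddressReserve(size, 0, 0, 0)  # virtual range
    err, = cuda.cuMemMap(dptr, size, 0, handle, 0)       # map backing into range

    access = cuda.CUmemAccessDesc()
    access.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    access.location.id = 0
    access.flags = cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
    err, = cuda.cuMemSetAccess(dptr, size, [access], 1)  # enable device access

    # ... use dptr ...
    err, = cuda.cuMemUnmap(dptr, size)
    err, = cuda.cuMemRelease(handle)
    err, = cuda.cuMemAddressFree(dptr, size)
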
class cuda.cuda.CUgraphExecUpdateResultInfo_st(void_ptr _ptr=0)

    Result information returned by cuGraphExecUpdate

    result : CUgraphExecUpdateResult
        Gives more specific detail when a CUDA graph update fails.
    errorNode : CUgraphNode
        The "to node" of the error edge when the topologies do not match. The error node when the error is associated with a specific node. NULL when the error is generic.
    errorFromNode : CUgraphNode
        The "from node" of the error edge when the topologies do not match. Otherwise NULL.
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUmemPoolProps_st(void_ptr _ptr=0)

    Specifies the properties of allocations made from the pool.

    allocType : CUmemAllocationType
        Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED
    handleTypes : CUmemAllocationHandleType
        Handle types that will be supported by allocations from the pool.
    location : CUmemLocation
        Location where allocations should reside.
    win32SecurityAttributes : Any
        Windows-specific LPSECURITYATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines the scope over which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
    maxSize : size_t
        Maximum pool size. When set to 0, defaults to a system-dependent value.
    usage : unsigned short
        Bitmask indicating intended usage for the pool.
    reserved : bytes
        Reserved for future use, must be 0
    getPtr()
        Get memory address of class instance

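A hedged sketch (not part of the original reference): create an explicit memory pool on device 0 from CUmemPoolProps, allocate from it on a stream, then clean up. Assumes a current context on device 0.

    from cuda import cuda

    props = cuda.CUmemPoolProps()
    props.allocType = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    props.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    props.location.id = 0
    props.maxSize = 0   # 0 selects a system-dependent default limit

    err, pool = cuda.cuMemPoolCreate(props)
    err, stream = cuda.cuStreamCreate(0)
    err, dptr = cuda.cuMemAllocFromPoolAsync(1 << 20, pool, stream)

    err, = cuda.cuMemFreeAsync(dptr, stream)
    err, = cuda.cuStreamSynchronize(stream)
    err, = cuda.cuMemPoolDestroy(pool)
    err, = cuda.cuStreamDestroy(stream)
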
class cuda.cuda.CUmemPoolPtrExportData_st(void_ptr _ptr=0)

    Opaque data for exporting a pool allocation

    reserved : bytes
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st(void_ptr _ptr=0)

    Memory allocation node parameters

    poolProps : CUmemPoolProps
        in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
    accessDescs : CUmemAccessDesc
        in: array of memory access descriptors. Used to describe peer GPU access
    accessDescCount : size_t
        in: number of memory access descriptors. Must not exceed the number of GPUs.
    bytesize : size_t
        in: size in bytes of the requested allocation
    dptr : CUdeviceptr
        out: address of the allocation returned by CUDA
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st(void_ptr _ptr=0)

    Memory allocation node parameters

    poolProps : CUmemPoolProps
        in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
    accessDescs : CUmemAccessDesc
        in: array of memory access descriptors. Used to describe peer GPU access
    accessDescCount : size_t
        in: number of memory access descriptors. Must not exceed the number of GPUs.
    bytesize : size_t
        in: size in bytes of the requested allocation
    dptr : CUdeviceptr
        out: address of the allocation returned by CUDA
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS_st(void_ptr _ptr=0)

    Memory free node parameters

    dptr : CUdeviceptr
        in: the pointer to free
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS_st(void_ptr _ptr=0)

    Child graph node parameters

    graph : CUgraph
        The child graph to clone into the node for node creation, or a handle to the graph owned by the node for node query
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS_st(void_ptr _ptr=0)

    Event record node parameters

    event : CUevent
        The event to record when the node executes
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS_st(void_ptr _ptr=0)

    Event wait node parameters

    event : CUevent
        The event to wait on from the node
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUgraphNodeParams_st(void_ptr _ptr=0)

    Graph node parameters. See cuGraphAddNode.

    type : CUgraphNodeType
        Type of the node
    reserved0 : List[int]
        Reserved. Must be zero.
    reserved1 : List[long long]
        Padding. Unused bytes must be zero.
    kernel : CUDA_KERNEL_NODE_PARAMS_v3
        Kernel node parameters.
    memcpy : CUDA_MEMCPY_NODE_PARAMS
        Memcpy node parameters.
    memset : CUDA_MEMSET_NODE_PARAMS_v2
        Memset node parameters.
    host : CUDA_HOST_NODE_PARAMS_v2
        Host node parameters.
    graph : CUDA_CHILD_GRAPH_NODE_PARAMS
        Child graph node parameters.
    eventWait : CUDA_EVENT_WAIT_NODE_PARAMS
        Event wait node parameters.
    eventRecord : CUDA_EVENT_RECORD_NODE_PARAMS
        Event record node parameters.
    extSemSignal : CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2
        External semaphore signal node parameters.
    extSemWait : CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2
        External semaphore wait node parameters.
    alloc : CUDA_MEM_ALLOC_NODE_PARAMS_v2
        Memory allocation node parameters.
    free : CUDA_MEM_FREE_NODE_PARAMS
        Memory free node parameters.
    memOp : CUDA_BATCH_MEM_OP_NODE_PARAMS_v2
        MemOp node parameters.
    conditional : CUDA_CONDITIONAL_NODE_PARAMS
        Conditional node parameters.
    reserved2 : long long
        Reserved bytes. Must be zero.
    getPtr()
        Get memory address of class instance

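A hedged sketch of cuGraphAddNode with CUgraphNodeParams (not part of the original reference): add a single event-record node to a new graph. Assumes a context is already current; passing None with a zero dependency count is assumed to denote "no dependencies".

    from cuda import cuda

    err, graph = cuda.cuGraphCreate(0)
    err, event = cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT.value)

    params = cuda.CUgraphNodeParams()
    params.type = cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD
    params.eventRecord.event = event    # CUDA_EVENT_RECORD_NODE_PARAMS

    err, node = cuda.cuGraphAddNode(graph, None, 0, params)
    assert err == cuda.CUresult.CUDA_SUCCESS
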
class cuda.cuda.CUeglFrame_st(void_ptr _ptr=0)

    CUDA EGLFrame structure descriptor: structure defining one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar or not.

    frame : anon_union14
    width : unsigned int
        Width of first plane
    height : unsigned int
        Height of first plane
    depth : unsigned int
        Depth of first plane
    pitch : unsigned int
        Pitch of first plane
    planeCount : unsigned int
        Number of planes
    numChannels : unsigned int
        Number of channels for the plane
    frameType : CUeglFrameType
        Array or Pitch
    eglColorFormat : CUeglColorFormat
        CUDA EGL Color Format
    cuFormat : CUarray_format
        CUDA Array Format
    getPtr()
        Get memory address of class instance

class cuda.cuda.CUipcMem_flags(value)

    CUDA IPC Mem Flags

    CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = 1
        Automatically enable peer access between remote devices as needed

class cuda.cuda.CUmemAttach_flags(value)

    CUDA Mem Attach Flags

    CU_MEM_ATTACH_GLOBAL = 1
        Memory can be accessed by any stream on any device
    CU_MEM_ATTACH_HOST = 2
        Memory cannot be accessed by any stream on any device
    CU_MEM_ATTACH_SINGLE = 4
        Memory can only be accessed by a single stream on the associated device

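A short example (not part of the original reference): allocate managed (unified) memory with global attach, visible to any stream on any device. Assumes an active context.

    from cuda import cuda

    err, dptr = cuda.cuMemAllocManaged(
        1 << 20, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value)
    assert err == cuda.CUresult.CUDA_SUCCESS
    err, = cuda.cuMemFree(dptr)
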
class cuda.cuda.CUctx_flags(value)

    Context creation flags

    CU_CTX_SCHED_AUTO = 0
        Automatic scheduling
    CU_CTX_SCHED_SPIN = 1
        Set spin as default scheduling
    CU_CTX_SCHED_YIELD = 2
        Set yield as default scheduling
    CU_CTX_SCHED_BLOCKING_SYNC = 4
        Set blocking synchronization as default scheduling
    CU_CTX_BLOCKING_SYNC = 4
        Set blocking synchronization as default scheduling [Deprecated]
    CU_CTX_SCHED_MASK = 7
    CU_CTX_MAP_HOST = 8
        [Deprecated]
    CU_CTX_LMEM_RESIZE_TO_MAX = 16
        Keep local memory allocation after launch
    CU_CTX_COREDUMP_ENABLE = 32
        Trigger coredumps from exceptions in this context
    CU_CTX_USER_COREDUMP_ENABLE = 64
        Enable user pipe to trigger coredumps in this context
    CU_CTX_SYNC_MEMOPS = 128
        Ensure synchronous memory operations on this context will synchronize
    CU_CTX_FLAGS_MASK = 255

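Example (not part of the original reference): create a context whose host threads block, rather than spin, while waiting for GPU work, by passing CU_CTX_SCHED_BLOCKING_SYNC.

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(
        cuda.CUctx_flags.CU_CTX_SCHED_BLOCKING_SYNC.value, dev)
    assert err == cuda.CUresult.CUDA_SUCCESS
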
class cuda.cuda.CUevent_sched_flags(value)

    Event sched flags

    CU_EVENT_SCHED_AUTO = 0
        Automatic scheduling
    CU_EVENT_SCHED_SPIN = 1
        Set spin as default scheduling
    CU_EVENT_SCHED_YIELD = 2
        Set yield as default scheduling
    CU_EVENT_SCHED_BLOCKING_SYNC = 4
        Set blocking synchronization as default scheduling

class cuda.cuda.cl_event_flags(value)

    NVCL event scheduling flags

    NVCL_EVENT_SCHED_AUTO = 0
        Automatic scheduling
    NVCL_EVENT_SCHED_SPIN = 1
        Set spin as default scheduling
    NVCL_EVENT_SCHED_YIELD = 2
        Set yield as default scheduling
    NVCL_EVENT_SCHED_BLOCKING_SYNC = 4
        Set blocking synchronization as default scheduling

class cuda.cuda.cl_context_flags(value)

    NVCL context scheduling flags

    NVCL_CTX_SCHED_AUTO = 0
        Automatic scheduling
    NVCL_CTX_SCHED_SPIN = 1
        Set spin as default scheduling
    NVCL_CTX_SCHED_YIELD = 2
        Set yield as default scheduling
    NVCL_CTX_SCHED_BLOCKING_SYNC = 4
        Set blocking synchronization as default scheduling

class cuda.cuda.CUstream_flags(value)

    Stream creation flags

    CU_STREAM_DEFAULT = 0
        Default stream flag
    CU_STREAM_NON_BLOCKING = 1
        Stream does not synchronize with stream 0 (the NULL stream)

class cuda.cuda.CUevent_flags(value)

    Event creation flags

    CU_EVENT_DEFAULT = 0
        Default event flag
    CU_EVENT_BLOCKING_SYNC = 1
        Event uses blocking synchronization
    CU_EVENT_DISABLE_TIMING = 2
        Event will not record timing data
    CU_EVENT_INTERPROCESS = 4
        Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set

class cuda.cuda.CUevent_record_flags(value)

    Event record flags

    CU_EVENT_RECORD_DEFAULT = 0
        Default event record flag
    CU_EVENT_RECORD_EXTERNAL = 1
        When using stream capture, create an event record node instead of the default behavior. This flag is invalid when used outside of capture.

class cuda.cuda.CUevent_wait_flags(value)

    Event wait flags

    CU_EVENT_WAIT_DEFAULT = 0
        Default event wait flag
    CU_EVENT_WAIT_EXTERNAL = 1
        When using stream capture, create an event wait node instead of the default behavior. This flag is invalid when used outside of capture.

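Example combining the stream and event flags above (not part of the original reference): time an asynchronous memset on a non-blocking stream with a pair of timing-enabled (default) events. Assumes an active context.

    from cuda import cuda

    err, stream = cuda.cuStreamCreate(
        cuda.CUstream_flags.CU_STREAM_NON_BLOCKING.value)
    err, start = cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT.value)
    err, stop = cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT.value)

    err, dptr = cuda.cuMemAlloc(1 << 20)
    err, = cuda.cuEventRecord(start, stream)
    err, = cuda.cuMemsetD8Async(dptr, 0, 1 << 20, stream)
    err, = cuda.cuEventRecord(stop, stream)
    err, = cuda.cuEventSynchronize(stop)
    err, ms = cuda.cuEventElapsedTime(start, stop)
    print(f"memset took {ms:.3f} ms")
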
class cuda.cuda.CUstreamWaitValue_flags(value)

    Flags for cuStreamWaitValue32 and cuStreamWaitValue64

    CU_STREAM_WAIT_VALUE_GEQ = 0
        Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit values). Note this is a cyclic comparison which ignores wraparound. (Default behavior.)
    CU_STREAM_WAIT_VALUE_EQ = 1
        Wait until *addr == value.
    CU_STREAM_WAIT_VALUE_AND = 2
        Wait until (*addr & value) != 0.
    CU_STREAM_WAIT_VALUE_NOR = 3
        Wait until ~(*addr | value) != 0. Support for this operation can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.
    CU_STREAM_WAIT_VALUE_FLUSH = 1073741824
        Follow the wait operation with a flush of outstanding remote writes. This means that, if a remote write operation is guaranteed to have reached the device before the wait can be satisfied, that write is guaranteed to be visible to downstream device work. The device is permitted to reorder remote writes internally. For example, this flag would be required if two remote writes arrive in a defined order, the wait is satisfied by the second write, and downstream work needs to observe the first write. Support for this operation is restricted to selected platforms and can be queried with CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.

class cuda.cuda.CUstreamWriteValue_flags(value)

    Flags for cuStreamWriteValue32

    CU_STREAM_WRITE_VALUE_DEFAULT = 0
        Default behavior
    CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = 1
        Permits the write to be reordered with writes which were issued before it, as a performance optimization. Normally, cuStreamWriteValue32 will provide a memory fence before the write, which has similar semantics to __threadfence_system() but is scoped to the stream rather than a CUDA thread. This flag is not supported in the v2 API.

class cuda.cuda.CUstreamBatchMemOpType(value)

    Operations for cuStreamBatchMemOp

    CU_STREAM_MEM_OP_WAIT_VALUE_32 = 1
        Represents a cuStreamWaitValue32 operation
    CU_STREAM_MEM_OP_WRITE_VALUE_32 = 2
        Represents a cuStreamWriteValue32 operation
    CU_STREAM_MEM_OP_WAIT_VALUE_64 = 4
        Represents a cuStreamWaitValue64 operation
    CU_STREAM_MEM_OP_WRITE_VALUE_64 = 5
        Represents a cuStreamWriteValue64 operation
    CU_STREAM_MEM_OP_BARRIER = 6
        Insert a memory barrier of the specified type
    CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = 3
        This has the same effect as CU_STREAM_WAIT_VALUE_FLUSH, but as a standalone operation.

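A hedged sketch of the stream memory operations documented above (not part of the original reference): write a 32-bit value from one stream and make another stream wait until it appears. Device support should be checked first via the CU_DEVICE_ATTRIBUTE_CAN_USE_* attributes; an active context is assumed.

    from cuda import cuda

    err, flag = cuda.cuMemAlloc(4)         # device word used as the flag
    err, = cuda.cuMemsetD32(flag, 0, 1)

    err, producer = cuda.cuStreamCreate(0)
    err, consumer = cuda.cuStreamCreate(0)

    # Consumer blocks until *flag == 1 ...
    err, = cuda.cuStreamWaitValue32(
        consumer, flag, 1,
        cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_EQ.value)
    # ... and the producer releases it.
    err, = cuda.cuStreamWriteValue32(
        producer, flag, 1,
        cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT.value)

    err, = cuda.cuStreamSynchronize(consumer)
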
class cuda.cuda.CUstreamMemoryBarrier_flags(value)

    Flags for cuStreamMemoryBarrier

    CU_STREAM_MEMORY_BARRIER_TYPE_SYS = 0
        System-wide memory barrier.
    CU_STREAM_MEMORY_BARRIER_TYPE_GPU = 1
        Limit memory barrier scope to the GPU.

class cuda.cuda.CUoccupancy_flags(value)

    Occupancy calculator flag

    CU_OCCUPANCY_DEFAULT = 0
        Default behavior
    CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = 1
        Assume global caching is enabled and cannot be automatically turned off

class cuda.cuda.CUstreamUpdateCaptureDependencies_flags(value)

    Flags for cuStreamUpdateCaptureDependencies

    CU_STREAM_ADD_CAPTURE_DEPENDENCIES = 0
        Add new nodes to the dependency set
    CU_STREAM_SET_CAPTURE_DEPENDENCIES = 1
        Replace the dependency set with the new nodes

class cuda.cuda.CUasyncNotificationType(value)

    Types of async notification that can be sent

    CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET = 1

class cuda.cuda.CUarray_format(value)

    Array formats

    CU_AD_FORMAT_UNSIGNED_INT8 = 1
        Unsigned 8-bit integers
    CU_AD_FORMAT_UNSIGNED_INT16 = 2
        Unsigned 16-bit integers
    CU_AD_FORMAT_UNSIGNED_INT32 = 3
        Unsigned 32-bit integers
    CU_AD_FORMAT_SIGNED_INT8 = 8
        Signed 8-bit integers
    CU_AD_FORMAT_SIGNED_INT16 = 9
        Signed 16-bit integers
    CU_AD_FORMAT_SIGNED_INT32 = 10
        Signed 32-bit integers
    CU_AD_FORMAT_HALF = 16
        16-bit floating point
    CU_AD_FORMAT_FLOAT = 32
        32-bit floating point
    CU_AD_FORMAT_NV12 = 176
        8-bit YUV planar format, with 4:2:0 sampling
    CU_AD_FORMAT_UNORM_INT8X1 = 192
        1 channel unsigned 8-bit normalized integer
    CU_AD_FORMAT_UNORM_INT8X2 = 193
        2 channel unsigned 8-bit normalized integer
    CU_AD_FORMAT_UNORM_INT8X4 = 194
        4 channel unsigned 8-bit normalized integer
    CU_AD_FORMAT_UNORM_INT16X1 = 195
        1 channel unsigned 16-bit normalized integer
    CU_AD_FORMAT_UNORM_INT16X2 = 196
        2 channel unsigned 16-bit normalized integer
    CU_AD_FORMAT_UNORM_INT16X4 = 197
        4 channel unsigned 16-bit normalized integer
    CU_AD_FORMAT_SNORM_INT8X1 = 198
        1 channel signed 8-bit normalized integer
    CU_AD_FORMAT_SNORM_INT8X2 = 199
        2 channel signed 8-bit normalized integer
    CU_AD_FORMAT_SNORM_INT8X4 = 200
        4 channel signed 8-bit normalized integer
    CU_AD_FORMAT_SNORM_INT16X1 = 201
        1 channel signed 16-bit normalized integer
    CU_AD_FORMAT_SNORM_INT16X2 = 202
        2 channel signed 16-bit normalized integer
    CU_AD_FORMAT_SNORM_INT16X4 = 203
        4 channel signed 16-bit normalized integer
    CU_AD_FORMAT_BC1_UNORM = 145
        4 channel unsigned normalized block-compressed (BC1 compression) format
    CU_AD_FORMAT_BC1_UNORM_SRGB = 146
        4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding
    CU_AD_FORMAT_BC2_UNORM = 147
        4 channel unsigned normalized block-compressed (BC2 compression) format
    CU_AD_FORMAT_BC2_UNORM_SRGB = 148
        4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding
    CU_AD_FORMAT_BC3_UNORM = 149
        4 channel unsigned normalized block-compressed (BC3 compression) format
    CU_AD_FORMAT_BC3_UNORM_SRGB = 150
        4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding
    CU_AD_FORMAT_BC4_UNORM = 151
        1 channel unsigned normalized block-compressed (BC4 compression) format
    CU_AD_FORMAT_BC4_SNORM = 152
        1 channel signed normalized block-compressed (BC4 compression) format
    CU_AD_FORMAT_BC5_UNORM = 153
        2 channel unsigned normalized block-compressed (BC5 compression) format
    CU_AD_FORMAT_BC5_SNORM = 154
        2 channel signed normalized block-compressed (BC5 compression) format
    CU_AD_FORMAT_BC6H_UF16 = 155
        3 channel unsigned half-float block-compressed (BC6H compression) format
    CU_AD_FORMAT_BC6H_SF16 = 156
        3 channel signed half-float block-compressed (BC6H compression) format
    CU_AD_FORMAT_BC7_UNORM = 157
        4 channel unsigned normalized block-compressed (BC7 compression) format
    CU_AD_FORMAT_BC7_UNORM_SRGB = 158
        4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding
    CU_AD_FORMAT_P010 = 159
        10-bit YUV planar format, with 4:2:0 sampling
    CU_AD_FORMAT_P016 = 161
        16-bit YUV planar format, with 4:2:0 sampling
    CU_AD_FORMAT_NV16 = 162
        8-bit YUV planar format, with 4:2:2 sampling
    CU_AD_FORMAT_P210 = 163
        10-bit YUV planar format, with 4:2:2 sampling
    CU_AD_FORMAT_P216 = 164
        16-bit YUV planar format, with 4:2:2 sampling
    CU_AD_FORMAT_YUY2 = 165
        2 channel, 8-bit YUV packed planar format, with 4:2:2 sampling
    CU_AD_FORMAT_Y210 = 166
        2 channel, 10-bit YUV packed planar format, with 4:2:2 sampling
    CU_AD_FORMAT_Y216 = 167
        2 channel, 16-bit YUV packed planar format, with 4:2:2 sampling
    CU_AD_FORMAT_AYUV = 168
        4 channel, 8-bit YUV packed planar format, with 4:4:4 sampling
    CU_AD_FORMAT_Y410 = 169
        10-bit YUV packed planar format, with 4:4:4 sampling
    CU_AD_FORMAT_Y416 = 177
        4 channel, 12-bit YUV packed planar format, with 4:4:4 sampling
    CU_AD_FORMAT_Y444_PLANAR8 = 178
        3 channel 8-bit YUV planar format, with 4:4:4 sampling
    CU_AD_FORMAT_Y444_PLANAR10 = 179
        3 channel 10-bit YUV planar format, with 4:4:4 sampling
    CU_AD_FORMAT_MAX = 2147483647

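Example using CUarray_format (not part of the original reference): create a 512x512 single-channel float CUDA array via the driver API's CUDA_ARRAY_DESCRIPTOR. Assumes an active context.

    from cuda import cuda

    desc = cuda.CUDA_ARRAY_DESCRIPTOR()
    desc.Width = 512
    desc.Height = 512
    desc.Format = cuda.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1

    err, array = cuda.cuArrayCreate(desc)
    assert err == cuda.CUresult.CUDA_SUCCESS
    err, = cuda.cuArrayDestroy(array)
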
class cuda.cuda.CUaddress_mode(value)

    Texture reference addressing modes

    CU_TR_ADDRESS_MODE_WRAP = 0
        Wrapping address mode
    CU_TR_ADDRESS_MODE_CLAMP = 1
        Clamp to edge address mode
    CU_TR_ADDRESS_MODE_MIRROR = 2
        Mirror address mode
    CU_TR_ADDRESS_MODE_BORDER = 3
        Border address mode

class cuda.cuda.CUfilter_mode(value)

    Texture reference filtering modes

    CU_TR_FILTER_MODE_POINT = 0
        Point filter mode
    CU_TR_FILTER_MODE_LINEAR = 1
        Linear filter mode

class cuda.cuda.CUdevice_attribute(value)

    Device properties

    CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 1
        Maximum number of threads per block
    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = 2
        Maximum block dimension X
    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = 3
        Maximum block dimension Y
    CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = 4
        Maximum block dimension Z
    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = 5
        Maximum grid dimension X
    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = 6
        Maximum grid dimension Y
    CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = 7
        Maximum grid dimension Z
    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = 8
        Maximum shared memory available per block in bytes
    CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = 8
        Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
    CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = 9
        Memory available on device for constant variables in a CUDA C kernel in bytes
    CU_DEVICE_ATTRIBUTE_WARP_SIZE = 10
        Warp size in threads
    CU_DEVICE_ATTRIBUTE_MAX_PITCH = 11
        Maximum pitch in bytes allowed by memory copies
    CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = 12
        Maximum number of 32-bit registers available per block
    CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = 12
        Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
    CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
        Typical clock frequency in kilohertz
    CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = 14
        Alignment requirement for textures
    CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = 15
        Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT.
    CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
        Number of multiprocessors on device
    CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = 17
        Specifies whether there is a run time limit on kernels
    CU_DEVICE_ATTRIBUTE_INTEGRATED = 18
        Device is integrated with host memory
    CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = 19
        Device can map host memory into CUDA address space
    CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = 20
        Compute mode (See CUcomputemode for details)
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = 21
        Maximum 1D texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = 22
        Maximum 2D texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = 23
        Maximum 2D texture height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = 24
        Maximum 3D texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = 25
        Maximum 3D texture height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = 26
        Maximum 3D texture depth
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = 27
        Maximum 2D layered texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = 28
        Maximum 2D layered texture height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = 29
        Maximum layers in a 2D layered texture
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = 27
        Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = 28
        Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = 29
        Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
    CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = 30
        Alignment requirement for surfaces
    CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = 31
        Device can possibly execute multiple kernels concurrently
    CU_DEVICE_ATTRIBUTE_ECC_ENABLED = 32
        Device has ECC support enabled
    CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
        PCI bus ID of the device
    CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
        PCI device ID of the device
    CU_DEVICE_ATTRIBUTE_TCC_DRIVER = 35
        Device is using TCC driver model
    CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
        Peak memory clock frequency in kilohertz
    CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = 37
        Global memory bus width in bits
    CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = 38
        Size of L2 cache in bytes
    CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
        Maximum resident threads per multiprocessor
    CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = 40
        Number of asynchronous engines
    CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = 41
        Device shares a unified address space with the host
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = 42
        Maximum 1D layered texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = 43
        Maximum layers in a 1D layered texture
    CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = 44
        Deprecated, do not use.
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = 45
        Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = 46
        Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = 47
        Alternate maximum 3D texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = 48
        Alternate maximum 3D texture height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = 49
        Alternate maximum 3D texture depth
    CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = 50
        PCI domain ID of the device
    CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = 51
        Pitch alignment requirement for textures
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = 52
        Maximum cubemap texture width/height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = 53
        Maximum cubemap layered texture width/height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = 54
        Maximum layers in a cubemap layered texture
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = 55
        Maximum 1D surface width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = 56
        Maximum 2D surface width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = 57
        Maximum 2D surface height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = 58
        Maximum 3D surface width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = 59
        Maximum 3D surface height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = 60
        Maximum 3D surface depth
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = 61
        Maximum 1D layered surface width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = 62
        Maximum layers in a 1D layered surface
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = 63
        Maximum 2D layered surface width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = 64
        Maximum 2D layered surface height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = 65
        Maximum layers in a 2D layered surface
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = 66
        Maximum cubemap surface width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = 67
        Maximum cubemap layered surface width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = 68
        Maximum layers in a cubemap layered surface
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = 69
        Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = 70
        Maximum 2D linear texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = 71
        Maximum 2D linear texture height
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = 72
        Maximum 2D linear texture pitch in bytes
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = 73
        Maximum mipmapped 2D texture width
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = 74
        Maximum mipmapped 2D texture height
    CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75
        Major compute capability version number
    CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76
        Minor compute capability version number
    CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = 77
        Maximum mipmapped 1D texture width
    CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = 78
        Device supports stream priorities
    CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = 79
        Device supports caching globals in L1
    CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = 80
        Device supports caching locals in L1
    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = 81
        Maximum shared memory available per multiprocessor in bytes
    CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = 82
        Maximum number of 32-bit registers available per multiprocessor
    CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = 83
        Device can allocate managed memory on this system
    CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = 84
        Device is on a multi-GPU board
    CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = 85
        Unique id for a group of devices on the same multi-GPU board
    CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = 86
        Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)
    CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = 87
        Ratio of single precision performance (in floating-point operations per second) to double precision performance
    CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = 88
        Device supports coherently accessing pageable memory without calling cudaHostRegister on it
    CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = 89
        Device can coherently access managed memory concurrently with the CPU
    CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = 90
        Device supports compute preemption.
    CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = 91
        Device can access host registered memory at the same virtual address as the CPU
    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 = 92
        Deprecated, along with v1 MemOps API, cuStreamBatchMemOp and related APIs are supported.
    CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 = 93
        Deprecated, along with v1 MemOps API, 64-bit operations are supported in cuStreamBatchMemOp and related APIs.
    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 = 94
        Deprecated, along with v1 MemOps API, CU_STREAM_WAIT_VALUE_NOR is supported.
    CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = 95
        Device supports launching cooperative kernels via cuLaunchCooperativeKernel
    CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = 96
        Deprecated, cuLaunchCooperativeKernelMultiDevice is deprecated.
    CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = 97
        Maximum opt-in shared memory per block
    CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = 98
        The CU_STREAM_WAIT_VALUE_FLUSH flag and the CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See Stream Memory Operations for additional details.
    CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = 99
        Device supports host memory registration via cudaHostRegister.
    CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = 100
        Device accesses pageable memory via the host's page tables.
    CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = 101
        The host can directly access managed memory on the device without migration.
    CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = 102
        Deprecated, use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
    CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = 102
        Device supports virtual memory management APIs like cuMemAddressReserve, cuMemCreate, cuMemMap and related APIs
    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = 103
        Device supports exporting memory to a POSIX file descriptor with cuMemExportToShareableHandle, if requested via cuMemCreate
    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = 104
        Device supports exporting memory to a Win32 NT handle with cuMemExportToShareableHandle, if requested via cuMemCreate
    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = 105
        Device supports exporting memory to a Win32 KMT handle with cuMemExportToShareableHandle, if requested via cuMemCreate
    CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = 106
        Maximum number of blocks per multiprocessor
    CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = 107
        Device supports compression of memory
    CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = 108
        Maximum L2 persisting lines capacity setting in bytes.
    CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = 109
        Maximum value of num_bytes.
    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = 110
        Device supports specifying the GPUDirect RDMA flag with cuMemCreate
    CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = 111
        Shared memory reserved by CUDA driver per block in bytes
    CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = 112
        Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays
    CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = 113
        Device supports using the cuMemHostRegister flag CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU
    CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = 114
        External timeline semaphore interop is supported on the device
    CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = 115
        Device supports using the cuMemAllocAsync and cuMemPool family of APIs
    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = 116
        Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)
    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = 117
        The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the CUflushGPUDirectRDMAWritesOptions enum
    CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = 118
        GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See CUGPUDirectRDMAWritesOrdering for the numerical values returned here.
    CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = 119
        Handle types supported with mempool based IPC
    CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH = 120
        Indicates device supports cluster launch
    CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED = 121
        Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays
    CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = 122
        64-bit operations are supported in cuStreamBatchMemOp and related MemOp APIs.
    CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = 123
        CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs.
    CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED = 124
        Device supports buffer sharing with dma_buf mechanism.
    CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED = 125
        Device supports IPC Events.
    CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT = 126
        Number of memory domains the device supports.
    CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED = 127
        Device supports accessing memory using Tensor Map.
    CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED = 128
        Device supports exporting memory to a fabric handle with cuMemExportToShareableHandle() or requested with cuMemCreate()
    CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS = 129
        Device supports unified function pointers.
    CU_DEVICE_ATTRIBUTE_NUMA_CONFIG = 130
        NUMA configuration of a device: value is of type CUdeviceNumaConfig enum
    CU_DEVICE_ATTRIBUTE_NUMA_ID = 131
        NUMA node ID of the GPU memory
    CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED = 132
        Device supports switch multicast and reduction operations.
    CU_DEVICE_ATTRIBUTE_MPS_ENABLED = 133
        Indicates if contexts created on this device will be shared via MPS
    CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID = 134
        NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA.
    CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED = 135
        Device supports CIG with D3D12.
    CU_DEVICE_ATTRIBUTE_MAX = 136

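Example (not part of the original reference): query a few of the device attributes listed above via cuDeviceGetAttribute.

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)

    for attr in (
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR,
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR,
    ):
        err, value = cuda.cuDeviceGetAttribute(attr, dev)
        print(attr.name, "=", value)
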
    -class cuda.cuda.CUpointer_attribute(value)#

    Pointer information

    -CU_POINTER_ATTRIBUTE_CONTEXT = 1#

    The CUcontext on which a pointer was allocated or registered

    -CU_POINTER_ATTRIBUTE_MEMORY_TYPE = 2#

    The CUmemorytype describing the physical location of a pointer

    -CU_POINTER_ATTRIBUTE_DEVICE_POINTER = 3#

    The address at which a pointer’s memory may be accessed on the device

    -CU_POINTER_ATTRIBUTE_HOST_POINTER = 4#

    The address at which a pointer’s memory may be accessed on the host

    -CU_POINTER_ATTRIBUTE_P2P_TOKENS = 5#

    A pair of tokens for use with the nv-p2p.h Linux kernel interface

    -CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = 6#

    Synchronize every synchronous memory operation initiated on this region

    -CU_POINTER_ATTRIBUTE_BUFFER_ID = 7#

    A process-wide unique ID for an allocated memory region

    -CU_POINTER_ATTRIBUTE_IS_MANAGED = 8#

    Indicates if the pointer points to managed memory

    -CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = 9#

    A device ordinal of a device on which a pointer was allocated or registered

    -CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = 10#

    1 if this pointer maps to an allocation that is suitable for cudaIpcGetMemHandle, 0 otherwise

    -CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = 11#

    Starting address for this requested pointer

    -CU_POINTER_ATTRIBUTE_RANGE_SIZE = 12#

    Size of the address range for this requested pointer

    -CU_POINTER_ATTRIBUTE_MAPPED = 13#

    1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise

    -CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = 14#

    Bitmask of allowed CUmemAllocationHandleType for this allocation

    -CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = 15#

    1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API

    -CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = 16#

    Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given

    -CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = 17#

    Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL.

    -CU_POINTER_ATTRIBUTE_MAPPING_SIZE = 18#

    Size of the actual underlying mapping that the pointer belongs to

    -CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR = 19#

    The start address of the mapping that the pointer belongs to

    -CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID = 20#

    A process-wide unique id corresponding to the physical allocation the pointer belongs to
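    A short sketch of reading one of these attributes back from an allocation; the context creation flags and the 1 MiB size are arbitrary for illustration:

        from cuda import cuda

        err, = cuda.cuInit(0)
        err, dev = cuda.cuDeviceGet(0)
        err, ctx = cuda.cuCtxCreate(0, dev)
        err, dptr = cuda.cuMemAlloc(1 << 20)

        # Expect CU_MEMORYTYPE_DEVICE for a cuMemAlloc'd pointer.
        err, memtype = cuda.cuPointerGetAttribute(
            cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, dptr)

        err, = cuda.cuMemFree(dptr)
        err, = cuda.cuCtxDestroy(ctx)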
    -class cuda.cuda.CUfunction_attribute(value)#

    Function properties

    -CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 0#

    The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded.

    -CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = 1#

    The size in bytes of statically-allocated shared memory required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime.

    -CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = 2#

    The size in bytes of user-allocated constant memory required by this function.

    -CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = 3#

    The size in bytes of local memory used by each thread of this function.

    -CU_FUNC_ATTRIBUTE_NUM_REGS = 4#

    The number of registers used by each thread of this function.

    -CU_FUNC_ATTRIBUTE_PTX_VERSION = 5#

    The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0.

    -CU_FUNC_ATTRIBUTE_BINARY_VERSION = 6#

    The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version.

    -CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = 7#

    The attribute to indicate whether the function has been compiled with user specified option "-Xptxas --dlcm=ca" set.

    -CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = 8#

    The maximum size in bytes of dynamically-allocated shared memory that can be used by this function. If the user-specified dynamic shared memory size is larger than this value, the launch will fail. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 9#

    On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. Refer to CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR. This is only a hint, and the driver can choose a different ratio if required to execute the function. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET = 10#

    If this attribute is set, the kernel must launch with a valid cluster size specified. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH = 11#

    The required cluster width in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.

    If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT = 12#

    The required cluster height in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.

    If the value is set during compile time, it cannot be set at runtime. Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH = 13#

    The required cluster depth in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.

    If the value is set during compile time, it cannot be set at runtime. Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED = 14#

    Whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform.

    CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device.

    Portable Cluster Size

    A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities.

    The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 15#

    The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. See cuFuncSetAttribute, cuKernelSetAttribute

    -CU_FUNC_ATTRIBUTE_MAX = 16#
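    These are read with cuFuncGetAttribute and, for the writable ones, set with cuFuncSetAttribute. A sketch, assuming `kernel` is a CUfunction obtained earlier via cuModuleGetFunction; the 96 KiB figure is an arbitrary example value:

        from cuda import cuda

        # Read how many registers each thread of the kernel uses.
        err, num_regs = cuda.cuFuncGetAttribute(
            cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS, kernel)

        # Opt in to a larger dynamic shared memory budget (device permitting).
        err, = cuda.cuFuncSetAttribute(
            kernel,
            cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
            96 * 1024)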
    -class cuda.cuda.CUfunc_cache(value)#

    Function cache configurations

    -CU_FUNC_CACHE_PREFER_NONE = 0#

    no preference for shared memory or L1 (default)

    -CU_FUNC_CACHE_PREFER_SHARED = 1#

    prefer larger shared memory and smaller L1 cache

    -CU_FUNC_CACHE_PREFER_L1 = 2#

    prefer larger L1 cache and smaller shared memory

    -CU_FUNC_CACHE_PREFER_EQUAL = 3#

    prefer equal sized L1 cache and shared memory
    -class cuda.cuda.CUsharedconfig(value)#

    [Deprecated] Shared memory configurations

    -CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = 0#

    set default shared memory bank size

    -CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = 1#

    set shared memory bank width to four bytes

    -CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = 2#

    set shared memory bank width to eight bytes
    -class cuda.cuda.CUshared_carveout(value)#

    Shared memory carveout configurations. These may be passed to cuFuncSetAttribute or cuKernelSetAttribute

    -CU_SHAREDMEM_CARVEOUT_DEFAULT = -1#

    No preference for shared memory or L1 (default)

    -CU_SHAREDMEM_CARVEOUT_MAX_SHARED = 100#

    Prefer maximum available shared memory, minimum L1 cache

    -CU_SHAREDMEM_CARVEOUT_MAX_L1 = 0#

    Prefer maximum available L1 cache, minimum shared memory
    -class cuda.cuda.CUmemorytype(value)#

    Memory types

    -CU_MEMORYTYPE_HOST = 1#

    Host memory

    -CU_MEMORYTYPE_DEVICE = 2#

    Device memory

    -CU_MEMORYTYPE_ARRAY = 3#

    Array memory

    -CU_MEMORYTYPE_UNIFIED = 4#

    Unified device or host memory
    -class cuda.cuda.CUcomputemode(value)#

    Compute Modes

    -CU_COMPUTEMODE_DEFAULT = 0#

    Default compute mode (Multiple contexts allowed per device)

    -CU_COMPUTEMODE_PROHIBITED = 2#

    Compute-prohibited mode (No contexts can be created on this device at this time)

    -CU_COMPUTEMODE_EXCLUSIVE_PROCESS = 3#

    Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time)
    -class cuda.cuda.CUmem_advise(value)#

    Memory advise values

    -CU_MEM_ADVISE_SET_READ_MOSTLY = 1#

    Data will mostly be read and only occasionally be written to

    -CU_MEM_ADVISE_UNSET_READ_MOSTLY = 2#

    Undo the effect of CU_MEM_ADVISE_SET_READ_MOSTLY

    -CU_MEM_ADVISE_SET_PREFERRED_LOCATION = 3#

    Set the preferred location for the data as the specified device

    -CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = 4#

    Clear the preferred location for the data

    -CU_MEM_ADVISE_SET_ACCESSED_BY = 5#

    Data will be accessed by the specified device, so prevent page faults as much as possible

    -CU_MEM_ADVISE_UNSET_ACCESSED_BY = 6#

    Let the Unified Memory subsystem decide on the page faulting policy for the specified device
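    A sketch of applying one of these hints to a managed allocation; assumes `dev` is a CUdevice from cuDeviceGet and that a context is current:

        from cuda import cuda

        nbytes = 1 << 20
        err, dptr = cuda.cuMemAllocManaged(
            nbytes, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL)

        # Read-mostly data: the driver may keep read-only copies resident
        # on several processors at once.
        err, = cuda.cuMemAdvise(
            dptr, nbytes, cuda.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, dev)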
    -class cuda.cuda.CUmem_range_attribute(value)#

    -CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = 1#

    Whether the range will mostly be read and only occasionally be written to

    -CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = 2#

    The preferred location of the range

    -CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = 3#

    Memory range has CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device

    -CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = 4#

    The last location to which the range was prefetched

    -CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE = 5#

    The preferred location type of the range

    -CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID = 6#

    The preferred location id of the range

    -CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE = 7#

    The last location type to which the range was prefetched

    -CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID = 8#

    The last location id to which the range was prefetched
    -class cuda.cuda.CUjit_option(value)#

    Online compiler and linker options

    -CU_JIT_MAX_REGISTERS = 0#

    Max number of registers that a thread may use.

    Option type: unsigned int

    Applies to: compiler only

    -CU_JIT_THREADS_PER_BLOCK = 1#

    IN: Specifies minimum number of threads per block to target compilation for

    OUT: Returns the number of threads the compiler actually targeted. This restricts the resource utilization of the compiler (e.g. max registers) such that a block with the given number of threads should be able to launch based on register limitations. Note, this option does not currently take into account any other resource limitations, such as shared memory utilization.

    Cannot be combined with CU_JIT_TARGET.

    Option type: unsigned int

    Applies to: compiler only

    -CU_JIT_WALL_TIME = 2#

    Overwrites the option value with the total wall clock time, in milliseconds, spent in the compiler and linker

    Option type: float

    Applies to: compiler and linker

    -CU_JIT_INFO_LOG_BUFFER = 3#

    Pointer to a buffer in which to print any log messages that are informational in nature (the buffer size is specified via option CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)

    Option type: char *

    Applies to: compiler and linker

    -CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = 4#

    IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator)

    OUT: Amount of log buffer filled with messages

    Option type: unsigned int

    Applies to: compiler and linker

    -CU_JIT_ERROR_LOG_BUFFER = 5#

    Pointer to a buffer in which to print any log messages that reflect errors (the buffer size is specified via option CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)

    Option type: char *

    Applies to: compiler and linker

    -CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = 6#

    IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator)

    OUT: Amount of log buffer filled with messages

    Option type: unsigned int

    Applies to: compiler and linker

    -CU_JIT_OPTIMIZATION_LEVEL = 7#

    Level of optimizations to apply to generated code (0 - 4), with 4 being the default and highest level of optimizations.

    Option type: unsigned int

    Applies to: compiler only

    -CU_JIT_TARGET_FROM_CUCONTEXT = 8#

    No option value required. Determines the target based on the current attached context (default)

    Option type: No option value needed

    Applies to: compiler and linker

    -CU_JIT_TARGET = 9#

    Target is chosen based on supplied CUjit_target. Cannot be combined with CU_JIT_THREADS_PER_BLOCK.

    Option type: unsigned int for enumerated type CUjit_target

    Applies to: compiler and linker

    -CU_JIT_FALLBACK_STRATEGY = 10#

    Specifies choice of fallback strategy if matching cubin is not found. Choice is based on supplied CUjit_fallback. This option cannot be used with cuLink* APIs as the linker requires exact matches.

    Option type: unsigned int for enumerated type CUjit_fallback

    Applies to: compiler only

    -CU_JIT_GENERATE_DEBUG_INFO = 11#

    Specifies whether to create debug information in output (-g) (0: false, default)

    Option type: int

    Applies to: compiler and linker

    -CU_JIT_LOG_VERBOSE = 12#

    Generate verbose log messages (0: false, default)

    Option type: int

    Applies to: compiler and linker

    -CU_JIT_GENERATE_LINE_INFO = 13#

    Generate line number information (-lineinfo) (0: false, default)

    Option type: int

    Applies to: compiler only

    -CU_JIT_CACHE_MODE = 14#

    Specifies whether to enable caching explicitly (-dlcm)

    Choice is based on supplied CUjit_cacheMode_enum.

    Option type: unsigned int for enumerated type CUjit_cacheMode_enum

    Applies to: compiler only

    -CU_JIT_NEW_SM3X_OPT = 15#

    [Deprecated]

    -CU_JIT_FAST_COMPILE = 16#

    This jit option is used for internal purposes only.

    -CU_JIT_GLOBAL_SYMBOL_NAMES = 17#

    Array of device symbol names that will be relocated to the corresponding host addresses stored in CU_JIT_GLOBAL_SYMBOL_ADDRESSES.

    Must contain CU_JIT_GLOBAL_SYMBOL_COUNT entries.

    When loading a device module, the driver will relocate all encountered unresolved symbols to the host addresses.

    It is only allowed to register symbols that correspond to unresolved global variables.

    It is illegal to register the same device symbol at multiple addresses.

    Option type: const char **

    Applies to: dynamic linker only

    -CU_JIT_GLOBAL_SYMBOL_ADDRESSES = 18#

    Array of host addresses that will be used to relocate corresponding device symbols stored in CU_JIT_GLOBAL_SYMBOL_NAMES.

    Must contain CU_JIT_GLOBAL_SYMBOL_COUNT entries.

    Option type: void **

    Applies to: dynamic linker only

    -CU_JIT_GLOBAL_SYMBOL_COUNT = 19#

    Number of entries in CU_JIT_GLOBAL_SYMBOL_NAMES and CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.

    Option type: unsigned int

    Applies to: dynamic linker only

    -CU_JIT_LTO = 20#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_FTZ = 21#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_PREC_DIV = 22#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_PREC_SQRT = 23#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_FMA = 24#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_REFERENCED_KERNEL_NAMES = 25#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_REFERENCED_KERNEL_COUNT = 26#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_REFERENCED_VARIABLE_NAMES = 27#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_REFERENCED_VARIABLE_COUNT = 28#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES = 29#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_POSITION_INDEPENDENT_CODE = 30#

    Generate position independent code (0: false)

    Option type: int

    Applies to: compiler only

    -CU_JIT_MIN_CTA_PER_SM = 31#

    This option hints to the JIT compiler the minimum number of CTAs from the kernel’s grid to be mapped to an SM. This option is ignored when used together with CU_JIT_MAX_REGISTERS or CU_JIT_THREADS_PER_BLOCK. Optimizations based on this option need CU_JIT_MAX_THREADS_PER_BLOCK to be specified as well. For kernels already using the PTX directive .minnctapersm, this option will be ignored by default. Use CU_JIT_OVERRIDE_DIRECTIVE_VALUES to let this option take precedence over the PTX directive.

    Option type: unsigned int

    Applies to: compiler only

    -CU_JIT_MAX_THREADS_PER_BLOCK = 32#

    Maximum number of threads in a thread block, computed as the product of the maximum extent specified for each dimension of the block. This limit is guaranteed not to be exceeded in any invocation of the kernel. Exceeding the maximum number of threads results in a runtime error or kernel launch failure. For kernels already using the PTX directive .maxntid, this option will be ignored by default. Use CU_JIT_OVERRIDE_DIRECTIVE_VALUES to let this option take precedence over the PTX directive.

    Option type: int

    Applies to: compiler only

    -CU_JIT_OVERRIDE_DIRECTIVE_VALUES = 33#

    This option lets the values specified using CU_JIT_MAX_REGISTERS, CU_JIT_THREADS_PER_BLOCK, CU_JIT_MAX_THREADS_PER_BLOCK and CU_JIT_MIN_CTA_PER_SM take precedence over any PTX directives. (0: Disable, default; 1: Enable)

    Option type: int

    Applies to: compiler only

    -CU_JIT_NUM_OPTIONS = 34#
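    A sketch of passing a pair of these options to the JIT when loading PTX. It assumes `ptx` holds NVRTC output as bytes and `my_kernel` is a hypothetical entry point; how option values are marshalled may differ slightly across binding versions:

        from cuda import cuda

        opts = [cuda.CUjit_option.CU_JIT_OPTIMIZATION_LEVEL,
                cuda.CUjit_option.CU_JIT_GENERATE_LINE_INFO]
        vals = [3, 1]  # optimization level 3, emit line info

        err, module = cuda.cuModuleLoadDataEx(ptx, len(opts), opts, vals)
        err, kernel = cuda.cuModuleGetFunction(module, b"my_kernel")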
    -class cuda.cuda.CUjit_target(value)#

    Online compilation targets

    -CU_TARGET_COMPUTE_30 = 30#

    Compute device class 3.0

    -CU_TARGET_COMPUTE_32 = 32#

    Compute device class 3.2

    -CU_TARGET_COMPUTE_35 = 35#

    Compute device class 3.5

    -CU_TARGET_COMPUTE_37 = 37#

    Compute device class 3.7

    -CU_TARGET_COMPUTE_50 = 50#

    Compute device class 5.0

    -CU_TARGET_COMPUTE_52 = 52#

    Compute device class 5.2

    -CU_TARGET_COMPUTE_53 = 53#

    Compute device class 5.3

    -CU_TARGET_COMPUTE_60 = 60#

    Compute device class 6.0.

    -CU_TARGET_COMPUTE_61 = 61#

    Compute device class 6.1.

    -CU_TARGET_COMPUTE_62 = 62#

    Compute device class 6.2.

    -CU_TARGET_COMPUTE_70 = 70#

    Compute device class 7.0.

    -CU_TARGET_COMPUTE_72 = 72#

    Compute device class 7.2.

    -CU_TARGET_COMPUTE_75 = 75#

    Compute device class 7.5.

    -CU_TARGET_COMPUTE_80 = 80#

    Compute device class 8.0.

    -CU_TARGET_COMPUTE_86 = 86#

    Compute device class 8.6.

    -CU_TARGET_COMPUTE_87 = 87#

    Compute device class 8.7.

    -CU_TARGET_COMPUTE_89 = 89#

    Compute device class 8.9.

    -CU_TARGET_COMPUTE_90 = 90#

    Compute device class 9.0.

    -CU_TARGET_COMPUTE_90A = 65626#

    Compute device class 9.0. with accelerated features.
    -class cuda.cuda.CUjit_fallback(value)#

    Cubin matching fallback strategies

    -CU_PREFER_PTX = 0#

    Prefer to compile ptx if exact binary match not found

    -CU_PREFER_BINARY = 1#

    Prefer to fall back to compatible binary code if exact match not found
    -class cuda.cuda.CUjit_cacheMode(value)#

    Caching modes for dlcm

    -CU_JIT_CACHE_OPTION_NONE = 0#

    Compile with no -dlcm flag specified

    -CU_JIT_CACHE_OPTION_CG = 1#

    Compile with L1 cache disabled

    -CU_JIT_CACHE_OPTION_CA = 2#

    Compile with L1 cache enabled
    -class cuda.cuda.CUjitInputType(value)#

    Device code formats

    -CU_JIT_INPUT_CUBIN = 0#

    Compiled device-class-specific device code

    Applicable options: none

    -CU_JIT_INPUT_PTX = 1#

    PTX source code

    Applicable options: PTX compiler options

    -CU_JIT_INPUT_FATBINARY = 2#

    Bundle of multiple cubins and/or PTX of some device code

    Applicable options: PTX compiler options, CU_JIT_FALLBACK_STRATEGY

    -CU_JIT_INPUT_OBJECT = 3#

    Host object with embedded device code

    Applicable options: PTX compiler options, CU_JIT_FALLBACK_STRATEGY

    -CU_JIT_INPUT_LIBRARY = 4#

    Archive of host objects with embedded device code

    Applicable options: PTX compiler options, CU_JIT_FALLBACK_STRATEGY

    -CU_JIT_INPUT_NVVM = 5#

    [Deprecated]

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    -CU_JIT_NUM_INPUT_TYPES = 6#
    -class cuda.cuda.CUgraphicsRegisterFlags(value)#

    Flags to register a graphics resource

    -CU_GRAPHICS_REGISTER_FLAGS_NONE = 0#

    -CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = 1#

    -CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = 2#

    -CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = 4#

    -CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = 8#

    -class cuda.cuda.CUgraphicsMapResourceFlags(value)#

    Flags for mapping and unmapping interop resources

    -CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0#

    -CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 1#

    -CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 2#
    -class cuda.cuda.CUarray_cubemap_face(value)#

    Array indices for cube faces

    -CU_CUBEMAP_FACE_POSITIVE_X = 0#

    Positive X face of cubemap

    -CU_CUBEMAP_FACE_NEGATIVE_X = 1#

    Negative X face of cubemap

    -CU_CUBEMAP_FACE_POSITIVE_Y = 2#

    Positive Y face of cubemap

    -CU_CUBEMAP_FACE_NEGATIVE_Y = 3#

    Negative Y face of cubemap

    -CU_CUBEMAP_FACE_POSITIVE_Z = 4#

    Positive Z face of cubemap

    -CU_CUBEMAP_FACE_NEGATIVE_Z = 5#

    Negative Z face of cubemap
    -class cuda.cuda.CUlimit(value)#

    Limits

    -CU_LIMIT_STACK_SIZE = 0#

    GPU thread stack size

    -CU_LIMIT_PRINTF_FIFO_SIZE = 1#

    GPU printf FIFO size

    -CU_LIMIT_MALLOC_HEAP_SIZE = 2#

    GPU malloc heap size

    -CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = 3#

    GPU device runtime launch synchronize depth

    -CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = 4#

    GPU device runtime pending launch count

    -CU_LIMIT_MAX_L2_FETCH_GRANULARITY = 5#

    A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint

    -CU_LIMIT_PERSISTING_L2_CACHE_SIZE = 6#

    A size in bytes for L2 persisting lines cache size

    -CU_LIMIT_SHMEM_SIZE = 7#

    A maximum size in bytes of shared memory available to CUDA kernels on a CIG context. Can only be queried, cannot be set

    -CU_LIMIT_CIG_ENABLED = 8#

    A non-zero value indicates this CUDA context is a CIG-enabled context. Can only be queried, cannot be set

    -CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED = 9#

    When set to a non-zero value, CUDA will fail to launch a kernel on a CIG context, instead of using the fallback path, if the kernel uses more shared memory than available

    -CU_LIMIT_MAX = 10#
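    Limits are per-context and adjusted through cuCtxSetLimit / cuCtxGetLimit. A minimal sketch; the 8 MiB figure is an arbitrary example and a context must be current:

        from cuda import cuda

        err, = cuda.cuCtxSetLimit(
            cuda.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE, 8 * 1024 * 1024)

        # The driver may round the value, so read it back rather than assume it.
        err, size = cuda.cuCtxGetLimit(cuda.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE)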
    -class cuda.cuda.CUresourcetype(value)#

    Resource types

    -CU_RESOURCE_TYPE_ARRAY = 0#

    Array resource

    -CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 1#

    Mipmapped array resource

    -CU_RESOURCE_TYPE_LINEAR = 2#

    Linear resource

    -CU_RESOURCE_TYPE_PITCH2D = 3#

    Pitch 2D resource
    -class cuda.cuda.CUaccessProperty(value)#

    Specifies performance hint with CUaccessPolicyWindow for hitProp and missProp members.

    -CU_ACCESS_PROPERTY_NORMAL = 0#

    Normal cache persistence.

    -CU_ACCESS_PROPERTY_STREAMING = 1#

    Streaming access is less likely to persist in cache.

    -CU_ACCESS_PROPERTY_PERSISTING = 2#

    Persisting access is more likely to persist in cache.
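    These properties are consumed through a CUaccessPolicyWindow, for example attached to a stream. A sketch only, assuming `stream` and a device allocation `dptr` of `nbytes` already exist; the nested-field assignment style follows the bindings' struct wrappers and the 0.6 hit ratio is arbitrary:

        from cuda import cuda

        attr = cuda.CUstreamAttrValue()
        attr.accessPolicyWindow.base_ptr = int(dptr)
        attr.accessPolicyWindow.num_bytes = nbytes
        attr.accessPolicyWindow.hitRatio = 0.6  # fraction of accesses given hitProp
        attr.accessPolicyWindow.hitProp = (
            cuda.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING)
        attr.accessPolicyWindow.missProp = (
            cuda.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING)

        err, = cuda.cuStreamSetAttribute(
            stream,
            cuda.CUstreamAttrID.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW,
            attr)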
    -class cuda.cuda.CUgraphConditionalNodeType(value)#

    Conditional node types

    -CU_GRAPH_COND_TYPE_IF = 0#

    Conditional ‘if’ Node. Body executed once if condition value is non-zero.

    -CU_GRAPH_COND_TYPE_WHILE = 1#

    Conditional ‘while’ Node. Body executed repeatedly while condition value is non-zero.
    -class cuda.cuda.CUgraphNodeType(value)#

    Graph node types

    -CU_GRAPH_NODE_TYPE_KERNEL = 0#

    GPU kernel node

    -CU_GRAPH_NODE_TYPE_MEMCPY = 1#

    Memcpy node

    -CU_GRAPH_NODE_TYPE_MEMSET = 2#

    Memset node

    -CU_GRAPH_NODE_TYPE_HOST = 3#

    Host (executable) node

    -CU_GRAPH_NODE_TYPE_GRAPH = 4#

    Node which executes an embedded graph

    -CU_GRAPH_NODE_TYPE_EMPTY = 5#

    Empty (no-op) node

    -CU_GRAPH_NODE_TYPE_WAIT_EVENT = 6#

    External event wait node

    -CU_GRAPH_NODE_TYPE_EVENT_RECORD = 7#

    External event record node

    -CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = 8#

    External semaphore signal node

    -CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = 9#

    External semaphore wait node

    -CU_GRAPH_NODE_TYPE_MEM_ALLOC = 10#

    Memory Allocation Node

    -CU_GRAPH_NODE_TYPE_MEM_FREE = 11#

    Memory Free Node

    -CU_GRAPH_NODE_TYPE_BATCH_MEM_OP = 12#

    Batch MemOp Node

    -CU_GRAPH_NODE_TYPE_CONDITIONAL = 13#

    Conditional Node. May be used to implement a conditional execution path or loop inside of a graph. The graph(s) contained within the body of the conditional node can be selectively executed or iterated upon based on the value of a conditional variable.

    Handles must be created in advance of creating the node using cuGraphConditionalHandleCreate.

    The following restrictions apply to graphs which contain conditional nodes: the graph cannot be used in a child node, only one instantiation of the graph may exist at any point in time, and the graph cannot be cloned.

    To set the control value, supply a default value when creating the handle and/or call cudaGraphSetConditional from device code.
    -class cuda.cuda.CUgraphDependencyType(value)#

    Type annotations that can be applied to graph edges as part of CUgraphEdgeData.

    -CU_GRAPH_DEPENDENCY_TYPE_DEFAULT = 0#

    This is an ordinary dependency.

    -CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC = 1#

    This dependency type allows the downstream node to use cudaGridDependencySynchronize(). It may only be used between kernel nodes, and must be used with either the CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC or CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER outgoing port.
    -class cuda.cuda.CUgraphInstantiateResult(value)#

    Graph instantiation results

    -CUDA_GRAPH_INSTANTIATE_SUCCESS = 0#

    Instantiation succeeded

    -CUDA_GRAPH_INSTANTIATE_ERROR = 1#

    Instantiation failed for an unexpected reason which is described in the return value of the function

    -CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE = 2#

    Instantiation failed due to invalid structure, such as cycles

    -CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED = 3#

    Instantiation for device launch failed because the graph contained an unsupported operation

    -CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED = 4#

    Instantiation for device launch failed due to the nodes belonging to different contexts
    -class cuda.cuda.CUsynchronizationPolicy(value)#

    -CU_SYNC_POLICY_AUTO = 1#

    -CU_SYNC_POLICY_SPIN = 2#

    -CU_SYNC_POLICY_YIELD = 3#

    -CU_SYNC_POLICY_BLOCKING_SYNC = 4#
    -class cuda.cuda.CUclusterSchedulingPolicy(value)#

    Cluster scheduling policies. These may be passed to cuFuncSetAttribute or cuKernelSetAttribute

    -CU_CLUSTER_SCHEDULING_POLICY_DEFAULT = 0#

    the default policy

    -CU_CLUSTER_SCHEDULING_POLICY_SPREAD = 1#

    spread the blocks within a cluster to the SMs

    -CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING = 2#

    allow the hardware to load-balance the blocks in a cluster to the SMs
    -class cuda.cuda.CUlaunchMemSyncDomain(value)#

    Memory Synchronization Domain. A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating latency increase from memory barriers ordering unrelated traffic. By default, kernels are launched in domain 0. Kernels launched with CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a different domain ID. Users may also alter the domain ID with CUlaunchMemSyncDomainMap for a specific stream / graph node / kernel launch. See CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, cuStreamSetAttribute, cuLaunchKernelEx, cuGraphKernelNodeSetAttribute. Memory operations done in kernels launched in different domains are considered system-scope distanced. In other words, a GPU-scoped memory synchronization is not sufficient for memory order to be observed by kernels in another memory synchronization domain even if they are on the same GPU.

    -CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT = 0#

    Launch kernels in the default domain

    -CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE = 1#

    Launch kernels in the remote domain
    -class cuda.cuda.CUlaunchAttributeID(value)#

    Launch attributes enum; used as id field of CUlaunchAttribute

    -CU_LAUNCH_ATTRIBUTE_IGNORE = 0#

    Ignored entry, for convenient composition

    -CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1#

    Valid for streams, graph nodes, launches. See accessPolicyWindow.

    -CU_LAUNCH_ATTRIBUTE_COOPERATIVE = 2#

    Valid for graph nodes, launches. See cooperative.

    -CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3#

    Valid for streams. See syncPolicy.

    -CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = 4#

    Valid for graph nodes, launches. See clusterDim.

    -CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5#

    Valid for graph nodes, launches. See clusterSchedulingPolicyPreference.

    -CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = 6#

    Valid for launches. Setting programmaticStreamSerializationAllowed to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid’s execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions).

    -CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = 7#

    Valid for launches. Set programmaticEvent to record the event. Event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block’s execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling cuEventSynchronize()) are not guaranteed to observe the release precisely when it is released. For example, cuEventSynchronize() may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks.

    The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the CU_EVENT_DISABLE_TIMING flag set).

    -CU_LAUNCH_ATTRIBUTE_PRIORITY = 8#

    Valid for streams, graph nodes, launches. See priority.

    -CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9#

    Valid for streams, graph nodes, launches. See memSyncDomainMap.

    -CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = 10#

    Valid for streams, graph nodes, launches. See memSyncDomain.

    -CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = 12#

    Valid for launches. Set launchCompletionEvent to record the event.

    Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B has a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock.

    A launch completion event is nominally similar to a programmatic event with triggerAtBlockStart set except that it is not visible to cudaGridDependencySynchronize() and can be used with compute capability less than 9.0.

    The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the CU_EVENT_DISABLE_TIMING flag set).

    -CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = 13#

    Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error.

    CUlaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node’s kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see cudaGraphKernelNodeUpdatesApply.

    Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via cuGraphDestroyNode. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via cuGraphKernelNodeCopyAttributes. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to cuGraphExecUpdate.

    If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with cuGraphUpload before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again.

    -CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 14#

    Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting sharedMemCarveout to a percentage between 0-100 signals the CUDA driver to set the shared memory carveout preference, in percent of the total shared memory for that kernel launch. This attribute takes precedence over CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT. This is only a hint, and the CUDA driver can choose a different configuration if required for the launch.
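    A sketch of attaching one of these attributes to a launch through cuLaunchKernelEx. It assumes `kernel` is a no-argument CUfunction and `stream` already exists; the grid/block shape and the priority value are arbitrary examples:

        from cuda import cuda

        config = cuda.CUlaunchConfig()
        config.gridDimX, config.gridDimY, config.gridDimZ = 32, 1, 1
        config.blockDimX, config.blockDimY, config.blockDimZ = 128, 1, 1
        config.hStream = stream

        # One attribute: raise this launch's priority within the stream.
        attr = cuda.CUlaunchAttribute()
        attr.id = cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY
        attr.value.priority = 1
        config.attrs = [attr]
        config.numAttrs = 1

        # No kernel parameters and no extra options for this sketch.
        err, = cuda.cuLaunchKernelEx(config, kernel, 0, 0)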
    -class cuda.cuda.CUstreamCaptureStatus(value)#

    Possible stream capture statuses returned by cuStreamIsCapturing

    -CU_STREAM_CAPTURE_STATUS_NONE = 0#

    Stream is not capturing

    -CU_STREAM_CAPTURE_STATUS_ACTIVE = 1#

    Stream is actively capturing

    -CU_STREAM_CAPTURE_STATUS_INVALIDATED = 2#

    Stream is part of a capture sequence that has been invalidated, but not terminated

    -class cuda.cuda.CUstreamCaptureMode(value)#

    Possible modes for stream capture thread interactions. For more details see cuStreamBeginCapture and cuThreadExchangeStreamCaptureMode

    -CU_STREAM_CAPTURE_MODE_GLOBAL = 0#

    -CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = 1#

    -CU_STREAM_CAPTURE_MODE_RELAXED = 2#
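    A sketch of the capture life cycle these statuses describe; the async work enqueued between begin and end is elided:

        from cuda import cuda

        err, stream = cuda.cuStreamCreate(0)
        err, = cuda.cuStreamBeginCapture(
            stream, cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL)

        # Between begin and end the status is CU_STREAM_CAPTURE_STATUS_ACTIVE.
        err, status = cuda.cuStreamIsCapturing(stream)

        # ... enqueue async work targeting `stream` here ...

        err, graph = cuda.cuStreamEndCapture(stream)
        err, graph_exec = cuda.cuGraphInstantiate(graph, 0)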
    -class cuda.cuda.CUdriverProcAddress_flags(value)#

    Flags to specify search options. For more details see cuGetProcAddress

    -CU_GET_PROC_ADDRESS_DEFAULT = 0#

    Default search mode for driver symbols.

    -CU_GET_PROC_ADDRESS_LEGACY_STREAM = 1#

    Search for legacy versions of driver symbols.

    -CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM = 2#

    Search for per-thread versions of driver symbols.

    -class cuda.cuda.CUdriverProcAddressQueryResult(value)#

    Flags to indicate search status. For more details see cuGetProcAddress

    -CU_GET_PROC_ADDRESS_SUCCESS = 0#

    Symbol was successfully found

    -CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND = 1#

    Symbol was not found in search

    -CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT = 2#

    Symbol was found but version supplied was not sufficient
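    A sketch of a driver entry-point lookup governed by these flags. The exact return shape of cuGetProcAddress has shifted across CUDA versions, so treat this as indicative only; the symbol name and 12000 (CUDA 12.0) version are example choices:

        from cuda import cuda

        err, pfn, status = cuda.cuGetProcAddress(
            b"cuDeviceGetCount", 12000,
            cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT)
        if status != cuda.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS:
            raise RuntimeError("driver symbol lookup failed")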
    -class cuda.cuda.CUexecAffinityType(value)#

    Execution Affinity Types

    -CU_EXEC_AFFINITY_TYPE_SM_COUNT = 0#

    Create a context with limited SMs.

    -CU_EXEC_AFFINITY_TYPE_MAX = 1#

    -class cuda.cuda.CUcigDataType(value)#

    -CIG_DATA_TYPE_D3D12_COMMAND_QUEUE = 1#
    -class cuda.cuda.CUlibraryOption(value)#

    Library options to be specified with cuLibraryLoadData() or cuLibraryLoadFromFile()

    -CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE = 0#

    -CU_LIBRARY_BINARY_IS_PRESERVED = 1#

    Specifies that the argument code passed to cuLibraryLoadData() will be preserved. Specifying this option will let the driver know that code can be accessed at any point until cuLibraryUnload(). The default behavior is for the driver to allocate and maintain its own copy of code. Note that this is only a memory usage optimization hint and the driver can choose to ignore it if required. Specifying this option with cuLibraryLoadFromFile() is invalid and will return CUDA_ERROR_INVALID_VALUE.

    -CU_LIBRARY_NUM_OPTIONS = 2#
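    A sketch of loading a library image, with no JIT or library options, which is the common case; `cubin` is assumed to hold a cubin or fat binary as bytes and `my_kernel` is a hypothetical kernel name, and the argument marshalling here may differ slightly across binding versions:

        from cuda import cuda

        err, library = cuda.cuLibraryLoadData(
            cubin, None, None, 0, None, None, 0)
        err, kernel = cuda.cuLibraryGetKernel(library, b"my_kernel")

        # ... launch via the kernel handle ...

        err, = cuda.cuLibraryUnload(library)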
    -class cuda.cuda.CUresult(value)#
    -

    Error codes

    -
    -
    -CUDA_SUCCESS = 0#
    -

    The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see cuEventQuery() and cuStreamQuery()).

    -
    - -
    -
    -CUDA_ERROR_INVALID_VALUE = 1#
    -

    This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values.

    -
    - -
    -
    -CUDA_ERROR_OUT_OF_MEMORY = 2#
    -

    The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation.

    -
    - -
    -
    -CUDA_ERROR_NOT_INITIALIZED = 3#
    -

    This indicates that the CUDA driver has not been initialized with cuInit() or that initialization has failed.

    -
    - -
    -
    -CUDA_ERROR_DEINITIALIZED = 4#
    -

    This indicates that the CUDA driver is in the process of shutting down.

    -
    - -
    -
    -CUDA_ERROR_PROFILER_DISABLED = 5#
    -

    This indicates profiler is not initialized for this run. This can happen when the application is running with external profiling tools like visual profiler.

    -
    - -
    -
    -CUDA_ERROR_PROFILER_NOT_INITIALIZED = 6#
    -

    [Deprecated]

    -
    - -
    -
    -CUDA_ERROR_PROFILER_ALREADY_STARTED = 7#
    -

    [Deprecated]

    -
    - -
    -
    -CUDA_ERROR_PROFILER_ALREADY_STOPPED = 8#
    -

    [Deprecated]

    -
    - -
    -
    -CUDA_ERROR_STUB_LIBRARY = 34#
    -

    This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in CUDA API returning this error.

    -
    - -
    -
    -CUDA_ERROR_DEVICE_UNAVAILABLE = 46#
    -

    This indicates that requested CUDA device is unavailable at the current time. Devices are often unavailable due to use of CU_COMPUTEMODE_EXCLUSIVE_PROCESS or CU_COMPUTEMODE_PROHIBITED.

    -
    - -
    -
    -CUDA_ERROR_NO_DEVICE = 100#
    -

    This indicates that no CUDA-capable devices were detected by the installed CUDA driver.

    -
    - -
    -
    -CUDA_ERROR_INVALID_DEVICE = 101#
    -

    This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device.

    -
    - -
    -
    -CUDA_ERROR_DEVICE_NOT_LICENSED = 102#
    -

    This error indicates that the Grid license is not applied.

    -
    - -
    -
    -CUDA_ERROR_INVALID_IMAGE = 200#
    -

    This indicates that the device kernel image is invalid. This can also indicate an invalid CUDA module.

    -
    - -
    -
    -CUDA_ERROR_INVALID_CONTEXT = 201#
    -

    This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had cuCtxDestroy() invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See cuCtxGetApiVersion() for more details. This can also be returned if the green context passed to an API call was not converted to a CUcontext using cuCtxFromGreenCtx API.

    -
    - -
    -
    -CUDA_ERROR_CONTEXT_ALREADY_CURRENT = 202#
    -

    This indicated that the context being supplied as a parameter to the API call was already the active context. [Deprecated]

    -
    - -
    -
    -CUDA_ERROR_MAP_FAILED = 205#
    -

    This indicates that a map or register operation has failed.

    -
    - -
    -
    -CUDA_ERROR_UNMAP_FAILED = 206#
    -

    This indicates that an unmap or unregister operation has failed.

    -
    - -
    -
    -CUDA_ERROR_ARRAY_IS_MAPPED = 207#
    -

    This indicates that the specified array is currently mapped and thus cannot be destroyed.

    -
    - -
    -
    -CUDA_ERROR_ALREADY_MAPPED = 208#
    -

    This indicates that the resource is already mapped.

    -
    - -
    -
    -CUDA_ERROR_NO_BINARY_FOR_GPU = 209#
    -

    This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration.

    -
    - -
    -
    -CUDA_ERROR_ALREADY_ACQUIRED = 210#
    -

    This indicates that a resource has already been acquired.

    -
    - -
    -
    -CUDA_ERROR_NOT_MAPPED = 211#
    -

    This indicates that a resource is not mapped.

    -
    - -
    -
    -CUDA_ERROR_NOT_MAPPED_AS_ARRAY = 212#
    -

    This indicates that a mapped resource is not available for access as an array.

    -
    - -
    -
    -CUDA_ERROR_NOT_MAPPED_AS_POINTER = 213#
    -

    This indicates that a mapped resource is not available for access as a pointer.

    -
    - -
    -
    -CUDA_ERROR_ECC_UNCORRECTABLE = 214#
    -

    This indicates that an uncorrectable ECC error was detected during execution.

    -
    - -
    -
    -CUDA_ERROR_UNSUPPORTED_LIMIT = 215#
    -

    This indicates that the CUlimit passed to the API call is not supported by the active device.

    -
    - -
    -
    -CUDA_ERROR_CONTEXT_ALREADY_IN_USE = 216#
    -

    This indicates that the CUcontext passed to the API call can only be bound to a single CPU thread at a time but is already bound to a CPU thread.

    -
    - -
    -
    -CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = 217#
    -

    This indicates that peer access is not supported across the given devices.

    -
    - -
    -
    -CUDA_ERROR_INVALID_PTX = 218#
    -

    This indicates that a PTX JIT compilation failed.

    -
    - -
    -
    -CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219#
    -

    This indicates an error with OpenGL or DirectX context.

    -
    - -
    - -

    This indicates that an uncorrectable NVLink error was detected during the execution.

    -
    - -
    -
    -CUDA_ERROR_JIT_COMPILER_NOT_FOUND = 221#
    -

    This indicates that the PTX JIT compiler library was not found.

    -
    - -
    -
    -CUDA_ERROR_UNSUPPORTED_PTX_VERSION = 222#
    -

    This indicates that the provided PTX was compiled with an unsupported toolchain.

    -
    - -
    -
    -CUDA_ERROR_JIT_COMPILATION_DISABLED = 223#
    -

    This indicates that the PTX JIT compilation was disabled.

    -
    - -
    -
    -CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = 224#
    -

    This indicates that the CUexecAffinityType passed to the API call is not supported by the active device.

    -
    - -
    -
    -CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC = 225#
    -

    This indicates that the code to be compiled by the PTX JIT contains unsupported call to cudaDeviceSynchronize.

    -
    - -
    -
    -CUDA_ERROR_INVALID_SOURCE = 300#
    -

    This indicates that the device kernel source is invalid. This includes compilation/linker errors encountered in device code or user error.

    -
    - -
    -
    -CUDA_ERROR_FILE_NOT_FOUND = 301#
    -

    This indicates that the file specified was not found.

    -
    - -
    -
    -CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302#
    -

    This indicates that a link to a shared object failed to resolve.

    -
    - -
    -
    -CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = 303#
    -

    This indicates that initialization of a shared object failed.

    -
    - -
    -
    -CUDA_ERROR_OPERATING_SYSTEM = 304#
    -

    This indicates that an OS call failed.

    -
    - -
    -
    -CUDA_ERROR_INVALID_HANDLE = 400#
    -

    This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like CUstream and CUevent.

    CUDA_ERROR_ILLEGAL_STATE = 401
        This indicates that a resource required by the API call is not in a valid state to perform the requested operation.
    CUDA_ERROR_LOSSY_QUERY = 402
        This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments.
    CUDA_ERROR_NOT_FOUND = 500
        This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names.
    CUDA_ERROR_NOT_READY = 600
        This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than CUDA_SUCCESS (which indicates completion). Calls that may return this value include cuEventQuery() and cuStreamQuery().
    CUDA_ERROR_ILLEGAL_ADDRESS = 700
        While executing a kernel, the device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = 701
        This indicates that a launch did not occur because it did not have appropriate resources. This error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel’s register count. Passing arguments of the wrong size (i.e. a 64-bit pointer when a 32-bit int is expected) is equivalent to passing too many arguments and can also result in this error.
    CUDA_ERROR_LAUNCH_TIMEOUT = 702
        This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device attribute CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = 703
        This error indicates a kernel launch that uses an incompatible texturing mode.
    CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = 704
        This error indicates that a call to cuCtxEnablePeerAccess() is trying to re-enable peer access to a context which has already had peer access to it enabled.
    CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = 705
        This error indicates that cuCtxDisablePeerAccess() is trying to disable peer access which has not been enabled yet via cuCtxEnablePeerAccess().
    CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = 708
        This error indicates that the primary context for the specified device has already been initialized.
    CUDA_ERROR_CONTEXT_IS_DESTROYED = 709
        This error indicates that the context current to the calling thread has been destroyed using cuCtxDestroy, or is a primary context which has not yet been initialized.
    CUDA_ERROR_ASSERT = 710
        A device-side assert triggered during kernel execution. The context cannot be used anymore, and must be destroyed. All existing device memory allocations from this context are invalid and must be reconstructed if the program is to continue using CUDA.
    CUDA_ERROR_TOO_MANY_PEERS = 711
        This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to cuCtxEnablePeerAccess().
    CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = 712
        This error indicates that the memory range passed to cuMemHostRegister() has already been registered.
    CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = 713
        This error indicates that the pointer passed to cuMemHostUnregister() does not correspond to any currently registered memory region.
    CUDA_ERROR_HARDWARE_STACK_ERROR = 714
        While executing a kernel, the device encountered a stack error. This can be due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_ILLEGAL_INSTRUCTION = 715
        While executing a kernel, the device encountered an illegal instruction. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_MISALIGNED_ADDRESS = 716
        While executing a kernel, the device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_INVALID_ADDRESS_SPACE = 717
        While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_INVALID_PC = 718
        While executing a kernel, the device program counter wrapped its address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_LAUNCH_FAILED = 719
        An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = 720
        This error indicates that the number of blocks launched per grid for a kernel that was launched via either cuLaunchCooperativeKernel or cuLaunchCooperativeKernelMultiDevice exceeds the maximum number of blocks as allowed by cuOccupancyMaxActiveBlocksPerMultiprocessor or cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors as specified by the device attribute CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.
    CUDA_ERROR_NOT_PERMITTED = 800
        This error indicates that the attempted operation is not permitted.
    CUDA_ERROR_NOT_SUPPORTED = 801
        This error indicates that the attempted operation is not supported on the current system or device.
    CUDA_ERROR_SYSTEM_NOT_READY = 802
        This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide.
    CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803
        This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions.
    CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = 804
        This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable.
    CUDA_ERROR_MPS_CONNECTION_FAILED = 805
        This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server.
    CUDA_ERROR_MPS_RPC_FAILURE = 806
        This error indicates that the remote procedure call between the MPS server and the MPS client failed.
    CUDA_ERROR_MPS_SERVER_NOT_READY = 807
        This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure.
    CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = 808
        This error indicates that the hardware resources required to create an MPS client have been exhausted.
    CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = 809
        This error indicates that the hardware resources required to support device connections have been exhausted.
    CUDA_ERROR_MPS_CLIENT_TERMINATED = 810
        This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_CDP_NOT_SUPPORTED = 811
        This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it.
    CUDA_ERROR_CDP_VERSION_MISMATCH = 812
        This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism.
    CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = 900
        This error indicates that the operation is not permitted when the stream is capturing.
    CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = 901
        This error indicates that the current capture sequence on the stream has been invalidated due to a previous error.
    CUDA_ERROR_STREAM_CAPTURE_MERGE = 902
        This error indicates that the operation would have resulted in a merge of two independent capture sequences.
    CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = 903
        This error indicates that the capture was not initiated in this stream.
    CUDA_ERROR_STREAM_CAPTURE_UNJOINED = 904
        This error indicates that the capture sequence contains a fork that was not joined to the primary stream.
    CUDA_ERROR_STREAM_CAPTURE_ISOLATION = 905
        This error indicates that a dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary.
    CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = 906
        This error indicates a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy.
    CUDA_ERROR_CAPTURED_EVENT = 907
        This error indicates that the operation is not permitted on an event which was last recorded in a capturing stream.
    CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = 908
        A stream capture sequence not initiated with the CU_STREAM_CAPTURE_MODE_RELAXED argument to cuStreamBeginCapture was passed to cuStreamEndCapture in a different thread.
    CUDA_ERROR_TIMEOUT = 909
        This error indicates that the timeout specified for the wait operation has lapsed.
    CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = 910
        This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.
    CUDA_ERROR_EXTERNAL_DEVICE = 911
        This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device’s signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
    CUDA_ERROR_INVALID_CLUSTER_SIZE = 912
        Indicates a kernel launch error due to cluster misconfiguration.
    CUDA_ERROR_FUNCTION_NOT_LOADED = 913
        Indicates that a function handle is not loaded when calling an API that requires a loaded function.
    CUDA_ERROR_INVALID_RESOURCE_TYPE = 914
        This error indicates one or more resources passed in are not valid resource types for the operation.
    CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION = 915
        This error indicates one or more resources are insufficient or non-applicable for the operation.
    CUDA_ERROR_UNKNOWN = 999
        This indicates that an unknown internal error has occurred.
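
    The codes above are returned as the first element of every binding call's
    result tuple. A minimal error-checking sketch (the helper name ``check``
    is ours, not part of the bindings)::

        from cuda import cuda

        def check(err):
            # Raise on anything other than CUDA_SUCCESS. Note that
            # CUDA_ERROR_NOT_READY also lands here even though it only
            # signals "still running" from cuEventQuery()/cuStreamQuery().
            if err != cuda.CUresult.CUDA_SUCCESS:
                _, name = cuda.cuGetErrorName(err)
                _, desc = cuda.cuGetErrorString(err)
                raise RuntimeError(f"{name.decode()}: {desc.decode()}")

        err, = cuda.cuInit(0)
        check(err)
        err, dev = cuda.cuDeviceGet(0)
        check(err)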

    class cuda.cuda.CUdevice_P2PAttribute(value)
        P2P Attributes

    CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = 1
        A relative value indicating the performance of the link between two devices
    CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = 2
        P2P Access is enabled
    CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = 3
        Atomic operation over the link supported
    CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = 4
        [Deprecated]
    CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = 4
        Accessing CUDA arrays over the link supported
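
    A short sketch of querying one of these attributes between two visible
    devices (device ordinals 0 and 1 are assumed to exist)::

        from cuda import cuda

        err, = cuda.cuInit(0)
        err, dev0 = cuda.cuDeviceGet(0)
        err, dev1 = cuda.cuDeviceGet(1)
        # 1 if dev0 can access dev1's memory over the link, 0 otherwise.
        err, supported = cuda.cuDeviceGetP2PAttribute(
            cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED,
            dev0, dev1)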

    class cuda.cuda.CUresourceViewFormat(value)
        Resource view format

    CU_RES_VIEW_FORMAT_NONE = 0
        No resource view format (use underlying resource format)
    CU_RES_VIEW_FORMAT_UINT_1X8 = 1
        1 channel unsigned 8-bit integers
    CU_RES_VIEW_FORMAT_UINT_2X8 = 2
        2 channel unsigned 8-bit integers
    CU_RES_VIEW_FORMAT_UINT_4X8 = 3
        4 channel unsigned 8-bit integers
    CU_RES_VIEW_FORMAT_SINT_1X8 = 4
        1 channel signed 8-bit integers
    CU_RES_VIEW_FORMAT_SINT_2X8 = 5
        2 channel signed 8-bit integers
    CU_RES_VIEW_FORMAT_SINT_4X8 = 6
        4 channel signed 8-bit integers
    CU_RES_VIEW_FORMAT_UINT_1X16 = 7
        1 channel unsigned 16-bit integers
    CU_RES_VIEW_FORMAT_UINT_2X16 = 8
        2 channel unsigned 16-bit integers
    CU_RES_VIEW_FORMAT_UINT_4X16 = 9
        4 channel unsigned 16-bit integers
    CU_RES_VIEW_FORMAT_SINT_1X16 = 10
        1 channel signed 16-bit integers
    CU_RES_VIEW_FORMAT_SINT_2X16 = 11
        2 channel signed 16-bit integers
    CU_RES_VIEW_FORMAT_SINT_4X16 = 12
        4 channel signed 16-bit integers
    CU_RES_VIEW_FORMAT_UINT_1X32 = 13
        1 channel unsigned 32-bit integers
    CU_RES_VIEW_FORMAT_UINT_2X32 = 14
        2 channel unsigned 32-bit integers
    CU_RES_VIEW_FORMAT_UINT_4X32 = 15
        4 channel unsigned 32-bit integers
    CU_RES_VIEW_FORMAT_SINT_1X32 = 16
        1 channel signed 32-bit integers
    CU_RES_VIEW_FORMAT_SINT_2X32 = 17
        2 channel signed 32-bit integers
    CU_RES_VIEW_FORMAT_SINT_4X32 = 18
        4 channel signed 32-bit integers
    CU_RES_VIEW_FORMAT_FLOAT_1X16 = 19
        1 channel 16-bit floating point
    CU_RES_VIEW_FORMAT_FLOAT_2X16 = 20
        2 channel 16-bit floating point
    CU_RES_VIEW_FORMAT_FLOAT_4X16 = 21
        4 channel 16-bit floating point
    CU_RES_VIEW_FORMAT_FLOAT_1X32 = 22
        1 channel 32-bit floating point
    CU_RES_VIEW_FORMAT_FLOAT_2X32 = 23
        2 channel 32-bit floating point
    CU_RES_VIEW_FORMAT_FLOAT_4X32 = 24
        4 channel 32-bit floating point
    CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = 25
        Block compressed 1
    CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = 26
        Block compressed 2
    CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = 27
        Block compressed 3
    CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = 28
        Block compressed 4 unsigned
    CU_RES_VIEW_FORMAT_SIGNED_BC4 = 29
        Block compressed 4 signed
    CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = 30
        Block compressed 5 unsigned
    CU_RES_VIEW_FORMAT_SIGNED_BC5 = 31
        Block compressed 5 signed
    CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = 32
        Block compressed 6 unsigned half-float
    CU_RES_VIEW_FORMAT_SIGNED_BC6H = 33
        Block compressed 6 signed half-float
    CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = 34
        Block compressed 7

    class cuda.cuda.CUtensorMapDataType(value)
        Tensor map data type

    CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0
    CU_TENSOR_MAP_DATA_TYPE_UINT16 = 1
    CU_TENSOR_MAP_DATA_TYPE_UINT32 = 2
    CU_TENSOR_MAP_DATA_TYPE_INT32 = 3
    CU_TENSOR_MAP_DATA_TYPE_UINT64 = 4
    CU_TENSOR_MAP_DATA_TYPE_INT64 = 5
    CU_TENSOR_MAP_DATA_TYPE_FLOAT16 = 6
    CU_TENSOR_MAP_DATA_TYPE_FLOAT32 = 7
    CU_TENSOR_MAP_DATA_TYPE_FLOAT64 = 8
    CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 = 9
    CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ = 10
    CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 = 11
    CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ = 12

    class cuda.cuda.CUtensorMapInterleave(value)
        Tensor map interleave layout type

    CU_TENSOR_MAP_INTERLEAVE_NONE = 0
    CU_TENSOR_MAP_INTERLEAVE_16B = 1
    CU_TENSOR_MAP_INTERLEAVE_32B = 2

    class cuda.cuda.CUtensorMapSwizzle(value)
        Tensor map swizzling mode of shared memory banks

    CU_TENSOR_MAP_SWIZZLE_NONE = 0
    CU_TENSOR_MAP_SWIZZLE_32B = 1
    CU_TENSOR_MAP_SWIZZLE_64B = 2
    CU_TENSOR_MAP_SWIZZLE_128B = 3

    class cuda.cuda.CUtensorMapL2promotion(value)
        Tensor map L2 promotion type

    CU_TENSOR_MAP_L2_PROMOTION_NONE = 0
    CU_TENSOR_MAP_L2_PROMOTION_L2_64B = 1
    CU_TENSOR_MAP_L2_PROMOTION_L2_128B = 2
    CU_TENSOR_MAP_L2_PROMOTION_L2_256B = 3

    class cuda.cuda.CUtensorMapFloatOOBfill(value)
        Tensor map out-of-bounds fill type

    CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0
    CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA = 1

    class cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS(value)
        Access flags that specify the level of access the current context’s device has on the memory referenced.

    CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE = 0
        No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations
    CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ = 1
        Read-only access, meaning writes to this memory are considered invalid accesses and thus return an error in that case.
    CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE = 3
        Read-write access, the device has full read-write access to the memory
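
    These flags can be read back through the pointer-attribute query; a
    sketch, assuming ``dptr`` is a valid CUdeviceptr from an earlier
    allocation::

        from cuda import cuda

        # Ask the driver what level of access the current context's device
        # has on the memory behind dptr.
        err, flags = cuda.cuPointerGetAttribute(
            cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS, dptr)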

    class cuda.cuda.CUexternalMemoryHandleType(value)
        External memory handle types

    CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1
        Handle is an opaque file descriptor
    CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2
        Handle is an opaque shared NT handle
    CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3
        Handle is an opaque, globally shared handle
    CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4
        Handle is a D3D12 heap object
    CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5
        Handle is a D3D12 committed resource
    CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6
        Handle is a shared NT handle to a D3D11 resource
    CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7
        Handle is a globally shared handle to a D3D11 resource
    CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8
        Handle is an NvSciBuf object

    class cuda.cuda.CUexternalSemaphoreHandleType(value)
        External semaphore handle types

    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1
        Handle is an opaque file descriptor
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2
        Handle is an opaque shared NT handle
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3
        Handle is an opaque, globally shared handle
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4
        Handle is a shared NT handle referencing a D3D12 fence object
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5
        Handle is a shared NT handle referencing a D3D11 fence object
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6
        Opaque handle to NvSciSync Object
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7
        Handle is a shared NT handle referencing a D3D11 keyed mutex object
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8
        Handle is a globally shared handle referencing a D3D11 keyed mutex object
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9
        Handle is an opaque file descriptor referencing a timeline semaphore
    CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10
        Handle is an opaque shared NT handle referencing a timeline semaphore
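
    A hedged sketch of importing a semaphore of one of these types; the file
    descriptor ``fd`` is assumed to come from the exporting API (for example
    Vulkan), and field names follow the underlying C structs::

        from cuda import cuda

        desc = cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC()
        desc.type = cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD
        desc.handle.fd = fd  # ownership of fd passes to the driver on success
        err, extSem = cuda.cuImportExternalSemaphore(desc)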

    class cuda.cuda.CUmemAllocationHandleType(value)
        Flags for specifying particular handle types

    CU_MEM_HANDLE_TYPE_NONE = 0
        Does not allow any export mechanism.
    CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = 1
        Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int)
    CU_MEM_HANDLE_TYPE_WIN32 = 2
        Allows a Win32 NT handle to be used for exporting. (HANDLE)
    CU_MEM_HANDLE_TYPE_WIN32_KMT = 4
        Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE)
    CU_MEM_HANDLE_TYPE_FABRIC = 8
        Allows a fabric handle to be used for exporting. (CUmemFabricHandle)
    CU_MEM_HANDLE_TYPE_MAX = 2147483647

    class cuda.cuda.CUmemAccess_flags(value)
        Specifies the memory protection flags for mapping.

    CU_MEM_ACCESS_FLAGS_PROT_NONE = 0
        Default, make the address range not accessible
    CU_MEM_ACCESS_FLAGS_PROT_READ = 1
        Make the address range read accessible
    CU_MEM_ACCESS_FLAGS_PROT_READWRITE = 3
        Make the address range read-write accessible
    CU_MEM_ACCESS_FLAGS_PROT_MAX = 2147483647
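
    In the virtual memory management API these flags travel inside a
    CUmemAccessDesc; a sketch, assuming ``ptr`` and ``size`` describe an
    already-mapped reserved range::

        from cuda import cuda

        accessDesc = cuda.CUmemAccessDesc()
        accessDesc.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        accessDesc.location.id = 0  # device ordinal
        accessDesc.flags = cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
        # Grant device 0 read-write access to the mapped range.
        err, = cuda.cuMemSetAccess(ptr, size, [accessDesc], 1)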

    class cuda.cuda.CUmemLocationType(value)
        Specifies the type of location

    CU_MEM_LOCATION_TYPE_INVALID = 0
    CU_MEM_LOCATION_TYPE_DEVICE = 1
        Location is a device location, thus id is a device ordinal
    CU_MEM_LOCATION_TYPE_HOST = 2
        Location is host, id is ignored
    CU_MEM_LOCATION_TYPE_HOST_NUMA = 3
        Location is a host NUMA node, thus id is a host NUMA node id
    CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT = 4
        Location is a host NUMA node of the current thread, id is ignored
    CU_MEM_LOCATION_TYPE_MAX = 2147483647

    class cuda.cuda.CUmemAllocationType(value)
        Defines the allocation types available

    CU_MEM_ALLOCATION_TYPE_INVALID = 0
    CU_MEM_ALLOCATION_TYPE_PINNED = 1
        This allocation type is ‘pinned’, i.e. cannot migrate from its current location while the application is actively using it
    CU_MEM_ALLOCATION_TYPE_MAX = 2147483647

    class cuda.cuda.CUmemAllocationGranularity_flags(value)
        Flag for requesting different optimal and required granularities for an allocation.

    CU_MEM_ALLOC_GRANULARITY_MINIMUM = 0
        Minimum required granularity for allocation
    CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = 1
        Recommended granularity for allocation for best performance
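
    A sketch of querying both granularities for a pinned device allocation
    (device ordinal 0 assumed)::

        from cuda import cuda

        prop = cuda.CUmemAllocationProp()
        prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        prop.location.id = 0
        # Allocation sizes passed to cuMemCreate must be a multiple of the
        # minimum granularity; the recommended one may be larger.
        err, minGran = cuda.cuMemGetAllocationGranularity(
            prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
        err, recGran = cuda.cuMemGetAllocationGranularity(
            prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)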

    class cuda.cuda.CUmemRangeHandleType(value)
        Specifies the handle type for address range

    CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD = 1
    CU_MEM_RANGE_HANDLE_TYPE_MAX = 2147483647

    class cuda.cuda.CUarraySparseSubresourceType(value)
        Sparse subresource types

    CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0
    CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1

    class cuda.cuda.CUmemOperationType(value)
        Memory operation types

    CU_MEM_OPERATION_TYPE_MAP = 1
    CU_MEM_OPERATION_TYPE_UNMAP = 2

    class cuda.cuda.CUmemHandleType(value)
        Memory handle types

    CU_MEM_HANDLE_TYPE_GENERIC = 0

    class cuda.cuda.CUmemAllocationCompType(value)
        Specifies compression attribute for an allocation.

    CU_MEM_ALLOCATION_COMP_NONE = 0
        Allocating non-compressible memory
    CU_MEM_ALLOCATION_COMP_GENERIC = 1
        Allocating compressible memory

    class cuda.cuda.CUmulticastGranularity_flags(value)
        Flags for querying different granularities for a multicast object

    CU_MULTICAST_GRANULARITY_MINIMUM = 0
        Minimum required granularity
    CU_MULTICAST_GRANULARITY_RECOMMENDED = 1
        Recommended granularity for best performance

    class cuda.cuda.CUgraphExecUpdateResult(value)
        CUDA Graph Update error types

    CU_GRAPH_EXEC_UPDATE_SUCCESS = 0
        The update succeeded
    CU_GRAPH_EXEC_UPDATE_ERROR = 1
        The update failed for an unexpected reason which is described in the return value of the function
    CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = 2
        The update failed because the topology changed
    CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = 3
        The update failed because a node type changed
    CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = 4
        The update failed because the function of a kernel node changed (CUDA driver < 11.2)
    CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = 5
        The update failed because the parameters changed in a way that is not supported
    CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = 6
        The update failed because something about the node is not supported
    CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = 7
        The update failed because the function of a kernel node changed in an unsupported way
    CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED = 8
        The update failed because the node attributes changed in a way that is not supported
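
    With the CUDA 12 bindings the update outcome is carried in a
    CUgraphExecUpdateResultInfo; a hedged sketch, assuming ``graphExec`` and
    ``graph`` already exist::

        from cuda import cuda

        err, resultInfo = cuda.cuGraphExecUpdate(graphExec, graph)
        if resultInfo.result != cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_SUCCESS:
            # The topology or parameters changed in an unsupported way;
            # fall back to re-instantiating the graph.
            err, graphExec = cuda.cuGraphInstantiateWithFlags(graph, 0)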

    class cuda.cuda.CUmemPool_attribute(value)
        CUDA memory pool attributes

    CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = 1
        (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)
    CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC = 2
        (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)
    CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES = 3
        (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync (default enabled).
    CU_MEMPOOL_ATTR_RELEASE_THRESHOLD = 4
        (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)
    CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT = 5
        (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool.
    CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH = 6
        (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero.
    CU_MEMPOOL_ATTR_USED_MEM_CURRENT = 7
        (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.
    CU_MEMPOOL_ATTR_USED_MEM_HIGH = 8
        (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero.
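
    A sketch of raising the release threshold on a device's default pool so
    freed memory is retained across synchronizations (``dev`` is assumed to
    be an existing CUdevice)::

        from cuda import cuda

        err, pool = cuda.cuDeviceGetDefaultMemPool(dev)
        # Hold up to 8 MiB in the pool instead of returning it to the OS
        # at the next synchronize.
        err, = cuda.cuMemPoolSetAttribute(
            pool,
            cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
            cuda.cuuint64_t(8 * 1024 * 1024))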

    class cuda.cuda.CUgraphMem_attribute(value)

    CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT = 0
        (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs
    CU_GRAPH_MEM_ATTR_USED_MEM_HIGH = 1
        (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero.
    CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT = 2
        (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator.
    CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH = 3
        (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator.

    class cuda.cuda.CUflushGPUDirectRDMAWritesOptions(value)
        Bitmasks for CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS

    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST = 1
        cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device.
    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS = 2
        The CU_STREAM_WAIT_VALUE_FLUSH flag and the CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device.

    class cuda.cuda.CUGPUDirectRDMAWritesOrdering(value)
        Platform native ordering for GPUDirect RDMA writes

    CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE = 0
        The device does not natively support ordering of remote writes. cuFlushGPUDirectRDMAWrites() can be leveraged if supported.
    CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER = 100
        Natively, the device can consistently consume remote writes, although other CUDA devices may not.
    CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES = 200
        Any CUDA device in the system can consistently consume remote writes to this device.

    class cuda.cuda.CUflushGPUDirectRDMAWritesScope(value)
        The scopes for cuFlushGPUDirectRDMAWrites

    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = 100
        Blocks until remote writes are visible to the CUDA device context owning the data.
    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = 200
        Blocks until remote writes are visible to all CUDA device contexts.

    class cuda.cuda.CUflushGPUDirectRDMAWritesTarget(value)
        The targets for cuFlushGPUDirectRDMAWrites

    CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = 0
        Sets the target for cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context.
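
    Target and scope come together in the flush call itself; a sketch::

        from cuda import cuda

        # Make remote writes visible to the context that owns the data.
        err, = cuda.cuFlushGPUDirectRDMAWrites(
            cuda.CUflushGPUDirectRDMAWritesTarget.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX,
            cuda.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER)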

    class cuda.cuda.CUgraphDebugDot_flags(value)
        The additional write options for cuGraphDebugDotPrint

    CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE = 1
        Output all debug data as if every debug flag is enabled
    CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES = 2
        Use CUDA Runtime structures for output
    CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS = 4
        Adds CUDA_KERNEL_NODE_PARAMS values to output
    CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS = 8
        Adds CUDA_MEMCPY3D values to output
    CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS = 16
        Adds CUDA_MEMSET_NODE_PARAMS values to output
    CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS = 32
        Adds CUDA_HOST_NODE_PARAMS values to output
    CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS = 64
        Adds CUevent handle from record and wait nodes to output
    CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS = 128
        Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output
    CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS = 256
        Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output
    CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES = 512
        Adds CUkernelNodeAttrValue values to output
    CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES = 1024
        Adds node handles and every kernel function handle to output
    CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS = 2048
        Adds memory alloc node parameters to output
    CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS = 4096
        Adds memory free node parameters to output
    CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS = 8192
        Adds batch mem op node parameters to output
    CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO = 16384
        Adds edge numbering information
    CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS = 32768
        Adds conditional node parameters to output
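
    The flags can be OR-ed together when dumping a graph; a sketch, assuming
    ``graph`` is an existing CUgraph (the output path is ours)::

        from cuda import cuda

        flags = (cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS
                 | cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES)
        # Writes a Graphviz .dot description of the graph to disk.
        err, = cuda.cuGraphDebugDotPrint(graph, b"graph.dot", flags)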

    class cuda.cuda.CUuserObject_flags(value)
        Flags for user objects for graphs

    CU_USER_OBJECT_NO_DESTRUCTOR_SYNC = 1
        Indicates the destructor execution is not synchronized by any CUDA handle.

    class cuda.cuda.CUuserObjectRetain_flags(value)
        Flags for retaining user object references for graphs

    CU_GRAPH_USER_OBJECT_MOVE = 1
        Transfer references from the caller rather than creating new references.

    class cuda.cuda.CUgraphInstantiate_flags(value)
        Flags for instantiating a graph

    CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH = 1
        Automatically free memory allocated in a graph before relaunching.
    CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD = 2
        Automatically upload the graph after instantiation. Only supported by cuGraphInstantiateWithParams. The upload will be performed using the stream provided in instantiateParams.
    CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH = 4
        Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH.
    CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY = 8
        Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into.
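
    A sketch of instantiating with one of these flags (``graph`` is assumed
    to exist)::

        from cuda import cuda

        err, graphExec = cuda.cuGraphInstantiateWithFlags(
            graph,
            cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH)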

    class cuda.cuda.CUdeviceNumaConfig(value)
        CUDA device NUMA configuration

    CU_DEVICE_NUMA_CONFIG_NONE = 0
        The GPU is not a NUMA node
    CU_DEVICE_NUMA_CONFIG_NUMA_NODE = 1
        The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID
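
    The configuration is reported through the device-attribute query; a
    hedged sketch (CU_DEVICE_ATTRIBUTE_NUMA_CONFIG assumes a recent driver,
    and ``dev`` is an existing CUdevice)::

        from cuda import cuda

        err, numaConfig = cuda.cuDeviceGetAttribute(
            cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG, dev)
        if numaConfig == cuda.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NUMA_NODE:
            err, numaId = cuda.cuDeviceGetAttribute(
                cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_ID, dev)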

    class cuda.cuda.CUeglFrameType(value)
        CUDA EglFrame type - array or pointer

    CU_EGL_FRAME_TYPE_ARRAY = 0
        Frame type CUDA array
    CU_EGL_FRAME_TYPE_PITCH = 1
        Frame type pointer

    class cuda.cuda.CUeglResourceLocationFlags(value)
        Resource location flags - sysmem or vidmem. For a CUDA context on iGPU, since video and system memory are equivalent, these flags will not have an effect on the execution. For a CUDA context on dGPU, applications can use the flag CUeglResourceLocationFlags to give a hint about the desired location: CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory to be accessed by CUDA; CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated video memory to be accessed by CUDA. There may be an additional latency due to new allocation and data migration, if the frame is produced on a different memory.

    CU_EGL_RESOURCE_LOCATION_SYSMEM = 0
        Resource location sysmem
    CU_EGL_RESOURCE_LOCATION_VIDMEM = 1
        Resource location vidmem

    class cuda.cuda.CUeglColorFormat(value)
        CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. Three channel formats are currently not supported for CU_EGL_FRAME_TYPE_ARRAY.

    CU_EGL_COLOR_FORMAT_YUV420_PLANAR = 0
        Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = 1
        Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar.
    CU_EGL_COLOR_FORMAT_YUV422_PLANAR = 2
        Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = 3
        Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar.
    CU_EGL_COLOR_FORMAT_RGB = 4
        R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported.
    CU_EGL_COLOR_FORMAT_BGR = 5
        R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported.
    CU_EGL_COLOR_FORMAT_ARGB = 6
        R/G/B/A four channels in one surface with BGRA byte ordering.
    CU_EGL_COLOR_FORMAT_RGBA = 7
        R/G/B/A four channels in one surface with ABGR byte ordering.
    CU_EGL_COLOR_FORMAT_L = 8
        single luminance channel in one surface.
    CU_EGL_COLOR_FORMAT_R = 9
        single color channel in one surface.
    CU_EGL_COLOR_FORMAT_YUV444_PLANAR = 10
        Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = 11
        Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar.
    CU_EGL_COLOR_FORMAT_YUYV_422 = 12
        Y, U, V in one surface, interleaved as UYVY in one channel.
    CU_EGL_COLOR_FORMAT_UYVY_422 = 13
        Y, U, V in one surface, interleaved as YUYV in one channel.
    CU_EGL_COLOR_FORMAT_ABGR = 14
        R/G/B/A four channels in one surface with RGBA byte ordering.
    CU_EGL_COLOR_FORMAT_BGRA = 15
        R/G/B/A four channels in one surface with ARGB byte ordering.
    CU_EGL_COLOR_FORMAT_A = 16
        Alpha color format - one channel in one surface.
    CU_EGL_COLOR_FORMAT_RG = 17
        R/G color format - two channels in one surface with GR byte ordering
    CU_EGL_COLOR_FORMAT_AYUV = 18
        Y, U, V, A four channels in one surface, interleaved as VUYA.
    CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = 19
        Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = 20
        Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = 21
        Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = 22
        Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = 23
        Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = 24
        Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = 25
        Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_VYUY_ER = 26
        Extended Range Y, U, V in one surface, interleaved as YVYU in one channel.
    CU_EGL_COLOR_FORMAT_UYVY_ER = 27
        Extended Range Y, U, V in one surface, interleaved as YUYV in one channel.
    CU_EGL_COLOR_FORMAT_YUYV_ER = 28
        Extended Range Y, U, V in one surface, interleaved as UYVY in one channel.
    CU_EGL_COLOR_FORMAT_YVYU_ER = 29
        Extended Range Y, U, V in one surface, interleaved as VYUY in one channel.
    CU_EGL_COLOR_FORMAT_YUV_ER = 30
        Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.
    CU_EGL_COLOR_FORMAT_YUVA_ER = 31
        Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY.
    CU_EGL_COLOR_FORMAT_AYUV_ER = 32
        Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA.
    CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = 33
        Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = 34
        Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = 35
        Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = 36
        Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = 37
        Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = 38
        Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = 39
        Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = 40
        Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = 41
        Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = 42
        Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = 43
        Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = 44
        Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_BAYER_RGGB = 45
        Bayer format - one channel in one surface with interleaved RGGB ordering.
    CU_EGL_COLOR_FORMAT_BAYER_BGGR = 46
        Bayer format - one channel in one surface with interleaved BGGR ordering.
    CU_EGL_COLOR_FORMAT_BAYER_GRBG = 47
        Bayer format - one channel in one surface with interleaved GRBG ordering.
    CU_EGL_COLOR_FORMAT_BAYER_GBRG = 48
        Bayer format - one channel in one surface with interleaved GBRG ordering.
    CU_EGL_COLOR_FORMAT_BAYER10_RGGB = 49
        Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER10_BGGR = 50
        Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER10_GRBG = 51
        Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER10_GBRG = 52
        Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_RGGB = 53
        Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_BGGR = 54
        Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_GRBG = 55
        Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_GBRG = 56
        Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER14_RGGB = 57
        Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER14_BGGR = 58
        Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER14_GRBG = 59
        Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER14_GBRG = 60
        Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER20_RGGB = 61
        Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER20_BGGR = 62
        Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER20_GRBG = 63
        Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER20_GBRG = 64
        Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op.
    CU_EGL_COLOR_FORMAT_YVU444_PLANAR = 65
        Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU422_PLANAR = 66
        Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_YVU420_PLANAR = 67
        Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = 68
        Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype.
    CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = 69
        Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype.
    CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = 70
        Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype.
    CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = 71
        Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype.
    CU_EGL_COLOR_FORMAT_BAYER_BCCR = 72
        Bayer format - one channel in one surface with interleaved BCCR ordering.
    CU_EGL_COLOR_FORMAT_BAYER_RCCB = 73
        Bayer format - one channel in one surface with interleaved RCCB ordering.
    CU_EGL_COLOR_FORMAT_BAYER_CRBC = 74
        Bayer format - one channel in one surface with interleaved CRBC ordering.
    CU_EGL_COLOR_FORMAT_BAYER_CBRC = 75
        Bayer format - one channel in one surface with interleaved CBRC ordering.
    CU_EGL_COLOR_FORMAT_BAYER10_CCCC = 76
        Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_BCCR = 77
        Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_RCCB = 78
        Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_CRBC = 79
        Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_CBRC = 80
        Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_BAYER12_CCCC = 81
        Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op.
    CU_EGL_COLOR_FORMAT_Y = 82
        Color format for single Y plane.
    CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = 83
        Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = 84
        Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = 85
        Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = 86
        Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = 87
        Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = 88
        Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = 89
        Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = 90
        Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = 91
        Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = 92
        Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = 93
        Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = 94
        Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = 95
        Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y_ER = 96
        Extended Range Color format for single Y plane.
    CU_EGL_COLOR_FORMAT_Y_709_ER = 97
        Extended Range Color format for single Y plane.
    CU_EGL_COLOR_FORMAT_Y10_ER = 98
        Extended Range Color format for single Y10 plane.
    CU_EGL_COLOR_FORMAT_Y10_709_ER = 99
        Extended Range Color format for single Y10 plane.
    CU_EGL_COLOR_FORMAT_Y12_ER = 100
        Extended Range Color format for single Y12 plane.
    CU_EGL_COLOR_FORMAT_Y12_709_ER = 101
        Extended Range Color format for single Y12 plane.
    CU_EGL_COLOR_FORMAT_YUVA = 102
        Y, U, V, A four channels in one surface, interleaved as AVUY.
    CU_EGL_COLOR_FORMAT_YUV = 103
        Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.
    CU_EGL_COLOR_FORMAT_YVYU = 104
        Y, U, V in one surface, interleaved as YVYU in one channel.
    CU_EGL_COLOR_FORMAT_VYUY = 105
        Y, U, V in one surface, interleaved as VYUY in one channel.
    CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = 106
        Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = 107
        Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = 108
        Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = 109
        Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = 110
        Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = 111
        Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = 112
        Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = 113
        Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.
    CU_EGL_COLOR_FORMAT_MAX = 114
    -
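In the Python binding these constants are members of an IntEnum, so they compare equal to the integer values listed above and can be passed wherever the C API expects a CUeglColorFormat. A minimal sketch, assuming the `from cuda import cuda` layout that this patch keeps as a backwards-compatible alias:

    from cuda import cuda

    # Enum members carry the integer values listed above.
    fmt = cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA
    assert int(fmt) == 102
    print(fmt.name)   # CU_EGL_COLOR_FORMAT_YUVA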
class cuda.cuda.CUdeviceptr_v2
    CUDA device pointer. CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform.

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUdeviceptr
    CUDA device pointer. CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform.

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUdevice_v1
    CUDA device.

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUdevice
    CUDA device.

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUcontext(*args, **kwargs)
    A regular context handle.

    getPtr()
        Get the memory address of the class instance.
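A minimal lifecycle sketch tying these handle types together (assuming a CUDA-capable device; the bindings return error codes rather than raising, and checking is omitted for brevity):

    from cuda import cuda

    (err,) = cuda.cuInit(0)                 # initialize the driver API
    err, dev = cuda.cuDeviceGet(0)          # dev is a CUdevice
    err, ctx = cuda.cuCtxCreate(0, dev)     # ctx is a CUcontext
    err, dptr = cuda.cuMemAlloc(1 << 20)    # dptr is a CUdeviceptr
    print(hex(int(dptr)))                   # handle types convert to int
    print(hex(dptr.getPtr()))               # address of the wrapper's own storage
    (err,) = cuda.cuMemFree(dptr)
    (err,) = cuda.cuCtxDestroy(ctx)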
Each of the opaque handle classes below exposes a single method, getPtr(), which returns the memory address of the class instance.

class cuda.cuda.CUmodule(*args, **kwargs)
    CUDA module.

class cuda.cuda.CUfunction(*args, **kwargs)
    CUDA function.

class cuda.cuda.CUlibrary(*args, **kwargs)
    CUDA library.

class cuda.cuda.CUkernel(*args, **kwargs)
    CUDA kernel.

class cuda.cuda.CUarray(*args, **kwargs)
    CUDA array.

class cuda.cuda.CUmipmappedArray(*args, **kwargs)
    CUDA mipmapped array.

class cuda.cuda.CUtexref(*args, **kwargs)
    CUDA texture reference.

class cuda.cuda.CUsurfref(*args, **kwargs)
    CUDA surface reference.

class cuda.cuda.CUevent(*args, **kwargs)
    CUDA event.

class cuda.cuda.CUstream(*args, **kwargs)
    CUDA stream.

class cuda.cuda.CUgraphicsResource(*args, **kwargs)
    CUDA graphics interop resource.

class cuda.cuda.CUtexObject_v1
    An opaque value that represents a CUDA texture object.

class cuda.cuda.CUtexObject
    An opaque value that represents a CUDA texture object.

class cuda.cuda.CUsurfObject_v1
    An opaque value that represents a CUDA surface object.

class cuda.cuda.CUsurfObject
    An opaque value that represents a CUDA surface object.

class cuda.cuda.CUexternalMemory(*args, **kwargs)
    CUDA external memory.

class cuda.cuda.CUexternalSemaphore(*args, **kwargs)
    CUDA external semaphore.

class cuda.cuda.CUgraph(*args, **kwargs)
    CUDA graph.

class cuda.cuda.CUgraphNode(*args, **kwargs)
    CUDA graph node.

class cuda.cuda.CUgraphExec(*args, **kwargs)
    CUDA executable graph.

class cuda.cuda.CUmemoryPool(*args, **kwargs)
    CUDA memory pool.

class cuda.cuda.CUuserObject(*args, **kwargs)
    CUDA user object for graphs.

class cuda.cuda.CUgraphConditionalHandle

class cuda.cuda.CUgraphDeviceNode(*args, **kwargs)
    CUDA graph device node handle.

class cuda.cuda.CUasyncCallbackHandle(*args, **kwargs)
    CUDA async notification callback handle.

class cuda.cuda.CUgreenCtx(*args, **kwargs)
    A green context handle. This handle can be used safely from only one CPU thread at a time. Created via cuGreenCtxCreate.
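Most of these handles come back from driver calls rather than being constructed directly. A sketch of obtaining a CUmodule and a CUfunction from NVRTC-compiled PTX, following the bindings' usual pattern (the kernel name `noop` is illustrative, and the context from the earlier sketch is assumed current):

    import numpy as np
    from cuda import cuda, nvrtc

    src = b'extern "C" __global__ void noop() {}'
    err, prog = nvrtc.nvrtcCreateProgram(src, b"noop.cu", 0, [], [])
    (err,) = nvrtc.nvrtcCompileProgram(prog, 0, [])
    err, ptx_size = nvrtc.nvrtcGetPTXSize(prog)
    ptx = b" " * ptx_size
    (err,) = nvrtc.nvrtcGetPTX(prog, ptx)

    ptx = np.char.array(ptx)
    err, module = cuda.cuModuleLoadData(ptx.ctypes.data)    # CUmodule
    err, func = cuda.cuModuleGetFunction(module, b"noop")   # CUfunction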
class cuda.cuda.CUuuid
    bytes
        CUDA definition of UUID.
        Type: bytes

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUmemFabricHandle_v1
    Fabric handle: an opaque handle representing a memory allocation that can be exported to processes on the same or different nodes. For IPC between processes on different nodes, the nodes must be connected via the NVSwitch fabric.

    data
        Type: bytes

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUmemFabricHandle
    Fabric handle: an opaque handle representing a memory allocation that can be exported to processes on the same or different nodes. Fields and getPtr() as for CUmemFabricHandle_v1 above.

class cuda.cuda.CUipcEventHandle_v1
    CUDA IPC event handle.

    reserved
        Type: bytes

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUipcEventHandle
    CUDA IPC event handle. Fields and getPtr() as for CUipcEventHandle_v1 above.

class cuda.cuda.CUipcMemHandle_v1
    CUDA IPC mem handle.

    reserved
        Type: bytes

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUipcMemHandle
    CUDA IPC mem handle. Fields and getPtr() as for CUipcMemHandle_v1 above.
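A hedged sketch of how the IPC mem handle is used: its opaque reserved bytes are the payload one process serializes and another opens (assuming `dptr` is a device allocation from the earlier sketch and both processes see the same device):

    from cuda import cuda

    err, handle = cuda.cuIpcGetMemHandle(dptr)   # handle is a CUipcMemHandle
    payload = bytes(handle.reserved)             # opaque bytes; send over a pipe/socket

    # In the receiving process (sketch):
    # err, peer_ptr = cuda.cuIpcOpenMemHandle(
    #     handle, cuda.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS)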
class cuda.cuda.CUstreamBatchMemOpParams_v1
    Per-operation parameters for cuStreamBatchMemOp.

    operation
        Type: CUstreamBatchMemOpType

    waitValue
        Type: CUstreamMemOpWaitValueParams_st

    writeValue
        Type: CUstreamMemOpWriteValueParams_st

    flushRemoteWrites
        Type: CUstreamMemOpFlushRemoteWritesParams_st

    memoryBarrier
        Type: CUstreamMemOpMemoryBarrierParams_st

    pad
        Type: List[cuuint64_t]

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUstreamBatchMemOpParams
    Per-operation parameters for cuStreamBatchMemOp. Fields and getPtr() as for CUstreamBatchMemOpParams_v1 above.

class cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1
    ctx
        Type: CUcontext

    count
        Type: unsigned int

    paramArray
        Type: CUstreamBatchMemOpParams

    flags
        Type: unsigned int

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS
    Fields and getPtr() as for CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 above.

class cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2
    Batch memory operation node parameters.

    ctx
        Context to use for the operations.
        Type: CUcontext

    count
        Number of operations in paramArray.
        Type: unsigned int

    paramArray
        Array of batch memory operations.
        Type: CUstreamBatchMemOpParams

    flags
        Flags to control the node.
        Type: unsigned int

    getPtr()
        Get the memory address of the class instance.
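A hedged sketch of one batched operation: a 32-bit wait on a device address, issued through cuStreamBatchMemOp. `stream` and the device address `sem_ptr` are assumed to exist; field names follow the union members listed above:

    from cuda import cuda

    op = cuda.CUstreamBatchMemOpParams()
    op.waitValue.operation = cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32
    op.waitValue.address = sem_ptr   # device address holding the 32-bit value
    op.waitValue.value = 1           # default flags: wait until *address >= 1
    (err,) = cuda.cuStreamBatchMemOp(stream, 1, [op], 0)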
class cuda.cuda.CUasyncNotificationInfo
    Information passed to the user via the async notification callback.

    type
        Type: CUasyncNotificationType

    info
        Type: anon_union2

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUasyncCallback(*args, **kwargs)
    getPtr()
        Get the memory address of the class instance.
class cuda.cuda.CUdevprop_v1
    Legacy device properties.

    maxThreadsPerBlock
        Maximum number of threads per block.
        Type: int

    maxThreadsDim
        Maximum size of each dimension of a block.
        Type: List[int]

    maxGridSize
        Maximum size of each dimension of a grid.
        Type: List[int]

    sharedMemPerBlock
        Shared memory available per block in bytes.
        Type: int

    totalConstantMemory
        Constant memory available on device in bytes.
        Type: int

    SIMDWidth
        Warp size in threads.
        Type: int

    memPitch
        Maximum pitch in bytes allowed by memory copies.
        Type: int

    regsPerBlock
        32-bit registers available per block.
        Type: int

    clockRate
        Clock frequency in kilohertz.
        Type: int

    textureAlign
        Alignment requirement for textures.
        Type: int

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUdevprop
    Legacy device properties. Fields and getPtr() as for CUdevprop_v1 above.
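CUdevprop is only produced by the legacy property query, which is deprecated in favor of per-attribute queries. A hedged sketch, assuming the driver is initialized and the deprecated entry point is available:

    from cuda import cuda

    err, dev = cuda.cuDeviceGet(0)
    err, prop = cuda.cuDeviceGetProperties(dev)   # prop is a CUdevprop (legacy)
    print(prop.maxThreadsPerBlock, prop.SIMDWidth, prop.clockRate)

    # Modern code queries attributes individually instead:
    err, warp = cuda.cuDeviceGetAttribute(
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_WARP_SIZE, dev)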
class cuda.cuda.CUlinkState(*args, **kwargs)
    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUhostFn(*args, **kwargs)
    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUaccessPolicyWindow_v1
    Specifies an access policy for a window: a contiguous extent of memory beginning at base_ptr and ending at base_ptr + num_bytes. num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. The window is partitioned into many segments, and segments are assigned such that the sum of “hit segments” / window == approx. ratio, and the sum of “miss segments” / window == approx. 1 - ratio. Segments and ratio specifications are fitted to the capabilities of the architecture. Accesses in a hit segment apply the hitProp access policy; accesses in a miss segment apply the missProp access policy.

    base_ptr
        Starting address of the access policy window. The CUDA driver may align it.
        Type: Any

    num_bytes
        Size in bytes of the window policy. The CUDA driver may restrict the maximum size and alignment.
        Type: size_t

    hitRatio
        Specifies the percentage of lines assigned hitProp; the rest are assigned missProp.
        Type: float

    hitProp
        CUaccessProperty set for hit.
        Type: CUaccessProperty

    missProp
        CUaccessProperty set for miss. Must be either NORMAL or STREAMING.
        Type: CUaccessProperty

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUaccessPolicyWindow
    Specifies an access policy for a window: a contiguous extent of memory beginning at base_ptr and ending at base_ptr + num_bytes. Fields and getPtr() as for CUaccessPolicyWindow_v1 above.
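The access policy window is typically attached through a stream or launch attribute. A hedged sketch of the stream-attribute path (CUstreamAttrValue is documented later in this section; `stream` and `dptr` are assumed from earlier sketches, and the attribute ID member name follows the launch-attributes enum that CUstreamAttrID aliases):

    from cuda import cuda

    val = cuda.CUstreamAttrValue()
    val.accessPolicyWindow.base_ptr = int(dptr)    # window start
    val.accessPolicyWindow.num_bytes = 1 << 20     # window size in bytes
    val.accessPolicyWindow.hitRatio = 0.6          # ~60% of lines get hitProp
    val.accessPolicyWindow.hitProp = cuda.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING
    val.accessPolicyWindow.missProp = cuda.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING
    (err,) = cuda.cuStreamSetAttribute(
        stream, cuda.CUstreamAttrID.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW, val)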
class cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v1
    GPU kernel node parameters.

    func
        Kernel to launch.
        Type: CUfunction

    gridDimX
        Width of grid in blocks.
        Type: unsigned int

    gridDimY
        Height of grid in blocks.
        Type: unsigned int

    gridDimZ
        Depth of grid in blocks.
        Type: unsigned int

    blockDimX
        X dimension of each thread block.
        Type: unsigned int

    blockDimY
        Y dimension of each thread block.
        Type: unsigned int

    blockDimZ
        Z dimension of each thread block.
        Type: unsigned int

    sharedMemBytes
        Dynamic shared-memory size per thread block in bytes.
        Type: unsigned int

    kernelParams
        Array of pointers to kernel parameters.
        Type: Any

    extra
        Extra options.
        Type: Any

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2
    GPU kernel node parameters. Fields func through extra as for CUDA_KERNEL_NODE_PARAMS_v1 above, plus:

    kern
        Kernel to launch; only referenced if func is NULL.
        Type: CUkernel

    ctx
        Context for the kernel task to run in. A value of NULL indicates the current context should be used by the API. This field is ignored if func is set.
        Type: CUcontext

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUDA_KERNEL_NODE_PARAMS
    GPU kernel node parameters. Fields and getPtr() as for CUDA_KERNEL_NODE_PARAMS_v2 above.

class cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3
    GPU kernel node parameters. Fields and getPtr() as for CUDA_KERNEL_NODE_PARAMS_v2 above.
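A hedged sketch of adding a kernel node to a graph with these parameters (assuming `func` from the module-loading sketch earlier; kernelParams and extra are left at their zero defaults since the kernel takes no arguments):

    from cuda import cuda

    err, graph = cuda.cuGraphCreate(0)

    params = cuda.CUDA_KERNEL_NODE_PARAMS()
    params.func = func
    params.gridDimX = params.gridDimY = params.gridDimZ = 1
    params.blockDimX = 32
    params.blockDimY = params.blockDimZ = 1
    params.sharedMemBytes = 0

    err, node = cuda.cuGraphAddKernelNode(graph, None, 0, params)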
class cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v1
    Memset node parameters.

    dst
        Destination device pointer.
        Type: CUdeviceptr

    pitch
        Pitch of destination device pointer. Unused if height is 1.
        Type: size_t

    value
        Value to be set.
        Type: unsigned int

    elementSize
        Size of each element in bytes. Must be 1, 2, or 4.
        Type: unsigned int

    width
        Width of the row in elements.
        Type: size_t

    height
        Number of rows.
        Type: size_t

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUDA_MEMSET_NODE_PARAMS
    Memset node parameters. Fields and getPtr() as for CUDA_MEMSET_NODE_PARAMS_v1 above.

class cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2
    Memset node parameters. Fields dst through height as for CUDA_MEMSET_NODE_PARAMS_v1 above, plus:

    ctx
        Context on which to run the node.
        Type: CUcontext

    getPtr()
        Get the memory address of the class instance.
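A hedged sketch that zeroes a 1 MiB buffer through a graph memset node (assuming `graph`, `ctx`, and `dptr` from earlier sketches):

    from cuda import cuda

    memset = cuda.CUDA_MEMSET_NODE_PARAMS()
    memset.dst = dptr
    memset.pitch = 0               # unused when height == 1
    memset.value = 0
    memset.elementSize = 4         # must be 1, 2, or 4
    memset.width = (1 << 20) // 4  # width is in elements, not bytes
    memset.height = 1
    err, memset_node = cuda.cuGraphAddMemsetNode(graph, None, 0, memset, ctx)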
class cuda.cuda.CUDA_HOST_NODE_PARAMS_v1
    Host node parameters.

    fn
        The function to call when the node executes.
        Type: CUhostFn

    userData
        Argument to pass to the function.
        Type: Any

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUDA_HOST_NODE_PARAMS
    Host node parameters. Fields and getPtr() as for CUDA_HOST_NODE_PARAMS_v1 above.

class cuda.cuda.CUDA_HOST_NODE_PARAMS_v2
    Host node parameters. Fields and getPtr() as for CUDA_HOST_NODE_PARAMS_v1 above.
class cuda.cuda.CUDA_CONDITIONAL_NODE_PARAMS(void_ptr _ptr=0)
    Conditional node parameters.

    handle
        Conditional node handle. Handles must be created in advance of creating the node using cuGraphConditionalHandleCreate.
        Type: CUgraphConditionalHandle

    type
        Type of conditional node.
        Type: CUgraphConditionalNodeType

    size
        Size of graph output array. Must be 1.
        Type: unsigned int

    phGraph_out
        CUDA-owned array populated with conditional node child graphs during creation of the node. Valid for the lifetime of the conditional node. The contents of the graph(s) are subject to the following constraints: allowed node types are kernel nodes, empty nodes, child graphs, memsets, memcopies, and conditionals (this applies recursively to child graphs and conditional bodies); all kernels, including kernels in nested conditionals or child graphs at any level, must belong to the same CUDA context. These graphs may be populated using graph node creation APIs or cuStreamBeginCaptureToGraph.
        Type: CUgraph

    ctx
        Context on which to run the node. Must match the context used to create the handle and all body nodes.
        Type: CUcontext

    getPtr()
        Get the memory address of the class instance.

class cuda.cuda.CUgraphEdgeData
    Optional annotation for edges in a CUDA graph. Note that all edges implicitly have annotations and default to a zero-initialized value if not specified. A zero-initialized struct indicates a standard full serialization of two nodes with memory visibility.

    from_port
        Indicates when the dependency is triggered from the upstream node on the edge. The meaning is specific to the node type. A value of 0 in all cases means full completion of the upstream node, with memory visibility to the downstream node or portion thereof (indicated by to_port). Only kernel nodes define non-zero ports. A kernel node can use the following output port types: CU_GRAPH_KERNEL_NODE_PORT_DEFAULT, CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC, or CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER.
        Type: bytes

    to_port
        Indicates what portion of the downstream node is dependent on the upstream node or portion thereof (indicated by from_port). The meaning is specific to the node type. A value of 0 in all cases means the entirety of the downstream node is dependent on the upstream work. Currently no node types define non-zero ports; accordingly, this field must be set to zero.
        Type: bytes

    type
        Should be populated with a value from CUgraphDependencyType. (It is typed as char due to compiler-specific layout of bitfields.) See CUgraphDependencyType.
        Type: bytes

    reserved
        These bytes are unused and must be zeroed. This ensures compatibility if additional fields are added in the future.
        Type: bytes

    getPtr()
        Get the memory address of the class instance.
class cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS
    Graph instantiation parameters.

    flags
        Instantiation flags.
        Type: cuuint64_t

    hUploadStream
        Upload stream.
        Type: CUstream

    hErrNode_out
        The node which caused instantiation to fail, if any.
        Type: CUgraphNode

    result_out
        Whether instantiation was successful; if it failed, the reason why.
        Type: CUgraphInstantiateResult

    getPtr()
        Get the memory address of the class instance.
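A hedged sketch of instantiation with diagnostics: the output fields report which node broke instantiation, if any (assuming `graph` from the earlier sketches):

    from cuda import cuda

    ip = cuda.CUDA_GRAPH_INSTANTIATE_PARAMS()
    ip.flags = cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH
    err, graph_exec = cuda.cuGraphInstantiateWithParams(graph, ip)
    if ip.result_out != cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_SUCCESS:
        print("instantiation failed at node", ip.hErrNode_out)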
class cuda.cuda.CUlaunchMemSyncDomainMap
    Memory synchronization domain map. See ::cudaLaunchMemSyncDomain. By default, kernels are launched in domain 0. A kernel launched with CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a different domain ID. The user may also alter the domain ID with CUlaunchMemSyncDomainMap for a specific stream, graph node, or kernel launch. See CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. The domain ID range is available through CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.

    default_
        The default domain ID to use for designated kernels.
        Type: bytes

    remote
        The remote domain ID to use for designated kernels.
        Type: bytes

    getPtr()
        Get the memory address of the class instance.
class cuda.cuda.CUlaunchAttributeValue
    Launch attributes union; used as the value field of CUlaunchAttribute.

    pad
        Type: bytes

    accessPolicyWindow
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.
        Type: CUaccessPolicyWindow

    cooperative
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero indicates a cooperative kernel (see cuLaunchCooperativeKernel).
        Type: int

    syncPolicy
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY: the CUsynchronizationPolicy for work queued up in this stream.
        Type: CUsynchronizationPolicy

    clusterDim
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION, representing the desired cluster dimensions for the kernel. Opaque type with the following fields:
        - x: The X dimension of the cluster, in blocks. Must be a divisor of the grid X dimension.
        - y: The Y dimension of the cluster, in blocks. Must be a divisor of the grid Y dimension.
        - z: The Z dimension of the cluster, in blocks. Must be a divisor of the grid Z dimension.
        Type: anon_struct1

    clusterSchedulingPolicyPreference
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: cluster scheduling policy preference for the kernel.
        Type: CUclusterSchedulingPolicy

    programmaticStreamSerializationAllowed
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.
        Type: int

    programmaticEvent
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT, with the following fields:
        - CUevent event: Event to fire when all blocks trigger it.
        - int flags: Event record flags, see cuEventRecordWithFlags. Does not accept CU_EVENT_RECORD_EXTERNAL.
        - int triggerAtBlockStart: If set to non-zero, each block launch will automatically trigger the event.
        Type: anon_struct2

    launchCompletionEvent
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT, with the following fields:
        - CUevent event: Event to fire when the last block launches.
        - int flags: Event record flags, see cuEventRecordWithFlags. Does not accept CU_EVENT_RECORD_EXTERNAL.
        Type: anon_struct3

    priority
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY: execution priority of the kernel.
        Type: int

    memSyncDomainMap
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. See CUlaunchMemSyncDomainMap.
        Type: CUlaunchMemSyncDomainMap

    memSyncDomain
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. See ::CUlaunchMemSyncDomain.
        Type: CUlaunchMemSyncDomain

    deviceUpdatableKernelNode
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE, with the following fields:
        - int deviceUpdatable: Whether or not the resulting kernel node should be device-updatable.
        - CUgraphDeviceNode devNode: Returns a handle to pass to the various device-side update functions.
        Type: anon_struct4

    sharedMemCarveout
        Value of launch attribute CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.
        Type: unsigned int

    getPtr()
        Get the memory address of the class instance.
class cuda.cuda.CUlaunchAttribute
    Launch attribute.

    id
        Attribute to set.
        Type: CUlaunchAttributeID

    value
        Value of the attribute.
        Type: CUlaunchAttributeValue

    getPtr()
        Get the memory address of the class instance.
class cuda.cuda.CUlaunchConfig
    CUDA extensible launch configuration.

    gridDimX
        Width of grid in blocks.
        Type: unsigned int

    gridDimY
        Height of grid in blocks.
        Type: unsigned int

    gridDimZ
        Depth of grid in blocks.
        Type: unsigned int

    blockDimX
        X dimension of each thread block.
        Type: unsigned int

    blockDimY
        Y dimension of each thread block.
        Type: unsigned int

    blockDimZ
        Z dimension of each thread block.
        Type: unsigned int

    sharedMemBytes
        Dynamic shared-memory size per thread block in bytes.
        Type: unsigned int

    hStream
        Stream identifier.
        Type: CUstream

    attrs
        List of attributes; nullable if CUlaunchConfig::numAttrs == 0.
        Type: CUlaunchAttribute

    numAttrs
        Number of attributes populated in CUlaunchConfig::attrs.
        Type: unsigned int

    getPtr()
        Get the memory address of the class instance.
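A hedged sketch of an extensible launch carrying one attribute (assuming `func` and `stream` from earlier sketches; the kernel takes no arguments, so kernelParams and extra are passed as 0):

    from cuda import cuda

    attr = cuda.CUlaunchAttribute()
    attr.id = cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY
    attr.value.priority = 0

    cfg = cuda.CUlaunchConfig()
    cfg.gridDimX = cfg.gridDimY = cfg.gridDimZ = 1
    cfg.blockDimX = 32
    cfg.blockDimY = cfg.blockDimZ = 1
    cfg.sharedMemBytes = 0
    cfg.hStream = stream
    cfg.attrs = [attr]
    cfg.numAttrs = 1

    (err,) = cuda.cuLaunchKernelEx(cfg, func, 0, 0)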
    -class cuda.cuda.CUkernelNodeAttrID(value)#
    -

    Launch attributes enum; used as id field of -CUlaunchAttribute

    -
    - -
    -
    -class cuda.cuda.CUkernelNodeAttrValue_v1#
    -

    Launch attributes union; used as value field of CUlaunchAttribute

    -
    -
    -pad#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -accessPolicyWindow#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.

    -
    -
    Type:
    -

    CUaccessPolicyWindow

    -
    -
    -
    - -
    -
    -cooperative#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero -indicates a cooperative kernel (see cuLaunchCooperativeKernel).

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -syncPolicy#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. -::CUsynchronizationPolicy for work queued up in this stream

    -
    -
    Type:
    -

    CUsynchronizationPolicy

    -
    -
    -
    - -
    -
    -clusterDim#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION -that represents the desired cluster dimensions for the kernel. -Opaque type with the following fields: - x - The X dimension of -the cluster, in blocks. Must be a divisor of the grid X dimension. -- y - The Y dimension of the cluster, in blocks. Must be a -divisor of the grid Y dimension. - z - The Z dimension of the -cluster, in blocks. Must be a divisor of the grid Z dimension.

    -
    -
    Type:
    -

    anon_struct1

    -
    -
    -
    - -
    -
    -clusterSchedulingPolicyPreference#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster -scheduling policy preference for the kernel.

    -
    -
    Type:
    -

    CUclusterSchedulingPolicy

    -
    -
    -
    - -
    -
    -programmaticStreamSerializationAllowed#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -programmaticEvent#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT -with the following fields: - CUevent event - Event to fire when -all blocks trigger it. - Event record flags, see -cuEventRecordWithFlags. Does not accept :CU_EVENT_RECORD_EXTERNAL. -- triggerAtBlockStart - If this is set to non-0, each block -launch will automatically trigger the event.

    -
    -
    Type:
    -

    anon_struct2

    -
    -
    -
    - -
    -
    -launchCompletionEvent#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT with the following -fields: - CUevent event - Event to fire when the last block -launches - int flags; - Event record flags, see -cuEventRecordWithFlags. Does not accept CU_EVENT_RECORD_EXTERNAL.

    -
    -
    Type:
    -

    anon_struct3

    -
    -
    -
    - -
    -
    -priority#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution -priority of the kernel.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -memSyncDomainMap#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. -See CUlaunchMemSyncDomainMap.

    -
    -
    Type:
    -

    CUlaunchMemSyncDomainMap

    -
    -
    -
    - -
    -
    -memSyncDomain#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. -See::CUlaunchMemSyncDomain

    -
    -
    Type:
    -

    CUlaunchMemSyncDomain

    -
    -
    -
    - -
    -
    -deviceUpdatableKernelNode#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE. with the -following fields: - int deviceUpdatable - Whether or not the -resulting kernel node should be device-updatable. - -CUgraphDeviceNode devNode - Returns a handle to pass to the -various device-side update functions.

    -
    -
    Type:
    -

    anon_struct4

    -
    -
    -
    - -
    -
    -sharedMemCarveout#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUkernelNodeAttrValue#
    -

    Launch attributes union; used as value field of CUlaunchAttribute

    -
    -
    -pad#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -accessPolicyWindow#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.

    -
    -
    Type:
    -

    CUaccessPolicyWindow

    -
    -
    -
    - -
    -
    -cooperative#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero -indicates a cooperative kernel (see cuLaunchCooperativeKernel).

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -syncPolicy#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. -::CUsynchronizationPolicy for work queued up in this stream

    -
    -
    Type:
    -

    CUsynchronizationPolicy

    -
    -
    -
    - -
    -
    -clusterDim#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION -that represents the desired cluster dimensions for the kernel. -Opaque type with the following fields: - x - The X dimension of -the cluster, in blocks. Must be a divisor of the grid X dimension. -- y - The Y dimension of the cluster, in blocks. Must be a -divisor of the grid Y dimension. - z - The Z dimension of the -cluster, in blocks. Must be a divisor of the grid Z dimension.

    -
    -
    Type:
    -

    anon_struct1

    -
    -
    -
    - -
    -
    -clusterSchedulingPolicyPreference#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster -scheduling policy preference for the kernel.

    -
    -
    Type:
    -

    CUclusterSchedulingPolicy

    -
    -
    -
    - -
    -
    -programmaticStreamSerializationAllowed#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -programmaticEvent#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT -with the following fields: - CUevent event - Event to fire when -all blocks trigger it. - Event record flags, see -cuEventRecordWithFlags. Does not accept :CU_EVENT_RECORD_EXTERNAL. -- triggerAtBlockStart - If this is set to non-0, each block -launch will automatically trigger the event.

    -
    -
    Type:
    -

    anon_struct2

    -
    -
    -
    - -
    -
    -launchCompletionEvent#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT with the following -fields: - CUevent event - Event to fire when the last block -launches - int flags; - Event record flags, see -cuEventRecordWithFlags. Does not accept CU_EVENT_RECORD_EXTERNAL.

    -
    -
    Type:
    -

    anon_struct3

    -
    -
    -
    - -
    -
    -priority#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution -priority of the kernel.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -memSyncDomainMap#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. -See CUlaunchMemSyncDomainMap.

    -
    -
    Type:
    -

    CUlaunchMemSyncDomainMap

    -
    -
    -
    - -
    -
    -memSyncDomain#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. -See::CUlaunchMemSyncDomain

    -
    -
    Type:
    -

    CUlaunchMemSyncDomain

    -
    -
    -
    - -
    -
    -deviceUpdatableKernelNode#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE. with the -following fields: - int deviceUpdatable - Whether or not the -resulting kernel node should be device-updatable. - -CUgraphDeviceNode devNode - Returns a handle to pass to the -various device-side update functions.

    -
    -
    Type:
    -

    anon_struct4

    -
    -
    -
    - -
    -
    -sharedMemCarveout#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cuda.CUstreamAttrID(value)#
    -

    Launch attributes enum; used as id field of -CUlaunchAttribute

    -
    - -
    -
    -class cuda.cuda.CUstreamAttrValue_v1#
    -

    Launch attributes union; used as value field of CUlaunchAttribute

    -
    -
    -pad#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -accessPolicyWindow#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.

    -
    -
    Type:
    -

    CUaccessPolicyWindow

    -
    -
    -
    - -
    -
    -cooperative#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero -indicates a cooperative kernel (see cuLaunchCooperativeKernel).

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -syncPolicy#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. -::CUsynchronizationPolicy for work queued up in this stream

    -
    -
    Type:
    -

    CUsynchronizationPolicy

    -
    -
    -
    - -
    -
    -clusterDim#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION -that represents the desired cluster dimensions for the kernel. -Opaque type with the following fields: - x - The X dimension of -the cluster, in blocks. Must be a divisor of the grid X dimension. -- y - The Y dimension of the cluster, in blocks. Must be a -divisor of the grid Y dimension. - z - The Z dimension of the -cluster, in blocks. Must be a divisor of the grid Z dimension.

    -
    -
    Type:
    -

    anon_struct1

    -
    -
    -
    - -
    -
    -clusterSchedulingPolicyPreference#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster -scheduling policy preference for the kernel.

    -
    -
    Type:
    -

    CUclusterSchedulingPolicy

    -
    -
    -
    - -
    -
    -programmaticStreamSerializationAllowed#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -programmaticEvent#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT -with the following fields: - CUevent event - Event to fire when -all blocks trigger it. - Event record flags, see -cuEventRecordWithFlags. Does not accept :CU_EVENT_RECORD_EXTERNAL. -- triggerAtBlockStart - If this is set to non-0, each block -launch will automatically trigger the event.

    -
    -
    Type:
    -

    anon_struct2

    -
    -
    -
    - -
    -
    -launchCompletionEvent#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT with the following -fields: - CUevent event - Event to fire when the last block -launches - int flags; - Event record flags, see -cuEventRecordWithFlags. Does not accept CU_EVENT_RECORD_EXTERNAL.

    -
    -
    Type:
    -

    anon_struct3

    -
    -
    -
    - -
    -
    -priority#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution -priority of the kernel.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -memSyncDomainMap#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. -See CUlaunchMemSyncDomainMap.

    -
    -
    Type:
    -

    CUlaunchMemSyncDomainMap

    -
    -
    -
    - -
    -
    -memSyncDomain#
    -

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. -See::CUlaunchMemSyncDomain

    -
    -
    Type:
    -

    CUlaunchMemSyncDomain

    -
    -
    -
    - -
    -
    -deviceUpdatableKernelNode#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE. with the -following fields: - int deviceUpdatable - Whether or not the -resulting kernel node should be device-updatable. - -CUgraphDeviceNode devNode - Returns a handle to pass to the -various device-side update functions.

    -
    -
    Type:
    -

    anon_struct4

    -
    -
    -
    - -
    -
    -sharedMemCarveout#
    -

    Value of launch attribute -CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
class cuda.cuda.CUstreamAttrValue

    Launch attributes union; used as the value field of CUlaunchAttribute.

    Attributes:
        pad (bytes)
        accessPolicyWindow (CUaccessPolicyWindow) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.
        cooperative (int) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero indicates a cooperative kernel (see cuLaunchCooperativeKernel).
        syncPolicy (CUsynchronizationPolicy) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. CUsynchronizationPolicy for work queued up in this stream.
        clusterDim (anon_struct1) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION that represents the desired cluster dimensions for the kernel. Opaque type with fields x, y, and z: the X, Y, and Z dimensions of the cluster, in blocks; each must be a divisor of the corresponding grid dimension.
        clusterSchedulingPolicyPreference (CUclusterSchedulingPolicy) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster scheduling policy preference for the kernel.
        programmaticStreamSerializationAllowed (int) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.
        programmaticEvent (anon_struct2) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT, with fields: CUevent event - Event to fire when all blocks trigger it; int flags - Event record flags, see cuEventRecordWithFlags (does not accept CU_EVENT_RECORD_EXTERNAL); int triggerAtBlockStart - If set to non-0, each block launch will automatically trigger the event.
        launchCompletionEvent (anon_struct3) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT, with fields: CUevent event - Event to fire when the last block launches; int flags - Event record flags, see cuEventRecordWithFlags (does not accept CU_EVENT_RECORD_EXTERNAL).
        priority (int) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution priority of the kernel.
        memSyncDomainMap (CUlaunchMemSyncDomainMap) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. See CUlaunchMemSyncDomainMap.
        memSyncDomain (CUlaunchMemSyncDomain) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. See CUlaunchMemSyncDomain.
        deviceUpdatableKernelNode (anon_struct4) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE, with fields: int deviceUpdatable - Whether or not the resulting kernel node should be device-updatable; CUgraphDeviceNode devNode - Returns a handle to pass to the various device-side update functions.
        sharedMemCarveout (unsigned int) - Value of launch attribute CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.

    getPtr()
        Get memory address of class instance

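A CUstreamAttrValue is normally filled in and handed to cuStreamSetAttribute. A minimal sketch, assuming an existing `stream` and a device allocation `dptr` of at least 1 MiB, and assuming that nested struct fields are returned as mutable views, as elsewhere in these bindings:

    from cuda import cuda

    value = cuda.CUstreamAttrValue()
    win = value.accessPolicyWindow                 # CUaccessPolicyWindow member
    win.base_ptr = int(dptr)                       # start of the persisting window
    win.num_bytes = 1 << 20                        # window size in bytes
    win.hitRatio = 0.6                             # fraction of accesses treated as hits
    win.hitProp = cuda.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING
    win.missProp = cuda.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING

    err, = cuda.cuStreamSetAttribute(
        stream,
        cuda.CUstreamAttrID.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW,
        value)
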
class cuda.cuda.CUexecAffinitySmCount_v1

    Value for CU_EXEC_AFFINITY_TYPE_SM_COUNT

    Attributes:
        val (unsigned int) - The number of SMs the context is limited to use.

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUexecAffinitySmCount

    Value for CU_EXEC_AFFINITY_TYPE_SM_COUNT. Attributes and getPtr() are identical to CUexecAffinitySmCount_v1 above.

class cuda.cuda.CUexecAffinityParam_v1

    Execution Affinity Parameters

    Attributes:
        type (CUexecAffinityType)
        param (anon_union3)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUexecAffinityParam

    Execution Affinity Parameters. Attributes and getPtr() are identical to CUexecAffinityParam_v1 above.

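CUexecAffinityParam feeds cuCtxCreate_v3, which is how a context gets pinned to a reduced SM count (meaningful under MPS on Volta and newer). A sketch under those assumptions; `dev` is a CUdevice obtained from cuDeviceGet:

    from cuda import cuda

    param = cuda.CUexecAffinityParam()
    param.type = cuda.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT
    param.param.smCount.val = 8        # limit the context to 8 SMs

    # cuCtxCreate_v3 takes the affinity array, its length, flags, and device.
    err, ctx = cuda.cuCtxCreate_v3([param], 1, 0, dev)
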
class cuda.cuda.CUctxCigParam

    CIG Context Create Params

    Attributes:
        sharedDataType (CUcigDataType)
        sharedData (Any)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUctxCreateParams

    Params for creating a CUDA context. Exactly one of execAffinityParams and cigParams must be non-NULL.

    Attributes:
        execAffinityParams (CUexecAffinityParam)
        numExecAffinityParams (int)
        cigParams (CUctxCigParam)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable

    Attributes:
        functionTable (Any)
        functionWindowSize (size_t)
        dataTable (Any)
        dataWindowSize (size_t)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUstreamCallback(*args, **kwargs)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUoccupancyB2DSize(*args, **kwargs)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_MEMCPY2D_v2

    2D memory copy parameters

    Attributes:
        srcXInBytes (size_t) - Source X in bytes
        srcY (size_t) - Source Y
        srcMemoryType (CUmemorytype) - Source memory type (host, device, array)
        srcHost (Any) - Source host pointer
        srcDevice (CUdeviceptr) - Source device pointer
        srcArray (CUarray) - Source array reference
        srcPitch (size_t) - Source pitch (ignored when src is array)
        dstXInBytes (size_t) - Destination X in bytes
        dstY (size_t) - Destination Y
        dstMemoryType (CUmemorytype) - Destination memory type (host, device, array)
        dstHost (Any) - Destination host pointer
        dstDevice (CUdeviceptr) - Destination device pointer
        dstArray (CUarray) - Destination array reference
        dstPitch (size_t) - Destination pitch (ignored when dst is array)
        WidthInBytes (size_t) - Width of 2D memory copy in bytes
        Height (size_t) - Height of 2D memory copy

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_MEMCPY2D

    2D memory copy parameters. Attributes and getPtr() are identical to CUDA_MEMCPY2D_v2 above.

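How these fields fit together in practice: a host-to-device copy of a tightly packed numpy image into a pitched device allocation. A sketch that assumes a current context and that the Any-typed host-pointer field accepts a raw integer address:

    import numpy as np
    from cuda import cuda

    width, height = 256, 64
    host = np.arange(width * height, dtype=np.float32).reshape(height, width)

    # Pitched device allocation; the driver may pad each row.
    err, dptr, pitch = cuda.cuMemAllocPitch(width * 4, height, 4)

    cpy = cuda.CUDA_MEMCPY2D()
    cpy.srcMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_HOST
    cpy.srcHost = host.ctypes.data        # raw host address
    cpy.srcPitch = width * 4              # host rows are tightly packed
    cpy.dstMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE
    cpy.dstDevice = dptr
    cpy.dstPitch = pitch                  # device rows use the returned pitch
    cpy.WidthInBytes = width * 4
    cpy.Height = height

    err, = cuda.cuMemcpy2D(cpy)
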
class cuda.cuda.CUDA_MEMCPY3D_v2

    3D memory copy parameters

    Attributes:
        srcXInBytes (size_t) - Source X in bytes
        srcY (size_t) - Source Y
        srcZ (size_t) - Source Z
        srcLOD (size_t) - Source LOD
        srcMemoryType (CUmemorytype) - Source memory type (host, device, array)
        srcHost (Any) - Source host pointer
        srcDevice (CUdeviceptr) - Source device pointer
        srcArray (CUarray) - Source array reference
        reserved0 (Any) - Must be NULL
        srcPitch (size_t) - Source pitch (ignored when src is array)
        srcHeight (size_t) - Source height (ignored when src is array; may be 0 if Depth==1)
        dstXInBytes (size_t) - Destination X in bytes
        dstY (size_t) - Destination Y
        dstZ (size_t) - Destination Z
        dstLOD (size_t) - Destination LOD
        dstMemoryType (CUmemorytype) - Destination memory type (host, device, array)
        dstHost (Any) - Destination host pointer
        dstDevice (CUdeviceptr) - Destination device pointer
        dstArray (CUarray) - Destination array reference
        reserved1 (Any) - Must be NULL
        dstPitch (size_t) - Destination pitch (ignored when dst is array)
        dstHeight (size_t) - Destination height (ignored when dst is array; may be 0 if Depth==1)
        WidthInBytes (size_t) - Width of 3D memory copy in bytes
        Height (size_t) - Height of 3D memory copy
        Depth (size_t) - Depth of 3D memory copy

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_MEMCPY3D

    3D memory copy parameters. Attributes and getPtr() are identical to CUDA_MEMCPY3D_v2 above.

class cuda.cuda.CUDA_MEMCPY3D_PEER_v1

    3D memory cross-context copy parameters

    Attributes:
        srcXInBytes (size_t) - Source X in bytes
        srcY (size_t) - Source Y
        srcZ (size_t) - Source Z
        srcLOD (size_t) - Source LOD
        srcMemoryType (CUmemorytype) - Source memory type (host, device, array)
        srcHost (Any) - Source host pointer
        srcDevice (CUdeviceptr) - Source device pointer
        srcArray (CUarray) - Source array reference
        srcContext (CUcontext) - Source context (ignored when srcMemoryType is CU_MEMORYTYPE_ARRAY)
        srcPitch (size_t) - Source pitch (ignored when src is array)
        srcHeight (size_t) - Source height (ignored when src is array; may be 0 if Depth==1)
        dstXInBytes (size_t) - Destination X in bytes
        dstY (size_t) - Destination Y
        dstZ (size_t) - Destination Z
        dstLOD (size_t) - Destination LOD
        dstMemoryType (CUmemorytype) - Destination memory type (host, device, array)
        dstHost (Any) - Destination host pointer
        dstDevice (CUdeviceptr) - Destination device pointer
        dstArray (CUarray) - Destination array reference
        dstContext (CUcontext) - Destination context (ignored when dstMemoryType is CU_MEMORYTYPE_ARRAY)
        dstPitch (size_t) - Destination pitch (ignored when dst is array)
        dstHeight (size_t) - Destination height (ignored when dst is array; may be 0 if Depth==1)
        WidthInBytes (size_t) - Width of 3D memory copy in bytes
        Height (size_t) - Height of 3D memory copy
        Depth (size_t) - Depth of 3D memory copy

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_MEMCPY3D_PEER

    3D memory cross-context copy parameters. Attributes and getPtr() are identical to CUDA_MEMCPY3D_PEER_v1 above.

class cuda.cuda.CUDA_MEMCPY_NODE_PARAMS

    Memcpy node parameters

    Attributes:
        flags (int) - Must be zero
        reserved (int) - Must be zero
        copyCtx (CUcontext) - Context on which to run the node
        copyParams (CUDA_MEMCPY3D) - Parameters for the memory copy

    getPtr()
        Get memory address of class instance

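CUDA_MEMCPY_NODE_PARAMS wraps a CUDA_MEMCPY3D for graph use; the classic entry point, cuGraphAddMemcpyNode, takes the 3D copy parameters and a context directly. A sketch assuming a populated CUDA_MEMCPY3D named `copy_params`, a context `ctx`, and a `stream`; exact binding signatures should be checked against the driver module:

    from cuda import cuda

    err, graph = cuda.cuGraphCreate(0)
    # Dependencies: none, so an empty list and a count of zero.
    err, node = cuda.cuGraphAddMemcpyNode(graph, [], 0, copy_params, ctx)
    err, graph_exec = cuda.cuGraphInstantiate(graph, 0)
    err, = cuda.cuGraphLaunch(graph_exec, stream)
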
class cuda.cuda.CUDA_ARRAY_DESCRIPTOR_v2

    Array descriptor

    Attributes:
        Width (size_t) - Width of array
        Height (size_t) - Height of array
        Format (CUarray_format) - Array format
        NumChannels (unsigned int) - Channels per array element

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_ARRAY_DESCRIPTOR

    Array descriptor. Attributes and getPtr() are identical to CUDA_ARRAY_DESCRIPTOR_v2 above.

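A CUDA_ARRAY_DESCRIPTOR is consumed by cuArrayCreate; the descriptor fully determines the array's shape and element layout. A minimal sketch assuming a current context:

    from cuda import cuda

    desc = cuda.CUDA_ARRAY_DESCRIPTOR()
    desc.Width = 256
    desc.Height = 256                      # 0 or 1 would mean a 1D array
    desc.Format = cuda.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1                   # 1, 2, or 4

    err, cu_array = cuda.cuArrayCreate(desc)
    # ... use the array, e.g. as a copy target or texture backing ...
    err, = cuda.cuArrayDestroy(cu_array)
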
class cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_v2

    3D array descriptor

    Attributes:
        Width (size_t) - Width of 3D array
        Height (size_t) - Height of 3D array
        Depth (size_t) - Depth of 3D array
        Format (CUarray_format) - Array format
        NumChannels (unsigned int) - Channels per array element
        Flags (unsigned int) - Flags

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR

    3D array descriptor. Attributes and getPtr() are identical to CUDA_ARRAY3D_DESCRIPTOR_v2 above.

class cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_v1

    CUDA array sparse properties

    Attributes:
        tileExtent (anon_struct5)
        miptailFirstLevel (unsigned int) - First mip level at which the mip tail begins.
        miptailSize (unsigned long long) - Total size of the mip tail.
        flags (unsigned int) - Flags will either be zero or CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES

    CUDA array sparse properties. Attributes and getPtr() are identical to CUDA_ARRAY_SPARSE_PROPERTIES_v1 above.

class cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1

    CUDA array memory requirements

    Attributes:
        size (size_t) - Total required memory size
        alignment (size_t) - Alignment requirement
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS

    CUDA array memory requirements. Attributes and getPtr() are identical to CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 above.

class cuda.cuda.CUDA_RESOURCE_DESC_v1

    CUDA Resource descriptor

    Attributes:
        resType (CUresourcetype) - Resource type
        res (anon_union4)
        flags (unsigned int) - Flags (must be zero)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_RESOURCE_DESC

    CUDA Resource descriptor. Attributes and getPtr() are identical to CUDA_RESOURCE_DESC_v1 above.

class cuda.cuda.CUDA_TEXTURE_DESC_v1

    Texture descriptor

    Attributes:
        addressMode (List[CUaddress_mode]) - Address modes
        filterMode (CUfilter_mode) - Filter mode
        flags (unsigned int) - Flags
        maxAnisotropy (unsigned int) - Maximum anisotropy ratio
        mipmapFilterMode (CUfilter_mode) - Mipmap filter mode
        mipmapLevelBias (float) - Mipmap level bias
        minMipmapLevelClamp (float) - Mipmap minimum level clamp
        maxMipmapLevelClamp (float) - Mipmap maximum level clamp
        borderColor (List[float]) - Border Color
        reserved (List[int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_TEXTURE_DESC

    Texture descriptor. Attributes and getPtr() are identical to CUDA_TEXTURE_DESC_v1 above.

class cuda.cuda.CUDA_RESOURCE_VIEW_DESC_v1

    Resource view descriptor

    Attributes:
        format (CUresourceViewFormat) - Resource view format
        width (size_t) - Width of the resource view
        height (size_t) - Height of the resource view
        depth (size_t) - Depth of the resource view
        firstMipmapLevel (unsigned int) - First defined mipmap level
        lastMipmapLevel (unsigned int) - Last defined mipmap level
        firstLayer (unsigned int) - First layer index
        lastLayer (unsigned int) - Last layer index
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_RESOURCE_VIEW_DESC

    Resource view descriptor. Attributes and getPtr() are identical to CUDA_RESOURCE_VIEW_DESC_v1 above.

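CUDA_RESOURCE_DESC, CUDA_TEXTURE_DESC, and CUDA_RESOURCE_VIEW_DESC come together in cuTexObjectCreate. A sketch that binds the array from the earlier cuArrayCreate example; it assumes list-valued fields accept Python lists, that the CU_TRSF_* flag constants are exposed at module level, and that the optional resource-view descriptor may be passed as None:

    from cuda import cuda

    res_desc = cuda.CUDA_RESOURCE_DESC()
    res_desc.resType = cuda.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
    res_desc.res.array.hArray = cu_array   # array from cuArrayCreate

    tex_desc = cuda.CUDA_TEXTURE_DESC()
    tex_desc.addressMode = [cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP] * 3
    tex_desc.filterMode = cuda.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR
    tex_desc.flags = cuda.CU_TRSF_NORMALIZED_COORDINATES

    err, tex = cuda.cuTexObjectCreate(res_desc, tex_desc, None)
    # ... pass `tex` to kernels as a texture-object argument ...
    err, = cuda.cuTexObjectDestroy(tex)
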
class cuda.cuda.CUtensorMap

    Tensor map descriptor. Requires compiler support for aligning to 64 bytes.

    Attributes:
        opaque (List[cuuint64_t])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1

    GPU Direct v3 tokens

    Attributes:
        p2pToken (unsigned long long)
        vaSpaceToken (unsigned int)

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS

    GPU Direct v3 tokens. Attributes and getPtr() are identical to CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 above.

class cuda.cuda.CUDA_LAUNCH_PARAMS_v1

    Kernel launch parameters

    Attributes:
        function (CUfunction) - Kernel to launch
        gridDimX (unsigned int) - Width of grid in blocks
        gridDimY (unsigned int) - Height of grid in blocks
        gridDimZ (unsigned int) - Depth of grid in blocks
        blockDimX (unsigned int) - X dimension of each thread block
        blockDimY (unsigned int) - Y dimension of each thread block
        blockDimZ (unsigned int) - Z dimension of each thread block
        sharedMemBytes (unsigned int) - Dynamic shared-memory size per thread block in bytes
        hStream (CUstream) - Stream identifier
        kernelParams (Any) - Array of pointers to kernel parameters

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_LAUNCH_PARAMS

    Kernel launch parameters. Attributes and getPtr() are identical to CUDA_LAUNCH_PARAMS_v1 above.

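CUDA_LAUNCH_PARAMS mirrors, field for field, the argument list of cuLaunchKernel (the struct itself is consumed by the multi-device cooperative launch API). The sketch shows the equivalent plain launch; it assumes a loaded `kernel`, a `stream`, a device pointer `dptr`, and an element count `n`, and uses the (values, types) tuple form these bindings accept for kernelParams:

    import ctypes
    from cuda import cuda

    # Assumed kernel signature: __global__ void scale(float *data, size_t n)
    args = ((dptr, n), (None, ctypes.c_size_t))   # CUdeviceptr pairs with None

    threads = 256
    blocks = (n + threads - 1) // threads

    err, = cuda.cuLaunchKernel(kernel,
                               blocks, 1, 1,      # gridDimX/Y/Z in blocks
                               threads, 1, 1,     # blockDimX/Y/Z in threads
                               0,                 # sharedMemBytes
                               stream,            # hStream
                               args,              # kernelParams
                               0)                 # extra
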
class cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1

    External memory handle descriptor

    Attributes:
        type (CUexternalMemoryHandleType) - Type of the handle
        handle (anon_union5)
        size (unsigned long long) - Size of the memory allocation
        flags (unsigned int) - Flags must either be zero or CUDA_EXTERNAL_MEMORY_DEDICATED
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC

    External memory handle descriptor. Attributes and getPtr() are identical to CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 above.

class cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1

    External memory buffer descriptor

    Attributes:
        offset (unsigned long long) - Offset into the memory object where the buffer's base is
        size (unsigned long long) - Size of the buffer
        flags (unsigned int) - Flags reserved for future use. Must be zero.
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC

    External memory buffer descriptor. Attributes and getPtr() are identical to CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 above.

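The handle and buffer descriptors are used back to back: cuImportExternalMemory consumes the CUDA_EXTERNAL_MEMORY_HANDLE_DESC, then cuExternalMemoryGetMappedBuffer maps a device pointer out of the imported object. A sketch for the POSIX file-descriptor case; `fd` and `nbytes`, exported by some other API such as Vulkan, are assumed:

    from cuda import cuda

    handle_desc = cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC()
    handle_desc.type = (cuda.CUexternalMemoryHandleType
                        .CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD)
    handle_desc.handle.fd = fd
    handle_desc.size = nbytes

    err, ext_mem = cuda.cuImportExternalMemory(handle_desc)

    buf_desc = cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC()
    buf_desc.offset = 0                # buffer starts at the object's base
    buf_desc.size = nbytes             # flags stays zero (reserved)

    err, dptr = cuda.cuExternalMemoryGetMappedBuffer(ext_mem, buf_desc)
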
class cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1

    External memory mipmap descriptor

    Attributes:
        offset (unsigned long long) - Offset into the memory object where the base level of the mipmap chain is.
        arrayDesc (CUDA_ARRAY3D_DESCRIPTOR) - Format, dimension and type of base level of the mipmap chain
        numLevels (unsigned int) - Total number of levels in the mipmap chain
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC

    External memory mipmap descriptor. Attributes and getPtr() are identical to CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 above.

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1

    External semaphore handle descriptor

    Attributes:
        type (CUexternalSemaphoreHandleType) - Type of the handle
        handle (anon_union6)
        flags (unsigned int) - Flags reserved for the future. Must be zero.
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC

    External semaphore handle descriptor. Attributes and getPtr() are identical to CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 above.

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1

    External semaphore signal parameters

    Attributes:
        params (anon_struct15)
        flags (unsigned int) - Only when CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to signal a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which indicates that while signaling the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS

    External semaphore signal parameters. Attributes and getPtr() are identical to CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 above.

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1

    External semaphore wait parameters

    Attributes:
        params (anon_struct18)
        flags (unsigned int) - Only when CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which indicates that while waiting for the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
        reserved (List[unsigned int])

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS

    External semaphore wait parameters. Attributes and getPtr() are identical to CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 above.

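The signal and wait parameter structs are passed in arrays, one entry per semaphore, to the async stream APIs. A sketch assuming `ext_sem` was returned by cuImportExternalSemaphore, that it is not an NvSciSync semaphore (so flags stays zero), and that array parameters accept Python lists:

    from cuda import cuda

    signal_params = cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS()
    wait_params = cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS()

    # Enqueue a signal, then a wait, on the same stream.
    err, = cuda.cuSignalExternalSemaphoresAsync([ext_sem], [signal_params], 1, stream)
    err, = cuda.cuWaitExternalSemaphoresAsync([ext_sem], [wait_params], 1, stream)
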
class cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1

    Semaphore signal node parameters

    Attributes:
        extSemArray (CUexternalSemaphore) - Array of external semaphore handles.
        paramsArray (CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) - Array of external semaphore signal parameters.
        numExtSems (unsigned int) - Number of handles and parameters supplied in extSemArray and paramsArray.

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS

    Semaphore signal node parameters. Attributes and getPtr() are identical to CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 above.

class cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2

    Semaphore signal node parameters. Attributes and getPtr() are identical to CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 above.

class cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1

    Semaphore wait node parameters

    Attributes:
        extSemArray (CUexternalSemaphore) - Array of external semaphore handles.
        paramsArray (CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) - Array of external semaphore wait parameters.
        numExtSems (unsigned int) - Number of handles and parameters supplied in extSemArray and paramsArray.

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS

    Semaphore wait node parameters. Attributes and getPtr() are identical to CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 above.

class cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2

    Semaphore wait node parameters. Attributes and getPtr() are identical to CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 above.

class cuda.cuda.CUmemGenericAllocationHandle_v1

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUmemGenericAllocationHandle

    getPtr()
        Get memory address of class instance

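A CUmemGenericAllocationHandle is what the virtual memory management path hands back: cuMemCreate produces the physical handle, and cuMemAddressReserve/cuMemMap/cuMemSetAccess make it usable. A sketch assuming device 0 and a current context:

    from cuda import cuda

    prop = cuda.CUmemAllocationProp()
    prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0                   # device ordinal

    err, gran = cuda.cuMemGetAllocationGranularity(
        prop,
        cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
    size = gran                            # sizes must be granularity-aligned

    err, handle = cuda.cuMemCreate(size, prop, 0)      # physical memory
    err, va = cuda.cuMemAddressReserve(size, 0, 0, 0)  # virtual address range
    err, = cuda.cuMemMap(va, size, 0, handle, 0)       # back the range

    access = cuda.CUmemAccessDesc()
    access.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    access.location.id = 0
    access.flags = cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
    err, = cuda.cuMemSetAccess(va, size, [access], 1)  # grant RW to device 0
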
class cuda.cuda.CUarrayMapInfo_v1

    Specifies the CUDA array or CUDA mipmapped array memory mapping information

    Attributes:
        resourceType (CUresourcetype) - Resource type
        resource (anon_union9)
        subresourceType (CUarraySparseSubresourceType) - Sparse subresource type
        subresource (anon_union10)
        memOperationType (CUmemOperationType) - Memory operation type
        memHandleType (CUmemHandleType) - Memory handle type
        memHandle (anon_union11)
        offset (unsigned long long) - Offset within the mip tail (for sparse subresources) or offset within the memory
        deviceBitMask (unsigned int) - Device ordinal bit mask
        flags (unsigned int) - Flags for future use; must be zero now.
        reserved (List[unsigned int]) - Reserved for future use; must be zero now.

    getPtr()
        Get memory address of class instance

class cuda.cuda.CUarrayMapInfo

    Specifies the CUDA array or CUDA mipmapped array memory mapping information. Attributes and getPtr() are identical to CUarrayMapInfo_v1 above.

    class cuda.cuda.CUmemLocation_v1

        Specifies a memory location.

        type
            Specifies the location type, which modifies the meaning of id.
            Type: CUmemLocationType

        id
            Identifier for a location of the given CUmemLocationType (for example, a device ordinal when type is CU_MEM_LOCATION_TYPE_DEVICE).
            Type: int

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUmemLocation

        Specifies a memory location.

        type
            Specifies the location type, which modifies the meaning of id.
            Type: CUmemLocationType

        id
            Identifier for a location of the given CUmemLocationType (for example, a device ordinal when type is CU_MEM_LOCATION_TYPE_DEVICE).
            Type: int

        getPtr()
            Get memory address of class instance
    class cuda.cuda.CUmemAllocationProp_v1

        Specifies the allocation properties for an allocation.

        type
            Allocation type
            Type: CUmemAllocationType

        requestedHandleTypes
            Requested CUmemAllocationHandleType
            Type: CUmemAllocationHandleType

        location
            Location of allocation
            Type: CUmemLocation

        win32HandleMetaData
            Windows-specific POBJECT_ATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure includes security attributes that define the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
            Type: Any

        allocFlags
            Type: anon_struct21

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUmemAllocationProp

        Specifies the allocation properties for an allocation.

        type
            Allocation type
            Type: CUmemAllocationType

        requestedHandleTypes
            Requested CUmemAllocationHandleType
            Type: CUmemAllocationHandleType

        location
            Location of allocation
            Type: CUmemLocation

        win32HandleMetaData
            Windows-specific POBJECT_ATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure includes security attributes that define the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
            Type: Any

        allocFlags
            Type: anon_struct21

        getPtr()
            Get memory address of class instance
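    These properties feed directly into cuMemCreate() from the virtual memory management API. A minimal usage sketch (not part of the generated reference), assuming the driver is initialized and a context is current on device 0; a single granule is allocated purely for illustration:

        # Sketch: create a physical memory handle described by CUmemAllocationProp.
        from cuda import cuda

        prop = cuda.CUmemAllocationProp()
        prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        prop.location.id = 0  # device ordinal 0

        # Allocation sizes must be a multiple of the minimum granularity.
        err, gran = cuda.cuMemGetAllocationGranularity(
            prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
        assert err == cuda.CUresult.CUDA_SUCCESS

        err, handle = cuda.cuMemCreate(gran, prop, 0)  # flags must be 0
        assert err == cuda.CUresult.CUDA_SUCCESS

        # ... map with cuMemAddressReserve()/cuMemMap(), then release:
        (err,) = cuda.cuMemRelease(handle)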
    class cuda.cuda.CUmulticastObjectProp_v1

        Specifies the properties for a multicast object.

        numDevices
            The number of devices in the multicast team that will bind memory to this object
            Type: unsigned int

        size
            The maximum amount of memory that can be bound to this multicast object per device
            Type: size_t

        handleTypes
            Bitmask of exportable handle types (see CUmemAllocationHandleType) for this object
            Type: unsigned long long

        flags
            Flags for future use, must be zero now
            Type: unsigned long long

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUmulticastObjectProp

        Specifies the properties for a multicast object.

        numDevices
            The number of devices in the multicast team that will bind memory to this object
            Type: unsigned int

        size
            The maximum amount of memory that can be bound to this multicast object per device
            Type: size_t

        handleTypes
            Bitmask of exportable handle types (see CUmemAllocationHandleType) for this object
            Type: unsigned long long

        flags
            Flags for future use, must be zero now
            Type: unsigned long long

        getPtr()
            Get memory address of class instance
    class cuda.cuda.CUmemAccessDesc_v1

        Memory access descriptor

        location
            Location on which the request is to change its accessibility
            Type: CUmemLocation

        flags
            CUmemProt accessibility flags to set on the request
            Type: CUmemAccess_flags

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUmemAccessDesc

        Memory access descriptor

        location
            Location on which the request is to change its accessibility
            Type: CUmemLocation

        flags
            CUmemProt accessibility flags to set on the request
            Type: CUmemAccess_flags

        getPtr()
            Get memory address of class instance
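    A minimal sketch of how this descriptor is used with cuMemSetAccess(), in the style of the bundled vectorAddMMAP example. It assumes dptr and size come from a prior cuMemAddressReserve()/cuMemMap() pair (both hypothetical here):

        # Sketch: grant device 0 read/write access to a VMM mapping.
        from cuda import cuda

        desc = cuda.CUmemAccessDesc()
        desc.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        desc.location.id = 0
        desc.flags = cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE

        (err,) = cuda.cuMemSetAccess(dptr, size, [desc], 1)  # one descriptor
        assert err == cuda.CUresult.CUDA_SUCCESS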
    class cuda.cuda.CUgraphExecUpdateResultInfo_v1

        Result information returned by cuGraphExecUpdate

        result
            Gives more specific detail when a CUDA graph update fails.
            Type: CUgraphExecUpdateResult

        errorNode
            The "to node" of the error edge when the topologies do not match. The error node when the error is associated with a specific node. NULL when the error is generic.
            Type: CUgraphNode

        errorFromNode
            The "from node" of the error edge when the topologies do not match. Otherwise NULL.
            Type: CUgraphNode

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUgraphExecUpdateResultInfo

        Result information returned by cuGraphExecUpdate

        result
            Gives more specific detail when a CUDA graph update fails.
            Type: CUgraphExecUpdateResult

        errorNode
            The "to node" of the error edge when the topologies do not match. The error node when the error is associated with a specific node. NULL when the error is generic.
            Type: CUgraphNode

        errorFromNode
            The "from node" of the error edge when the topologies do not match. Otherwise NULL.
            Type: CUgraphNode

        getPtr()
            Get memory address of class instance
    class cuda.cuda.CUmemPoolProps_v1

        Specifies the properties of allocations made from the pool.

        allocType
            Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED
            Type: CUmemAllocationType

        handleTypes
            Handle types that will be supported by allocations from the pool.
            Type: CUmemAllocationHandleType

        location
            Location where allocations should reside.
            Type: CUmemLocation

        win32SecurityAttributes
            Windows-specific LPSECURITYATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
            Type: Any

        maxSize
            Maximum pool size. When set to 0, defaults to a system dependent value.
            Type: size_t

        usage
            Bitmask indicating intended usage for the pool.
            Type: unsigned short

        reserved
            Reserved for future use, must be 0.
            Type: bytes

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUmemPoolProps

        Specifies the properties of allocations made from the pool.

        allocType
            Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED
            Type: CUmemAllocationType

        handleTypes
            Handle types that will be supported by allocations from the pool.
            Type: CUmemAllocationHandleType

        location
            Location where allocations should reside.
            Type: CUmemLocation

        win32SecurityAttributes
            Windows-specific LPSECURITYATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
            Type: Any

        maxSize
            Maximum pool size. When set to 0, defaults to a system dependent value.
            Type: size_t

        usage
            Bitmask indicating intended usage for the pool.
            Type: unsigned short

        reserved
            Reserved for future use, must be 0.
            Type: bytes

        getPtr()
            Get memory address of class instance
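    A minimal sketch of creating an explicit pool from these properties and allocating from it. It assumes an initialized driver, a current context, and an existing CUstream named stream (hypothetical):

        # Sketch: create a memory pool on device 0 and allocate from it.
        from cuda import cuda

        props = cuda.CUmemPoolProps()
        props.allocType = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        props.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        props.location.id = 0

        err, pool = cuda.cuMemPoolCreate(props)
        assert err == cuda.CUresult.CUDA_SUCCESS

        err, dptr = cuda.cuMemAllocFromPoolAsync(1 << 20, pool, stream)  # 1 MiB
        (err,) = cuda.cuMemFreeAsync(dptr, stream)
        (err,) = cuda.cuMemPoolDestroy(pool)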
    class cuda.cuda.CUmemPoolPtrExportData_v1

        Opaque data for exporting a pool allocation

        reserved
            Type: bytes

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUmemPoolPtrExportData

        Opaque data for exporting a pool allocation

        reserved
            Type: bytes

        getPtr()
            Get memory address of class instance
    class cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1

        Memory allocation node parameters

        poolProps
            in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
            Type: CUmemPoolProps

        accessDescs
            in: array of memory access descriptors. Used to describe peer GPU access
            Type: CUmemAccessDesc

        accessDescCount
            in: number of memory access descriptors. Must not exceed the number of GPUs.
            Type: size_t

        bytesize
            in: size in bytes of the requested allocation
            Type: size_t

        dptr
            out: address of the allocation returned by CUDA
            Type: CUdeviceptr

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS

        Memory allocation node parameters

        poolProps
            in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
            Type: CUmemPoolProps

        accessDescs
            in: array of memory access descriptors. Used to describe peer GPU access
            Type: CUmemAccessDesc

        accessDescCount
            in: number of memory access descriptors. Must not exceed the number of GPUs.
            Type: size_t

        bytesize
            in: size in bytes of the requested allocation
            Type: size_t

        dptr
            out: address of the allocation returned by CUDA
            Type: CUdeviceptr

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2

        Memory allocation node parameters

        poolProps
            in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
            Type: CUmemPoolProps

        accessDescs
            in: array of memory access descriptors. Used to describe peer GPU access
            Type: CUmemAccessDesc

        accessDescCount
            in: number of memory access descriptors. Must not exceed the number of GPUs.
            Type: size_t

        bytesize
            in: size in bytes of the requested allocation
            Type: size_t

        dptr
            out: address of the allocation returned by CUDA
            Type: CUdeviceptr

        getPtr()
            Get memory address of class instance
    class cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS

        Memory free node parameters

        dptr
            in: the pointer to free
            Type: CUdeviceptr

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS

        Child graph node parameters

        graph
            The child graph to clone into the node for node creation, or a handle to the graph owned by the node for node query
            Type: CUgraph

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS

        Event record node parameters

        event
            The event to record when the node executes
            Type: CUevent

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS

        Event wait node parameters

        event
            The event to wait on from the node
            Type: CUevent

        getPtr()
            Get memory address of class instance
    class cuda.cuda.CUgraphNodeParams

        Graph node parameters. See cuGraphAddNode.

        type
            Type of the node
            Type: CUgraphNodeType

        reserved0
            Reserved. Must be zero.
            Type: List[int]

        reserved1
            Padding. Unused bytes must be zero.
            Type: List[long long]

        kernel
            Kernel node parameters.
            Type: CUDA_KERNEL_NODE_PARAMS_v3

        memcpy
            Memcpy node parameters.
            Type: CUDA_MEMCPY_NODE_PARAMS

        memset
            Memset node parameters.
            Type: CUDA_MEMSET_NODE_PARAMS_v2

        host
            Host node parameters.
            Type: CUDA_HOST_NODE_PARAMS_v2

        graph
            Child graph node parameters.
            Type: CUDA_CHILD_GRAPH_NODE_PARAMS

        eventWait
            Event wait node parameters.
            Type: CUDA_EVENT_WAIT_NODE_PARAMS

        eventRecord
            Event record node parameters.
            Type: CUDA_EVENT_RECORD_NODE_PARAMS

        extSemSignal
            External semaphore signal node parameters.
            Type: CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2

        extSemWait
            External semaphore wait node parameters.
            Type: CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2

        alloc
            Memory allocation node parameters.
            Type: CUDA_MEM_ALLOC_NODE_PARAMS_v2

        free
            Memory free node parameters.
            Type: CUDA_MEM_FREE_NODE_PARAMS

        memOp
            MemOp node parameters.
            Type: CUDA_BATCH_MEM_OP_NODE_PARAMS_v2

        conditional
            Conditional node parameters.
            Type: CUDA_CONDITIONAL_NODE_PARAMS

        reserved2
            Reserved bytes. Must be zero.
            Type: long long

        getPtr()
            Get memory address of class instance
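    A minimal sketch of filling this tagged union for one node type and handing it to cuGraphAddNode. It assumes cuInit() has been called and a context is current; setting the matching union member through an attribute follows the bindings' usual struct-view pattern:

        # Sketch: build a one-node graph whose node records an event.
        from cuda import cuda

        err, graph = cuda.cuGraphCreate(0)
        err, event = cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)

        params = cuda.CUgraphNodeParams()
        params.type = cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD
        params.eventRecord.event = event  # fill the member matching `type`

        err, node = cuda.cuGraphAddNode(graph, None, 0, params)  # no dependencies
        assert err == cuda.CUresult.CUDA_SUCCESS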
    class cuda.cuda.CUeglFrame_v1

        CUDA EGLFrame structure descriptor: structure defining one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar or not.

        frame
            Type: anon_union14

        width
            Width of first plane
            Type: unsigned int

        height
            Height of first plane
            Type: unsigned int

        depth
            Depth of first plane
            Type: unsigned int

        pitch
            Pitch of first plane
            Type: unsigned int

        planeCount
            Number of planes
            Type: unsigned int

        numChannels
            Number of channels for the plane
            Type: unsigned int

        frameType
            Array or Pitch
            Type: CUeglFrameType

        eglColorFormat
            CUDA EGL Color Format
            Type: CUeglColorFormat

        cuFormat
            CUDA Array Format
            Type: CUarray_format

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUeglFrame

        CUDA EGLFrame structure descriptor: structure defining one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar or not.

        frame
            Type: anon_union14

        width
            Width of first plane
            Type: unsigned int

        height
            Height of first plane
            Type: unsigned int

        depth
            Depth of first plane
            Type: unsigned int

        pitch
            Pitch of first plane
            Type: unsigned int

        planeCount
            Number of planes
            Type: unsigned int

        numChannels
            Number of channels for the plane
            Type: unsigned int

        frameType
            Array or Pitch
            Type: CUeglFrameType

        eglColorFormat
            CUDA EGL Color Format
            Type: CUeglColorFormat

        cuFormat
            CUDA Array Format
            Type: CUarray_format

        getPtr()
            Get memory address of class instance

    class cuda.cuda.CUeglStreamConnection(*args, **kwargs)

        CUDA EGLStream Connection

        getPtr()
            Get memory address of class instance
    cuda.CUDA_VERSION = 12060
        CUDA API version number

    cuda.CU_IPC_HANDLE_SIZE = 64
        CUDA IPC handle size

    cuda.CU_STREAM_LEGACY = 1
        Legacy stream handle. Stream handle that can be passed as a CUstream to use an implicit stream with legacy synchronization behavior. See details of the link_sync_behavior.

    cuda.CU_STREAM_PER_THREAD = 2
        Per-thread stream handle. Stream handle that can be passed as a CUstream to use an implicit stream with per-thread synchronization behavior. See details of the link_sync_behavior.

    cuda.CU_COMPUTE_ACCELERATED_TARGET_BASE = 65536

    cuda.CU_GRAPH_COND_ASSIGN_DEFAULT = 1
        Conditional node handle flag: the default value is applied when the graph is launched.

    cuda.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT = 0
        This port activates when the kernel has finished executing.

    cuda.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC = 1
        This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC. See also CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT.

    cuda.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER = 2
        This port activates when all blocks of the kernel have begun execution. See also CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT.

    cuda.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1

    cuda.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE = 2

    cuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION = 4

    cuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5

    cuda.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY = 8

    cuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9

    cuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN = 10

    cuda.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = 13

    cuda.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 14

    cuda.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1

    cuda.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3

    cuda.CU_STREAM_ATTRIBUTE_PRIORITY = 8

    cuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9

    cuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN = 10

    cuda.CU_MEMHOSTALLOC_PORTABLE = 1
        If set, host memory is portable between CUDA contexts. Flag for cuMemHostAlloc()

    cuda.CU_MEMHOSTALLOC_DEVICEMAP = 2
        If set, host memory is mapped into CUDA address space and cuMemHostGetDevicePointer() may be called on the host pointer. Flag for cuMemHostAlloc()

    cuda.CU_MEMHOSTALLOC_WRITECOMBINED = 4
        If set, host memory is allocated as write-combined: fast to write, faster to DMA, slow to read except via the SSE4 streaming load instruction (MOVNTDQA). Flag for cuMemHostAlloc()

    cuda.CU_MEMHOSTREGISTER_PORTABLE = 1
        If set, host memory is portable between CUDA contexts. Flag for cuMemHostRegister()

    cuda.CU_MEMHOSTREGISTER_DEVICEMAP = 2
        If set, host memory is mapped into CUDA address space and cuMemHostGetDevicePointer() may be called on the host pointer. Flag for cuMemHostRegister()

    cuda.CU_MEMHOSTREGISTER_IOMEMORY = 4
        If set, the passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device. On Windows the flag is a no-op. On Linux that memory is marked as non cache-coherent for the GPU and is expected to be physically contiguous. It may return CUDA_ERROR_NOT_PERMITTED if run as an unprivileged user, CUDA_ERROR_NOT_SUPPORTED on older Linux kernel versions. On all other platforms, it is not supported and CUDA_ERROR_NOT_SUPPORTED is returned. Flag for cuMemHostRegister()

    cuda.CU_MEMHOSTREGISTER_READ_ONLY = 8
        If set, the passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. Using this flag with a current context associated with a device that does not have this attribute set will cause cuMemHostRegister to error with CUDA_ERROR_NOT_SUPPORTED.

    cuda.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL = 1
        Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers

    cuda.CU_TENSOR_MAP_NUM_QWORDS = 16
        Size of tensor map descriptor

    cuda.CUDA_EXTERNAL_MEMORY_DEDICATED = 1
        Indicates that the external memory object is a dedicated resource

    cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC = 1
        When the flags parameter of CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects.

    cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC = 2
        When the flags parameter of CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects.

    cuda.CUDA_NVSCISYNC_ATTR_SIGNAL = 1
        When flags of cuDeviceGetNvSciSyncAttributes is set to this, it indicates that the application needs a signaler-specific NvSciSyncAttr to be filled by cuDeviceGetNvSciSyncAttributes.

    cuda.CUDA_NVSCISYNC_ATTR_WAIT = 2
        When flags of cuDeviceGetNvSciSyncAttributes is set to this, it indicates that the application needs a waiter-specific NvSciSyncAttr to be filled by cuDeviceGetNvSciSyncAttributes.

    cuda.CU_MEM_CREATE_USAGE_TILE_POOL = 1
        This flag, if set, indicates that the memory will be used as a tile pool.

    cuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = 1
        If set, each kernel launched as part of cuLaunchCooperativeKernelMultiDevice only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution.

    cuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = 2
        If set, any subsequent work pushed in a stream that participated in a call to cuLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution.

    cuda.CUDA_ARRAY3D_LAYERED = 1
        If set, the CUDA array is a collection of layers, where each layer is either a 1D or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number of layers, not the depth of a 3D array.

    cuda.CUDA_ARRAY3D_2DARRAY = 1
        Deprecated, use CUDA_ARRAY3D_LAYERED

    cuda.CUDA_ARRAY3D_SURFACE_LDST = 2
        This flag must be set in order to bind a surface reference to the CUDA array

    cuda.CUDA_ARRAY3D_CUBEMAP = 4
        If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The width of such a CUDA array must be equal to its height, and Depth must be six. If the CUDA_ARRAY3D_LAYERED flag is also set, then the CUDA array is a collection of cubemaps and Depth must be a multiple of six.

    cuda.CUDA_ARRAY3D_TEXTURE_GATHER = 8
        This flag must be set in order to perform texture gather operations on a CUDA array.

    cuda.CUDA_ARRAY3D_DEPTH_TEXTURE = 16
        This flag, if set, indicates that the CUDA array is a DEPTH_TEXTURE.

    cuda.CUDA_ARRAY3D_COLOR_ATTACHMENT = 32
        This flag indicates that the CUDA array may be bound as a color target in an external graphics API

    cuda.CUDA_ARRAY3D_SPARSE = 64
        This flag, if set, indicates that the CUDA array or CUDA mipmapped array is a sparse CUDA array or CUDA mipmapped array respectively

    cuda.CUDA_ARRAY3D_DEFERRED_MAPPING = 128
        This flag, if set, indicates that the CUDA array or CUDA mipmapped array will allow deferred memory mapping

    cuda.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE = 256
        This flag indicates that the CUDA array will be used for hardware accelerated video encode/decode operations.

    cuda.CU_TRSA_OVERRIDE_FORMAT = 1
        Override the texref format with a format inferred from the array. Flag for cuTexRefSetArray()

    cuda.CU_TRSF_READ_AS_INTEGER = 1
        Read the texture as integers rather than promoting the values to floats in the range [0,1]. Flag for cuTexRefSetFlags() and cuTexObjectCreate()

    cuda.CU_TRSF_NORMALIZED_COORDINATES = 2
        Use normalized texture coordinates in the range [0,1) instead of [0,dim). Flag for cuTexRefSetFlags() and cuTexObjectCreate()

    cuda.CU_TRSF_SRGB = 16
        Perform sRGB->linear conversion during texture read. Flag for cuTexRefSetFlags() and cuTexObjectCreate()

    cuda.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION = 32
        Disable any trilinear filtering optimizations. Flag for cuTexRefSetFlags() and cuTexObjectCreate()

    cuda.CU_TRSF_SEAMLESS_CUBEMAP = 64
        Enable seamless cube map filtering. Flag for cuTexObjectCreate()

    cuda.CU_LAUNCH_PARAM_END_AS_INT = 0
        C++ compile time constant for CU_LAUNCH_PARAM_END

    cuda.CU_LAUNCH_PARAM_END = 0
        End of array terminator for the extra parameter to cuLaunchKernel

    cuda.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT = 1
        C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER

    cuda.CU_LAUNCH_PARAM_BUFFER_POINTER = 1
        Indicator that the next value in the extra parameter to cuLaunchKernel will be a pointer to a buffer containing all kernel parameters used for launching kernel f. This buffer needs to honor all alignment/padding requirements of the individual parameters. If CU_LAUNCH_PARAM_BUFFER_SIZE is not also specified in the extra array, then CU_LAUNCH_PARAM_BUFFER_POINTER will have no effect.

    cuda.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT = 2
        C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE

    cuda.CU_LAUNCH_PARAM_BUFFER_SIZE = 2
        Indicator that the next value in the extra parameter to cuLaunchKernel will be a pointer to a size_t which contains the size of the buffer specified with CU_LAUNCH_PARAM_BUFFER_POINTER. It is required that CU_LAUNCH_PARAM_BUFFER_POINTER also be specified in the extra array if the value associated with CU_LAUNCH_PARAM_BUFFER_SIZE is not zero.

    cuda.CU_PARAM_TR_DEFAULT = -1
        For texture references loaded into the module, use the default texunit from the texture reference.

    cuda.CU_DEVICE_CPU = -1
        Device that represents the CPU

    cuda.CU_DEVICE_INVALID = -2
        Device that represents an invalid device

    cuda.MAX_PLANES = 3
        Maximum number of planes per frame

    cuda.CUDA_EGL_INFINITE_TIMEOUT = -1
        Indicates that the timeout for cuEGLStreamConsumerAcquireFrame is infinite.
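    A minimal sketch of combining the cuMemHostAlloc() flag constants above; it assumes an initialized driver and a current context:

        # Sketch: allocate pinned, device-mapped, portable host memory.
        from cuda import cuda

        nbytes = 4096
        flags = cuda.CU_MEMHOSTALLOC_PORTABLE | cuda.CU_MEMHOSTALLOC_DEVICEMAP
        err, host_ptr = cuda.cuMemHostAlloc(nbytes, flags)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # With CU_MEMHOSTALLOC_DEVICEMAP set, a device pointer to the same
        # memory can be obtained:
        err, dev_ptr = cuda.cuMemHostGetDevicePointer(host_ptr, 0)

        (err,) = cuda.cuMemFreeHost(host_ptr)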
    Error Handling

    This section describes the error handling functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuGetErrorString(error: CUresult)

        Gets the string description of an error code.

        Sets *pStr to the address of a NULL-terminated string description of the error code error. If the error code is not recognized, CUDA_ERROR_INVALID_VALUE will be returned and *pStr will be set to the NULL address.

        Parameters:
            error (CUresult) – Error code to convert to string

    cuda.cuda.cuGetErrorName(error: CUresult)

        Gets the string representation of an error code enum name.

        Sets *pStr to the address of a NULL-terminated string representation of the name of the enum error code error. If the error code is not recognized, CUDA_ERROR_INVALID_VALUE will be returned and *pStr will be set to the NULL address.

        Parameters:
            error (CUresult) – Error code to convert to string

        See also: CUresult, cudaGetErrorName
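    A small helper in the spirit of the bindings' own samples, turning any non-success CUresult into a Python exception via the two functions above (the helper name is illustrative):

        from cuda import cuda

        def check(err):
            # Raise on any CUresult other than CUDA_SUCCESS.
            if err != cuda.CUresult.CUDA_SUCCESS:
                _, name = cuda.cuGetErrorName(err)    # enum name as bytes
                _, desc = cuda.cuGetErrorString(err)  # description as bytes
                raise RuntimeError(f"{name.decode()}: {desc.decode()}")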
    Initialization

    This section describes the initialization functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuInit(unsigned int Flags)

        Initialize the CUDA driver API.

        Initializes the driver API and must be called before any other function from the driver API in the current process. Currently, the Flags parameter must be 0. If cuInit() has not been called, any function from the driver API will return CUDA_ERROR_NOT_INITIALIZED.

        Parameters:
            Flags (unsigned int) – Initialization flag for CUDA.

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_SYSTEM_DRIVER_MISMATCH, CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE

        Return type:
            CUresult
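    A minimal sketch of the required first call:

        from cuda import cuda

        (err,) = cuda.cuInit(0)  # Flags must currently be 0
        if err != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError(f"cuInit failed with {err}")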
    Version Management

    This section describes the version management functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuDriverGetVersion()

        Returns the latest CUDA version supported by the driver.

        Returns in *driverVersion the version of CUDA supported by the driver. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020.

        This function automatically returns CUDA_ERROR_INVALID_VALUE if driverVersion is NULL.
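    A minimal sketch decoding the (1000 * major + 10 * minor) encoding described above:

        from cuda import cuda

        err, version = cuda.cuDriverGetVersion()
        major, minor = version // 1000, (version % 1000) // 10
        print(f"Driver supports CUDA {major}.{minor}")  # e.g. 9020 -> CUDA 9.2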
    Device Management

    This section describes the device management functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuDeviceGet(int ordinal)

        Returns a handle to a compute device.

        Returns in *device a device handle given an ordinal in the range [0, cuDeviceGetCount()-1].

        Parameters:
            ordinal (int) – Device number to get handle for

    cuda.cuda.cuDeviceGetCount()

        Returns the number of compute-capable devices.

        Returns in *count the number of devices with compute capability greater than or equal to 2.0 that are available for execution. If there is no such device, cuDeviceGetCount() returns 0.

    cuda.cuda.cuDeviceGetName(int length, dev)

        Returns an identifier string for the device.

        Returns an ASCII string identifying the device dev in the NULL-terminated string pointed to by name. length specifies the maximum length of the string that may be returned.

        Parameters:
            • length (int) – Maximum length of string to store in name
            • dev (CUdevice) – Device to get identifier string for
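    A minimal sketch tying the three calls above together; it assumes cuInit(0) has already been called:

        # Sketch: enumerate devices and print their names.
        from cuda import cuda

        err, count = cuda.cuDeviceGetCount()
        for ordinal in range(count):
            err, dev = cuda.cuDeviceGet(ordinal)
            err, name = cuda.cuDeviceGetName(128, dev)  # up to 128 bytes
            print(ordinal, name.decode().rstrip("\x00"))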
    cuda.cuda.cuDeviceGetUuid(dev)

        Return a UUID for the device.

        Note there is a later version of this API, cuDeviceGetUuid_v2. It will supplant this version in 12.0, which is retained for minor version compatibility.

        Returns 16 octets identifying the device dev in the structure pointed to by uuid.

        Parameters:
            dev (CUdevice) – Device to get identifier string for

    cuda.cuda.cuDeviceGetUuid_v2(dev)

        Return a UUID for the device (11.4+)

        Returns 16 octets identifying the device dev in the structure pointed to by uuid. If the device is in MIG mode, returns its MIG UUID, which uniquely identifies the subscribed MIG compute instance.

        Parameters:
            dev (CUdevice) – Device to get identifier string for

    cuda.cuda.cuDeviceGetLuid(dev)

        Return an LUID and device node mask for the device.

        Return identifying information (luid and deviceNodeMask) to allow matching the device with graphics APIs.

        Parameters:
            dev (CUdevice) – Device to get identifier string for

    cuda.cuda.cuDeviceTotalMem(dev)

        Returns the total amount of memory on the device.

        Returns in *bytes the total amount of memory available on the device dev in bytes.

        Parameters:
            dev (CUdevice) – Device handle
    cuda.cuda.cuDeviceGetTexture1DLinearMaxWidth(pformat: CUarray_format, unsigned int numChannels, dev)

        Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size.

        Returns in maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture for the given pformat and numChannels.

        Parameters:
            • pformat (CUarray_format) – Texture format.
            • numChannels (unsigned) – Number of channels per texture element.
            • dev (CUdevice) – Device handle.

    cuda.cuda.cuDeviceGetAttribute(attrib: CUdevice_attribute, dev)

        Returns information about the device.

        Returns in *pi the integer value of the attribute attrib on device dev. The supported attributes are given by the CUdevice_attribute enum.

    cuda.cuda.cuDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, dev, int flags)

        Return NvSciSync attributes that this device can support.

        Returns in nvSciSyncAttrList the properties of NvSciSync that this CUDA device, dev, can support. The returned nvSciSyncAttrList can be used to create an NvSciSync object that matches this device's capabilities.

        If the NvSciSyncAttrKey_RequiredPerm field in nvSciSyncAttrList is already set, this API will return CUDA_ERROR_INVALID_VALUE.

        The application should set nvSciSyncAttrList to a valid NvSciSyncAttrList, failing which this API will return CUDA_ERROR_INVALID_HANDLE.

        flags controls how the application intends to use the NvSciSync created from the nvSciSyncAttrList. The valid flags are CUDA_NVSCISYNC_ATTR_SIGNAL and CUDA_NVSCISYNC_ATTR_WAIT. At least one of these flags must be set, failing which the API returns CUDA_ERROR_INVALID_VALUE. The two flags are orthogonal to one another: a developer may set both, which allows setting both wait- and signal-specific attributes in the same nvSciSyncAttrList.

        Note that this API updates the input nvSciSyncAttrList with values equivalent to the following public attribute key-values. NvSciSyncAttrKey_RequiredPerm is set to:

        • NvSciSyncAccessPerm_SignalOnly if CUDA_NVSCISYNC_ATTR_SIGNAL is set in flags.
        • NvSciSyncAccessPerm_WaitOnly if CUDA_NVSCISYNC_ATTR_WAIT is set in flags.
        • NvSciSyncAccessPerm_WaitSignal if both CUDA_NVSCISYNC_ATTR_WAIT and CUDA_NVSCISYNC_ATTR_SIGNAL are set in flags.

        NvSciSyncAttrKey_PrimitiveInfo is set to:

        • NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid device.
        • NvSciSyncAttrValPrimitiveType_Syncpoint if the device is a Tegra device.
        • NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if the device is GA10X+.

        NvSciSyncAttrKey_GpuId is set to the same UUID that is returned for this device from cuDeviceGetUuid.

        Parameters:
            • nvSciSyncAttrList (Any) – Return NvSciSync attributes supported.
            • dev (CUdevice) – Valid Cuda Device to get NvSciSync attributes for.
            • flags (int) – flags describing NvSciSync usage.

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_OUT_OF_MEMORY

        Return type:
            CUresult
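    A minimal sketch of an attribute query; it assumes cuInit(0) has already been called:

        # Sketch: query a device attribute (here, the SM count of device 0).
        from cuda import cuda

        err, dev = cuda.cuDeviceGet(0)
        err, sm_count = cuda.cuDeviceGetAttribute(
            cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev)
        print(f"device 0 has {sm_count} SMs")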
    cuda.cuda.cuDeviceSetMemPool(dev, pool)

        Sets the current memory pool of a device.

        The memory pool must be local to the specified device. cuMemAllocAsync allocates from the current mempool of the provided stream's device. By default, a device's current memory pool is its default memory pool.

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult

        Notes

        Use cuMemAllocFromPoolAsync to specify asynchronous allocations from a device different than the one the stream runs on.

    cuda.cuda.cuDeviceGetMemPool(dev)

        Gets the current mempool for a device.

        Returns the last pool provided to cuDeviceSetMemPool for this device, or the device's default memory pool if cuDeviceSetMemPool has never been called. By default the current mempool is the default mempool for a device. Otherwise the returned pool must have been set with cuDeviceSetMemPool.

        Parameters:
            dev (CUdevice)

    cuda.cuda.cuDeviceGetDefaultMemPool(dev)

        Returns the default mempool of a device.

        The default mempool of a device contains device memory from that device.

        Parameters:
            dev (CUdevice)
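    A minimal sketch illustrating the three calls together (setting the default pool as current is a no-op, shown only for the round trip); it assumes an initialized driver:

        from cuda import cuda

        err, dev = cuda.cuDeviceGet(0)
        err, default_pool = cuda.cuDeviceGetDefaultMemPool(dev)
        (err,) = cuda.cuDeviceSetMemPool(dev, default_pool)
        err, current_pool = cuda.cuDeviceGetMemPool(dev)  # same pool as above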
    cuda.cuda.cuDeviceGetExecAffinitySupport(typename: CUexecAffinityType, dev)

        Returns information about the execution affinity support of the device.

        Returns in *pi whether execution affinity type typename is supported by device dev. The supported types are given by the CUexecAffinityType enum.

    cuda.cuda.cuFlushGPUDirectRDMAWrites(target: CUflushGPUDirectRDMAWritesTarget, scope: CUflushGPUDirectRDMAWritesScope)

        Blocks until remote writes are visible to the specified scope.

        Blocks until GPUDirect RDMA writes to the target context via mappings created through APIs like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) are visible to the specified scope.

        If the scope equals or lies within the scope indicated by CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, the call will be a no-op and can be safely omitted for performance. This can be determined by comparing the numerical values between the two enums, with smaller scopes having smaller values.

        Users may query support for this API via CU_DEVICE_ATTRIBUTE_FLUSH_FLUSH_GPU_DIRECT_RDMA_OPTIONS.

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult

    Primary Context Management#

    -

    This section describes the primary context management functions of the low-level CUDA driver application programming interface.

    -

    The primary context is unique per device and shared with the CUDA runtime API. These functions allow integration with other libraries using CUDA.

    -
    -
    -cuda.cuda.cuDevicePrimaryCtxRetain(dev)#
    -

    Retain the primary context on the GPU.

    -

    Retains the primary context on the device. Once the user successfully -retains the primary context, the primary context will be active and -available to the user until the user releases it with -cuDevicePrimaryCtxRelease() or resets it with -cuDevicePrimaryCtxReset(). Unlike cuCtxCreate() -the newly retained context is not pushed onto the stack.

    -

    Retaining the primary context for the first time will fail with -CUDA_ERROR_UNKNOWN if the compute mode of the device is -CU_COMPUTEMODE_PROHIBITED. The function -cuDeviceGetAttribute() can be used with -CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute -mode of the device. The nvidia-smi tool can be used to set the -compute mode for devices. Documentation for nvidia-smi can be -obtained by passing a -h option to it.

    -

    Please note that the primary context always supports pinned -allocations. Other flags can be specified by -cuDevicePrimaryCtxSetFlags().

    -
    -
    Parameters:
    -

    dev (CUdevice) – Device for which primary context is requested

    -
    -
    Returns:
    -

    -

    -
    -
    - -
    - -
    -
    -cuda.cuda.cuDevicePrimaryCtxRelease(dev)#
    -

    Release the primary context on the GPU.

    -

    Releases the primary context interop on the device. A retained context -should always be released once the user is done using it. The context -is automatically reset once the last reference to it is released. This -behavior is different when the primary context was retained by the CUDA -runtime from CUDA 4.0 and earlier. In this case, the primary context -remains always active.

    -

    Releasing a primary context that has not been previously retained will -fail with CUDA_ERROR_INVALID_CONTEXT.

    -

    Please note that unlike cuCtxDestroy() this method does not -pop the context from stack in any circumstances.

    -
    -
    Parameters:
    -

    dev (CUdevice) – Device which primary context is released

    -
    -
    Returns:
    -

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_INVALID_CONTEXT

    -
    -
    Return type:
    -

    CUresult

    -
    -
    - -
    - -
    -
    -cuda.cuda.cuDevicePrimaryCtxSetFlags(dev, unsigned int flags)#
    -

    Set flags for the primary context.

    -

    Sets the flags for the primary context on the device overwriting -perviously set ones.

    -

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.

    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.

    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.

    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.

    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.

    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage, at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled.

    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial settings will be taken from the global settings at the time of context creation. The other settings that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.

    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial settings will be taken from the global settings at the time of context creation. The other settings that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.

    • CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled “API Synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.
    Parameters:

    • dev (CUdevice) – Device for which the primary context flags are set

    • flags (unsigned int) – New flags for the device

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult
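    Example: a minimal sketch (not part of the original reference) of setting and then querying the primary-context flags through the Python bindings. Error handling is reduced to assertions, and the choice of CU_CTX_SCHED_BLOCKING_SYNC is purely illustrative:

        from cuda import cuda

        # Initialize the driver API and pick the first device.
        err, = cuda.cuInit(0)
        assert err == cuda.CUresult.CUDA_SUCCESS
        err, dev = cuda.cuDeviceGet(0)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # Ask the primary context to block the CPU thread while waiting on the GPU.
        err, = cuda.cuDevicePrimaryCtxSetFlags(
            dev, cuda.CUctx_flags.CU_CTX_SCHED_BLOCKING_SYNC
        )
        assert err == cuda.CUresult.CUDA_SUCCESS

        # Read the flags back, together with whether the context is active yet.
        err, flags, active = cuda.cuDevicePrimaryCtxGetState(dev)
        print(f"flags={flags:#x} active={active}")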
    cuda.cuda.cuDevicePrimaryCtxGetState(dev)

    Get the state of the primary context.

    Returns in *flags the flags for the primary context of dev, and in *active whether it is active. See cuDevicePrimaryCtxSetFlags for flag values.

    Parameters:

    dev (CUdevice) – Device to get primary context flags for

    Returns:
    cuda.cuda.cuDevicePrimaryCtxReset(dev)

    Destroy all allocations and reset all state on the primary context.

    Explicitly destroys and cleans up all resources associated with the current device in the current process.

    Note that it is the responsibility of the calling function to ensure that no other module in the process is using the device any more. For that reason it is recommended to use cuDevicePrimaryCtxRelease() in most cases. However, it is safe for other modules to call cuDevicePrimaryCtxRelease() even after resetting the device. Resetting the primary context does not release it; an application that has retained the primary context should explicitly release its usage.

    Parameters:

    dev (CUdevice) – Device for which the primary context is destroyed

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE

    Return type:

    CUresult
    Context Management

    This section describes the context management functions of the low-level CUDA driver application programming interface.

    Please note that some functions are described in the Primary Context Management section.
    cuda.cuda.cuCtxCreate(unsigned int flags, dev)

    Create a CUDA context.

    Creates a new CUDA context and associates it with the calling thread. The flags parameter is described below. The context is created with a usage count of 1 and the caller of cuCtxCreate() must call cuCtxDestroy() when done using the context. If a context is already current to the thread, it is supplanted by the newly created context and may be restored by a subsequent call to cuCtxPopCurrent().

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.

    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.

    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.

    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.

    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.

    • CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. This flag must be set in order to allocate pinned host memory that is accessible to the GPU.

    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage, at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled. Instead, the per-thread stack size can be controlled with cuCtxSetLimit().

    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.

    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. Setting this flag on any context creation is equivalent to setting the CU_COREDUMP_ENABLE_USER_TRIGGER attribute to true globally.

    • CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled “API Synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    Context creation will fail with CUDA_ERROR_UNKNOWN if the compute mode of the device is CU_COMPUTEMODE_PROHIBITED. The function cuDeviceGetAttribute() can be used with CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode of the device. The nvidia-smi tool can be used to set the compute mode for devices. Documentation for nvidia-smi can be obtained by passing a -h option to it.

    Parameters:

    • flags (unsigned int) – Context creation flags

    • dev (CUdevice) – Device to create context on

    Returns:

    Notes

    In most cases it is recommended to use cuDevicePrimaryCtxRetain.
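    Example: a minimal create/destroy round trip (not part of the original reference); the scheduling flag is illustrative and error checks are reduced to assertions:

        from cuda import cuda

        err, = cuda.cuInit(0)
        err, dev = cuda.cuDeviceGet(0)

        # Create a context that spins while waiting on the GPU; it becomes
        # current to this thread.
        err, ctx = cuda.cuCtxCreate(cuda.CUctx_flags.CU_CTX_SCHED_SPIN, dev)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # ... allocate memory, load modules, launch kernels ...

        # The creator owns the context and must destroy it when done.
        err, = cuda.cuCtxDestroy(ctx)
        assert err == cuda.CUresult.CUDA_SUCCESS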
    cuda.cuda.cuCtxCreate_v3(paramsArray: Optional[Tuple[CUexecAffinityParam] | List[CUexecAffinityParam]], int numParams, unsigned int flags, dev)

    Create a CUDA context with execution affinity.

    Creates a new CUDA context with execution affinity and associates it with the calling thread. The paramsArray and flags parameters are described below. The context is created with a usage count of 1 and the caller of cuCtxCreate() must call cuCtxDestroy() when done using the context. If a context is already current to the thread, it is supplanted by the newly created context and may be restored by a subsequent call to cuCtxPopCurrent().

    The type and the amount of execution resource the context can use is limited by paramsArray and numParams. The paramsArray is an array of CUexecAffinityParam and the numParams describes the size of the array. If two CUexecAffinityParam in the array have the same type, the latter execution affinity parameter overrides the former execution affinity parameter. The supported execution affinity types are:

    • CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion of SMs is specified as the number of SMs via CUexecAffinitySmCount. This limit will be internally rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution affinity of the context via cuCtxGetExecAffinity after context creation. Currently, this attribute is only supported under Volta+ MPS.

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.

    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.

    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.

    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.

    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.

    • CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. This flag must be set in order to allocate pinned host memory that is accessible to the GPU.

    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage, at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled. Instead, the per-thread stack size can be controlled with cuCtxSetLimit().

    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.

    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. Setting this flag on any context creation is equivalent to setting the CU_COREDUMP_ENABLE_USER_TRIGGER attribute to true globally.

    Context creation will fail with CUDA_ERROR_UNKNOWN if the compute mode of the device is CU_COMPUTEMODE_PROHIBITED. The function cuDeviceGetAttribute() can be used with CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode of the device. The nvidia-smi tool can be used to set the compute mode for devices. Documentation for nvidia-smi can be obtained by passing a -h option to it.

    Parameters:

    • paramsArray (List[CUexecAffinityParam]) – Execution affinity parameters

    • numParams (int) – Number of execution affinity parameters

    • flags (unsigned int) – Context creation flags

    • dev (CUdevice) – Device to create context on

    Returns:
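    Example: a sketch (not part of the original reference) of creating a context limited to a number of SMs. It assumes the Python CUexecAffinityParam mirrors the C struct's type and param.smCount.val fields, and, as noted above, the limit only takes effect under Volta+ MPS:

        from cuda import cuda

        err, = cuda.cuInit(0)
        err, dev = cuda.cuDeviceGet(0)

        # One execution-affinity parameter limiting the context to 4 SMs
        # (field names assumed from the C struct CUexecAffinityParam).
        param = cuda.CUexecAffinityParam()
        param.type = cuda.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT
        param.param.smCount.val = 4

        err, ctx = cuda.cuCtxCreate_v3([param], 1, 0, dev)

        # Query what was actually granted; the driver may round the count up.
        err, granted = cuda.cuCtxGetExecAffinity(
            cuda.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT
        )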
    cuda.cuda.cuCtxCreate_v4(CUctxCreateParams ctxCreateParams: Optional[CUctxCreateParams], unsigned int flags, dev)

    Create a CUDA context.

    Creates a new CUDA context and associates it with the calling thread. The flags parameter is described below. The context is created with a usage count of 1 and the caller of cuCtxCreate() must call cuCtxDestroy() when done using the context. If a context is already current to the thread, it is supplanted by the newly created context and may be restored by a subsequent call to cuCtxPopCurrent().

    A CUDA context can be created with execution affinity. The type and the amount of execution resource the context can use is limited by paramsArray and numExecAffinityParams in execAffinity. The paramsArray is an array of CUexecAffinityParam and the numExecAffinityParams describes the size of the paramsArray. If two CUexecAffinityParam in the array have the same type, the latter execution affinity parameter overrides the former execution affinity parameter. The supported execution affinity types are:

    • CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion of SMs is specified as the number of SMs via CUexecAffinitySmCount. This limit will be internally rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution affinity of the context via cuCtxGetExecAffinity after context creation. Currently, this attribute is only supported under Volta+ MPS.

    A CUDA context can be created in CIG (CUDA in Graphics) mode by setting cigParams. Hardware support and software support for graphics clients can be determined using cuDeviceGetAttribute() with CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED. Data from the graphics client is shared with CUDA via the sharedData member of cigParams. For D3D12, sharedData is an ID3D12CommandQueue handle.

    Either execAffinityParams or cigParams can be set to a non-null value. Setting both to a non-null value will result in undefined behavior.

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.

    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.

    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.

    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.

    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.

    • CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. This flag must be set in order to allocate pinned host memory that is accessible to the GPU.

    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage, at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled. Instead, the per-thread stack size can be controlled with cuCtxSetLimit().

    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. This flag is not supported when the CUDA context is created in CIG (CUDA in Graphics) mode.

    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. Setting this flag on any context creation is equivalent to setting the CU_COREDUMP_ENABLE_USER_TRIGGER attribute to true globally. This flag is not supported when the CUDA context is created in CIG (CUDA in Graphics) mode.

    • CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled “API Synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    Context creation will fail with CUDA_ERROR_UNKNOWN if the compute mode of the device is CU_COMPUTEMODE_PROHIBITED. The function cuDeviceGetAttribute() can be used with CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode of the device. The nvidia-smi tool can be used to set the compute mode for devices. Documentation for nvidia-smi can be obtained by passing a -h option to it.

    Context creation will fail with CUDA_ERROR_INVALID_VALUE if an invalid parameter was passed by the client to create the CUDA context.

    Context creation in CIG mode will fail with CUDA_ERROR_NOT_SUPPORTED if CIG is not supported by the device or the driver.

    Parameters:

    • ctxCreateParams (CUctxCreateParams) – Context creation parameters

    • flags (unsigned int) – Context creation flags

    • dev (CUdevice) – Device to create context on

    Returns:
    cuda.cuda.cuCtxDestroy(ctx)

    Destroy a CUDA context.

    Destroys the CUDA context specified by ctx. The context ctx will be destroyed regardless of how many threads it is current to. It is the responsibility of the calling function to ensure that no API calls that use ctx are issued while cuCtxDestroy() is executing.

    Destroys and cleans up all resources associated with the context. It is the caller’s responsibility to ensure that the context or its resources are not accessed or passed in subsequent API calls; doing so will result in undefined behavior. These resources include CUDA types CUmodule, CUfunction, CUstream, CUevent, CUarray, CUmipmappedArray, CUtexObject, CUsurfObject, CUtexref, CUsurfref, CUgraphicsResource, CUlinkState, CUexternalMemory and CUexternalSemaphore. These resources also include memory allocations by cuMemAlloc(), cuMemAllocHost(), cuMemAllocManaged() and cuMemAllocPitch().

    If ctx is current to the calling thread then ctx will also be popped from the current thread’s context stack (as though cuCtxPopCurrent() were called). If ctx is current to other threads, then ctx will remain current to those threads, and attempting to access ctx from those threads will result in the error CUDA_ERROR_CONTEXT_IS_DESTROYED.

    Parameters:

    ctx (CUcontext) – Context to destroy

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    Notes

    cuCtxDestroy() will not destroy memory allocations by cuMemCreate(), cuMemAllocAsync() and cuMemAllocFromPoolAsync(). These memory allocations are not associated with any CUDA context and need to be destroyed explicitly.
    cuda.cuda.cuCtxPushCurrent(ctx)

    Pushes a context on the current CPU thread.

    Pushes the given context ctx onto the CPU thread’s stack of current contexts. The specified context becomes the CPU thread’s current context, so all CUDA functions that operate on the current context are affected.

    The previous current context may be made current again by calling cuCtxDestroy() or cuCtxPopCurrent().

    Parameters:

    ctx (CUcontext) – Context to push

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuCtxPopCurrent()

    Pops the current CUDA context from the current CPU thread.

    Pops the current CUDA context from the CPU thread and passes back the old context handle in *pctx. That context may then be made current to a different CPU thread by calling cuCtxPushCurrent().

    If a context was current to the CPU thread before cuCtxCreate() or cuCtxPushCurrent() was called, this function makes that context current to the CPU thread again.

    Returns:
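    Example: a sketch (not part of the original reference) of temporarily making a context current via the push/pop stack; ctx is assumed to have been created as shown earlier:

        from cuda import cuda

        # Make ctx current by pushing it onto this thread's context stack.
        err, = cuda.cuCtxPushCurrent(ctx)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # ... issue work against ctx ...

        # Pop it off again; whatever was current before becomes current again.
        err, popped = cuda.cuCtxPopCurrent()
        assert int(popped) == int(ctx)  # handles compare by pointer value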
    cuda.cuda.cuCtxSetCurrent(ctx)

    Binds the specified CUDA context to the calling CPU thread.

    Binds the specified CUDA context to the calling CPU thread. If ctx is NULL then the CUDA context previously bound to the calling CPU thread is unbound and CUDA_SUCCESS is returned.

    If there exists a CUDA context stack on the calling CPU thread, this will replace the top of that stack with ctx. If ctx is NULL then this will be equivalent to popping the top of the calling CPU thread’s CUDA context stack (or a no-op if the calling CPU thread’s CUDA context stack is empty).

    Parameters:

    ctx (CUcontext) – Context to bind to the calling CPU thread

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult

    cuda.cuda.cuCtxGetCurrent()

    Returns the CUDA context bound to the calling CPU thread.

    Returns in *pctx the CUDA context bound to the calling CPU thread. If no context is bound to the calling CPU thread then *pctx is set to NULL and CUDA_SUCCESS is returned.

    Returns:
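    Example: cuCtxSetCurrent replaces the top of the stack rather than pushing, as this sketch (not part of the original reference) shows; ctx is assumed to exist:

        from cuda import cuda

        # Bind ctx to this thread, replacing the current top of the stack.
        err, = cuda.cuCtxSetCurrent(ctx)

        # Read the binding back.
        err, cur = cuda.cuCtxGetCurrent()
        assert int(cur) == int(ctx)

        # Binding NULL unbinds the thread (equivalent to popping the top).
        err, = cuda.cuCtxSetCurrent(cuda.CUcontext(0))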
    cuda.cuda.cuCtxGetDevice()

    Returns the device ID for the current context.

    Returns in *device the ordinal of the current context’s device.

    Returns:

    cuda.cuda.cuCtxGetFlags()

    Returns the flags for the current context.

    Returns in *flags the flags of the current context. See cuCtxCreate for flag values.

    Returns:

    cuda.cuda.cuCtxSetFlags(unsigned int flags)

    Sets the flags for the current context.

    Sets the flags for the current context, overwriting previously set ones. See cuDevicePrimaryCtxSetFlags for flag values.

    Parameters:

    flags (unsigned int) – Flags to set on the current context

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuCtxGetId(ctx)

    Returns the unique Id associated with the context supplied.

    Returns in ctxId the unique Id which is associated with a given context. The Id is unique for the life of the program for this instance of CUDA. If context is supplied as NULL and there is one current, the Id of the current context is returned.

    Parameters:

    ctx (CUcontext) – Context for which to obtain the Id

    Returns:
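    Example: the introspection calls above in one sketch (not part of the original reference); a context is assumed to be current on the thread:

        from cuda import cuda

        err, dev = cuda.cuCtxGetDevice()    # ordinal of the context's device
        err, flags = cuda.cuCtxGetFlags()   # flags the context runs with
        err, ctx = cuda.cuCtxGetCurrent()
        err, ctx_id = cuda.cuCtxGetId(ctx)  # unique for the life of the program
        print(int(dev), hex(flags), int(ctx_id))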
    cuda.cuda.cuCtxSynchronize()

    Block for the current context’s tasks to complete.

    Blocks until the current context has completed all preceding requested tasks. If the current context is the primary context, green contexts that have been created will also be synchronized. cuCtxSynchronize() returns an error if one of the preceding tasks failed. If the context was created with the CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the GPU context has finished its work.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult
    cuda.cuda.cuCtxSetLimit(limit: CUlimit, size_t value)

    Set resource limits.

    Setting limit to value is a request by the application to update the current limit maintained by the context. The driver is free to modify the requested value to meet h/w requirements (this could be clamping to minimum or maximum values, rounding up to nearest element size, etc). The application can use cuCtxGetLimit() to find out exactly what the limit has been set to.

    Setting each CUlimit has its own specific restrictions, so each is discussed here.

    • CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread. The driver automatically increases the per-thread stack size for each kernel launch as needed. This size isn’t reset back to the original value after each launch. Setting this value will take effect immediately, and if necessary, the device will block until all preceding requested tasks are complete.

    • CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used by the printf() device system call. Setting CU_LIMIT_PRINTF_FIFO_SIZE must be performed before launching any kernel that uses the printf() device system call, otherwise CUDA_ERROR_INVALID_VALUE will be returned.

    • CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used by the malloc() and free() device system calls. Setting CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel that uses the malloc() or free() device system calls, otherwise CUDA_ERROR_INVALID_VALUE will be returned.

    • CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of a grid at which a thread can safely call cudaDeviceSynchronize(). Setting this limit must be performed before any launch of a kernel that uses the device runtime and calls cudaDeviceSynchronize() above the default sync depth, two levels of grids. Calls to cudaDeviceSynchronize() will fail with error code cudaErrorSyncDepthExceeded if the limitation is violated. This limit can be set smaller than the default or up to the maximum launch depth of 24. When setting this limit, keep in mind that additional levels of sync depth require the driver to reserve large amounts of device memory which can no longer be used for user allocations. If these reservations of device memory fail, cuCtxSetLimit() will return CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability < 9.0. Attempting to set this limit on devices of other compute capability versions will result in the error CUDA_ERROR_UNSUPPORTED_LIMIT being returned.

    • CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of outstanding device runtime launches that can be made from the current context. A grid is outstanding from the point of launch up until the grid is known to have been completed. Device runtime launches which violate this limitation fail and return cudaErrorLaunchPendingCountExceeded when cudaGetLastError() is called after launch. If more pending launches than the default (2048 launches) are needed for a module using the device runtime, this limit can be increased. Keep in mind that being able to sustain additional pending launches will require the driver to reserve larger amounts of device memory upfront which can no longer be used for allocations. If these reservations fail, cuCtxSetLimit() will return CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability 3.5 and higher. Attempting to set this limit on devices of compute capability less than 3.5 will result in the error CUDA_ERROR_UNSUPPORTED_LIMIT being returned.

    • CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity. Values can range from 0B to 128B. This is purely a performance hint and it can be ignored or clamped depending on the platform.

    • CU_LIMIT_PERSISTING_L2_CACHE_SIZE controls the size in bytes available for persisting L2 cache. This is purely a performance hint and it can be ignored or clamped depending on the platform.

    Parameters:

    • limit (CUlimit) – Limit to set

    • value (size_t) – Size of limit

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_UNSUPPORTED_LIMIT, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult
    cuda.cuda.cuCtxGetLimit(limit: CUlimit)

    Returns resource limits.

    Returns in *pvalue the current size of limit. The supported CUlimit values are:

    Parameters:

    limit (CUlimit) – Limit to query

    Returns:
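    Example: a sketch (not part of the original reference) of raising the per-thread device stack size and reading back the value the driver actually applied; the 16 KiB figure is arbitrary:

        from cuda import cuda

        # Request a 16 KiB per-thread device stack; the driver may clamp
        # or round the value to meet hardware requirements.
        err, = cuda.cuCtxSetLimit(cuda.CUlimit.CU_LIMIT_STACK_SIZE, 16 * 1024)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # Read back what was actually set.
        err, size = cuda.cuCtxGetLimit(cuda.CUlimit.CU_LIMIT_STACK_SIZE)
        print(f"stack size limit: {size} bytes")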
    cuda.cuda.cuCtxGetCacheConfig()

    Returns the preferred cache configuration for the current context.

    On devices where the L1 cache and shared memory use the same hardware resources, this function returns through pconfig the preferred cache configuration for the current context. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute functions.

    This will return a pconfig of CU_FUNC_CACHE_PREFER_NONE on devices where the size of the L1 cache and shared memory are fixed.

    The supported cache configurations are:

    Returns:

    cuda.cuda.cuCtxSetCacheConfig(config: CUfunc_cache)

    Sets the preferred cache configuration for the current context.

    On devices where the L1 cache and shared memory use the same hardware resources, this sets through config the preferred cache configuration for the current context. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute the function. Any function preference set via cuFuncSetCacheConfig() or cuKernelSetCacheConfig() will be preferred over this context-wide setting. Setting the context-wide cache configuration to CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer to not change the cache configuration unless required to launch the kernel.

    This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

    Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

    The supported cache configurations are:

    Parameters:

    config (CUfunc_cache) – Requested cache configuration

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult
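    Example: a sketch (not part of the original reference) preferring shared memory over L1 for the current context; on devices with a fixed L1/shared split this is a no-op, as described above:

        from cuda import cuda

        # Prefer a larger shared-memory carveout for subsequent launches.
        err, = cuda.cuCtxSetCacheConfig(
            cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED
        )

        err, config = cuda.cuCtxGetCacheConfig()
        print(config)  # CU_FUNC_CACHE_PREFER_SHARED where supported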
    cuda.cuda.cuCtxGetApiVersion(ctx)

    Gets the context’s API version.

    Returns a version number in version corresponding to the capabilities of the context (e.g. 3010 or 3020), which library developers can use to direct callers to a specific API version. If ctx is NULL, returns the API version used to create the currently bound context.

    Note that new API versions are only introduced when context capabilities are changed that break binary compatibility, so the API version and driver version may be different. For example, it is valid for the API version to be 3020 while the driver version is 4020.

    Parameters:

    ctx (CUcontext) – Context to check

    Returns:
    cuda.cuda.cuCtxGetStreamPriorityRange()

    Returns numerical values that correspond to the least and greatest stream priorities.

    Returns in *leastPriority and *greatestPriority the numerical values that correspond to the least and greatest stream priorities respectively. Stream priorities follow a convention where lower numbers imply greater priorities. The range of meaningful stream priorities is given by [*greatestPriority, *leastPriority]. If the user attempts to create a stream with a priority value that is outside the meaningful range as specified by this API, the priority is automatically clamped down or up to either *leastPriority or *greatestPriority respectively. See cuStreamCreateWithPriority for details on creating a priority stream. A NULL may be passed in for *leastPriority or *greatestPriority if the value is not desired.

    This function will return ‘0’ in both *leastPriority and *greatestPriority if the current context’s device does not support stream priorities (see cuDeviceGetAttribute).

    Returns:

    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    • leastPriority (int) – Pointer to an int in which the numerical value for least stream priority is returned

    • greatestPriority (int) – Pointer to an int in which the numerical value for greatest stream priority is returned
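    Example: a sketch (not part of the original reference) that queries the priority range and creates a stream at the greatest priority; remember that lower numbers mean greater priority:

        from cuda import cuda

        err, least, greatest = cuda.cuCtxGetStreamPriorityRange()

        # Lower numerical value == greater priority, so `greatest` is the
        # strongest priority the device supports.
        err, stream = cuda.cuStreamCreateWithPriority(
            cuda.CUstream_flags.CU_STREAM_NON_BLOCKING, greatest
        )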
    cuda.cuda.cuCtxResetPersistingL2Cache()

    Resets all persisting lines in cache to normal status.

    cuCtxResetPersistingL2Cache resets all persisting lines in cache to normal status. It takes effect on function return.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_NOT_SUPPORTED

    Return type:

    CUresult

    See also

    CUaccessPolicyWindow
    cuda.cuda.cuCtxGetExecAffinity(typename: CUexecAffinityType)

    Returns the execution affinity setting for the current context.

    Returns in *pExecAffinity the current value of typename. The supported CUexecAffinityType values are:

    Parameters:

    typename (CUexecAffinityType) – Execution affinity type to query

    Returns:

    See also

    CUexecAffinityParam
    cuda.cuda.cuCtxRecordEvent(hCtx, hEvent)

    Records an event.

    Captures in hEvent all the activities of the context hCtx at the time of this call. hEvent and hCtx must be from the same CUDA context, otherwise CUDA_ERROR_INVALID_HANDLE will be returned. Calls such as cuEventQuery() or cuCtxWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hCtx after this call do not modify hEvent. If the context passed to hCtx is the primary context, hEvent will capture all the activities of the primary context and its green contexts. If the context passed to hCtx is a context converted from a green context via cuCtxFromGreenCtx(), hEvent will capture only the activities of the green context.

    Parameters:

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED if the specified context hCtx has a stream in the capture mode. In such a case, the call will invalidate all the conflicting captures.
    cuda.cuda.cuCtxWaitEvent(hCtx, hEvent)

    Make a context wait on an event.

    Makes all future work submitted to context hCtx wait for all work captured in hEvent. The synchronization will be performed on the device and will not block the calling CPU thread. See cuCtxRecordEvent() for details on what is captured by an event. If the context passed to hCtx is the primary context, the primary context and its green contexts will wait for hEvent. If the context passed to hCtx is a context converted from a green context via cuCtxFromGreenCtx(), the green context will wait for hEvent.

    Parameters:

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    hEvent may be from a different context or device than hCtx.

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED and invalidate the capture if the specified event hEvent is part of an ongoing capture sequence or if the specified context hCtx has a stream in the capture mode.
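    Example: a sketch (not part of the original reference) of ordering two contexts with an event, entirely on the device; ctxA and ctxB are assumed to exist, with ctxA current when the event is created:

        from cuda import cuda

        # Event created in ctxA's context (it must match hCtx for the record).
        err, event = cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)

        # Capture everything ctxA has issued so far ...
        err, = cuda.cuCtxRecordEvent(ctxA, event)

        # ... and make all future work submitted to ctxB wait for it,
        # without blocking the calling CPU thread.
        err, = cuda.cuCtxWaitEvent(ctxB, event)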

    Module Management

    This section describes the module management functions of the low-level CUDA driver application programming interface.

    class cuda.cuda.CUmoduleLoadingMode(value)

    CUDA Lazy Loading status

    CU_MODULE_EAGER_LOADING = 1

    Lazy Kernel Loading is not enabled

    CU_MODULE_LAZY_LOADING = 2

    Lazy Kernel Loading is enabled
    cuda.cuda.cuModuleLoad(char *fname)

    Loads a compute module.

    Takes a filename fname and loads the corresponding module module into the current context. The CUDA driver API does not attempt to lazily allocate the resources needed by a module; if the memory for functions and data (constant and global) needed by the module cannot be allocated, cuModuleLoad() fails. The file should be a cubin file as output by nvcc, or a PTX file either as output by nvcc or handwritten, or a fatbin file as output by nvcc from toolchain 4.0 or later.

    Parameters:

    fname (bytes) – Filename of module to load

    Returns:
    cuda.cuda.cuModuleLoadData(image)

    Load a module’s data.

    Takes a pointer image and loads the corresponding module module into the current context. The image may be a cubin or fatbin as output by nvcc, or a NULL-terminated PTX, either as output by nvcc or hand-written.

    Parameters:

    image (Any) – Module data to load

    Returns:
    cuda.cuda.cuModuleLoadDataEx(image, unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])

    Load a module’s data with options.

    Takes a pointer image and loads the corresponding module module into the current context. The image may be a cubin or fatbin as output by nvcc, or a NULL-terminated PTX, either as output by nvcc or hand-written.

    Parameters:

    • image (Any) – Module data to load

    • numOptions (unsigned int) – Number of options

    • options (List[CUjit_option]) – Options for JIT

    • optionValues (List[Any]) – Option values for JIT

    Returns:
    cuda.cuda.cuModuleLoadFatBinary(fatCubin)

    Load a module’s data.

    Takes a pointer fatCubin and loads the corresponding module module into the current context. The pointer represents a fat binary object, which is a collection of different cubin and/or PTX files, all representing the same device code, but compiled and optimized for different architectures.

    Prior to CUDA 4.0, there was no documented API for constructing and using fat binary objects by programmers. Starting with CUDA 4.0, fat binary objects can be constructed by providing the -fatbin option to nvcc. More information can be found in the nvcc document.

    Parameters:

    fatCubin (Any) – Fat binary to load

    Returns:
    cuda.cuda.cuModuleUnload(hmod)

    Unloads a module.

    Unloads a module hmod from the current context. Attempting to unload a module which was obtained from the Library Management API such as cuLibraryGetModule will return CUDA_ERROR_NOT_PERMITTED.

    Parameters:

    hmod (CUmodule) – Module to unload

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_PERMITTED

    Return type:

    CUresult
    cuda.cuda.cuModuleGetLoadingMode()

    Query lazy loading mode.

    Returns the lazy loading mode. Module loading mode is controlled by the CUDA_MODULE_LOADING environment variable.

    Returns:

    See also

    cuModuleLoad
    cuda.cuda.cuModuleGetFunction(hmod, char *name)

    Returns a function handle.

    Returns in *hfunc the handle of the function of name name located in module hmod. If no function of that name exists, cuModuleGetFunction() returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    • hmod (CUmodule) – Module to retrieve function from

    • name (bytes) – Name of function to retrieve

    Returns:
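    Example: a sketch (not part of the original reference) tying module loading and function lookup together. The PTX bytes and kernel name are placeholders, so this does not run as-is; real PTX would come from nvcc -ptx or NVRTC:

        from cuda import cuda

        # Placeholder NULL-terminated PTX for a kernel named "my_kernel"
        # (hypothetical; substitute real PTX text).
        ptx = b"...PTX text...\x00"

        err, module = cuda.cuModuleLoadData(ptx)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # CUDA_ERROR_NOT_FOUND if no function of that name exists.
        err, kernel = cuda.cuModuleGetFunction(module, b"my_kernel")

        # ... launch via cuLaunchKernel ...

        err, = cuda.cuModuleUnload(module)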
    cuda.cuda.cuModuleGetFunctionCount(mod)

    Returns the number of functions within a module.

    Returns in count the number of functions in mod.

    Parameters:

    mod (CUmodule) – Module to query

    Returns:
    cuda.cuda.cuModuleEnumerateFunctions(unsigned int numFunctions, mod)

    Returns the function handles within a module.

    Returns in functions a maximum number of numFunctions function handles within mod. When function loading mode is set to LAZY the function retrieved may be partially loaded. The loading state of a function can be queried using cuFunctionIsLoaded. CUDA APIs may load the function automatically when called with a partially loaded function handle, which may incur additional latency. Alternatively, cuFunctionLoad can be used to explicitly load a function. The returned function handles become invalid when the module is unloaded.

    Parameters:

    • numFunctions (unsigned int) – Maximum number of function handles that may be returned to the buffer

    • mod (CUmodule) – Module to query from

    Returns:
    cuda.cuda.cuModuleGetGlobal(hmod, char *name)

    Returns a global pointer from a module.

    Returns in *dptr and *bytes the base pointer and size of the global of name name located in module hmod. If no variable of that name exists, cuModuleGetGlobal() returns CUDA_ERROR_NOT_FOUND. One of the parameters dptr or numbytes (not both) can be NULL in which case it is ignored.

    Parameters:

    • hmod (CUmodule) – Module to retrieve global from

    • name (bytes) – Name of global to retrieve

    Returns:

    See also

    cuModuleGetFunction, cuModuleGetTexRef, cuModuleLoad, cuModuleLoadData, cuModuleLoadDataEx, cuModuleLoadFatBinary, cuModuleUnload, cudaGetSymbolAddress, cudaGetSymbolSize
    cuda.cuda.cuLinkCreate(unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])

    Creates a pending JIT linker invocation.

    If the call is successful, the caller owns the returned CUlinkState, which should eventually be destroyed with cuLinkDestroy. The device code machine size (32 or 64 bit) will match the calling application.

    Both linker and compiler options may be specified. Compiler options will be applied to inputs to this linker action which must be compiled from PTX. The options CU_JIT_WALL_TIME, CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, and CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES will accumulate data until the CUlinkState is destroyed.

    The data passed in via cuLinkAddData and cuLinkAddFile will be treated as relocatable (-rdc=true to nvcc) when linking the final cubin during cuLinkComplete and will have similar consequences as offline relocatable device code linking.

    optionValues must remain valid for the life of the CUlinkState if output options are used. No other references to inputs are maintained after this call returns.

    Parameters:

    • numOptions (unsigned int) – Size of options arrays

    • options (List[CUjit_option]) – Array of linker and compiler options

    • optionValues (List[Any]) – Array of option values, each cast to void *

    Returns:

    Notes

    For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted
    cuda.cuda.cuLinkAddData(state, typename: CUjitInputType, data, size_t size, char *name, unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])

    Add an input to a pending linker invocation.

    Ownership of data is retained by the caller. No reference is retained to any inputs after this call returns.

    This method accepts only compiler options, which are used if the data must be compiled from PTX, and does not accept any of CU_JIT_WALL_TIME, CU_JIT_INFO_LOG_BUFFER, CU_JIT_ERROR_LOG_BUFFER, CU_JIT_TARGET_FROM_CUCONTEXT, or CU_JIT_TARGET.

    Parameters:

    • state (CUlinkState) – A pending linker action.

    • typename (CUjitInputType) – The type of the input data.

    • data (Any) – The input data. PTX must be NULL-terminated.

    • size (size_t) – The length of the input data.

    • name (bytes) – An optional name for this input in log messages.

    • numOptions (unsigned int) – Size of options.

    • options (List[CUjit_option]) – Options to be applied only for this input (overrides options from cuLinkCreate).

    • optionValues (List[Any]) – Array of option values, each cast to void *.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_PTX, CUDA_ERROR_UNSUPPORTED_PTX_VERSION, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_NO_BINARY_FOR_GPU

    Return type:

    CUresult

    Notes

    For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted
    cuda.cuda.cuLinkAddFile(state, typename: CUjitInputType, char *path, unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])

    Add a file input to a pending linker invocation.

    No reference is retained to any inputs after this call returns.

    This method accepts only compiler options, which are used if the input must be compiled from PTX, and does not accept any of CU_JIT_WALL_TIME, CU_JIT_INFO_LOG_BUFFER, CU_JIT_ERROR_LOG_BUFFER, CU_JIT_TARGET_FROM_CUCONTEXT, or CU_JIT_TARGET.

    This method is equivalent to invoking cuLinkAddData on the contents of the file.

    Parameters:

    • state (CUlinkState) – A pending linker action

    • typename (CUjitInputType) – The type of the input data

    • path (bytes) – Path to the input file

    • numOptions (unsigned int) – Size of options

    • options (List[CUjit_option]) – Options to be applied only for this input (overrides options from cuLinkCreate)

    • optionValues (List[Any]) – Array of option values, each cast to void *

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_FILE_NOT_FOUND, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_PTX, CUDA_ERROR_UNSUPPORTED_PTX_VERSION, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_NO_BINARY_FOR_GPU

    Return type:

    CUresult

    Notes

    For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted
    cuda.cuda.cuLinkComplete(state)

    Complete a pending linker invocation.

    Completes the pending linker action and returns the cubin image for the linked device code, which can be used with cuModuleLoadData. The cubin is owned by state, so it should be loaded before state is destroyed via cuLinkDestroy. This call does not destroy state.

    Parameters:

    state (CUlinkState) – A pending linker invocation

    Returns:
    cuda.cuda.cuLinkDestroy(state)

    Destroys state for a JIT linker invocation.

    Parameters:

    state (CUlinkState) – State object for the linker invocation

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE

    Return type:

    CUresult

    See also

    cuLinkCreate
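    Example: a sketch (not part of the original reference) of the whole JIT-link cycle. The PTX inputs are placeholders and the option arrays are left empty (None with counts of zero):

        from cuda import cuda

        # Hypothetical NULL-terminated, relocatable PTX inputs.
        ptx_a = b"...PTX A...\x00"
        ptx_b = b"...PTX B...\x00"

        err, state = cuda.cuLinkCreate(0, None, None)
        assert err == cuda.CUresult.CUDA_SUCCESS

        for name, ptx in ((b"a.ptx", ptx_a), (b"b.ptx", ptx_b)):
            err, = cuda.cuLinkAddData(
                state, cuda.CUjitInputType.CU_JIT_INPUT_PTX,
                ptx, len(ptx), name, 0, None, None
            )

        # The cubin is owned by `state`: load it before destroying the state.
        err, cubin, size = cuda.cuLinkComplete(state)
        err, module = cuda.cuModuleLoadData(cubin)
        err, = cuda.cuLinkDestroy(state)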
    Library Management

    This section describes the library management functions of the low-level CUDA driver application programming interface.
    cuda.cuda.cuLibraryLoadData(code, jitOptions: Optional[Tuple[CUjit_option] | List[CUjit_option]], jitOptionsValues: Optional[Tuple[Any] | List[Any]], unsigned int numJitOptions, libraryOptions: Optional[Tuple[CUlibraryOption] | List[CUlibraryOption]], libraryOptionValues: Optional[Tuple[Any] | List[Any]], unsigned int numLibraryOptions)#

    Load a library with specified code and options.

    Takes a pointer code and loads the corresponding library library based on the application-defined library loading mode:

    • If module loading is set to EAGER, via the environment variables described in “Module loading”, library is loaded eagerly into all contexts at the time of the call and future contexts at the time of creation until the library is unloaded with cuLibraryUnload().

    • If the environment variables are set to LAZY, library is not immediately loaded onto all existent contexts and will only be loaded when a function is needed for that context, such as a kernel launch.

    These environment variables are described in the CUDA programming guide under the “CUDA environment variables” section.

    The code may be a cubin or fatbin as output by nvcc, or a NULL-terminated PTX, either as output by nvcc or hand-written. A fatbin should also contain relocatable code when doing separate compilation.

    Options are passed as an array via jitOptions and any corresponding parameters are passed in jitOptionsValues. The number of total JIT options is supplied via numJitOptions. Any outputs will be returned via jitOptionsValues.

    Library load options are passed as an array via libraryOptions and any corresponding parameters are passed in libraryOptionValues. The number of total library load options is supplied via numLibraryOptions.

    Parameters:

    • code (Any) – Code to load

    • jitOptions (List[CUjit_option]) – Options for JIT

    • jitOptionsValues (List[Any]) – Option values for JIT

    • numJitOptions (unsigned int) – Number of options

    • libraryOptions (List[CUlibraryOption]) – Options for loading

    • libraryOptionValues (List[Any]) – Option values for loading

    • numLibraryOptions (unsigned int) – Number of options for loading

    Returns:

    Notes

    If the library contains managed variables and no device in the system supports managed variables this call is expected to return CUDA_ERROR_NOT_SUPPORTED.

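    A minimal sketch of loading and later unloading a library from an in-memory PTX image with no JIT or library options, assuming a current context and that `ptx` is NULL-terminated PTX bytes:

        from cuda import cuda

        err, library = cuda.cuLibraryLoadData(ptx, [], [], 0, [], [], 0)
        assert err == cuda.CUresult.CUDA_SUCCESS
        # ... retrieve kernels or globals from `library` ...
        err, = cuda.cuLibraryUnload(library)
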
    cuda.cuda.cuLibraryLoadFromFile(char *fileName, jitOptions: Optional[Tuple[CUjit_option] | List[CUjit_option]], jitOptionsValues: Optional[Tuple[Any] | List[Any]], unsigned int numJitOptions, libraryOptions: Optional[Tuple[CUlibraryOption] | List[CUlibraryOption]], libraryOptionValues: Optional[Tuple[Any] | List[Any]], unsigned int numLibraryOptions)#

    Load a library with specified file and options.

    Takes a filename fileName and loads the corresponding library library based on the application-defined library loading mode:

    • If module loading is set to EAGER, via the environment variables described in “Module loading”, library is loaded eagerly into all contexts at the time of the call and future contexts at the time of creation until the library is unloaded with cuLibraryUnload().

    • If the environment variables are set to LAZY, library is not immediately loaded onto all existent contexts and will only be loaded when a function is needed for that context, such as a kernel launch.

    These environment variables are described in the CUDA programming guide under the “CUDA environment variables” section.

    The file should be a cubin file as output by nvcc, or a PTX file either as output by nvcc or hand-written, or a fatbin file as output by nvcc. A fatbin should also contain relocatable code when doing separate compilation.

    Options are passed as an array via jitOptions and any corresponding parameters are passed in jitOptionsValues. The number of total options is supplied via numJitOptions. Any outputs will be returned via jitOptionsValues.

    Library load options are passed as an array via libraryOptions and any corresponding parameters are passed in libraryOptionValues. The number of total library load options is supplied via numLibraryOptions.

    Parameters:

    • fileName (bytes) – File to load from

    • jitOptions (List[CUjit_option]) – Options for JIT

    • jitOptionsValues (List[Any]) – Option values for JIT

    • numJitOptions (unsigned int) – Number of options

    • libraryOptions (List[CUlibraryOption]) – Options for loading

    • libraryOptionValues (List[Any]) – Option values for loading

    • numLibraryOptions (unsigned int) – Number of options for loading

    Returns:

    Notes

    If the library contains managed variables and no device in the system supports managed variables this call is expected to return CUDA_ERROR_NOT_SUPPORTED.

    cuda.cuda.cuLibraryUnload(library)#

    Unloads a library.

    Unloads the library specified with library.

    Parameters:

    library (CUlibrary) – Library to unload

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuLibraryGetKernel(library, char *name)#

    Returns a kernel handle.

    Returns in pKernel the handle of the kernel with name name located in library library. If the kernel handle is not found, the call returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    • library (CUlibrary) – Library to retrieve kernel from

    • name (bytes) – Name of kernel to retrieve

    Returns:

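    For example, looking up a kernel by its (possibly mangled) name, continuing the loading sketch above; the kernel name b"vecAdd" is hypothetical:

        err, kernel = cuda.cuLibraryGetKernel(library, b"vecAdd")
        if err == cuda.CUresult.CUDA_ERROR_NOT_FOUND:
            print("no kernel with that name in this library")
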
    cuda.cuda.cuLibraryGetKernelCount(lib)#

    Returns the number of kernels within a library.

    Returns in count the number of kernels in lib.

    Parameters:

    lib (CUlibrary) – Library to query

    Returns:

    cuda.cuda.cuLibraryEnumerateKernels(unsigned int numKernels, lib)#

    Retrieve the kernel handles within a library.

    Returns in kernels a maximum of numKernels kernel handles within lib. The returned kernel handles become invalid when the library is unloaded. See the sketch after this entry for how the two calls pair.

    Parameters:

    • numKernels (unsigned int) – Maximum number of kernel handles that may be returned to the buffer

    • lib (CUlibrary) – Library to query from

    Returns:

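    The two calls pair naturally: query the count first, then size the enumeration to it. A sketch, assuming `library` was loaded as shown earlier:

        err, count = cuda.cuLibraryGetKernelCount(library)
        err, kernels = cuda.cuLibraryEnumerateKernels(count, library)
        # Handles in `kernels` become invalid once the library is unloaded.
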
    cuda.cuda.cuLibraryGetModule(library)#

    Returns a module handle.

    Returns in pMod the module handle associated with the current context located in library library. If the module handle is not found, the call returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    library (CUlibrary) – Library to retrieve module from

    Returns:

    cuda.cuda.cuKernelGetFunction(kernel)#

    Returns a function handle.

    Returns in pFunc the handle of the function for the requested kernel kernel and the current context. If the function handle is not found, the call returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    kernel (CUkernel) – Kernel to retrieve function for the requested context

    Returns:

    cuda.cuda.cuKernelGetLibrary(kernel)#

    Returns a library handle.

    Returns in pLib the handle of the library for the requested kernel kernel.

    Parameters:

    kernel (CUkernel) – Kernel to retrieve library handle for

    Returns:

    cuda.cuda.cuLibraryGetGlobal(library, char *name)#

    Returns a global device pointer.

    Returns in *dptr and *bytes the base pointer and size of the global with name name for the requested library library and the current context. If no global for the requested name name exists, the call returns CUDA_ERROR_NOT_FOUND. One of the parameters dptr or numbytes (not both) can be NULL, in which case it is ignored.

    Parameters:

    • library (CUlibrary) – Library to retrieve global from

    • name (bytes) – Name of global to retrieve

    Returns:

    cuda.cuda.cuLibraryGetManaged(library, char *name)#

    Returns a pointer to managed memory.

    Returns in *dptr and *bytes the base pointer and size of the managed memory with name name for the requested library library. If no managed memory with the requested name name exists, the call returns CUDA_ERROR_NOT_FOUND. One of the parameters dptr or numbytes (not both) can be NULL, in which case it is ignored. Note that managed memory for library library is shared across devices and is registered when the library is loaded into at least one context.

    Parameters:

    • library (CUlibrary) – Library to retrieve managed memory from

    • name (bytes) – Name of managed memory to retrieve

    Returns:

    cuda.cuda.cuLibraryGetUnifiedFunction(library, char *symbol)#

    Returns a pointer to a unified function.

    Returns in *fptr the function pointer to a unified function denoted by symbol. If no unified function with name symbol exists, the call returns CUDA_ERROR_NOT_FOUND. If there is no device with attribute CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS present in the system, the call may return CUDA_ERROR_NOT_FOUND.

    Parameters:

    • library (CUlibrary) – Library to retrieve function pointer memory from

    • symbol (bytes) – Name of function pointer to retrieve

    Returns:

    cuda.cuda.cuKernelGetAttribute(attrib: CUfunction_attribute, kernel, dev)#

    Returns information about a kernel.

    Returns in *pi the integer value of the attribute attrib for the kernel kernel for the requested device dev. The supported attributes are:

    • CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads per block, beyond which a launch of the kernel would fail. This number depends on both the kernel and the requested device.

    • CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of statically-allocated shared memory per block required by this kernel. This does not include dynamically-allocated shared memory requested by the user at runtime.

    • CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated constant memory required by this kernel.

    • CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory used by each thread of this kernel.

    • CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread of this kernel.

    • CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for which the kernel was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0.

    • CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for which the kernel was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version.

    • CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: The attribute to indicate whether the kernel has been compiled with the user-specified option “-Xptxas --dlcm=ca” set.

    • CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of dynamically-allocated shared memory.

    • CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 cache split ratio in percent of total shared memory.

    • CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the kernel must launch with a valid cluster size specified.

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in blocks.

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in blocks.

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in blocks.

    • CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether the function can be launched with a non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform. The CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device. A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities. The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable.

    • CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy.

    Parameters:

    • attrib (CUfunction_attribute) – Attribute requested

    • kernel (CUkernel) – Kernel to query attribute of

    • dev (CUdevice) – Device to query attribute of

    Returns:

    Notes

    If another thread is trying to set the same attribute on the same device using cuKernelSetAttribute() simultaneously, the attribute query will give the old or new value depending on the interleavings chosen by the OS scheduler and memory consistency.

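    A sketch of querying one attribute, assuming `kernel` came from cuLibraryGetKernel and `dev` from cuDeviceGet:

        attr = cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK
        err, max_threads = cuda.cuKernelGetAttribute(attr, kernel, dev)
        # max_threads is the largest block size this kernel can launch with on dev.
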
    cuda.cuda.cuKernelSetAttribute(attrib: CUfunction_attribute, int val, kernel, dev)#

    Sets information about a kernel.

    This call sets the value of a specified attribute attrib on the kernel kernel for the requested device dev to an integer value specified by val. This function returns CUDA_SUCCESS if the new value of the attribute could be successfully set. If the set fails, this call will return an error. Not all attributes can have values set. Attempting to set a value on a read-only attribute will result in an error (CUDA_ERROR_INVALID_VALUE).

    Note that attributes set using cuFuncSetAttribute() will override the attribute set by this API irrespective of whether the call to cuFuncSetAttribute() is made before or after this API call. However, cuKernelGetAttribute() will always return the attribute value set by this API.

    Supported attributes are:

    • CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES

    • CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH

    • CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED

    • CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE

    Parameters:

    • attrib (CUfunction_attribute) – Attribute requested

    • val (int) – Value to set

    • kernel (CUkernel) – Kernel to set attribute of

    • dev (CUdevice) – Device to set attribute of

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:

    CUresult

    Notes

    The API has stricter locking requirements in comparison to its legacy counterpart cuFuncSetAttribute() due to device-wide semantics. If multiple threads are trying to set the same attribute on the same device simultaneously, the attribute setting will depend on the interleavings chosen by the OS scheduler and memory consistency.

    cuda.cuda.cuKernelSetCacheConfig(kernel, config: CUfunc_cache, dev)#

    Sets the preferred cache configuration for a device kernel.

    On devices where the L1 cache and shared memory use the same hardware resources, this sets through config the preferred cache configuration for the device kernel kernel on the requested device dev. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute kernel. Any context-wide preference set via cuCtxSetCacheConfig() will be overridden by this per-kernel setting.

    Note that attributes set using cuFuncSetCacheConfig() will override the attribute set by this API irrespective of whether the call to cuFuncSetCacheConfig() is made before or after this API call.

    This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

    Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

    The supported cache configurations are:

    • CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)

    • CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache

    • CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory

    • CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory

    Parameters:

    • kernel (CUkernel) – Kernel to configure cache for

    • config (CUfunc_cache) – Requested cache configuration

    • dev (CUdevice) – Device to set attribute of

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:

    CUresult

    Notes

    The API has stricter locking requirements in comparison to its legacy counterpart cuFuncSetCacheConfig() due to device-wide semantics. If multiple threads are trying to set a config on the same device simultaneously, the cache config setting will depend on the interleavings chosen by the OS scheduler and memory consistency.

    cuda.cuda.cuKernelGetName(hfunc)#

    Returns the function name for a CUkernel handle.

    Returns in **name the function name associated with the kernel handle hfunc. The function name is returned as a null-terminated string. The returned name is only valid when the kernel handle is valid. If the library is unloaded or reloaded, one must call the API again to get the updated name. This API may return a mangled name if the function is not declared as having C linkage. If either **name or hfunc is NULL, CUDA_ERROR_INVALID_VALUE is returned.

    Parameters:

    hfunc (CUkernel) – The function handle to retrieve the name for

    Returns:

    cuda.cuda.cuKernelGetParamInfo(kernel, size_t paramIndex)#

    Returns the offset and size of a kernel parameter in the device-side parameter layout.

    Queries the kernel parameter at paramIndex into kernel’s list of parameters, and returns in paramOffset and paramSize the offset and size, respectively, where the parameter will reside in the device-side parameter layout. This information can be used to update kernel node parameters from the device via cudaGraphKernelNodeSetParam() and cudaGraphKernelNodeUpdatesApply(). paramIndex must be less than the number of parameters that kernel takes. paramSize can be set to NULL if only the parameter offset is desired.

    Parameters:

    • kernel (CUkernel) – The kernel to query

    • paramIndex (size_t) – The parameter index to query

    Returns:

    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    • paramOffset (int) – Returns the offset into the device-side parameter layout at which the parameter resides

    • paramSize (int) – Optionally returns the size of the parameter in the device-side parameter layout

    See also

    cuFuncGetParamInfo

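    For instance, querying where the first parameter lands in the device-side layout (parameter index 0 is illustrative), continuing the sketches above:

        err, offset, size = cuda.cuKernelGetParamInfo(kernel, 0)
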
    Memory Management#

    This section describes the memory management functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuMemGetInfo()#

    Gets free and total memory.

    Returns in *total the total amount of memory available to the current context. Returns in *free the amount of memory on the device that is free according to the OS. CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free. In a multi-tenant situation, the free estimate returned is prone to a race condition: an allocation or free performed by a different process, or by a different thread in the same process, between the time when free memory was estimated and reported will cause the reported free value to deviate from the actual free memory.

    The integrated GPU on Tegra shares memory with the CPU and other components of the SoC. The free and total values returned by the API exclude the SWAP memory space maintained by the OS on some platforms. The OS may move some of the memory pages into swap area as the GPU or CPU allocate or access memory. See the Tegra app note on how to calculate total and free memory on Tegra.

    Returns:

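    A sketch of querying and printing the estimate, assuming an initialized driver and a current context:

        from cuda import cuda

        err, free, total = cuda.cuMemGetInfo()
        assert err == cuda.CUresult.CUDA_SUCCESS
        print(f"free: {free / 2**20:.0f} MiB of {total / 2**20:.0f} MiB")
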
    cuda.cuda.cuMemAlloc(size_t bytesize)#

    Allocates device memory.

    Allocates bytesize bytes of linear memory on the device and returns in *dptr a pointer to the allocated memory. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. If bytesize is 0, cuMemAlloc() returns CUDA_ERROR_INVALID_VALUE.

    Parameters:

    bytesize (size_t) – Requested allocation size in bytes

    Returns:

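    A sketch of a 1 MiB allocation using the tuple-return convention of these bindings:

        from cuda import cuda

        err, dptr = cuda.cuMemAlloc(1 << 20)
        assert err == cuda.CUresult.CUDA_SUCCESS
        # ... use dptr ...
        err, = cuda.cuMemFree(dptr)
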
    cuda.cuda.cuMemAllocPitch(size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes)#

    Allocates pitched device memory.

    Allocates at least WidthInBytes * Height bytes of linear memory on the device and returns in *dptr a pointer to the allocated memory. The function may pad the allocation to ensure that corresponding pointers in any given row will continue to meet the alignment requirements for coalescing as the address is updated from row to row. ElementSizeBytes specifies the size of the largest reads and writes that will be performed on the memory range. ElementSizeBytes may be 4, 8 or 16 (since coalesced memory transactions are not possible on other data sizes). If ElementSizeBytes is smaller than the actual read/write size of a kernel, the kernel will run correctly, but possibly at reduced speed. The pitch returned in *pPitch by cuMemAllocPitch() is the width in bytes of the allocation. The intended usage of pitch is as a separate parameter of the allocation, used to compute addresses within the 2D array. Given the row and column of an array element of type T, the address is computed as:

    View CUDA Toolkit Documentation for a C++ code example

    The pitch returned by cuMemAllocPitch() is guaranteed to work with cuMemcpy2D() under all circumstances. For allocations of 2D arrays, it is recommended that programmers consider performing pitch allocations using cuMemAllocPitch(). Due to alignment restrictions in the hardware, this is especially true if the application will be performing 2D memory copies between different regions of device memory (whether linear memory or CUDA arrays).

    The byte alignment of the pitch returned by cuMemAllocPitch() is guaranteed to match or exceed the alignment requirement for texture binding with cuTexRefSetAddress2D().

    Parameters:

    • WidthInBytes (size_t) – Requested allocation width in bytes

    • Height (size_t) – Requested allocation height in rows

    • ElementSizeBytes (unsigned int) – Size of largest reads/writes for range

    Returns:

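    The address arithmetic the pitch is meant for, sketched in Python for 4-byte elements (width, height, row, and col are illustrative values; this mirrors the C idiom of base + Row * Pitch + Column * sizeof(T)):

        width, height = 256, 128          # elements, rows
        row, col = 5, 7
        err, dptr, pitch = cuda.cuMemAllocPitch(width * 4, height, 4)
        # Byte address of element (row, col) of a 4-byte-element 2D array:
        addr = int(dptr) + row * int(pitch) + col * 4
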
    cuda.cuda.cuMemFree(dptr)#

    Frees device memory.

    Frees the memory space pointed to by dptr, which must have been returned by a previous call to one of the following memory allocation APIs: cuMemAlloc(), cuMemAllocPitch(), cuMemAllocManaged(), cuMemAllocAsync(), cuMemAllocFromPoolAsync().

    Note: this API will not perform any implicit synchronization when the pointer was allocated with cuMemAllocAsync or cuMemAllocFromPoolAsync. Callers must ensure that all accesses to such pointers have completed before invoking cuMemFree. For best performance and memory reuse, users should use cuMemFreeAsync to free memory allocated via the stream-ordered memory allocator. For all other pointers, this API may perform implicit synchronization.

    Parameters:

    dptr (CUdeviceptr) – Pointer to memory to free

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuMemGetAddressRange(dptr)#

    Get information on memory allocations.

    Returns the base address in *pbase and size in *psize of the allocation by cuMemAlloc() or cuMemAllocPitch() that contains the input pointer dptr. Both parameters pbase and psize are optional. If one of them is NULL, it is ignored.

    Parameters:

    dptr (CUdeviceptr) – Device pointer to query

    Returns:

    cuda.cuda.cuMemAllocHost(size_t bytesize)#

    Allocates page-locked host memory.

    Allocates bytesize bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cuMemcpy(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory obtained with functions such as malloc().

    On systems where CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES is true, cuMemAllocHost may not page-lock the allocated memory.

    Page-locking excessive amounts of memory with cuMemAllocHost() may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

    Note all host memory allocated using cuMemAllocHost() will automatically be immediately accessible to all contexts on all devices which support unified addressing (as may be queried using CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). The device pointer that may be used to access this host memory from those contexts is always equal to the returned host pointer *pp. See Unified Addressing for additional details.

    Parameters:

    bytesize (size_t) – Requested allocation size in bytes

    Returns:

    cuda.cuda.cuMemFreeHost(p)#

    Frees page-locked host memory.

    Frees the memory space pointed to by p, which must have been returned by a previous call to cuMemAllocHost().

    Parameters:

    p (Any) – Pointer to memory to free

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuMemHostAlloc(size_t bytesize, unsigned int Flags)#

    Allocates page-locked host memory.

    Allocates bytesize bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cuMemcpyHtoD(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory obtained with functions such as malloc().

    On systems where CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES is true, cuMemHostAlloc may not page-lock the allocated memory.

    Page-locking excessive amounts of memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

    The Flags parameter enables different options to be specified that affect the allocation, as follows.

    • CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.

    • CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cuMemHostGetDevicePointer().

    • CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined (WC). WC memory can be transferred across the PCI Express bus more quickly on some system configurations, but cannot be read efficiently by most CPUs. WC memory is a good option for buffers that will be written by the CPU and read by the GPU via mapped pinned memory or host->device transfers.

    All of these flags are orthogonal to one another: a developer may allocate memory that is portable, mapped and/or write-combined with no restrictions.

    The CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cuMemHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the CU_MEMHOSTALLOC_PORTABLE flag.

    The memory allocated by this function must be freed with cuMemFreeHost().

    Note all host memory allocated using cuMemHostAlloc() will automatically be immediately accessible to all contexts on all devices which support unified addressing (as may be queried using CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). Unless the flag CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer that may be used to access this host memory from those contexts is always equal to the returned host pointer *pp. If the flag CU_MEMHOSTALLOC_WRITECOMBINED is specified, then the function cuMemHostGetDevicePointer() must be used to query the device pointer, even if the context supports unified addressing. See Unified Addressing for additional details.

    Parameters:

    • bytesize (size_t) – Requested allocation size in bytes

    • Flags (unsigned int) – Flags for allocation request

    Returns:

    cuda.cuda.cuMemHostGetDevicePointer(p, unsigned int Flags)#

    Passes back device pointer of mapped pinned memory.

    Passes back the device pointer pdptr corresponding to the mapped, pinned host buffer p allocated by cuMemHostAlloc.

    cuMemHostGetDevicePointer() will fail if the CU_MEMHOSTALLOC_DEVICEMAP flag was not specified at the time the memory was allocated, or if the function is called on a GPU that does not support mapped pinned memory.

    For devices that have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory can also be accessed from the device using the host pointer p. The device pointer returned by cuMemHostGetDevicePointer() may or may not match the original host pointer p and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will match the original pointer p. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will not match the original host pointer p, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

    Flags provides for future releases. For now, it must be set to 0.

    Parameters:

    • p (Any) – Host pointer

    • Flags (unsigned int) – Options (must be 0)

    Returns:

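    A sketch combining cuMemHostAlloc and cuMemHostGetDevicePointer for mapped pinned memory; the module-level flag constant CU_MEMHOSTALLOC_DEVICEMAP is assumed to be exposed by the bindings:

        nbytes = 1 << 20
        err, p = cuda.cuMemHostAlloc(nbytes, cuda.CU_MEMHOSTALLOC_DEVICEMAP)
        err, dptr = cuda.cuMemHostGetDevicePointer(p, 0)  # Flags must be 0
        # ... kernels may read or write through dptr while the host uses p ...
        err, = cuda.cuMemFreeHost(p)
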
    cuda.cuda.cuMemHostGetFlags(p)#

    Passes back flags that were used for a pinned allocation.

    Passes back the flags pFlags that were specified when allocating the pinned host buffer p allocated by cuMemHostAlloc.

    cuMemHostGetFlags() will fail if the pointer does not reside in an allocation performed by cuMemAllocHost() or cuMemHostAlloc().

    Parameters:

    p (Any) – Host pointer

    Returns:

    cuda.cuda.cuMemAllocManaged(size_t bytesize, unsigned int flags)#

    Allocates memory that will be automatically managed by the Unified Memory system.

    Allocates bytesize bytes of managed memory on the device and returns in *dptr a pointer to the allocated memory. If the device doesn’t support allocating managed memory, CUDA_ERROR_NOT_SUPPORTED is returned. Support for managed memory can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. If bytesize is 0, cuMemAllocManaged returns CUDA_ERROR_INVALID_VALUE. The pointer is valid on the CPU and on all GPUs in the system that support managed memory. All accesses to this pointer must obey the Unified Memory programming model.

    flags specifies the default stream association for this allocation. flags must be one of CU_MEM_ATTACH_GLOBAL or CU_MEM_ATTACH_HOST. If CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from any stream on any device. If CU_MEM_ATTACH_HOST is specified, then the allocation should not be accessed from devices that have a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to cuStreamAttachMemAsync will be required to enable access on such devices.

    If the association is later changed via cuStreamAttachMemAsync to a single stream, the default association as specified during cuMemAllocManaged is restored when that stream is destroyed. For managed variables, the default association is always CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won’t happen until all work in the stream has completed.

    Memory allocated with cuMemAllocManaged should be released with cuMemFree.

    Device memory oversubscription is possible for GPUs that have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on such GPUs may be evicted from device memory to host memory at any time by the Unified Memory driver in order to make room for other allocations.

    In a system where all GPUs have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this API returns and instead may be populated on access. In such systems, managed memory can migrate to any processor’s memory at any time. The Unified Memory driver will employ heuristics to maintain data locality and prevent excessive page faults to the extent possible. The application can also guide the driver about memory usage patterns via cuMemAdvise. The application can also explicitly migrate memory to a desired processor’s memory via cuMemPrefetchAsync.

    In a multi-GPU system where all of the GPUs have a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support with each other, the physical storage for managed memory is created on the GPU which is active at the time cuMemAllocManaged is called. All other GPUs will reference the data at reduced bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate memory among such GPUs.

    In a multi-GPU system where not all GPUs have peer-to-peer support with each other and where the value of the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS is zero for at least one of those GPUs, the location chosen for physical storage of managed memory is system-dependent.

    • On Linux, the location chosen will be device memory as long as the current set of active contexts are on devices that either have peer-to-peer support with each other or have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If there is an active context on a GPU that does not have a non-zero value for that device attribute and it does not have peer-to-peer support with the other devices that have active contexts on them, then the location for physical storage will be ‘zero-copy’ or host memory. Note that this means that managed memory that is located in device memory is migrated to host memory if a new context is created on a GPU that doesn’t have a non-zero value for the device attribute and does not support peer-to-peer with at least one of the other devices that has an active context. This in turn implies that context creation may fail if there is insufficient host memory to migrate all managed allocations.

    • On Windows, the physical storage is always created in ‘zero-copy’ or host memory. All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to restrict CUDA to only use those GPUs that have peer-to-peer support. Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero value to force the driver to always use device memory for physical storage. When this environment variable is set to a non-zero value, all contexts created in that process on devices that support managed memory have to be peer-to-peer compatible with each other. Context creation will fail if a context is created on a device that supports managed memory and is not peer-to-peer compatible with any of the other managed memory supporting devices on which contexts were previously created, even if those contexts have been destroyed. These environment variables are described in the CUDA programming guide under the “CUDA environment variables” section.

    • On ARM, managed memory is not available on discrete GPU with Drive PX-2.

    Parameters:

    • bytesize (size_t) – Requested allocation size in bytes

    • flags (unsigned int) – Must be one of CU_MEM_ATTACH_GLOBAL or CU_MEM_ATTACH_HOST

    Returns:

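    A sketch of a globally attached managed allocation, released with cuMemFree as the entry above requires:

        flags = cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value
        err, dptr = cuda.cuMemAllocManaged(1 << 20, flags)
        assert err == cuda.CUresult.CUDA_SUCCESS
        # The pointer is valid on the CPU and on all managed-memory-capable GPUs.
        err, = cuda.cuMemFree(dptr)
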
    cuda.cuda.cuDeviceRegisterAsyncNotification(device, callbackFunc, userData)#

    Registers a callback function to receive async notifications.

    Registers callbackFunc to receive async notifications.

    The userData parameter is passed to the callback function at async notification time. Likewise, callback is also passed to the callback function to distinguish between multiple registered callbacks.

    The callback function being registered should be designed to return quickly (~10ms). Any long-running tasks should be queued for execution on an application thread.

    Callbacks may not call cuDeviceRegisterAsyncNotification or cuDeviceUnregisterAsyncNotification. Doing so will result in CUDA_ERROR_NOT_PERMITTED. Async notification callbacks execute in an undefined order and may be serialized.

    Returns in *callback a handle representing the registered callback instance.

    Parameters:

    • device (CUdevice) – The device on which to register the callback

    • callbackFunc (CUasyncCallback) – The function to register as a callback

    • userData (Any) – A generic pointer to user data. This is passed into the callback function.

    Returns:

    cuda.cuda.cuDeviceUnregisterAsyncNotification(device, callback)#

    Unregisters an async notification callback.

    Unregisters callback so that the corresponding callback function will stop receiving async notifications.

    Parameters:

    • device (CUdevice) – The device from which to remove callback.

    • callback (CUasyncCallbackHandle) – The callback instance to unregister from receiving async notifications.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_UNKNOWN

    Return type:

    CUresult

    cuda.cuda.cuDeviceGetByPCIBusId(char *pciBusId)#

    Returns a handle to a compute device.

    Returns in *device a device handle given a PCI bus ID string.

    Parameters:

    pciBusId (bytes) – String in one of the following forms: [domain]:[bus]:[device].[function], [domain]:[bus]:[device], [bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values

    Returns:

    cuda.cuda.cuDeviceGetPCIBusId(int length, dev)#

    Returns a PCI Bus Id string for the device.

    Returns an ASCII string identifying the device dev in the NULL-terminated string pointed to by pciBusId. length specifies the maximum length of the string that may be returned.

    The string is of the form [domain]:[bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values. pciBusId should be large enough to store 13 characters including the NULL-terminator.

    Parameters:

    • length (int) – Maximum length of string to store in name

    • dev (CUdevice) – Device to get identifier string for

    Returns:

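    The two calls round-trip: a sketch fetching the bus ID of `dev` and resolving it back to a handle (a 16-byte buffer comfortably holds the 13 required characters):

        err, bus_id = cuda.cuDeviceGetPCIBusId(16, dev)
        err, same_dev = cuda.cuDeviceGetByPCIBusId(bus_id)
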
    cuda.cuda.cuIpcGetEventHandle(event)#

    Gets an interprocess handle for a previously allocated event.

    Takes as input a previously allocated event. This event must have been created with the CU_EVENT_INTERPROCESS and CU_EVENT_DISABLE_TIMING flags set. This opaque handle may be copied into other processes and opened with cuIpcOpenEventHandle to allow efficient hardware synchronization between GPU work in different processes.

    After the event has been opened in the importing process, cuEventRecord, cuEventSynchronize, cuStreamWaitEvent and cuEventQuery may be used in either process. Performing operations on the imported event after the exported event has been freed with cuEventDestroy will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with a performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED.

    Parameters:

    event (CUevent or cudaEvent_t) – Event allocated with CU_EVENT_INTERPROCESS and CU_EVENT_DISABLE_TIMING flags.

    Returns:

    cuda.cuda.cuIpcOpenEventHandle(CUipcEventHandle handle: CUipcEventHandle)#

    Opens an interprocess event handle for use in the current process.

    Opens an interprocess event handle exported from another process with cuIpcGetEventHandle. This function returns a CUevent that behaves like a locally created event with the CU_EVENT_DISABLE_TIMING flag specified. This event must be freed with cuEventDestroy.

    Performing operations on the imported event after the exported event has been freed with cuEventDestroy will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with a performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED.

    Parameters:

    handle (CUipcEventHandle) – Interprocess handle to open

    Returns:

    cuda.cuda.cuIpcGetMemHandle(dptr)#

    Gets an interprocess memory handle for an existing device memory allocation.

    Takes a pointer to the base of an existing device memory allocation created with cuMemAlloc and exports it for use in another process. This is a lightweight operation and may be called multiple times on an allocation without adverse effects.

    If a region of memory is freed with cuMemFree and a subsequent call to cuMemAlloc returns memory with the same device address, cuIpcGetMemHandle will return a unique handle for the new memory.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with a performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED.

    Parameters:

    dptr (CUdeviceptr) – Base pointer to previously allocated device memory

    Returns:

    cuda.cuda.cuIpcOpenMemHandle(CUipcMemHandle handle: CUipcMemHandle, unsigned int Flags)#

    Opens an interprocess memory handle exported from another process and returns a device pointer usable in the local process.

    Maps memory exported from another process with cuIpcGetMemHandle into the current device address space. For contexts on different devices cuIpcOpenMemHandle can attempt to enable peer access between the devices as if the user called cuCtxEnablePeerAccess. This behavior is controlled by the CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag. cuDeviceCanAccessPeer can determine if a mapping is possible.

    Contexts that may open CUipcMemHandles are restricted in the following way. CUipcMemHandles from each CUdevice in a given process may only be opened by one CUcontext per CUdevice per other process.

    If the memory handle has already been opened by the current context, the reference count on the handle is incremented by 1 and the existing device pointer is returned.

    Memory returned from cuIpcOpenMemHandle must be freed with cuIpcCloseMemHandle.

    Calling cuMemFree on an exported memory region before calling cuIpcCloseMemHandle in the importing context will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with a performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED.

    Parameters:

    • handle (CUipcMemHandle) – Interprocess memory handle to open

    • Flags (unsigned int) – Flags for this operation, e.g. CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS

    Returns:

    Notes

    No guarantees are made about the address returned in *pdptr. In particular, multiple processes may not receive the same address for the same handle.

    cuda.cuda.cuIpcCloseMemHandle(dptr)#

    Attempts to close memory mapped with cuIpcOpenMemHandle.

    Decrements the reference count of the memory returned by cuIpcOpenMemHandle by 1. When the reference count reaches 0, this API unmaps the memory. The original allocation in the exporting process as well as imported mappings in other processes will be unaffected.

    Any resources used to enable peer access will be freed if this is the last mapping using them.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with a performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED.

    Parameters:

    dptr (CUdeviceptr) – Device pointer returned by cuIpcOpenMemHandle

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_MAP_FAILED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

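    A two-process sketch of the memory-IPC flow described above; the transport that carries the serialized handle between processes (pipe, socket, shared file) is up to the application and omitted here:

        # Exporting process: allocate with cuMemAlloc, then export.
        err, handle = cuda.cuIpcGetMemHandle(dptr)
        # ... serialize `handle` and send it to the importing process ...

        # Importing process: open, use, then close before the exporter frees.
        flags = cuda.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS.value
        err, peer_ptr = cuda.cuIpcOpenMemHandle(handle, flags)
        # ... use peer_ptr ...
        err, = cuda.cuIpcCloseMemHandle(peer_ptr)
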
    cuda.cuda.cuMemHostRegister(p, size_t bytesize, unsigned int Flags)#

    Registers an existing host memory range for use by CUDA.

    Page-locks the memory range specified by p and bytesize and maps it for the device(s) as specified by Flags. This memory range also is added to the same tracking mechanism as cuMemHostAlloc to automatically accelerate calls to functions such as cuMemcpyHtoD(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory that has not been registered. Page-locking excessive amounts of memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to register staging areas for data exchange between host and device.

    On systems where CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES is true, cuMemHostRegister will not page-lock the memory range specified by ptr but only populate unpopulated pages.

    The Flags parameter enables different options to be specified that affect the allocation, as follows.

    • CU_MEMHOSTREGISTER_PORTABLE: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.

    • CU_MEMHOSTREGISTER_DEVICEMAP: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cuMemHostGetDevicePointer().

    All of these flags are orthogonal to one another: a developer may page-lock memory that is portable or mapped with no restrictions.

    The CU_MEMHOSTREGISTER_DEVICEMAP flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cuMemHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the CU_MEMHOSTREGISTER_PORTABLE flag.

    For devices that have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory can also be accessed from the device using the host pointer p. The device pointer returned by cuMemHostGetDevicePointer() may or may not match the original host pointer ptr and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will match the original pointer ptr. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will not match the original host pointer ptr, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

    The memory page-locked by this function must be unregistered with cuMemHostUnregister().

    Parameters:

    • p (Any) – Host pointer to memory to page-lock

    • bytesize (size_t) – Size in bytes of the address range to page-lock

    • Flags (unsigned int) – Flags for allocation request

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    Return type:

    CUresult

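    A sketch of registering an existing NumPy buffer so copies out of it are accelerated; the module-level constant CU_MEMHOSTREGISTER_PORTABLE is assumed to be exposed by the bindings:

        import numpy as np
        from cuda import cuda

        arr = np.zeros(1 << 20, dtype=np.uint8)
        err, = cuda.cuMemHostRegister(arr.ctypes.data, arr.nbytes,
                                      cuda.CU_MEMHOSTREGISTER_PORTABLE)
        # ... cuMemcpyHtoD from arr.ctypes.data is now accelerated ...
        err, = cuda.cuMemHostUnregister(arr.ctypes.data)
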
    cuda.cuda.cuMemHostUnregister(p)#

    Unregisters a memory range that was registered with cuMemHostRegister.

    Unmaps the memory range whose base address is specified by p, and makes it pageable again.

    The base address must be the same one specified to cuMemHostRegister().

    Parameters:

    p (Any) – Host pointer to memory to unregister

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED

    Return type:

    CUresult

    cuda.cuda.cuMemcpy(dst, src, size_t ByteCount)#

    Copies memory.

    Copies data between two pointers. dst and src are base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy. Note that this function infers the type of the transfer (host to host, host to device, device to device, or device to host) from the pointer values. This function is only allowed in contexts which support unified addressing.

    Parameters:

    • dst (CUdeviceptr) – Destination unified virtual address space pointer

    • src (CUdeviceptr) – Source unified virtual address space pointer

    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuMemcpyPeer(dstDevice, dstContext, srcDevice, srcContext, size_t ByteCount)#

    Copies device memory between two contexts.

    Copies from device memory in one context to device memory in another context. dstDevice is the base device pointer of the destination memory and dstContext is the destination context. srcDevice is the base device pointer of the source memory and srcContext is the source context. ByteCount specifies the number of bytes to copy.

    Parameters:

    • dstDevice (CUdeviceptr) – Destination device pointer

    • dstContext (CUcontext) – Destination context

    • srcDevice (CUdeviceptr) – Source device pointer

    • srcContext (CUcontext) – Source context

    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuMemcpyHtoD(dstDevice, srcHost, size_t ByteCount)#

    Copies memory from Host to Device.

    Copies from host memory to device memory. dstDevice and srcHost are the base addresses of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

    Parameters:

    • dstDevice (CUdeviceptr) – Destination device pointer

    • srcHost (Any) – Source host pointer

    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.cuda.cuMemcpyDtoH(dstHost, srcDevice, size_t ByteCount)#

    Copies memory from Device to Host.

    Copies from device to host memory. dstHost and srcDevice specify the base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

    Parameters:

    • dstHost (Any) – Destination host pointer

    • srcDevice (CUdeviceptr) – Source device pointer

    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

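    The HtoD/DtoH pair in a round trip, following the convention in the shipped examples of passing host addresses via NumPy's .ctypes.data:

        import numpy as np
        from cuda import cuda

        h_src = np.arange(256, dtype=np.float32)
        h_dst = np.zeros_like(h_src)
        err, dptr = cuda.cuMemAlloc(h_src.nbytes)
        err, = cuda.cuMemcpyHtoD(dptr, h_src.ctypes.data, h_src.nbytes)
        err, = cuda.cuMemcpyDtoH(h_dst.ctypes.data, dptr, h_dst.nbytes)
        assert (h_src == h_dst).all()
        err, = cuda.cuMemFree(dptr)
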
cuda.cuda.cuMemcpyDtoD(dstDevice, srcDevice, size_t ByteCount)

    Copies memory from Device to Device.

    Copies from device memory to device memory. dstDevice and srcDevice
    are the base pointers of the destination and source, respectively.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • srcDevice (CUdeviceptr) – Source device pointer
    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemcpyDtoA(dstArray, size_t dstOffset, srcDevice, size_t ByteCount)

    Copies memory from Device to Array.

    Copies from device memory to a 1D CUDA array. dstArray and
    dstOffset specify the CUDA array handle and starting index of the
    destination data. srcDevice specifies the base pointer of the
    source. ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstArray (CUarray) – Destination array
    • dstOffset (size_t) – Offset in bytes of destination array
    • srcDevice (CUdeviceptr) – Source device pointer
    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemcpyAtoD(dstDevice, srcArray, size_t srcOffset, size_t ByteCount)

    Copies memory from Array to Device.

    Copies from one 1D CUDA array to device memory. dstDevice specifies
    the base pointer of the destination and must be naturally aligned
    with the CUDA array elements. srcArray and srcOffset specify the
    CUDA array handle and the offset in bytes into the array where the
    copy is to begin. ByteCount specifies the number of bytes to copy
    and must be evenly divisible by the array element size.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • srcArray (CUarray) – Source array
    • srcOffset (size_t) – Offset in bytes of source array
    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemcpyHtoA(dstArray, size_t dstOffset, srcHost, size_t ByteCount)

    Copies memory from Host to Array.

    Copies from host memory to a 1D CUDA array. dstArray and dstOffset
    specify the CUDA array handle and starting offset in bytes of the
    destination data. srcHost specifies the base address of the source.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstArray (CUarray) – Destination array
    • dstOffset (size_t) – Offset in bytes of destination array
    • srcHost (Any) – Source host pointer
    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemcpyAtoH(dstHost, srcArray, size_t srcOffset, size_t ByteCount)

    Copies memory from Array to Host.

    Copies from one 1D CUDA array to host memory. dstHost specifies the
    base pointer of the destination. srcArray and srcOffset specify the
    CUDA array handle and starting offset in bytes of the source data.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstHost (Any) – Destination host pointer
    • srcArray (CUarray) – Source array
    • srcOffset (size_t) – Offset in bytes of source array
    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
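    The staging pair cuMemcpyHtoA/cuMemcpyAtoH can be exercised with a
    1D array as in the hedged sketch below; it assumes an active
    context, and the descriptor fields mirror cuArrayCreate later in
    this section.

        import numpy as np
        from cuda import cuda

        # Describe a 1D CUDA array of 1024 floats (Height == 0 means 1D).
        desc = cuda.CUDA_ARRAY_DESCRIPTOR()
        desc.Width = 1024
        desc.Height = 0
        desc.Format = cuda.CUarray_format.CU_AD_FORMAT_FLOAT
        desc.NumChannels = 1
        err, arr = cuda.cuArrayCreate(desc)

        h_in = np.linspace(0.0, 1.0, 1024, dtype=np.float32)
        h_out = np.empty_like(h_in)

        # Stage host data into the array at offset 0, then read it back.
        err, = cuda.cuMemcpyHtoA(arr, 0, h_in.ctypes.data, h_in.nbytes)
        err, = cuda.cuMemcpyAtoH(h_out.ctypes.data, arr, 0, h_out.nbytes)
        assert (h_in == h_out).all()

        err, = cuda.cuArrayDestroy(arr)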
cuda.cuda.cuMemcpyAtoA(dstArray, size_t dstOffset, srcArray, size_t srcOffset, size_t ByteCount)

    Copies memory from Array to Array.

    Copies from one 1D CUDA array to another. dstArray and srcArray
    specify the handles of the destination and source CUDA arrays for
    the copy, respectively. dstOffset and srcOffset specify the
    destination and source offsets in bytes into the CUDA arrays. The
    CUDA arrays need not have the same element format, but the elements
    must be the same size; ByteCount is the number of bytes to be
    copied and must be evenly divisible by that size.

    Parameters:
    • dstArray (CUarray) – Destination array
    • dstOffset (size_t) – Offset in bytes of destination array
    • srcArray (CUarray) – Source array
    • srcOffset (size_t) – Offset in bytes of source array
    • ByteCount (size_t) – Size of memory copy in bytes

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemcpy2D(CUDA_MEMCPY2D pCopy: Optional[CUDA_MEMCPY2D])

    Copies memory for 2D arrays.

    Perform a 2D memory copy according to the parameters specified in
    pCopy. The CUDA_MEMCPY2D structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:
    • srcMemoryType and dstMemoryType specify the type of memory of the
      source and destination, respectively; CUmemorytype_enum is
      defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch
    specify the (unified virtual address space) base address of the
    source data and the bytes per row to apply. srcArray is ignored.
    This value may be used only if unified addressing is supported in
    the calling context.

    If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost and srcPitch
    specify the (host) base address of the source data and the bytes
    per row to apply. srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice and srcPitch
    specify the (device) base address of the source data and the bytes
    per row to apply. srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the
    handle of the source data. srcHost, srcDevice and srcPitch are
    ignored.

    If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost and dstPitch
    specify the (host) base address of the destination data and the
    bytes per row to apply. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch
    specify the (unified virtual address space) base address of the
    destination data and the bytes per row to apply. dstArray is
    ignored. This value may be used only if unified addressing is
    supported in the calling context.

    If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice and dstPitch
    specify the (device) base address of the destination data and the
    bytes per row to apply. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the
    handle of the destination data. dstHost, dstDevice and dstPitch
    are ignored.

    • srcXInBytes and srcY specify the base address of the source data
      for the copy.

    For host pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, srcXInBytes must be evenly divisible by the array
    element size.

    • dstXInBytes and dstY specify the base address of the destination
      data for the copy.

    For host pointers, the base address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, dstXInBytes must be evenly divisible by the array
    element size.

    cuMemcpy2D() returns an error if any pitch is greater than the
    maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH). cuMemAllocPitch()
    passes back pitches that always work with cuMemcpy2D(). On
    intra-device memory copies (device to device, CUDA array to device,
    CUDA array to CUDA array), cuMemcpy2D() may fail for pitches not
    computed by cuMemAllocPitch(). cuMemcpy2DUnaligned() does not have
    this restriction, but may run significantly slower in the cases
    where cuMemcpy2D() would have returned an error code.

    Parameters:
    pCopy (CUDA_MEMCPY2D) – Parameters for the memory copy

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
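    A sketch of driving cuMemcpy2D from Python follows: it fills a
    CUDA_MEMCPY2D structure field by field and copies a tightly packed
    host image into a pitched device allocation. An active context is
    assumed; numpy supplies the host buffer.

        import numpy as np
        from cuda import cuda

        width, height = 64, 16
        h_src = np.ones((height, width), dtype=np.float32)

        # Pitched allocation; the returned pitch always works with cuMemcpy2D.
        err, d_dst, pitch = cuda.cuMemAllocPitch(width * 4, height, 4)

        cpy = cuda.CUDA_MEMCPY2D()
        cpy.srcMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_HOST
        cpy.srcHost = h_src.ctypes.data
        cpy.srcPitch = width * 4          # tightly packed host rows
        cpy.dstMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cpy.dstDevice = d_dst
        cpy.dstPitch = pitch
        cpy.WidthInBytes = width * 4
        cpy.Height = height
        err, = cuda.cuMemcpy2D(cpy)

        err, = cuda.cuMemFree(d_dst)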
cuda.cuda.cuMemcpy2DUnaligned(CUDA_MEMCPY2D pCopy: Optional[CUDA_MEMCPY2D])

    Copies memory for 2D arrays.

    Perform a 2D memory copy according to the parameters specified in
    pCopy. The CUDA_MEMCPY2D structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:
    • srcMemoryType and dstMemoryType specify the type of memory of the
      source and destination, respectively; CUmemorytype_enum is
      defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch
    specify the (unified virtual address space) base address of the
    source data and the bytes per row to apply. srcArray is ignored.
    This value may be used only if unified addressing is supported in
    the calling context.

    If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost and srcPitch
    specify the (host) base address of the source data and the bytes
    per row to apply. srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice and srcPitch
    specify the (device) base address of the source data and the bytes
    per row to apply. srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the
    handle of the source data. srcHost, srcDevice and srcPitch are
    ignored.

    If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch
    specify the (unified virtual address space) base address of the
    destination data and the bytes per row to apply. dstArray is
    ignored. This value may be used only if unified addressing is
    supported in the calling context.

    If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost and dstPitch
    specify the (host) base address of the destination data and the
    bytes per row to apply. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice and dstPitch
    specify the (device) base address of the destination data and the
    bytes per row to apply. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the
    handle of the destination data. dstHost, dstDevice and dstPitch
    are ignored.

    • srcXInBytes and srcY specify the base address of the source data
      for the copy.

    For host pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, srcXInBytes must be evenly divisible by the array
    element size.

    • dstXInBytes and dstY specify the base address of the destination
      data for the copy.

    For host pointers, the base address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, dstXInBytes must be evenly divisible by the array
    element size.

    cuMemcpy2D() returns an error if any pitch is greater than the
    maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH). cuMemAllocPitch()
    passes back pitches that always work with cuMemcpy2D(). On
    intra-device memory copies (device to device, CUDA array to device,
    CUDA array to CUDA array), cuMemcpy2D() may fail for pitches not
    computed by cuMemAllocPitch(). cuMemcpy2DUnaligned() does not have
    this restriction, but may run significantly slower in the cases
    where cuMemcpy2D() would have returned an error code.

    Parameters:
    pCopy (CUDA_MEMCPY2D) – Parameters for the memory copy

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemcpy3D(CUDA_MEMCPY3D pCopy: Optional[CUDA_MEMCPY3D])

    Copies memory for 3D arrays.

    Perform a 3D memory copy according to the parameters specified in
    pCopy. The CUDA_MEMCPY3D structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:
    • srcMemoryType and dstMemoryType specify the type of memory of the
      source and destination, respectively; CUmemorytype_enum is
      defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch
    specify the (unified virtual address space) base address of the
    source data and the bytes per row to apply. srcArray is ignored.
    This value may be used only if unified addressing is supported in
    the calling context.

    If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost, srcPitch and
    srcHeight specify the (host) base address of the source data, the
    bytes per row, and the height of each 2D slice of the 3D array.
    srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice, srcPitch and
    srcHeight specify the (device) base address of the source data, the
    bytes per row, and the height of each 2D slice of the 3D array.
    srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the
    handle of the source data. srcHost, srcDevice, srcPitch and
    srcHeight are ignored.

    If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch
    specify the (unified virtual address space) base address of the
    destination data and the bytes per row to apply. dstArray is
    ignored. This value may be used only if unified addressing is
    supported in the calling context.

    If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost, dstPitch and
    dstHeight specify the (host) base address of the destination data,
    the bytes per row, and the height of each 2D slice of the 3D array.
    dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice, dstPitch and
    dstHeight specify the (device) base address of the destination
    data, the bytes per row, and the height of each 2D slice of the 3D
    array. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the
    handle of the destination data. dstHost, dstDevice, dstPitch and
    dstHeight are ignored.

    • srcXInBytes, srcY and srcZ specify the base address of the source
      data for the copy.

    For host pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, srcXInBytes must be evenly divisible by the array
    element size.

    • dstXInBytes, dstY and dstZ specify the base address of the
      destination data for the copy.

    For host pointers, the base address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, dstXInBytes must be evenly divisible by the array
    element size.

    cuMemcpy3D() returns an error if any pitch is greater than the
    maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH).

    The srcLOD and dstLOD members of the CUDA_MEMCPY3D structure must
    be set to 0.

    Parameters:
    pCopy (CUDA_MEMCPY3D) – Parameters for the memory copy

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
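    For the 3D case the structure gains srcHeight/dstHeight and Depth.
    The following hedged sketch copies a small host volume into linear
    device memory; an active context is assumed, and srcLOD/dstLOD are
    left at their zero defaults as required.

        import numpy as np
        from cuda import cuda

        w, h, d = 32, 8, 4
        h_src = np.zeros((d, h, w), dtype=np.uint8)
        err, d_dst = cuda.cuMemAlloc(w * h * d)

        cpy = cuda.CUDA_MEMCPY3D()
        cpy.srcMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_HOST
        cpy.srcHost = h_src.ctypes.data
        cpy.srcPitch = w           # bytes per row
        cpy.srcHeight = h          # rows per 2D slice
        cpy.dstMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cpy.dstDevice = d_dst
        cpy.dstPitch = w
        cpy.dstHeight = h
        cpy.WidthInBytes = w
        cpy.Height = h
        cpy.Depth = d
        err, = cuda.cuMemcpy3D(cpy)

        err, = cuda.cuMemFree(d_dst)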
cuda.cuda.cuMemcpy3DPeer(CUDA_MEMCPY3D_PEER pCopy: Optional[CUDA_MEMCPY3D_PEER])

    Copies memory between contexts.

    Perform a 3D memory copy according to the parameters specified in
    pCopy. See the definition of the CUDA_MEMCPY3D_PEER structure for
    documentation of its parameters.

    Parameters:
    pCopy (CUDA_MEMCPY3D_PEER) – Parameters for the memory copy

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemcpyAsync(dst, src, size_t ByteCount, hStream)

    Copies memory asynchronously.

    Copies data between two pointers. dst and src are base pointers of
    the destination and source, respectively. ByteCount specifies the
    number of bytes to copy. Note that this function infers the type of
    the transfer (host to host, host to device, device to device, or
    device to host) from the pointer values. This function is only
    allowed in contexts which support unified addressing.

    Parameters:
    • dst (CUdeviceptr) – Destination unified virtual address space pointer
    • src (CUdeviceptr) – Source unified virtual address space pointer
    • ByteCount (size_t) – Size of memory copy in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
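    Usage is the same as cuMemcpy plus a stream argument; a minimal
    sketch, assuming an active context:

        from cuda import cuda

        nbytes = 1 << 20
        err, stream = cuda.cuStreamCreate(0)
        err, d_src = cuda.cuMemAlloc(nbytes)
        err, d_dst = cuda.cuMemAlloc(nbytes)

        # Enqueue the copy; it may overlap with work in other streams.
        err, = cuda.cuMemcpyAsync(d_dst, d_src, nbytes, stream)
        err, = cuda.cuStreamSynchronize(stream)   # wait for completion

        err, = cuda.cuMemFree(d_src)
        err, = cuda.cuMemFree(d_dst)
        err, = cuda.cuStreamDestroy(stream)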
cuda.cuda.cuMemcpyPeerAsync(dstDevice, dstContext, srcDevice, srcContext, size_t ByteCount, hStream)

    Copies device memory between two contexts asynchronously.

    Copies from device memory in one context to device memory in
    another context. dstDevice is the base device pointer of the
    destination memory and dstContext is the destination context.
    srcDevice is the base device pointer of the source memory and
    srcContext is the source context. ByteCount specifies the number of
    bytes to copy.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • dstContext (CUcontext) – Destination context
    • srcDevice (CUdeviceptr) – Source device pointer
    • srcContext (CUcontext) – Source context
    • ByteCount (size_t) – Size of memory copy in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
cuda.cuda.cuMemcpyHtoDAsync(dstDevice, srcHost, size_t ByteCount, hStream)

    Copies memory from Host to Device.

    Copies from host memory to device memory. dstDevice and srcHost are
    the base addresses of the destination and source, respectively.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • srcHost (Any) – Source host pointer
    • ByteCount (size_t) – Size of memory copy in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
cuda.cuda.cuMemcpyDtoHAsync(dstHost, srcDevice, size_t ByteCount, hStream)

    Copies memory from Device to Host.

    Copies from device to host memory. dstHost and srcDevice specify
    the base pointers of the destination and source, respectively.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstHost (Any) – Destination host pointer
    • srcDevice (CUdeviceptr) – Source device pointer
    • ByteCount (size_t) – Size of memory copy in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
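    A sketch of the asynchronous host/device round trip follows,
    assuming an active context. Note that with pageable numpy memory
    the copies may not actually overlap other work; for true asynchrony
    the host buffers would be allocated page-locked (e.g. via
    cuMemHostAlloc).

        import numpy as np
        from cuda import cuda

        h_in = np.random.rand(1 << 16).astype(np.float32)
        h_out = np.empty_like(h_in)

        err, stream = cuda.cuStreamCreate(0)
        err, d_buf = cuda.cuMemAlloc(h_in.nbytes)

        # Enqueue both transfers on the same stream, then wait once.
        err, = cuda.cuMemcpyHtoDAsync(d_buf, h_in.ctypes.data, h_in.nbytes, stream)
        err, = cuda.cuMemcpyDtoHAsync(h_out.ctypes.data, d_buf, h_out.nbytes, stream)
        err, = cuda.cuStreamSynchronize(stream)

        err, = cuda.cuMemFree(d_buf)
        err, = cuda.cuStreamDestroy(stream)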
cuda.cuda.cuMemcpyDtoDAsync(dstDevice, srcDevice, size_t ByteCount, hStream)

    Copies memory from Device to Device.

    Copies from device memory to device memory. dstDevice and srcDevice
    are the base pointers of the destination and source, respectively.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • srcDevice (CUdeviceptr) – Source device pointer
    • ByteCount (size_t) – Size of memory copy in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
cuda.cuda.cuMemcpyHtoAAsync(dstArray, size_t dstOffset, srcHost, size_t ByteCount, hStream)

    Copies memory from Host to Array.

    Copies from host memory to a 1D CUDA array. dstArray and dstOffset
    specify the CUDA array handle and starting offset in bytes of the
    destination data. srcHost specifies the base address of the source.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstArray (CUarray) – Destination array
    • dstOffset (size_t) – Offset in bytes of destination array
    • srcHost (Any) – Source host pointer
    • ByteCount (size_t) – Size of memory copy in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
cuda.cuda.cuMemcpyAtoHAsync(dstHost, srcArray, size_t srcOffset, size_t ByteCount, hStream)

    Copies memory from Array to Host.

    Copies from one 1D CUDA array to host memory. dstHost specifies the
    base pointer of the destination. srcArray and srcOffset specify the
    CUDA array handle and starting offset in bytes of the source data.
    ByteCount specifies the number of bytes to copy.

    Parameters:
    • dstHost (Any) – Destination pointer
    • srcArray (CUarray) – Source array
    • srcOffset (size_t) – Offset in bytes of source array
    • ByteCount (size_t) – Size of memory copy in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
cuda.cuda.cuMemcpy2DAsync(CUDA_MEMCPY2D pCopy: Optional[CUDA_MEMCPY2D], hStream)

    Copies memory for 2D arrays.

    Perform a 2D memory copy according to the parameters specified in
    pCopy. The CUDA_MEMCPY2D structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:
    • srcMemoryType and dstMemoryType specify the type of memory of the
      source and destination, respectively; CUmemorytype_enum is
      defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost and srcPitch
    specify the (host) base address of the source data and the bytes
    per row to apply. srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch
    specify the (unified virtual address space) base address of the
    source data and the bytes per row to apply. srcArray is ignored.
    This value may be used only if unified addressing is supported in
    the calling context.

    If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice and srcPitch
    specify the (device) base address of the source data and the bytes
    per row to apply. srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the
    handle of the source data. srcHost, srcDevice and srcPitch are
    ignored.

    If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch
    specify the (unified virtual address space) base address of the
    destination data and the bytes per row to apply. dstArray is
    ignored. This value may be used only if unified addressing is
    supported in the calling context.

    If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost and dstPitch
    specify the (host) base address of the destination data and the
    bytes per row to apply. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice and dstPitch
    specify the (device) base address of the destination data and the
    bytes per row to apply. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the
    handle of the destination data. dstHost, dstDevice and dstPitch
    are ignored.

    • srcXInBytes and srcY specify the base address of the source data
      for the copy.

    For host pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, srcXInBytes must be evenly divisible by the array
    element size.

    • dstXInBytes and dstY specify the base address of the destination
      data for the copy.

    For host pointers, the base address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, dstXInBytes must be evenly divisible by the array
    element size.

    cuMemcpy2DAsync() returns an error if any pitch is greater than the
    maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH). cuMemAllocPitch()
    passes back pitches that always work with cuMemcpy2D(). On
    intra-device memory copies (device to device, CUDA array to device,
    CUDA array to CUDA array), cuMemcpy2DAsync() may fail for pitches
    not computed by cuMemAllocPitch().

    Parameters:
    • pCopy (CUDA_MEMCPY2D) – Parameters for the memory copy
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
cuda.cuda.cuMemcpy3DAsync(CUDA_MEMCPY3D pCopy: Optional[CUDA_MEMCPY3D], hStream)

    Copies memory for 3D arrays.

    Perform a 3D memory copy according to the parameters specified in
    pCopy. The CUDA_MEMCPY3D structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:
    • srcMemoryType and dstMemoryType specify the type of memory of the
      source and destination, respectively; CUmemorytype_enum is
      defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch
    specify the (unified virtual address space) base address of the
    source data and the bytes per row to apply. srcArray is ignored.
    This value may be used only if unified addressing is supported in
    the calling context.

    If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost, srcPitch and
    srcHeight specify the (host) base address of the source data, the
    bytes per row, and the height of each 2D slice of the 3D array.
    srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice, srcPitch and
    srcHeight specify the (device) base address of the source data, the
    bytes per row, and the height of each 2D slice of the 3D array.
    srcArray is ignored.

    If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the
    handle of the source data. srcHost, srcDevice, srcPitch and
    srcHeight are ignored.

    If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch
    specify the (unified virtual address space) base address of the
    destination data and the bytes per row to apply. dstArray is
    ignored. This value may be used only if unified addressing is
    supported in the calling context.

    If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost, dstPitch and
    dstHeight specify the (host) base address of the destination data,
    the bytes per row, and the height of each 2D slice of the 3D array.
    dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice, dstPitch and
    dstHeight specify the (device) base address of the destination
    data, the bytes per row, and the height of each 2D slice of the 3D
    array. dstArray is ignored.

    If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the
    handle of the destination data. dstHost, dstDevice, dstPitch and
    dstHeight are ignored.

    • srcXInBytes, srcY and srcZ specify the base address of the source
      data for the copy.

    For host pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, srcXInBytes must be evenly divisible by the array
    element size.

    • dstXInBytes, dstY and dstZ specify the base address of the
      destination data for the copy.

    For host pointers, the base address is

    View CUDA Toolkit Documentation for a C++ code example

    For device pointers, the starting address is

    View CUDA Toolkit Documentation for a C++ code example

    For CUDA arrays, dstXInBytes must be evenly divisible by the array
    element size.

    cuMemcpy3DAsync() returns an error if any pitch is greater than the
    maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH).

    The srcLOD and dstLOD members of the CUDA_MEMCPY3D structure must
    be set to 0.

    Parameters:
    • pCopy (CUDA_MEMCPY3D) – Parameters for the memory copy
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
cuda.cuda.cuMemcpy3DPeerAsync(CUDA_MEMCPY3D_PEER pCopy: Optional[CUDA_MEMCPY3D_PEER], hStream)

    Copies memory between contexts asynchronously.

    Perform a 3D memory copy according to the parameters specified in
    pCopy. See the definition of the CUDA_MEMCPY3D_PEER structure for
    documentation of its parameters.

    Parameters:
    • pCopy (CUDA_MEMCPY3D_PEER) – Parameters for the memory copy
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD8(dstDevice, unsigned char uc, size_t N)

    Initializes device memory.

    Sets the memory range of N 8-bit values to the specified value uc.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • uc (unsigned char) – Value to set
    • N (size_t) – Number of elements

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD16(dstDevice, unsigned short us, size_t N)

    Initializes device memory.

    Sets the memory range of N 16-bit values to the specified value us.
    The dstDevice pointer must be two byte aligned.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • us (unsigned short) – Value to set
    • N (size_t) – Number of elements

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD32(dstDevice, unsigned int ui, size_t N)

    Initializes device memory.

    Sets the memory range of N 32-bit values to the specified value ui.
    The dstDevice pointer must be four byte aligned.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • ui (unsigned int) – Value to set
    • N (size_t) – Number of elements

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
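    A short sketch combining the byte-wise and word-wise fills, assuming
    an active context (0x3F800000 is simply the bit pattern of the
    float 1.0):

        from cuda import cuda

        n = 1024
        err, d_buf = cuda.cuMemAlloc(n * 4)

        # Fill as 4*n bytes of 0xFF, then as n 32-bit words of float 1.0.
        err, = cuda.cuMemsetD8(d_buf, 0xFF, n * 4)
        err, = cuda.cuMemsetD32(d_buf, 0x3F800000, n)

        err, = cuda.cuMemFree(d_buf)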
cuda.cuda.cuMemsetD2D8(dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height)

    Initializes device memory.

    Sets the 2D memory range of Width 8-bit values to the specified
    value uc. Height specifies the number of rows to set, and dstPitch
    specifies the number of bytes between each row. This function
    performs fastest when the pitch is one that has been passed back by
    cuMemAllocPitch().

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
    • uc (unsigned char) – Value to set
    • Width (size_t) – Width of row
    • Height (size_t) – Number of rows

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD2D16(dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height)

    Initializes device memory.

    Sets the 2D memory range of Width 16-bit values to the specified
    value us. Height specifies the number of rows to set, and dstPitch
    specifies the number of bytes between each row. The dstDevice
    pointer and dstPitch offset must be two byte aligned. This function
    performs fastest when the pitch is one that has been passed back by
    cuMemAllocPitch().

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
    • us (unsigned short) – Value to set
    • Width (size_t) – Width of row
    • Height (size_t) – Number of rows

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD2D32(dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height)

    Initializes device memory.

    Sets the 2D memory range of Width 32-bit values to the specified
    value ui. Height specifies the number of rows to set, and dstPitch
    specifies the number of bytes between each row. The dstDevice
    pointer and dstPitch offset must be four byte aligned. This
    function performs fastest when the pitch is one that has been
    passed back by cuMemAllocPitch().

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
    • ui (unsigned int) – Value to set
    • Width (size_t) – Width of row
    • Height (size_t) – Number of rows

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
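    Pairing the 2D memsets with cuMemAllocPitch, as the note above
    suggests, looks like the following sketch (active context assumed):

        from cuda import cuda

        width, height = 256, 64
        # cuMemAllocPitch returns a pitch the 2D memsets handle at full speed.
        err, d_buf, pitch = cuda.cuMemAllocPitch(width * 4, height, 4)

        # Zero `width` 32-bit values in each of `height` rows, `pitch`
        # bytes apart.
        err, = cuda.cuMemsetD2D32(d_buf, pitch, 0, width, height)

        err, = cuda.cuMemFree(d_buf)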
cuda.cuda.cuMemsetD8Async(dstDevice, unsigned char uc, size_t N, hStream)

    Sets device memory.

    Sets the memory range of N 8-bit values to the specified value uc.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • uc (unsigned char) – Value to set
    • N (size_t) – Number of elements
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD16Async(dstDevice, unsigned short us, size_t N, hStream)

    Sets device memory.

    Sets the memory range of N 16-bit values to the specified value us.
    The dstDevice pointer must be two byte aligned.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • us (unsigned short) – Value to set
    • N (size_t) – Number of elements
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD32Async(dstDevice, unsigned int ui, size_t N, hStream)

    Sets device memory.

    Sets the memory range of N 32-bit values to the specified value ui.
    The dstDevice pointer must be four byte aligned.

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • ui (unsigned int) – Value to set
    • N (size_t) – Number of elements
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD2D8Async(dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, hStream)

    Sets device memory.

    Sets the 2D memory range of Width 8-bit values to the specified
    value uc. Height specifies the number of rows to set, and dstPitch
    specifies the number of bytes between each row. This function
    performs fastest when the pitch is one that has been passed back by
    cuMemAllocPitch().

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
    • uc (unsigned char) – Value to set
    • Width (size_t) – Width of row
    • Height (size_t) – Number of rows
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD2D16Async(dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, hStream)

    Sets device memory.

    Sets the 2D memory range of Width 16-bit values to the specified
    value us. Height specifies the number of rows to set, and dstPitch
    specifies the number of bytes between each row. The dstDevice
    pointer and dstPitch offset must be two byte aligned. This function
    performs fastest when the pitch is one that has been passed back by
    cuMemAllocPitch().

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
    • us (unsigned short) – Value to set
    • Width (size_t) – Width of row
    • Height (size_t) – Number of rows
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuMemsetD2D32Async(dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, hStream)

    Sets device memory.

    Sets the 2D memory range of Width 32-bit values to the specified
    value ui. Height specifies the number of rows to set, and dstPitch
    specifies the number of bytes between each row. The dstDevice
    pointer and dstPitch offset must be four byte aligned. This
    function performs fastest when the pitch is one that has been
    passed back by cuMemAllocPitch().

    Parameters:
    • dstDevice (CUdeviceptr) – Destination device pointer
    • dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
    • ui (unsigned int) – Value to set
    • Width (size_t) – Width of row
    • Height (size_t) – Number of rows
    • hStream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
    CUresult
cuda.cuda.cuArrayCreate(CUDA_ARRAY_DESCRIPTOR pAllocateArray: Optional[CUDA_ARRAY_DESCRIPTOR])

    Creates a 1D or 2D CUDA array.

    Creates a CUDA array according to the CUDA_ARRAY_DESCRIPTOR
    structure pAllocateArray and returns a handle to the new CUDA array
    in *pHandle. The CUDA_ARRAY_DESCRIPTOR is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:
    • Width and Height are the width and height of the CUDA array (in
      elements); the CUDA array is one-dimensional if height is 0,
      two-dimensional otherwise;
    • Format specifies the format of the elements; CUarray_format is
      defined as:

    View CUDA Toolkit Documentation for a C++ code example

    • NumChannels specifies the number of packed components per CUDA
      array element; it may be 1, 2, or 4;

    Here are examples of CUDA array descriptions:

    Description for a CUDA array of 2048 floats:

    View CUDA Toolkit Documentation for a C++ code example

    Description for a 64 x 64 CUDA array of floats:

    View CUDA Toolkit Documentation for a C++ code example

    Description for a width x height CUDA array of 64-bit, 4x16-bit
    float16's:

    View CUDA Toolkit Documentation for a C++ code example

    Description for a width x height CUDA array of 16-bit elements,
    each of which is two 8-bit unsigned chars:

    View CUDA Toolkit Documentation for a C++ code example

    Parameters:
    pAllocateArray (CUDA_ARRAY_DESCRIPTOR) – Array descriptor

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED,
      CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT,
      CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY,
      CUDA_ERROR_UNKNOWN
    • pHandle (CUarray) – Returned array
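    A minimal sketch of creating, querying, and destroying a 64 x 64
    float array with these bindings, assuming an active context:

        from cuda import cuda

        # 64 x 64 CUDA array of floats (the second description above).
        desc = cuda.CUDA_ARRAY_DESCRIPTOR()
        desc.Width = 64
        desc.Height = 64
        desc.Format = cuda.CUarray_format.CU_AD_FORMAT_FLOAT
        desc.NumChannels = 1
        err, arr = cuda.cuArrayCreate(desc)

        # A handle can be interrogated later, e.g. inside a subroutine.
        err, got = cuda.cuArrayGetDescriptor(arr)
        assert got.Width == 64 and got.Height == 64

        err, = cuda.cuArrayDestroy(arr)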
cuda.cuda.cuArrayGetDescriptor(hArray)

    Get a 1D or 2D CUDA array descriptor.

    Returns in *pArrayDescriptor a descriptor containing information on
    the format and dimensions of the CUDA array hArray. It is useful
    for subroutines that have been passed a CUDA array, but need to
    know the CUDA array parameters for validation or other purposes.

    Parameters:
    hArray (CUarray) – Array to get descriptor of

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED,
      CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT,
      CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE
    • pArrayDescriptor (CUDA_ARRAY_DESCRIPTOR) – Returned array descriptor
cuda.cuda.cuArrayGetSparseProperties(array)

    Returns the layout properties of a sparse CUDA array.

    Returns the layout properties of a sparse CUDA array in
    sparseProperties. If the CUDA array is not allocated with flag
    CUDA_ARRAY3D_SPARSE, CUDA_ERROR_INVALID_VALUE will be returned.

    If the returned value in flags contains
    CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, then miptailSize
    represents the total size of the array. Otherwise, it will be zero.
    Also, the returned value in miptailFirstLevel is always zero. Note
    that the array must have been allocated using cuArrayCreate or
    cuArray3DCreate. For CUDA arrays obtained using
    cuMipmappedArrayGetLevel, CUDA_ERROR_INVALID_VALUE will be
    returned. Instead, cuMipmappedArrayGetSparseProperties must be used
    to obtain the sparse properties of the entire CUDA mipmapped array
    to which array belongs.

    Parameters:
    array (CUarray) – CUDA array to get the sparse properties of

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • sparseProperties (CUDA_ARRAY_SPARSE_PROPERTIES) – Returned sparse
      array layout properties
cuda.cuda.cuMipmappedArrayGetSparseProperties(mipmap)

    Returns the layout properties of a sparse CUDA mipmapped array.

    Returns the sparse array layout properties in sparseProperties. If
    the CUDA mipmapped array is not allocated with flag
    CUDA_ARRAY3D_SPARSE, CUDA_ERROR_INVALID_VALUE will be returned.

    For non-layered CUDA mipmapped arrays, miptailSize returns the size
    of the mip tail region. The mip tail region includes all mip levels
    whose width, height or depth is less than that of the tile. For
    layered CUDA mipmapped arrays, if flags contains
    CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, then miptailSize
    specifies the size of the mip tail of all layers combined.
    Otherwise, miptailSize specifies mip tail size per layer. The
    returned value of miptailFirstLevel is valid only if miptailSize is
    non-zero.

    Parameters:
    mipmap (CUmipmappedArray) – CUDA mipmapped array to get the sparse
    properties of

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • sparseProperties (CUDA_ARRAY_SPARSE_PROPERTIES) – Returned sparse
      array layout properties
cuda.cuda.cuArrayGetMemoryRequirements(array, device)

    Returns the memory requirements of a CUDA array.

    Returns the memory requirements of a CUDA array in
    memoryRequirements. If the CUDA array is not allocated with flag
    CUDA_ARRAY3D_DEFERRED_MAPPING, CUDA_ERROR_INVALID_VALUE will be
    returned.

    The returned value in size represents the total size of the CUDA
    array. The returned value in alignment represents the alignment
    necessary for mapping the CUDA array.

    Parameters:
    • array (CUarray) – CUDA array to get the memory requirements of
    • device (CUdevice) – Device to get the memory requirements for

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • memoryRequirements (CUDA_ARRAY_MEMORY_REQUIREMENTS) – Returned
      memory requirements
cuda.cuda.cuMipmappedArrayGetMemoryRequirements(mipmap, device)

    Returns the memory requirements of a CUDA mipmapped array.

    Returns the memory requirements of a CUDA mipmapped array in
    memoryRequirements. If the CUDA mipmapped array is not allocated
    with flag CUDA_ARRAY3D_DEFERRED_MAPPING, CUDA_ERROR_INVALID_VALUE
    will be returned.

    The returned value in size represents the total size of the CUDA
    mipmapped array. The returned value in alignment represents the
    alignment necessary for mapping the CUDA mipmapped array.

    Parameters:
    • mipmap (CUmipmappedArray) – CUDA mipmapped array to get the memory
      requirements of
    • device (CUdevice) – Device to get the memory requirements for

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • memoryRequirements (CUDA_ARRAY_MEMORY_REQUIREMENTS) – Returned
      memory requirements
cuda.cuda.cuArrayGetPlane(hArray, unsigned int planeIdx)

    Gets a CUDA array plane from a CUDA array.

    Returns in pPlaneArray a CUDA array that represents a single format
    plane of the CUDA array hArray.

    If planeIdx is greater than the maximum number of planes in this
    array or if the array does not have a multi-planar format, e.g.
    CU_AD_FORMAT_NV12, then CUDA_ERROR_INVALID_VALUE is returned.

    Note that if the hArray has format CU_AD_FORMAT_NV12, then passing
    in 0 for planeIdx returns a CUDA array of the same size as hArray
    but with one channel and CU_AD_FORMAT_UNSIGNED_INT8 as its format.
    If 1 is passed for planeIdx, then the returned CUDA array has half
    the height and width of hArray with two channels and
    CU_AD_FORMAT_UNSIGNED_INT8 as its format.

    Parameters:
    • hArray (CUarray) – Multiplanar CUDA array
    • planeIdx (unsigned int) – Plane index

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED,
      CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT,
      CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE
    • pPlaneArray (CUarray) – Returned CUDA array referenced by planeIdx
cuda.cuda.cuArrayDestroy(hArray)

    Destroys a CUDA array.

    Destroys the CUDA array hArray.

    Parameters:
    hArray (CUarray) – Array to destroy

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ARRAY_IS_MAPPED, CUDA_ERROR_CONTEXT_IS_DESTROYED

    Return type:
    CUresult
cuda.cuda.cuArray3DCreate(CUDA_ARRAY3D_DESCRIPTOR pAllocateArray: Optional[CUDA_ARRAY3D_DESCRIPTOR])

    Creates a 3D CUDA array.

    Creates a CUDA array according to the CUDA_ARRAY3D_DESCRIPTOR
    structure pAllocateArray and returns a handle to the new CUDA array
    in *pHandle. The CUDA_ARRAY3D_DESCRIPTOR is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:
    • Width, Height, and Depth are the width, height, and depth of the
      CUDA array (in elements); the following types of CUDA arrays can
      be allocated:
      • A 1D array is allocated if Height and Depth extents are both
        zero.
      • A 2D array is allocated if only Depth extent is zero.
      • A 3D array is allocated if all three extents are non-zero.
      • A 1D layered CUDA array is allocated if only Height is zero and
        the CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array.
        The number of layers is determined by the depth extent.
      • A 2D layered CUDA array is allocated if all three extents are
        non-zero and the CUDA_ARRAY3D_LAYERED flag is set. Each layer
        is a 2D array. The number of layers is determined by the depth
        extent.
      • A cubemap CUDA array is allocated if all three extents are
        non-zero and the CUDA_ARRAY3D_CUBEMAP flag is set. Width must
        be equal to Height, and Depth must be six. A cubemap is a
        special type of 2D layered CUDA array, where the six layers
        represent the six faces of a cube. The order of the six layers
        in memory is the same as that listed in CUarray_cubemap_face.
      • A cubemap layered CUDA array is allocated if all three extents
        are non-zero, and both the CUDA_ARRAY3D_CUBEMAP and
        CUDA_ARRAY3D_LAYERED flags are set. Width must be equal to
        Height, and Depth must be a multiple of six. A cubemap layered
        CUDA array is a special type of 2D layered CUDA array that
        consists of a collection of cubemaps. The first six layers
        represent the first cubemap, the next six layers form the
        second cubemap, and so on.
    • Format specifies the format of the elements; CUarray_format is
      defined as:

    View CUDA Toolkit Documentation for a C++ code example

    • NumChannels specifies the number of packed components per CUDA
      array element; it may be 1, 2, or 4;
    • Flags may be set to
      • CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA arrays.
        If this flag is set, Depth specifies the number of layers, not
        the depth of a 3D array.
      • CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be
        bound to the CUDA array. If this flag is not set,
        cuSurfRefSetArray will fail when attempting to bind the CUDA
        array to a surface reference.
      • CUDA_ARRAY3D_CUBEMAP to enable creation of cubemaps. If this
        flag is set, Width must be equal to Height, and Depth must be
        six. If the CUDA_ARRAY3D_LAYERED flag is also set, then Depth
        must be a multiple of six.
      • CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA array
        will be used for texture gather. Texture gather can only be
        performed on 2D CUDA arrays.

    Width, Height and Depth must meet certain size requirements as
    listed in the following table. All values are specified in
    elements. Note that for brevity's sake, the full name of the device
    attribute is not specified. For example, TEXTURE1D_WIDTH refers to
    the device attribute CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH.

    Note that 2D CUDA arrays have different size requirements if the
    CUDA_ARRAY3D_TEXTURE_GATHER flag is set. Width and Height must not
    be greater than CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
    and CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT
    respectively, in that case.

    View CUDA Toolkit Documentation for a table example

    Here are examples of CUDA array descriptions:

    Description for a CUDA array of 2048 floats:

    View CUDA Toolkit Documentation for a C++ code example

    Description for a 64 x 64 CUDA array of floats:

    View CUDA Toolkit Documentation for a C++ code example

    Description for a width x height x depth CUDA array of 64-bit,
    4x16-bit float16's:

    View CUDA Toolkit Documentation for a C++ code example

    Parameters:
    pAllocateArray (CUDA_ARRAY3D_DESCRIPTOR) – 3D array descriptor

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED,
      CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT,
      CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY,
      CUDA_ERROR_UNKNOWN
    • pHandle (CUarray) – Returned array
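    A hedged sketch of a 2D layered allocation follows; it assumes an
    active context and that the CUDA_ARRAY3D_LAYERED flag constant is
    exposed at module level, as the generated bindings do for the C
    #defines.

        from cuda import cuda

        # 2D layered array: 8 layers of 128 x 128 float elements. With
        # CUDA_ARRAY3D_LAYERED set, Depth is the layer count, not a 3D depth.
        desc = cuda.CUDA_ARRAY3D_DESCRIPTOR()
        desc.Width = 128
        desc.Height = 128
        desc.Depth = 8
        desc.Format = cuda.CUarray_format.CU_AD_FORMAT_FLOAT
        desc.NumChannels = 1
        desc.Flags = cuda.CUDA_ARRAY3D_LAYERED  # module-level flag constant
        err, arr = cuda.cuArray3DCreate(desc)

        err, got = cuda.cuArray3DGetDescriptor(arr)
        assert got.Depth == 8

        err, = cuda.cuArrayDestroy(arr)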
cuda.cuda.cuArray3DGetDescriptor(hArray)

    Get a 3D CUDA array descriptor.

    Returns in *pArrayDescriptor a descriptor containing information on
    the format and dimensions of the CUDA array hArray. It is useful
    for subroutines that have been passed a CUDA array, but need to
    know the CUDA array parameters for validation or other purposes.

    This function may be called on 1D and 2D arrays, in which case the
    Height and/or Depth members of the descriptor struct will be set to
    0.

    Parameters:
    hArray (CUarray) – 3D array to get descriptor of

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED,
      CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT,
      CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE
    • pArrayDescriptor (CUDA_ARRAY3D_DESCRIPTOR) – Returned 3D array
      descriptor

    cuda.cuda.cuMipmappedArrayCreate(CUDA_ARRAY3D_DESCRIPTOR pMipmappedArrayDesc: Optional[CUDA_ARRAY3D_DESCRIPTOR], unsigned int numMipmapLevels)#

    Creates a CUDA mipmapped array.

    Creates a CUDA mipmapped array according to the CUDA_ARRAY3D_DESCRIPTOR structure pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in *pHandle. numMipmapLevels specifies the number of mipmap levels to be allocated. This value is clamped to the range [1, 1 + floor(log2(max(width, height, depth)))].

    The CUDA_ARRAY3D_DESCRIPTOR is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:

    • Width, Height, and Depth are the width, height, and depth of the CUDA array (in elements); the following types of CUDA arrays can be allocated:

      • A 1D mipmapped array is allocated if Height and Depth extents are both zero.

      • A 2D mipmapped array is allocated if only Depth extent is zero.

      • A 3D mipmapped array is allocated if all three extents are non-zero.

      • A 1D layered CUDA mipmapped array is allocated if only Height is zero and the CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number of layers is determined by the depth extent.

      • A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and the CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number of layers is determined by the depth extent.

      • A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the CUDA_ARRAY3D_CUBEMAP flag is set. Width must be equal to Height, and Depth must be six. A cubemap is a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube. The order of the six layers in memory is the same as that listed in CUarray_cubemap_face.

      • A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, and both the CUDA_ARRAY3D_CUBEMAP and CUDA_ARRAY3D_LAYERED flags are set. Width must be equal to Height, and Depth must be a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists of a collection of cubemaps. The first six layers represent the first cubemap, the next six layers form the second cubemap, and so on.

    • Format specifies the format of the elements; CUarray_format is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • NumChannels specifies the number of packed components per CUDA array element; it may be 1, 2, or 4;

    • Flags may be set to

      • CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set, Depth specifies the number of layers, not the depth of a 3D array.

      • CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of the CUDA mipmapped array. If this flag is not set, cuSurfRefSetArray will fail when attempting to bind a mipmap level of the CUDA mipmapped array to a surface reference.

      • CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, Width must be equal to Height, and Depth must be six. If the CUDA_ARRAY3D_LAYERED flag is also set, then Depth must be a multiple of six.

      • CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather. Texture gather can only be performed on 2D CUDA mipmapped arrays.

    Width, Height and Depth must meet certain size requirements as listed in the following table. All values are specified in elements. Note that for brevity's sake, the full name of the device attribute is not specified. For example, TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH.

    View CUDA Toolkit Documentation for a table example
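
    As a sketch of the clamping rule above, the following allocates a full mip chain for a 64 x 64 2D array of floats and then fetches one level with cuMipmappedArrayGetLevel (documented below); it assumes a current context.

        import math
        from cuda import cuda

        desc = cuda.CUDA_ARRAY3D_DESCRIPTOR()
        desc.Width = 64
        desc.Height = 64
        desc.Depth = 0
        desc.Format = cuda.CUarray_format.CU_AD_FORMAT_FLOAT
        desc.NumChannels = 1
        desc.Flags = 0

        # 1 + floor(log2(max(width, height, depth))) is the upper clamp: 7 levels here.
        num_levels = 1 + int(math.floor(math.log2(max(desc.Width, desc.Height, desc.Depth, 1))))

        err, hMipArray = cuda.cuMipmappedArrayCreate(desc, num_levels)
        assert err == cuda.CUresult.CUDA_SUCCESS

        err, level0 = cuda.cuMipmappedArrayGetLevel(hMipArray, 0)
        assert err == cuda.CUresult.CUDA_SUCCESS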

    Parameters:

        • pMipmappedArrayDesc (CUDA_ARRAY3D_DESCRIPTOR) – mipmapped array descriptor

        • numMipmapLevels (unsigned int) – Number of mipmap levels

    Returns:

    cuda.cuda.cuMipmappedArrayGetLevel(hMipmappedArray, unsigned int level)#

    Gets a mipmap level of a CUDA mipmapped array.

    Returns in *pLevelArray a CUDA array that represents a single mipmap level of the CUDA mipmapped array hMipmappedArray.

    If level is greater than the maximum number of levels in this mipmapped array, CUDA_ERROR_INVALID_VALUE is returned.

    Parameters:

        • hMipmappedArray (CUmipmappedArray) – CUDA mipmapped array

        • level (unsigned int) – Mipmap level

    Returns:

    cuda.cuda.cuMipmappedArrayDestroy(hMipmappedArray)#

    Destroys a CUDA mipmapped array.

    Destroys the CUDA mipmapped array hMipmappedArray.

    Parameters:

        hMipmappedArray (CUmipmappedArray) – Mipmapped array to destroy

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ARRAY_IS_MAPPED, CUDA_ERROR_CONTEXT_IS_DESTROYED

    Return type:

        CUresult

    cuda.cuda.cuMemGetHandleForAddressRange(dptr, size_t size, handleType: CUmemRangeHandleType, unsigned long long flags)#

    Retrieve handle for an address range.

    Get a handle of the specified type to an address range. The address range must have been obtained by a prior call to either cuMemAlloc or cuMemAddressReserve. If the address range was obtained via cuMemAddressReserve, it must also be fully mapped via cuMemMap. On Tegra, the address range must have been obtained by a prior call to either cuMemAllocHost or cuMemHostAlloc.

    Users must ensure the dptr and size are aligned to the host page size.

    When requesting CUmemRangeHandleType::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, users are expected to query for dma_buf support for the platform by using the CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED device attribute before calling this API. The handle will be interpreted as a pointer to an integer to store the dma_buf file descriptor. Users must ensure the entire address range is backed and mapped when the address range is allocated by cuMemAddressReserve. All the physical allocations backing the address range must be resident on the same device and have identical allocation properties. Users are also expected to retrieve a new handle every time the underlying physical allocation(s) corresponding to a previously queried VA range are changed.
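
    A sketch of the dma_buf path described above, assuming a Linux platform and a fully mapped allocation at dptr of length size (both aligned to the host page size); dev, dptr and size are placeholders from earlier setup:

        # Check dma_buf support before requesting a DMA_BUF_FD handle.
        err, supported = cuda.cuDeviceGetAttribute(
            cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED, dev)
        assert err == cuda.CUresult.CUDA_SUCCESS

        if supported:
            err, fd = cuda.cuMemGetHandleForAddressRange(
                dptr, size,
                cuda.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD,
                0)  # flags are reserved and must be zero
            assert err == cuda.CUresult.CUDA_SUCCESS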

    Parameters:

        • dptr (CUdeviceptr) – Pointer to a valid CUDA device allocation. Must be aligned to host page size.

        • size (size_t) – Length of the address range. Must be aligned to host page size.

        • handleType (CUmemRangeHandleType) – Type of handle requested (defines type and size of the handle output parameter)

        • flags (unsigned long long) – Reserved, must be zero

    Returns:

        • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

        • handle (Any) – Pointer to the location where the returned handle will be stored.

    Virtual Memory Management#

    This section describes the virtual memory management functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuMemAddressReserve(size_t size, size_t alignment, addr, unsigned long long flags)#

    Allocate an address range reservation.

    Reserves a virtual address range based on the given parameters, giving the starting address of the range in ptr. This API requires a system that supports UVA. The size and address parameters must be a multiple of the host page size, and the alignment must be a power of two or zero for default alignment.
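
    The reservation is typically combined with cuMemCreate, cuMemMap and cuMemSetAccess (all documented below). A minimal end-to-end sketch, assuming cuInit has run and a context is current on device 0:

        from cuda import cuda

        prop = cuda.CUmemAllocationProp()
        prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        prop.location.id = 0  # device ordinal

        # Round the requested size up to the minimum granularity.
        err, gran = cuda.cuMemGetAllocationGranularity(
            prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
        size = ((1 << 20) + gran - 1) // gran * gran

        err, handle = cuda.cuMemCreate(size, prop, 0)                         # physical allocation
        err, ptr = cuda.cuMemAddressReserve(size, 0, cuda.CUdeviceptr(0), 0)  # VA reservation
        (err,) = cuda.cuMemMap(ptr, size, 0, handle, 0)                       # back the VA range

        # Mapping alone is not accessible; grant read/write access to device 0.
        accessDesc = cuda.CUmemAccessDesc()
        accessDesc.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        accessDesc.location.id = 0
        accessDesc.flags = cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
        (err,) = cuda.cuMemSetAccess(ptr, size, [accessDesc], 1)

        # Teardown mirrors the setup.
        (err,) = cuda.cuMemUnmap(ptr, size)
        (err,) = cuda.cuMemRelease(handle)
        (err,) = cuda.cuMemAddressFree(ptr, size)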

    Parameters:

        • size (size_t) – Size of the reserved virtual address range requested

        • alignment (size_t) – Alignment of the reserved virtual address range requested

        • addr (CUdeviceptr) – Fixed starting address range requested

        • flags (unsigned long long) – Currently unused, must be zero

    Returns:

    See also

    cuMemAddressFree

    cuda.cuda.cuMemAddressFree(ptr, size_t size)#

    Free an address range reservation.

    Frees a virtual address range reserved by cuMemAddressReserve. The size must match what was given to cuMemAddressReserve and the ptr given must match what was returned from cuMemAddressReserve.

    Parameters:

        • ptr (CUdeviceptr) – Starting address of the virtual address range to free

        • size (size_t) – Size of the virtual address region to free

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    See also

    cuMemAddressReserve

    cuda.cuda.cuMemCreate(size_t size, CUmemAllocationProp prop: Optional[CUmemAllocationProp], unsigned long long flags)#

    Create a CUDA memory handle representing a memory allocation of a given size described by the given properties.

    This creates a memory allocation on the target device specified through the prop structure. The created allocation will not have any device or host mappings. The generic memory handle for the allocation can be mapped to the address space of the calling process via cuMemMap. This handle cannot be transmitted directly to other processes (see cuMemExportToShareableHandle). On Windows, the caller must also pass an LPSECURITYATTRIBUTE in prop to be associated with this handle which limits or allows access to this handle for a recipient process (see win32HandleMetaData for more). The size of this allocation must be a multiple of the value given via cuMemGetAllocationGranularity with the CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. To create a CPU allocation targeting a specific host NUMA node, applications must set CUmemAllocationProp::CUmemLocation::type to CU_MEM_LOCATION_TYPE_HOST_NUMA and CUmemAllocationProp::CUmemLocation::id must specify the NUMA ID of the CPU. On systems where NUMA is not available, CUmemAllocationProp::CUmemLocation::id must be set to 0. Specifying CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT or CU_MEM_LOCATION_TYPE_HOST as the type will result in CUDA_ERROR_INVALID_VALUE.
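
    A sketch of the host-NUMA case described above (the NUMA node id 0 here is a placeholder; size is assumed to already be a multiple of the minimum granularity):

        prop = cuda.CUmemAllocationProp()
        prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA
        prop.location.id = 0  # NUMA ID of the CPU; 0 on systems without NUMA

        err, handle = cuda.cuMemCreate(size, prop, 0)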

    Applications can set requestedHandleTypes to CU_MEM_HANDLE_TYPE_FABRIC in order to create allocations suitable for sharing within an IMEX domain. An IMEX domain is either an OS instance or a group of securely connected OS instances using the NVIDIA IMEX daemon. An IMEX channel is a global resource within the IMEX domain that represents a logical entity that aims to provide fine-grained accessibility control for the participating processes. When exporter and importer CUDA processes have been granted access to the same IMEX channel, they can securely share memory. If the allocating process does not have access set up for an IMEX channel, attempting to create a CUmemGenericAllocationHandle with CU_MEM_HANDLE_TYPE_FABRIC will result in CUDA_ERROR_NOT_PERMITTED. The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels.

    If CUmemAllocationProp::allocFlags::usage contains the CU_MEM_CREATE_USAGE_TILE_POOL flag, then the memory allocation is intended only to be used as the backing tile pool for sparse CUDA arrays and sparse CUDA mipmapped arrays (see cuMemMapArrayAsync).

    Parameters:

        • size (size_t) – Size of the allocation requested

        • prop (CUmemAllocationProp) – Properties of the allocation to create.

        • flags (unsigned long long) – Flags for future use, must be zero now.

    Returns:

    cuda.cuda.cuMemRelease(handle)#

    Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate.

    Frees the memory that was allocated on a device through cuMemCreate.

    The memory allocation will be freed when all outstanding mappings to the memory are unmapped and when all outstanding references to the handle (including its shareable counterparts) are also released. The generic memory handle can be freed when there are still outstanding mappings made with this handle. Each time a recipient process imports a shareable handle, it needs to pair it with cuMemRelease for the handle to be freed. If handle is not a valid handle the behavior is undefined.

    Parameters:

        handle (CUmemGenericAllocationHandle) – Value of handle which was returned previously by cuMemCreate.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    See also

    cuMemCreate

    cuda.cuda.cuMemMap(ptr, size_t size, size_t offset, handle, unsigned long long flags)#

    Maps an allocation handle to a reserved virtual address range.

    Maps bytes of memory represented by handle starting from byte offset to size to address range [addr, addr + size]. This range must be an address reservation previously reserved with cuMemAddressReserve, and offset + size must be less than the size of the memory allocation. ptr, size, and offset must each be a multiple of the value given via cuMemGetAllocationGranularity with the CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. If handle represents a multicast object, ptr, size and offset must be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_MINIMUM_GRANULARITY. For best performance however, it is recommended that ptr, size and offset be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_RECOMMENDED_GRANULARITY.

    Please note calling cuMemMap does not make the address accessible; the caller needs to update accessibility of a contiguous mapped VA range by calling cuMemSetAccess.

    Once a recipient process obtains a shareable memory handle from cuMemImportFromShareableHandle, the process must use cuMemMap to map the memory into its address ranges before setting accessibility with cuMemSetAccess.

    cuMemMap can only create mappings on VA range reservations that are not currently mapped.

    Parameters:

        • ptr (CUdeviceptr) – Address where memory will be mapped.

        • size (size_t) – Size of the memory mapping.

        • offset (size_t) – Offset into the memory represented by handle

        • handle (CUmemGenericAllocationHandle) – Handle to a shareable memory allocation

        • flags (unsigned long long) – Flags for future use, must be zero now.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    cuda.cuda.cuMemMapArrayAsync(mapInfoList: Optional[Tuple[CUarrayMapInfo] | List[CUarrayMapInfo]], unsigned int count, hStream)#

    Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays.

    Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays. Each operation is specified by a CUarrayMapInfo entry in the mapInfoList array of size count. The structure CUarrayMapInfo is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where resourceType specifies the type of resource to be operated on. If resourceType is set to CUresourcetype::CU_RESOURCE_TYPE_ARRAY then CUarrayMapInfo::resource::array must be set to a valid sparse CUDA array handle. The CUDA array must be either a 2D, 2D layered or 3D CUDA array and must have been allocated using cuArrayCreate or cuArray3DCreate with the flag CUDA_ARRAY3D_SPARSE or CUDA_ARRAY3D_DEFERRED_MAPPING. For CUDA arrays obtained using cuMipmappedArrayGetLevel, CUDA_ERROR_INVALID_VALUE will be returned. If resourceType is set to CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY then CUarrayMapInfo::resource::mipmap must be set to a valid sparse CUDA mipmapped array handle. The CUDA mipmapped array must be either a 2D, 2D layered or 3D CUDA mipmapped array and must have been allocated using cuMipmappedArrayCreate with the flag CUDA_ARRAY3D_SPARSE or CUDA_ARRAY3D_DEFERRED_MAPPING.

    subresourceType specifies the type of subresource within the resource. CUarraySparseSubresourceType_enum is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL indicates a sparse miplevel which spans at least one tile in every dimension. The remaining miplevels which are too small to span at least one tile in any dimension constitute the mip tail region, as indicated by the CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL subresource type.

    If subresourceType is set to CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL then the CUarrayMapInfo::subresource::sparseLevel struct must contain valid array subregion offsets and extents. CUarrayMapInfo::subresource::sparseLevel::offsetX, CUarrayMapInfo::subresource::sparseLevel::offsetY and CUarrayMapInfo::subresource::sparseLevel::offsetZ must specify valid X, Y and Z offsets respectively. CUarrayMapInfo::subresource::sparseLevel::extentWidth, CUarrayMapInfo::subresource::sparseLevel::extentHeight and CUarrayMapInfo::subresource::sparseLevel::extentDepth must specify valid width, height and depth extents respectively. These offsets and extents must be aligned to the corresponding tile dimension. For CUDA mipmapped arrays, CUarrayMapInfo::subresource::sparseLevel::level must specify a valid mip level index; otherwise, it must be zero. For layered CUDA arrays and layered CUDA mipmapped arrays, CUarrayMapInfo::subresource::sparseLevel::layer must specify a valid layer index; otherwise, it must be zero. CUarrayMapInfo::subresource::sparseLevel::offsetZ must be zero and CUarrayMapInfo::subresource::sparseLevel::extentDepth must be set to 1 for 2D and 2D layered CUDA arrays and CUDA mipmapped arrays. Tile extents can be obtained by calling cuArrayGetSparseProperties and cuMipmappedArrayGetSparseProperties.

    If subresourceType is set to CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL then the CUarrayMapInfo::subresource::miptail struct must contain a valid mip tail offset in CUarrayMapInfo::subresource::miptail::offset and size in CUarrayMapInfo::subresource::miptail::size. Both the mip tail offset and mip tail size must be aligned to the tile size. For layered CUDA mipmapped arrays which don't have the flag CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL set in flags as returned by cuMipmappedArrayGetSparseProperties, CUarrayMapInfo::subresource::miptail::layer must specify a valid layer index; otherwise, it must be zero.

    If CUarrayMapInfo::resource::array or CUarrayMapInfo::resource::mipmap was created with the CUDA_ARRAY3D_DEFERRED_MAPPING flag set, the subresourceType and the contents of CUarrayMapInfo::subresource will be ignored.

    memOperationType specifies the type of operation. CUmemOperationType is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If memOperationType is set to CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP then the subresource will be mapped onto the tile pool memory specified by CUarrayMapInfo::memHandle at offset offset. The tile pool allocation has to be created by specifying the CU_MEM_CREATE_USAGE_TILE_POOL flag when calling cuMemCreate. Also, memHandleType must be set to CUmemHandleType::CU_MEM_HANDLE_TYPE_GENERIC.

    If memOperationType is set to CUmemOperationType::CU_MEM_OPERATION_TYPE_UNMAP then an unmapping operation is performed. CUarrayMapInfo::memHandle must be NULL.

    deviceBitMask specifies the list of devices that must map or unmap physical memory. Currently, this mask must have exactly one bit set, and the corresponding device must match the device associated with the stream. If memOperationType is set to CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP, the device must also match the device associated with the tile pool memory allocation as specified by CUarrayMapInfo::memHandle.

    flags and CUarrayMapInfo::reserved[] are unused and must be set to zero.

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:

        CUresult

    cuda.cuda.cuMemUnmap(ptr, size_t size)#

    Unmap the backing memory of a given address range.

    The range must be the entire contiguous address range that was mapped to. In other words, cuMemUnmap cannot unmap a sub-range of an address range mapped by cuMemCreate / cuMemMap. Any backing memory allocations will be freed if there are no existing mappings and there are no unreleased memory handles.

    When cuMemUnmap returns successfully, the address range is converted back to an address reservation and can be used for future calls to cuMemMap. Any new mapping to this virtual address will need to have access granted through cuMemSetAccess, as all mappings start with no accessibility setup.

    Parameters:

        • ptr (CUdeviceptr) – Starting address for the virtual address range to unmap

        • size (size_t) – Size of the virtual address range to unmap

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    cuda.cuda.cuMemSetAccess(ptr, size_t size, desc: Optional[Tuple[CUmemAccessDesc] | List[CUmemAccessDesc]], size_t count)#

    Set the access flags for each location specified in desc for the given virtual address range.

    Given the virtual address range via ptr and size, and the locations in the array given by desc and count, set the access flags for the target locations. The range must be a fully mapped address range containing all allocations created by cuMemMap / cuMemCreate. Users cannot specify CU_MEM_LOCATION_TYPE_HOST_NUMA accessibility for allocations created with other location types. Note: when CUmemAccessDesc::CUmemLocation::type is CU_MEM_LOCATION_TYPE_HOST_NUMA, CUmemAccessDesc::CUmemLocation::id is ignored. When setting the access flags for a virtual address range mapping a multicast object, ptr and size must be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_MINIMUM_GRANULARITY. For best performance however, it is recommended that ptr and size be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_RECOMMENDED_GRANULARITY.

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    cuda.cuda.cuMemGetAccess(CUmemLocation location: Optional[CUmemLocation], ptr)#

    Get the access flags set for the given location and ptr.

    Parameters:

        • location (CUmemLocation) – Location in which to check the flags for

        • ptr (CUdeviceptr) – Address in which to check the access flags for

    Returns:

    See also

    cuMemSetAccess

    cuda.cuda.cuMemExportToShareableHandle(handle, handleType: CUmemAllocationHandleType, unsigned long long flags)#

    Exports an allocation to a requested shareable handle type.

    Given a CUDA memory handle, create a shareable memory allocation handle that can be used to share the memory with other processes. The recipient process can convert the shareable handle back into a CUDA memory handle using cuMemImportFromShareableHandle and map it with cuMemMap. The implementation of what this handle is and how it can be transferred is defined by the requested handle type in handleType.

    Once all shareable handles are closed and the allocation is released, the allocated memory referenced will be released back to the OS and uses of the CUDA handle afterward will lead to undefined behavior.

    This API can also be used in conjunction with other APIs (e.g. Vulkan, OpenGL) that support importing memory from the shareable type.
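
    A sketch of one export/import round trip using a POSIX file descriptor handle type; sending fd to the other process (typically over a Unix domain socket) is elided, and the allocation's requestedHandleTypes is assumed to have included this handle type at cuMemCreate time.

        fd_type = cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR

        # Exporting side: turn the CUDA handle into an OS file descriptor.
        err, fd = cuda.cuMemExportToShareableHandle(handle, fd_type, 0)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # Importing side (after receiving fd): recover a CUDA memory handle,
        # then map it with cuMemMap and grant access with cuMemSetAccess.
        err, imported = cuda.cuMemImportFromShareableHandle(fd, fd_type)
        assert err == cuda.CUresult.CUDA_SUCCESS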

    Parameters:

        • handle (CUmemGenericAllocationHandle) – CUDA handle for the memory allocation

        • handleType (CUmemAllocationHandleType) – Type of shareable handle requested (defines type and size of the shareableHandle output parameter)

        • flags (unsigned long long) – Reserved, must be zero

    Returns:

    cuda.cuda.cuMemImportFromShareableHandle(osHandle, shHandleType: CUmemAllocationHandleType)#

    Imports an allocation from a requested shareable handle type.

    If the current process cannot support the memory described by this shareable handle, this API will error as CUDA_ERROR_NOT_SUPPORTED.

    If shHandleType is CU_MEM_HANDLE_TYPE_FABRIC and the importer process has not been granted access to the same IMEX channel as the exporter process, this API will error as CUDA_ERROR_NOT_PERMITTED.

    Parameters:

    Returns:

    Notes

    Importing shareable handles exported from some graphics APIs (Vulkan, OpenGL, etc.) created on devices under an SLI group may not be supported, and thus this API will return CUDA_ERROR_NOT_SUPPORTED. There is no guarantee that the contents of handle will be the same CUDA memory handle for the same given OS shareable handle, or the same underlying allocation.

    cuda.cuda.cuMemGetAllocationGranularity(CUmemAllocationProp prop: Optional[CUmemAllocationProp], option: CUmemAllocationGranularity_flags)#

    Calculates either the minimal or recommended granularity.

    Calculates either the minimal or recommended granularity for a given allocation specification and returns it in granularity. This granularity can be used as a multiple for alignment, size, or address mapping.

    Parameters:

    Returns:

    See also

    cuMemCreate, cuMemMap

    cuda.cuda.cuMemGetAllocationPropertiesFromHandle(handle)#

    Retrieve the contents of the property structure defining properties for this handle.

    Parameters:

        handle (CUmemGenericAllocationHandle) – Handle which to perform the query on

    Returns:

    cuda.cuda.cuMemRetainAllocationHandle(addr)#

    Given an address addr, returns the allocation handle of the backing memory allocation.

    The handle is guaranteed to be the same handle value used to map the memory. If the address requested is not mapped, the function will fail. The returned handle must be released with a corresponding number of calls to cuMemRelease.

    Parameters:

        addr (Any) – Memory address to query, that has been mapped previously.

    Returns:

    Notes

    The address addr can be any address in a range previously mapped by cuMemMap, and not necessarily the start address.

    Stream Ordered Memory Allocator#

    This section describes the stream ordered memory allocator exposed by the low-level CUDA driver application programming interface.

    Overview

    The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use-before-allocation or use-after-free error will cause undefined behavior.

    The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee.

    Supported Platforms

    Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED.
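
    A minimal sketch of the query plus an allocate/use/free sequence in stream order (assuming cuInit has run, dev is a CUdevice and a context is current):

        from cuda import cuda

        err, supported = cuda.cuDeviceGetAttribute(
            cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED, dev)
        assert err == cuda.CUresult.CUDA_SUCCESS and supported

        err, stream = cuda.cuStreamCreate(0)
        err, dptr = cuda.cuMemAllocAsync(1 << 20, stream)  # allocation ordered in stream
        # ... launch kernels using dptr into the same stream ...
        (err,) = cuda.cuMemFreeAsync(dptr, stream)         # free ordered after that work
        (err,) = cuda.cuStreamSynchronize(stream)          # observe completion on the host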

    cuda.cuda.cuMemFreeAsync(dptr, hStream)#

    Frees memory with stream ordered semantics.

    Inserts a free operation into hStream. The allocation must not be accessed after stream execution reaches the free. After this API returns, accessing the memory from any subsequent work launched on the GPU or querying its pointer attributes results in undefined behavior.

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    Notes

    During stream capture, this function results in the creation of a free node and must therefore be passed the address of a graph allocation.

    cuda.cuda.cuMemAllocAsync(size_t bytesize, hStream)#

    Allocates memory with stream ordered semantics.

    Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the memory pool current to the stream's device.

    Parameters:

        • bytesize (size_t) – Number of bytes to allocate

        • hStream (CUstream or cudaStream_t) – The stream establishing the stream ordering contract and the memory pool to allocate from

    Returns:

    Notes

    The default memory pool of a device contains device memory from that device.

    Basic stream ordering allows future work submitted into the same stream to use the allocation. Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation operation completes before work submitted in a separate stream runs.

    During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters.

    cuda.cuda.cuMemPoolTrimTo(pool, size_t minBytesToKeep)#

    Tries to release memory back to the OS.

    Releases memory back to the OS until the pool contains fewer than minBytesToKeep reserved bytes, or there is no more memory that the allocator can safely release. The allocator cannot release OS allocations that back outstanding asynchronous allocations. The OS allocations may happen at a different granularity from the user allocations.

    Parameters:

        • pool (CUmemoryPool or cudaMemPool_t) – The memory pool to trim

        • minBytesToKeep (size_t) – If the pool has less than minBytesToKeep reserved, the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have at least minBytesToKeep bytes reserved after the operation.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    Notes

    Allocations that have not been freed count as outstanding.

    Allocations that have been asynchronously freed but whose completion has not been observed on the host (e.g. by a synchronize) can count as outstanding.

    cuda.cuda.cuMemPoolSetAttribute(pool, attr: CUmemPool_attribute, value)#

    Sets attributes of a memory pool.

    Supported attributes are listed below; a usage sketch follows the list.

    • CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

    • CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync (default enabled).

    • CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) Reset the high watermark that tracks the amount of backing memory that was allocated for the memory pool. It is illegal to set this attribute to a non-zero value.

    • CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) Reset the high watermark that tracks the amount of used memory that was allocated for the memory pool.
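
    For example, raising the release threshold so the pool holds on to memory rather than returning it to the OS at each synchronization, a common tuning step (a sketch, assuming a current context and a CUdevice dev):

        err, pool = cuda.cuDeviceGetMemPool(dev)  # the device's current pool
        (err,) = cuda.cuMemPoolSetAttribute(
            pool,
            cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
            cuda.cuuint64_t(64 << 20))  # keep up to 64 MiB cached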

    • -
    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    -
    -
    Return type:
    -

    CUresult

    -
    -
    - -
    - -
    -

    cuda.cuda.cuMemPoolGetAttribute(pool, attr: CUmemPool_attribute)#

    Gets attributes of a memory pool.

    Supported attributes are:

    • CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

    • CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync (default enabled).

    • CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool

    • CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset.

    • CU_MEMPOOL_ATTR_USED_MEM_CURRENT: (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.

    • CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application.

    Parameters:

    Returns:

    cuda.cuda.cuMemPoolSetAccess(pool, map: Optional[Tuple[CUmemAccessDesc] | List[CUmemAccessDesc]], size_t count)#

    Controls visibility of pools between devices.

    Parameters:

        • pool (CUmemoryPool or cudaMemPool_t) – The pool being modified

        • map (List[CUmemAccessDesc]) – Array of access descriptors. Each descriptor instructs the access to enable for a single GPU.

        • count (size_t) – Number of descriptors in the map array.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    cuda.cuda.cuMemPoolGetAccess(memPool, CUmemLocation location: Optional[CUmemLocation])#

    Returns the accessibility of a pool from a device.

    Returns the accessibility of the pool's memory from the specified location.

    Parameters:

    Returns:

        • CUresult

        • flags (CUmemAccess_flags) – the accessibility of the pool from the specified location

    cuda.cuda.cuMemPoolCreate(CUmemPoolProps poolProps: Optional[CUmemPoolProps])#

    Creates a memory pool.

    Creates a CUDA memory pool and returns the handle in pool. The poolProps determines the properties of the pool such as the backing device and IPC capabilities.

    To create a memory pool targeting a specific host NUMA node, applications must set CUmemPoolProps::CUmemLocation::type to CU_MEM_LOCATION_TYPE_HOST_NUMA and CUmemPoolProps::CUmemLocation::id must specify the NUMA ID of the host memory node. Specifying CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT or CU_MEM_LOCATION_TYPE_HOST as the CUmemPoolProps::CUmemLocation::type will result in CUDA_ERROR_INVALID_VALUE. By default, the pool's memory will be accessible from the device it is allocated on. In the case of pools created with CU_MEM_LOCATION_TYPE_HOST_NUMA, their default accessibility will be from the host CPU. Applications can control the maximum size of the pool by specifying a non-zero value for maxSize. If set to 0, the maximum size of the pool will default to a system dependent value.

    Applications can set handleTypes to CU_MEM_HANDLE_TYPE_FABRIC in order to create a CUmemoryPool suitable for sharing within an IMEX domain. An IMEX domain is either an OS instance or a group of securely connected OS instances using the NVIDIA IMEX daemon. An IMEX channel is a global resource within the IMEX domain that represents a logical entity that aims to provide fine-grained accessibility control for the participating processes. When exporter and importer CUDA processes have been granted access to the same IMEX channel, they can securely share memory. If the allocating process does not have access set up for an IMEX channel, attempting to export a CUmemoryPool with CU_MEM_HANDLE_TYPE_FABRIC will result in CUDA_ERROR_NOT_PERMITTED. The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels.
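
    A sketch creating an explicit device-backed pool and allocating from it with cuMemAllocFromPoolAsync (documented below); stream is assumed to exist from earlier setup:

        props = cuda.CUmemPoolProps()
        props.allocType = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        props.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        props.location.id = 0  # backing device ordinal
        props.handleTypes = cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_NONE  # no IPC

        err, pool = cuda.cuMemPoolCreate(props)
        err, dptr = cuda.cuMemAllocFromPoolAsync(1 << 20, pool, stream)
        (err,) = cuda.cuMemFreeAsync(dptr, stream)
        (err,) = cuda.cuMemPoolDestroy(pool)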

    Parameters:

        poolProps (CUmemPoolProps) – None

    Returns:

    Notes

    Specifying CU_MEM_HANDLE_TYPE_NONE creates a memory pool that will not support IPC.

    cuda.cuda.cuMemPoolDestroy(pool)#

    Destroys the specified memory pool.

    If any pointers obtained from this pool haven't been freed, or if the pool has free operations that haven't completed when cuMemPoolDestroy is invoked, the function will return immediately and the resources associated with the pool will be released automatically once there are no more outstanding allocations.

    Destroying the current mempool of a device sets the default mempool of that device as the current mempool for that device.

    Parameters:

        pool (CUmemoryPool or cudaMemPool_t) – None

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    Notes

    A device's default memory pool cannot be destroyed.

    cuda.cuda.cuMemAllocFromPoolAsync(size_t bytesize, pool, hStream)#

    Allocates memory from a specified pool with stream ordered semantics.

    Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the specified memory pool.

    Parameters:

    Returns:

    Notes

    During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters.

    cuda.cuda.cuMemPoolExportToShareableHandle(pool, handleType: CUmemAllocationHandleType, unsigned long long flags)#

    Exports a memory pool to the requested handle type.

    Given an IPC capable mempool, create an OS handle to share the pool with another process. A recipient process can convert the shareable handle into a mempool with cuMemPoolImportFromShareableHandle. Individual pointers can then be shared with the cuMemPoolExportPointer and cuMemPoolImportPointer APIs. The implementation of what the shareable handle is and how it can be transferred is defined by the requested handle type.

    Parameters:

    Returns:

    Notes

    To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than CU_MEM_HANDLE_TYPE_NONE.

    cuda.cuda.cuMemPoolImportFromShareableHandle(handle, handleType: CUmemAllocationHandleType, unsigned long long flags)#

    Imports a memory pool from a shared handle.

    Specific allocations can be imported from the imported pool with cuMemPoolImportPointer.

    If handleType is CU_MEM_HANDLE_TYPE_FABRIC and the importer process has not been granted access to the same IMEX channel as the exporter process, this API will error as CUDA_ERROR_NOT_PERMITTED.

    Parameters:

        • handle (Any) – OS handle of the pool to open

        • handleType (CUmemAllocationHandleType) – The type of handle being imported

        • flags (unsigned long long) – must be 0

    Returns:

    Notes

    Imported memory pools do not support creating new allocations. As such, imported memory pools may not be used in cuDeviceSetMemPool or cuMemAllocFromPoolAsync calls.

    cuda.cuda.cuMemPoolExportPointer(ptr)#

    Export data to share a memory pool allocation between processes.

    Constructs shareData_out for sharing a specific allocation from an already shared memory pool. The recipient process can import the allocation with the cuMemPoolImportPointer API. The data is not a handle and may be shared through any IPC mechanism.

    Parameters:

        ptr (CUdeviceptr) – pointer to memory being exported

    Returns:

    cuda.cuda.cuMemPoolImportPointer(pool, CUmemPoolPtrExportData shareData: Optional[CUmemPoolPtrExportData])#

    Import a memory pool allocation from another process.

    Returns in ptr_out a pointer to the imported memory. The imported memory must not be accessed before the allocation operation completes in the exporting process. The imported memory must be freed from all importing processes before being freed in the exporting process. The pointer may be freed with cuMemFree or cuMemFreeAsync. If cuMemFreeAsync is used, the free must be completed on the importing process before the free operation on the exporting process.

    Parameters:

    Returns:

    Notes

    The cuMemFreeAsync API may be used in the exporting process before the cuMemFreeAsync operation completes in its stream as long as the cuMemFreeAsync in the exporting process specifies a stream with a stream dependency on the importing process's cuMemFreeAsync.

    Multicast Object Management#

    This section describes the CUDA multicast object operations exposed by the low-level CUDA driver application programming interface.

    Overview

    A multicast object created via cuMulticastCreate enables certain memory operations to be broadcast to a team of devices. Devices can be added to a multicast object via cuMulticastAddDevice. Memory can be bound on each participating device via either cuMulticastBindMem or cuMulticastBindAddr. Multicast objects can be mapped into a device's virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess).

    Supported Platforms

    Support for multicast on a specific device can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED.

    cuda.cuda.cuMulticastCreate(CUmulticastObjectProp prop: Optional[CUmulticastObjectProp])#

    Create a generic allocation handle representing a multicast object described by the given properties.

    This creates a multicast object as described by prop. The number of participating devices is specified by numDevices. Devices can be added to the multicast object via cuMulticastAddDevice. All participating devices must be added to the multicast object before memory can be bound to it. Memory is bound to the multicast object via either cuMulticastBindMem or cuMulticastBindAddr, and can be unbound via cuMulticastUnbind. The total amount of memory that can be bound per device is specified by size. This size must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, the size should be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_RECOMMENDED.

    After all participating devices have been added, multicast objects can also be mapped to a device's virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess). Multicast objects can also be shared with other processes by requesting a shareable handle via cuMemExportToShareableHandle. Note that the desired types of shareable handles must be specified in the bitmask handleTypes. Multicast objects can be released using the virtual memory management API cuMemRelease.
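
    A sketch of the property setup described above for a two-device team, aligning the per-device size to the recommended granularity; the field names follow the CUmulticastObjectProp structure and the size is a placeholder:

        mcProp = cuda.CUmulticastObjectProp()
        mcProp.numDevices = 2  # two participating devices
        mcProp.handleTypes = cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR

        # Align the per-device bound size to the recommended granularity.
        err, gran = cuda.cuMulticastGetGranularity(
            mcProp, cuda.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_RECOMMENDED)
        mcProp.size = ((1 << 20) + gran - 1) // gran * gran

        err, mcHandle = cuda.cuMulticastCreate(mcProp)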

    Parameters:

        prop (CUmulticastObjectProp) – Properties of the multicast object to create.

    Returns:

    cuda.cuda.cuMulticastAddDevice(mcHandle, dev)#

    Associate a device to a multicast object.

    Associates a device to a multicast object. The added device will be part of the multicast team of size specified by numDevices during cuMulticastCreate. The association of the device to the multicast object is permanent during the lifetime of the multicast object. All devices must be added to the multicast team before any memory can be bound to any device in the team. Any calls to cuMulticastBindMem or cuMulticastBindAddr will block until all devices have been added. Similarly, all devices must be added to the multicast team before a virtual address range can be mapped to the multicast object. A call to cuMemMap will block until all devices have been added.

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    cuda.cuda.cuMulticastBindMem(mcHandle, size_t mcOffset, memHandle, size_t memOffset, size_t size, unsigned long long flags)#

    Bind a memory allocation represented by a handle to a multicast object.

    Binds a memory allocation specified by memHandle and created via cuMemCreate to a multicast object represented by mcHandle and created via cuMulticastCreate. The intended size of the bind, the offset in the multicast range mcOffset, as well as the offset in the memory memOffset must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, size, mcOffset and memOffset should be aligned to the granularity of the memory allocation (see cuMemGetAllocationGranularity) or to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_RECOMMENDED.

    The size + memOffset cannot be larger than the size of the allocated memory. Similarly, the size + mcOffset cannot be larger than the size of the multicast object. The memory allocation must have been created on one of the devices that was added to the multicast team via cuMulticastAddDevice. Externally shareable as well as imported multicast objects can be bound only to externally shareable memory. Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are insufficient resources required to perform the bind. This call may also return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not initialized or running.
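
    Continuing the sketch from cuMulticastCreate above: after every device has been added, each process binds its cuMemCreate allocation (memHandle here is assumed to come from an earlier cuMemCreate on dev) at offset zero.

        (err,) = cuda.cuMulticastAddDevice(mcHandle, dev)  # once per participating device

        # Bind a physical allocation to the start of the multicast object;
        # offsets and size honor the minimum granularity per the rules above.
        (err,) = cuda.cuMulticastBindMem(mcHandle, 0, memHandle, 0, mcProp.size, 0)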

    Parameters:

        • mcHandle (CUmemGenericAllocationHandle) – Handle representing a multicast object.

        • mcOffset (size_t) – Offset into the multicast object for attachment.

        • memHandle (CUmemGenericAllocationHandle) – Handle representing a memory allocation.

        • memOffset (size_t) – Offset into the memory for attachment.

        • size (size_t) – Size of the memory that will be bound to the multicast object.

        • flags (unsigned long long) – Flags for future use, must be zero for now.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_SYSTEM_NOT_READY

    Return type:

        CUresult

    cuda.cuda.cuMulticastBindAddr(mcHandle, size_t mcOffset, memptr, size_t size, unsigned long long flags)#

    Bind a memory allocation represented by a virtual address to a multicast object.

    Binds a memory allocation specified by its mapped address memptr to a multicast object represented by mcHandle. The memory must have been allocated via cuMemCreate or cudaMallocAsync. The intended size of the bind, the offset in the multicast range mcOffset and memptr must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, size, mcOffset and memptr should be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_RECOMMENDED.

    The size cannot be larger than the size of the allocated memory. Similarly, the size + mcOffset cannot be larger than the total size of the multicast object. The memory allocation must have been created on one of the devices that was added to the multicast team via cuMulticastAddDevice. Externally shareable as well as imported multicast objects can be bound only to externally shareable memory. Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are insufficient resources required to perform the bind. This call may also return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not initialized or running.

    Parameters:

        • mcHandle (CUmemGenericAllocationHandle) – Handle representing a multicast object.

        • mcOffset (size_t) – Offset into the multicast VA range for attachment.

        • memptr (CUdeviceptr) – Virtual address of the memory allocation.

        • size (size_t) – Size of memory that will be bound to the multicast object.

        • flags (unsigned long long) – Flags for future use, must be zero now.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_SYSTEM_NOT_READY

    Return type:

        CUresult

    cuda.cuda.cuMulticastUnbind(mcHandle, dev, size_t mcOffset, size_t size)#

    Unbind any memory allocations bound to a multicast object at a given offset and up to a given size.

    Unbinds any memory allocations hosted on dev and bound to a multicast object at mcOffset and up to a given size. The intended size of the unbind and the offset in the multicast range (mcOffset) must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. The size + mcOffset cannot be larger than the total size of the multicast object.

    Parameters:

        • mcHandle (CUmemGenericAllocationHandle) – Handle representing a multicast object.

        • dev (CUdevice) – Device that hosts the memory allocation.

        • mcOffset (size_t) – Offset into the multicast object.

        • size (size_t) – Desired size to unbind.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    Notes

    Warning: The mcOffset and the size must match the corresponding values specified during the bind call. Any other values may result in undefined behavior.
    -cuda.cuda.cuMulticastGetGranularity(CUmulticastObjectProp prop: Optional[CUmulticastObjectProp], option: CUmulticastGranularity_flags)#
    -

    Calculates either the minimal or recommended granularity for multicast object.

    -

    Calculates either the minimal or recommended granularity for a given -set of multicast object properties and returns it in granularity. This -granularity can be used as a multiple for size, bind offsets and -address mappings of the multicast object.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    -

    -
    -
    - -
    - -
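
    As an illustrative sketch (not part of these docs), the granularity query can be used to size a multicast object before creation. The property values below are hypothetical, struct field names are assumed to match the CUmulticastObjectProp binding, and error handling is reduced to an assert:

        from cuda import cuda

        # Hypothetical property values; real code would match its own topology.
        prop = cuda.CUmulticastObjectProp()
        prop.numDevices = 2
        prop.size = 4 * 1024 * 1024
        prop.handleTypes = cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR

        err, gran = cuda.cuMulticastGetGranularity(
            prop, cuda.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_RECOMMENDED)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # Round the intended size up to a granularity multiple before creating
        # the multicast object (e.g. with cuMulticastCreate).
        prop.size = ((prop.size + gran - 1) // gran) * gran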

    Unified Addressing

    This section describes the unified addressing functions of the low-level CUDA driver application programming interface.

    Overview

    CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer – the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below).

    Supported Platforms

    Whether or not a device supports unified addressing may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING.

    Unified addressing is automatically enabled in 64-bit processes.

    Looking Up Information from Pointer Values

    It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cuPointerGetAttribute().

    Since pointers are unique, it is not necessary to specify information about the pointers specified to the various copy functions in the CUDA API. The function cuMemcpy() may be used to perform a copy between two pointers, ignoring whether they point to host or device memory (making cuMemcpyHtoD(), cuMemcpyDtoD(), and cuMemcpyDtoH() unnecessary for devices supporting unified addressing). For multidimensional copies, the memory type CU_MEMORYTYPE_UNIFIED may be used to specify that the CUDA driver should infer the location of the pointer from its value.
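
    As a minimal sketch of what this enables (illustrative, not from the original text; assumes cuInit has run and a context is current on a device with unified addressing):

        import ctypes
        from cuda import cuda

        nbytes = 1 << 20
        host_buf = (ctypes.c_byte * nbytes)()
        err, dptr = cuda.cuMemAlloc(nbytes)

        # The same entry point handles host->device and device->host; the
        # driver infers each pointer's location from its value.
        err, = cuda.cuMemcpy(dptr, ctypes.addressof(host_buf), nbytes)
        err, = cuda.cuMemcpy(ctypes.addressof(host_buf), dptr, nbytes)
        err, = cuda.cuMemFree(dptr)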


    Automatic Mapping of Host Allocated Host Memory

    All host memory allocated in all contexts using cuMemAllocHost() and cuMemHostAlloc() is always directly accessible from all contexts on all devices that support unified addressing. This is the case regardless of whether or not the flags CU_MEMHOSTALLOC_PORTABLE and CU_MEMHOSTALLOC_DEVICEMAP are specified.

    The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host, so it is not necessary to call cuMemHostGetDevicePointer() to get the device pointer for these allocations.

    Note that this is not the case for memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below.

    Automatic Registration of Peer Memory

    Upon enabling direct access from a context that supports unified addressing to another peer context that supports unified addressing using cuCtxEnablePeerAccess(), all memory allocated in the peer context using cuMemAlloc() and cuMemAllocPitch() will immediately be accessible by the current context. The device pointer value through which any peer memory may be accessed in the current context is the same pointer value through which that memory may be accessed in the peer context.

    Exceptions, Disjoint Addressing

    Not all memory may be accessed on devices through the same pointer value through which they are accessed on the host. These exceptions are host memory registered using cuMemHostRegister() and host memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all contexts that support unified addressing.

    This device address may be queried using cuMemHostGetDevicePointer() when a context using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory through cuMemcpy() and similar functions using the CU_MEMORYTYPE_UNIFIED memory type.
    cuda.cuda.cuPointerGetAttribute(attribute: CUpointer_attribute, ptr)

    Returns information about a pointer.

    The supported attributes are:

    • CU_POINTER_ATTRIBUTE_CONTEXT: Returns in *data the CUcontext in which ptr was allocated or registered. The type of data must be CUcontext *. If ptr was not allocated by, mapped by, or registered with a CUcontext which uses unified virtual addressing then CUDA_ERROR_INVALID_VALUE is returned.

    • CU_POINTER_ATTRIBUTE_MEMORY_TYPE: Returns in *data the physical memory type of the memory that ptr addresses as a CUmemorytype enumerated value. The type of data must be unsigned int. If ptr addresses device memory then *data is set to CU_MEMORYTYPE_DEVICE. The particular CUdevice on which the memory resides is the CUdevice of the CUcontext returned by the CU_POINTER_ATTRIBUTE_CONTEXT attribute of ptr. If ptr addresses host memory then *data is set to CU_MEMORYTYPE_HOST. If ptr was not allocated by, mapped by, or registered with a CUcontext which uses unified virtual addressing then CUDA_ERROR_INVALID_VALUE is returned. If the current CUcontext does not support unified virtual addressing then CUDA_ERROR_INVALID_CONTEXT is returned.

    • CU_POINTER_ATTRIBUTE_DEVICE_POINTER: Returns in *data the device pointer value through which ptr may be accessed by kernels running in the current CUcontext. The type of data must be CUdeviceptr *. If there exists no device pointer value through which kernels running in the current CUcontext may access ptr then CUDA_ERROR_INVALID_VALUE is returned. If there is no current CUcontext then CUDA_ERROR_INVALID_CONTEXT is returned. Except in the exceptional disjoint addressing cases discussed below, the value returned in *data will equal the input value ptr.

    • CU_POINTER_ATTRIBUTE_HOST_POINTER: Returns in *data the host pointer value through which ptr may be accessed by the host program. The type of data must be void **. If there exists no host pointer value through which the host program may directly access ptr then CUDA_ERROR_INVALID_VALUE is returned. Except in the exceptional disjoint addressing cases discussed below, the value returned in *data will equal the input value ptr.

    • CU_POINTER_ATTRIBUTE_P2P_TOKENS: Returns in *data two tokens for use with the nv-p2p.h Linux kernel interface. data must be a struct of type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS. ptr must be a pointer to memory obtained from cuMemAlloc(). Note that p2pToken and vaSpaceToken are only valid for the lifetime of the source allocation. A subsequent allocation at the same address may return completely different tokens. Querying this attribute has a side effect of setting the attribute CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that ptr points to.

    • CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: A boolean attribute which when set, ensures that synchronous memory operations initiated on the region of memory that ptr points to will always synchronize. See further documentation in the section titled “API synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    • CU_POINTER_ATTRIBUTE_BUFFER_ID: Returns in *data a buffer ID which is guaranteed to be unique within the process. data must point to an unsigned long long. ptr must be a pointer to memory obtained from a CUDA memory allocation API. Every memory allocation from any of the CUDA memory allocation APIs will have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs from previous freed allocations. IDs are only unique within a single process.

    • CU_POINTER_ATTRIBUTE_IS_MANAGED: Returns in *data a boolean that indicates whether the pointer points to managed memory or not. If ptr is not a valid CUDA pointer then CUDA_ERROR_INVALID_VALUE is returned.

    • CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: Returns in *data an integer representing a device ordinal of a device against which the memory was allocated or registered.

    • CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: Returns in *data a boolean that indicates if this pointer maps to an allocation that is suitable for cudaIpcGetMemHandle.

    • CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: Returns in *data the starting address for the allocation referenced by the device pointer ptr. Note that this is not necessarily the address of the mapped region, but the address of the mappable address range ptr references (e.g. from cuMemAddressReserve).

    • CU_POINTER_ATTRIBUTE_RANGE_SIZE: Returns in *data the size for the allocation referenced by the device pointer ptr. Note that this is not necessarily the size of the mapped region, but the size of the mappable address range ptr references (e.g. from cuMemAddressReserve). To retrieve the size of the mapped region, see cuMemGetAddressRange.

    • CU_POINTER_ATTRIBUTE_MAPPED: Returns in *data a boolean that indicates if this pointer is in a valid address range that is mapped to a backing allocation.

    • CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: Returns a bitmask of the allowed handle types for an allocation that may be passed to cuMemExportToShareableHandle.

    • CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: Returns in *data the handle to the mempool that the allocation was obtained from.

    Note that for most allocations in the unified virtual address space the host and device pointer for accessing the allocation will be the same. The exceptions to this are

    • user memory registered using cuMemHostRegister

    • host memory allocated using cuMemHostAlloc with the CU_MEMHOSTALLOC_WRITECOMBINED flag

    For these types of allocation there will exist separate, disjoint host and device addresses for accessing the allocation. In particular

    • The host address will correspond to an invalid unmapped device address (which will result in an exception if accessed from the device)

    • The device address will correspond to an invalid unmapped host address (which will result in an exception if accessed from the host)

    For these types of allocations, querying CU_POINTER_ATTRIBUTE_HOST_POINTER and CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host and device addresses from either address.

    Parameters:
    • attribute (CUpointer_attribute) – Pointer attribute to query
    • ptr (CUdeviceptr) – Pointer

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE
    • data (Any) – Returned pointer attribute value
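
    For example, a minimal sketch (illustrative; assumes a current context and that ptr came from a CUDA allocation such as cuMemAlloc) that checks whether a pointer addresses host or device memory:

        from cuda import cuda

        err, memtype = cuda.cuPointerGetAttribute(
            cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, ptr)
        if err == cuda.CUresult.CUDA_SUCCESS:
            # Normalize through the enum in case the binding returns a raw int.
            if cuda.CUmemorytype(memtype) == cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE:
                print("ptr addresses device memory")
            elif cuda.CUmemorytype(memtype) == cuda.CUmemorytype.CU_MEMORYTYPE_HOST:
                print("ptr addresses host memory")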
    cuda.cuda.cuMemPrefetchAsync(devPtr, size_t count, dstDevice, hStream)

    Prefetches memory to the specified destination device.

    Note there is a later version of this API, cuMemPrefetchAsync_v2, which will supplant this version in CUDA 13.0; this version is retained for minor version compatibility.

    Prefetches memory to the specified destination device. devPtr is the base device pointer of the memory to be prefetched and dstDevice is the destination device. count specifies the number of bytes to copy. hStream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables, or it may also refer to system-allocated memory on systems with non-zero CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

    Passing in CU_DEVICE_CPU for dstDevice will prefetch the data to host memory. If dstDevice is a GPU, then the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. Additionally, hStream must be associated with a device that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS.

    The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

    If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cuMemAllocManaged allocations to host memory in order to make room. Device memory allocated using cuMemAlloc or cuArrayCreate will not be evicted.

    By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only set up on dstDevice. The exact behavior however also depends on the settings applied to this memory range via cuMemAdvise as described below:

    If CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, then that subset will create a read-only copy of the pages on dstDevice.

    If CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory range, then the pages will be migrated to dstDevice even if dstDevice is not the preferred location of any pages in the memory range.

    If CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

    Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

    Note that this function is asynchronous with respect to the host and all work on other devices.

    Parameters:
    • devPtr (CUdeviceptr) – Pointer to be prefetched
    • count (size_t) – Size in bytes
    • dstDevice (CUdevice) – Destination device to prefetch to
    • hStream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    Return type:
    CUresult
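
    A minimal sketch (illustrative; stream is assumed to be an existing CUstream and a context is current on device 0) of prefetching a managed range to the GPU and back:

        from cuda import cuda

        nbytes = 1 << 20
        err, dptr = cuda.cuMemAllocManaged(
            nbytes, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value)

        # Move the pages onto device 0 ahead of the work that reads them...
        err, = cuda.cuMemPrefetchAsync(dptr, nbytes, 0, stream)
        # ...then bring them back to host memory when the CPU takes over.
        err, = cuda.cuMemPrefetchAsync(dptr, nbytes, cuda.CU_DEVICE_CPU, stream)
        err, = cuda.cuStreamSynchronize(stream)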
    cuda.cuda.cuMemPrefetchAsync_v2(devPtr, size_t count, CUmemLocation location: CUmemLocation, unsigned int flags, hStream)

    Prefetches memory to the specified destination location.

    Prefetches memory to the specified destination location. devPtr is the base device pointer of the memory to be prefetched and location specifies the destination location. count specifies the number of bytes to copy. hStream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables.

    Specifying CU_MEM_LOCATION_TYPE_DEVICE for location.type will prefetch memory to the GPU specified by the device ordinal in location.id, which must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Additionally, hStream must be associated with a device that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Specifying CU_MEM_LOCATION_TYPE_HOST as location.type will prefetch data to host memory. Applications can request prefetching memory to a specific host NUMA node by specifying CU_MEM_LOCATION_TYPE_HOST_NUMA for location.type and a valid host NUMA node id in location.id. Users can also request prefetching memory to the host NUMA node closest to the current thread’s CPU by specifying CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT for location.type. Note that when location.type is either CU_MEM_LOCATION_TYPE_HOST or CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT, location.id will be ignored.

    The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

    If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cuMemAllocManaged allocations to host memory in order to make room. Device memory allocated using cuMemAlloc or cuArrayCreate will not be evicted.

    By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only set up on the destination location. The exact behavior however also depends on the settings applied to this memory range via cuMemAdvise as described below:

    If CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, then that subset will create a read-only copy of the pages on the destination location. If however the destination location is a host NUMA node, then any pages of that subset that are already in another host NUMA node will be transferred to the destination.

    If CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory range, then the pages will be migrated to location even if location is not the preferred location of any pages in the memory range.

    If CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

    Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

    Note that this function is asynchronous with respect to the host and all work on other devices.

    Parameters:
    • devPtr (CUdeviceptr) – Pointer to be prefetched
    • count (size_t) – Size in bytes
    • location (CUmemLocation) – Destination location to prefetch to
    • flags (unsigned int) – Flags for future use, must be zero now
    • hStream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    Return type:
    CUresult
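
    A sketch of the _v2 form (illustrative; dptr, nbytes and stream are assumed from earlier examples):

        from cuda import cuda

        loc = cuda.CUmemLocation()
        loc.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        loc.id = 0  # device ordinal; ignored for HOST / HOST_NUMA_CURRENT

        # Prefetch the managed range `dptr` of `nbytes` bytes to device 0.
        err, = cuda.cuMemPrefetchAsync_v2(dptr, nbytes, loc, 0, stream)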
    cuda.cuda.cuMemAdvise(devPtr, size_t count, advice: CUmem_advise, device)

    Advise about the usage of a given memory range.

    Note there is a later version of this API, cuMemAdvise_v2, which will supplant this version in CUDA 13.0; this version is retained for minor version compatibility.

    Advise the Unified Memory subsystem about the usage pattern for the memory range starting at devPtr with a size of count bytes. The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the advice is applied. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables. The memory range could also refer to system-allocated pageable memory provided it represents a valid, host-accessible region of memory and all additional constraints imposed by advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable memory range results in an error being returned.

    The advice parameter can take the following values (a usage sketch follows this list):

    • CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read from and only occasionally written to. Any read accesses from any processor to this region will create a read-only copy of at least the accessed pages in that processor’s memory. Additionally, if cuMemPrefetchAsync is called on this region, it will create a read-only copy of the data on the destination processor. If any processor writes to this region, all copies of the corresponding page will be invalidated except for the one where the write occurred. The device argument is ignored for this advice. Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Also, if a context is created on a device that does not have the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until all such contexts are destroyed. If the memory region refers to valid system-allocated pageable memory, then the accessing device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only copy to be created on that device. Note however that if the accessing device also has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice will not create a read-only copy when that device accesses this memory region.

    • CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated copies of the data will be collapsed into a single copy. The location for the collapsed copy will be the preferred location if the page has a preferred location and one of the read-duplicated copies was resident at that location. Otherwise, the location chosen is arbitrary.

    • CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the data to be the memory belonging to device. Passing in CU_DEVICE_CPU for device sets the preferred location as host memory. If device is a GPU, then it must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy when a fault occurs on that memory region. If the data is already in its preferred location and the faulting processor can establish a mapping without requiring the data to be migrated, then data migration will be avoided. On the other hand, if the data is not in its preferred location or if a direct mapping cannot be established, then it will be migrated to the processor accessing it. It is important to note that setting the preferred location does not prevent data prefetching done using cuMemPrefetchAsync. Having a preferred location can override the page thrash detection and resolution logic in the Unified Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device memory, the page may eventually be pinned to host memory by the Unified Memory driver. But if the preferred location is set as device memory, then the page will continue to thrash indefinitely. If CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice, unless read accesses from device will not result in a read-only copy being created on that device as outlined in description for the advice CU_MEM_ADVISE_SET_READ_MOSTLY. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

    • CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of CU_MEM_ADVISE_SET_PREFERRED_LOCATION and changes the preferred location to none.

    • CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by device. Passing in CU_DEVICE_CPU for device will set the advice for the CPU. If device is a GPU, then the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. This advice does not cause data migration and has no impact on the location of the data per se. Instead, it causes the data to always be mapped in the specified processor’s page tables, as long as the location of the data permits a mapping to be established. If the data gets migrated for any reason, the mappings are updated accordingly. This advice is recommended in scenarios where data locality is not important, but avoiding faults is. Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data over to the other GPUs is not as important because the accesses are infrequent and the overhead of migration may be too high. But preventing faults can still help improve performance, and so having a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated to host memory because the CPU typically cannot access device memory directly. Any GPU that had the CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the page in host memory. If CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice. Additionally, if the preferred location of this memory region or any subset of it is also device, then the policies associated with CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if device has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then this call has no effect.

    • CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to the data from device may be removed at any time causing accesses to result in non-fatal page faults. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if device has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then this call has no effect.

    Parameters:
    • devPtr (CUdeviceptr) – Pointer to memory to set the advice for
    • count (size_t) – Size in bytes of the memory range
    • advice (CUmem_advise) – Advice to be applied for the specified memory range
    • device (CUdevice) – Device to apply the advice for

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    Return type:
    CUresult
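
    As an illustrative sketch (dptr, nbytes and a current context assumed), setting and later unsetting the read-mostly advice on a managed range:

        from cuda import cuda

        # Hint that the range is mostly read; `device` is ignored for this advice.
        err, = cuda.cuMemAdvise(
            dptr, nbytes, cuda.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, 0)

        # ... many readers, few writers ...

        err, = cuda.cuMemAdvise(
            dptr, nbytes, cuda.CUmem_advise.CU_MEM_ADVISE_UNSET_READ_MOSTLY, 0)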
    cuda.cuda.cuMemAdvise_v2(devPtr, size_t count, advice: CUmem_advise, CUmemLocation location: CUmemLocation)

    Advise about the usage of a given memory range.

    Advise the Unified Memory subsystem about the usage pattern for the memory range starting at devPtr with a size of count bytes. The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the advice is applied. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables. The memory range could also refer to system-allocated pageable memory provided it represents a valid, host-accessible region of memory and all additional constraints imposed by advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable memory range results in an error being returned.

    The advice parameter can take the following values:

    • CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read from and only occasionally written to. Any read accesses from any processor to this region will create a read-only copy of at least the accessed pages in that processor’s memory. Additionally, if cuMemPrefetchAsync or cuMemPrefetchAsync_v2 is called on this region, it will create a read-only copy of the data on the destination processor. If the target location for cuMemPrefetchAsync_v2 is a host NUMA node and a read-only copy already exists on another host NUMA node, that copy will be migrated to the targeted host NUMA node. If any processor writes to this region, all copies of the corresponding page will be invalidated except for the one where the write occurred. If the writing processor is the CPU and the preferred location of the page is a host NUMA node, then the page will also be migrated to that host NUMA node. The location argument is ignored for this advice. Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Also, if a context is created on a device that does not have the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until all such contexts are destroyed. If the memory region refers to valid system-allocated pageable memory, then the accessing device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only copy to be created on that device. Note however that if the accessing device also has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice will not create a read-only copy when that device accesses this memory region.

    • CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated copies of the data will be collapsed into a single copy. The location for the collapsed copy will be the preferred location if the page has a preferred location and one of the read-duplicated copies was resident at that location. Otherwise, the location chosen is arbitrary. Note: The location argument is ignored for this advice.

    • CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the data to be the memory belonging to location. When location.type is CU_MEM_LOCATION_TYPE_HOST, location.id is ignored and the preferred location is set to be host memory. To set the preferred location to a specific host NUMA node, applications must set location.type to CU_MEM_LOCATION_TYPE_HOST_NUMA and location.id must specify the NUMA ID of the host NUMA node. If location.type is set to CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT, location.id will be ignored and the host NUMA node closest to the calling thread’s CPU will be used as the preferred location. If location.type is CU_MEM_LOCATION_TYPE_DEVICE, then location.id must be a valid device ordinal and the device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy when a fault occurs on that memory region. If the data is already in its preferred location and the faulting processor can establish a mapping without requiring the data to be migrated, then data migration will be avoided. On the other hand, if the data is not in its preferred location or if a direct mapping cannot be established, then it will be migrated to the processor accessing it. It is important to note that setting the preferred location does not prevent data prefetching done using cuMemPrefetchAsync. Having a preferred location can override the page thrash detection and resolution logic in the Unified Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device memory, the page may eventually be pinned to host memory by the Unified Memory driver. But if the preferred location is set as device memory, then the page will continue to thrash indefinitely. If CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice, unless read accesses from location will not result in a read-only copy being created on that processor as outlined in description for the advice CU_MEM_ADVISE_SET_READ_MOSTLY. If the memory region refers to valid system-allocated pageable memory, and location.type is CU_MEM_LOCATION_TYPE_DEVICE then location.id must be a valid device that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

    • CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of CU_MEM_ADVISE_SET_PREFERRED_LOCATION and changes the preferred location to none. The location argument is ignored for this advice.

    • CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by processor location. location.type must be either CU_MEM_LOCATION_TYPE_DEVICE with location.id representing a valid device ordinal, or CU_MEM_LOCATION_TYPE_HOST, in which case location.id will be ignored. All other location types are invalid. If location.id refers to a GPU, then the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. This advice does not cause data migration and has no impact on the location of the data per se. Instead, it causes the data to always be mapped in the specified processor’s page tables, as long as the location of the data permits a mapping to be established. If the data gets migrated for any reason, the mappings are updated accordingly. This advice is recommended in scenarios where data locality is not important, but avoiding faults is. Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data over to the other GPUs is not as important because the accesses are infrequent and the overhead of migration may be too high. But preventing faults can still help improve performance, and so having a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated to host memory because the CPU typically cannot access device memory directly. Any GPU that had the CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the page in host memory. If CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice. Additionally, if the preferred location of this memory region or any subset of it is also location, then the policies associated with CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. If the memory region refers to valid system-allocated pageable memory, and location.type is CU_MEM_LOCATION_TYPE_DEVICE then the device in location.id must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if location.id has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then this call has no effect.

    • CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to the data from location may be removed at any time causing accesses to result in non-fatal page faults. If the memory region refers to valid system-allocated pageable memory, and location.type is CU_MEM_LOCATION_TYPE_DEVICE then the device in location.id must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if location.id has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then this call has no effect.

    Parameters:
    • devPtr (CUdeviceptr) – Pointer to memory to set the advice for
    • count (size_t) – Size in bytes of the memory range
    • advice (CUmem_advise) – Advice to be applied for the specified memory range
    • location (CUmemLocation) – Location to apply the advice for

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    Return type:
    CUresult
    cuda.cuda.cuMemRangeGetAttribute(size_t dataSize, attribute: CUmem_range_attribute, devPtr, size_t count)

    Query an attribute of a given memory range.

    Query an attribute about the memory range starting at devPtr with a size of count bytes. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables.

    The attribute parameter can take the following values (a usage sketch follows this list):

    • CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be 1 if all pages in the given memory range have read-duplication enabled, or 0 otherwise.

    • CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be a GPU device id if all pages in the memory range have that GPU as their preferred location, or it will be CU_DEVICE_CPU if all pages in the memory range have the CPU as their preferred location, or it will be CU_DEVICE_INVALID if either all the pages don’t have the same preferred location or some of the pages don’t have a preferred location at all. Note that the actual location of the pages in the memory range at the time of the query may be different from the preferred location.

    • CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is specified, data will be interpreted as an array of 32-bit integers, and dataSize must be a non-zero multiple of 4. The result returned will be a list of device ids that had CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory range. If any device does not have that advice set for the entire memory range, that device will not be included. If data is larger than the number of devices that have that advice set for that memory range, CU_DEVICE_INVALID will be returned in all the extra space provided. For example, if dataSize is 12 (i.e. data has 3 elements) and only device 0 has the advice set, then the result returned will be { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. If data is smaller than the number of devices that have that advice set, then only as many devices will be returned as can fit in the array. There is no guarantee on which specific devices will be returned, however.

    • CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be the last location to which all pages in the memory range were prefetched explicitly via cuMemPrefetchAsync. This will either be a GPU id or CU_DEVICE_CPU depending on whether the last location for prefetch was a GPU or the CPU respectively. If any page in the memory range was never explicitly prefetched or if all pages were not prefetched to the same location, CU_DEVICE_INVALID will be returned. Note that this simply returns the last location that the application requested to prefetch the memory range to. It gives no indication as to whether the prefetch operation to that location has completed or even begun.

    • CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE: If this attribute is specified, data will be interpreted as a CUmemLocationType, and dataSize must be sizeof(CUmemLocationType). The CUmemLocationType returned will be CU_MEM_LOCATION_TYPE_DEVICE if all pages in the memory range have the same GPU as their preferred location, or CUmemLocationType will be CU_MEM_LOCATION_TYPE_HOST if all pages in the memory range have the CPU as their preferred location, or it will be CU_MEM_LOCATION_TYPE_HOST_NUMA if all the pages in the memory range have the same host NUMA node ID as their preferred location, or it will be CU_MEM_LOCATION_TYPE_INVALID if either all the pages don’t have the same preferred location or some of the pages don’t have a preferred location at all. Note that the actual location type of the pages in the memory range at the time of the query may be different from the preferred location type.

    • CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE: If this attribute is specified, data will be interpreted as a CUmemLocationType, and dataSize must be sizeof(CUmemLocationType). The result returned will be the last location to which all pages in the memory range were prefetched explicitly via cuMemPrefetchAsync. The CUmemLocationType returned will be CU_MEM_LOCATION_TYPE_DEVICE if the last prefetch location was a GPU, or CU_MEM_LOCATION_TYPE_HOST if it was the CPU, or CU_MEM_LOCATION_TYPE_HOST_NUMA if the last prefetch location was a specific host NUMA node. If any page in the memory range was never explicitly prefetched or if all pages were not prefetched to the same location, CUmemLocationType will be CU_MEM_LOCATION_TYPE_INVALID. Note that this simply returns the last location type that the application requested to prefetch the memory range to. It gives no indication as to whether the prefetch operation to that location has completed or even begun.

    Parameters:
    • dataSize (size_t) – Size in bytes of the result buffer data
    • attribute (CUmem_range_attribute) – The attribute to query
    • devPtr (CUdeviceptr) – Start of the range to query
    • count (size_t) – Size of the range to query

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • data (Any) – Returned attribute value
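
    For instance, a sketch (illustrative; dptr and nbytes assumed) querying whether an entire managed range currently has read-duplication enabled:

        from cuda import cuda

        # READ_MOSTLY is reported as a 32-bit integer, so dataSize must be 4.
        err, data = cuda.cuMemRangeGetAttribute(
            4, cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY,
            dptr, nbytes)
        if err == cuda.CUresult.CUDA_SUCCESS:
            print("read-mostly:", bool(data))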
    cuda.cuda.cuMemRangeGetAttributes(dataSizes: Tuple[int] | List[int], attributes: Optional[Tuple[CUmem_range_attribute] | List[CUmem_range_attribute]], size_t numAttributes, devPtr, size_t count)

    Query attributes of a given memory range.

    Query attributes of the memory range starting at devPtr with a size of count bytes. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables. The attributes array will be interpreted to have numAttributes entries. The dataSizes array will also be interpreted to have numAttributes entries. The results of the query will be stored in data.

    The supported attributes are the same as for cuMemRangeGetAttribute; please refer to it for attribute descriptions and restrictions.

    Parameters:
    • dataSizes (List[int]) – Array containing the sizes of each result
    • attributes (List[CUmem_range_attribute]) – An array of attributes to query (numAttributes and the number of attributes in this array should match)
    • numAttributes (size_t) – Number of attributes to query
    • devPtr (CUdeviceptr) – Start of the range to query
    • count (size_t) – Size of the range to query

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • data (List[Any]) – The queried attribute values, one entry per attribute
    cuda.cuda.cuPointerSetAttribute(value, attribute: CUpointer_attribute, ptr)

    Set attributes on a previously allocated memory region.

    The supported attributes are:

    • CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: A boolean attribute that can either be set (1) or unset (0). When set, the region of memory that ptr points to is guaranteed to always synchronize memory operations that are synchronous. If there are some previously initiated synchronous memory operations that are pending when this attribute is set, the function does not return until those memory operations are complete. See further documentation in the section titled “API synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior. value will be considered as a pointer to an unsigned integer to which this attribute is to be set.

    Parameters:
    • value (Any) – Pointer to memory containing the value to be set
    • attribute (CUpointer_attribute) – Pointer attribute to set
    • ptr (CUdeviceptr) – Pointer to a memory region allocated using CUDA memory allocation APIs

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    Return type:
    CUresult
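
    A sketch (illustrative; dptr and a current context assumed, and the exact marshaling of value through the Python binding is an assumption here) of forcing synchronous memory operations on an allocation:

        import ctypes
        from cuda import cuda

        # value is read as a pointer to an unsigned int: 1 sets the attribute.
        value = ctypes.c_uint(1)
        err, = cuda.cuPointerSetAttribute(
            ctypes.addressof(value),
            cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS,
            dptr)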
    cuda.cuda.cuPointerGetAttributes(unsigned int numAttributes, attributes: Optional[Tuple[CUpointer_attribute] | List[CUpointer_attribute]], ptr)

    Returns information about a pointer.

    The supported attributes are the same as for cuPointerGetAttribute; refer to it for attribute descriptions and restrictions.

    Unlike cuPointerGetAttribute, this function will not return an error when the ptr encountered is not a valid CUDA pointer. Instead, the attributes are assigned default NULL values and CUDA_SUCCESS is returned.

    If ptr was not allocated by, mapped by, or registered with a CUcontext which uses UVA (Unified Virtual Addressing), CUDA_ERROR_INVALID_CONTEXT is returned.

    Parameters:
    • numAttributes (unsigned int) – Number of attributes to query
    • attributes (List[CUpointer_attribute]) – An array of attributes to query (numAttributes and the number of attributes in this array should match)
    • ptr (CUdeviceptr) – Pointer to query

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE
    • data (List[Any]) – The returned attribute values, one entry per queried attribute

    Stream Management

    This section describes the stream management functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuStreamCreate(unsigned int Flags)

    Create a stream.

    Creates a stream and returns a handle in phStream. The Flags argument determines behaviors of the stream.

    Valid values for Flags are:

    • CU_STREAM_DEFAULT: Default stream creation flag.

    • CU_STREAM_NON_BLOCKING: Specifies that work running in the created stream may run concurrently with work in stream 0 (the NULL stream), and that the created stream should perform no implicit synchronization with stream 0.

    Parameters:
    Flags (unsigned int) – Parameters for stream creation

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY
    • phStream (CUstream) – Returned newly created stream
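
    A minimal sketch (illustrative; assumes cuInit has run and a context is current):

        from cuda import cuda

        err, stream = cuda.cuStreamCreate(
            cuda.CUstream_flags.CU_STREAM_NON_BLOCKING.value)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # ... enqueue asynchronous work on `stream` ...

        err, = cuda.cuStreamDestroy(stream)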
    cuda.cuda.cuStreamCreateWithPriority(unsigned int flags, int priority)

    Create a stream with the given priority.

    Creates a stream with the specified priority and returns a handle in phStream. This affects the scheduling priority of work in the stream. Priorities provide a hint to preferentially run work with higher priority when possible, but do not preempt already-running work or provide any other functional guarantee on execution order.

    priority follows a convention where lower numbers represent higher priorities. ‘0’ represents default priority. The range of meaningful numerical priorities can be queried using cuCtxGetStreamPriorityRange. If the specified priority is outside the numerical range returned by cuCtxGetStreamPriorityRange, it will automatically be clamped to the lowest or the highest number in the range.

    Parameters:
    • flags (unsigned int) – Flags for stream creation. See cuStreamCreate for a list of valid flags
    • priority (int) – Stream priority. Lower numbers represent higher priorities. See cuCtxGetStreamPriorityRange for more information about meaningful stream priorities that can be passed.

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY
    • phStream (CUstream) – Returned newly created stream

    Notes

    Stream priorities are supported only on GPUs with compute capability 3.5 or higher.

    In the current implementation, only compute kernels launched in priority streams are affected by the stream’s priority. Stream priorities have no effect on host-to-device and device-to-host memory operations.
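
    For example (a sketch; a current context assumed), a request can be pinned to the most meaningful priority by querying the valid range up front:

        from cuda import cuda

        # Lower numbers mean higher priority; the range is device-dependent.
        err, least, greatest = cuda.cuCtxGetStreamPriorityRange()
        err, stream = cuda.cuStreamCreateWithPriority(
            cuda.CUstream_flags.CU_STREAM_NON_BLOCKING.value, greatest)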
    cuda.cuda.cuStreamGetPriority(hStream)

    Query the priority of a given stream.

    Query the priority of a stream created using cuStreamCreate, cuStreamCreateWithPriority or cuGreenCtxStreamCreate and return the priority in priority. Note that if the stream was created with a priority outside the numerical range returned by cuCtxGetStreamPriorityRange, this function returns the clamped priority. See cuStreamCreateWithPriority for details about priority clamping.

    Parameters:
    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE
    • priority (int) – The returned stream priority
    cuda.cuda.cuStreamGetFlags(hStream)

    Query the flags of a given stream.

    Query the flags of a stream created using cuStreamCreate, cuStreamCreateWithPriority or cuGreenCtxStreamCreate and return the flags in flags.

    Parameters:
    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE
    • flags (unsigned int) – The returned stream flags
    cuda.cuda.cuStreamGetId(hStream)

    Returns the unique Id associated with the stream handle supplied.

    Returns in streamId the unique Id which is associated with the given stream handle. The Id is unique for the life of the program.

    The stream handle hStream can refer to any of the following:

    • a stream created via any of the CUDA driver APIs such as cuStreamCreate and cuStreamCreateWithPriority, or their runtime API equivalents such as cudaStreamCreate, cudaStreamCreateWithFlags and cudaStreamCreateWithPriority. Passing an invalid handle will result in undefined behavior.

    • any of the special streams such as the NULL stream, CU_STREAM_LEGACY and CU_STREAM_PER_THREAD. The runtime API equivalents cudaStreamLegacy and cudaStreamPerThread respectively can also be used.

    Parameters:
    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE
    • streamId (unsigned long long) – The returned stream Id
    cuda.cuda.cuStreamGetCtx(hStream)

    Query the context associated with a stream.

    Returns the CUDA context that the stream is associated with.

    Note there is a later version of this API, cuStreamGetCtx_v2, which will supplant this version in CUDA 13.0. Until then, it is recommended to use cuStreamGetCtx_v2, as this version will return CUDA_ERROR_NOT_SUPPORTED for streams created via the API cuGreenCtxStreamCreate.

    The stream handle hStream can refer to any of the following:

    • a stream created via any of the CUDA driver APIs such as cuStreamCreate and cuStreamCreateWithPriority, or their runtime API equivalents such as cudaStreamCreate, cudaStreamCreateWithFlags and cudaStreamCreateWithPriority. Passing an invalid handle will result in undefined behavior.

    • any of the special streams such as the NULL stream, CU_STREAM_LEGACY and CU_STREAM_PER_THREAD. The runtime API equivalents cudaStreamLegacy and cudaStreamPerThread respectively can also be used.

    Parameters:
    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED
    • pctx (CUcontext) – Returned context associated with the stream
    cuda.cuda.cuStreamGetCtx_v2(hStream)

    Query the contexts associated with a stream.

    Returns the contexts that the stream is associated with.

    If the stream is associated with a green context, the API returns the green context in pGreenCtx and the primary context of the associated device in pCtx.

    If the stream is associated with a regular context, the API returns the regular context in pCtx and NULL in pGreenCtx.

    The stream handle hStream can refer to any of the following:

    • a stream created via any of the CUDA driver APIs such as cuStreamCreate, cuStreamCreateWithPriority and cuGreenCtxStreamCreate, or their runtime API equivalents such as cudaStreamCreate, cudaStreamCreateWithFlags and cudaStreamCreateWithPriority. Passing an invalid handle will result in undefined behavior.

    • any of the special streams such as the NULL stream, CU_STREAM_LEGACY and CU_STREAM_PER_THREAD. The runtime API equivalents cudaStreamLegacy and cudaStreamPerThread respectively can also be used.

    Parameters:
    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE
    • pCtx (CUcontext) – Returned regular context associated with the stream
    • pGreenCtx (CUgreenCtx) – Returned green context if the stream is associated with one, NULL otherwise
    cuda.cuda.cuStreamWaitEvent(hStream, hEvent, unsigned int Flags)

    Make a compute stream wait on an event.

    Makes all future work submitted to hStream wait for all work captured in hEvent. See cuEventRecord() for details on what is captured by an event. The synchronization will be performed efficiently on the device when applicable. hEvent may be from a different context or device than hStream.

    Valid values for Flags include:

    • CU_EVENT_WAIT_DEFAULT: Default event wait flag.

    • CU_EVENT_WAIT_EXTERNAL: Event is captured in the graph as an external event node when performing stream capture. This flag is invalid outside of stream capture.

    Parameters:
    • hStream (CUstream or cudaStream_t) – Stream to wait
    • hEvent (CUevent or cudaEvent_t) – Event to wait on
    • Flags (unsigned int) – Wait flags; see the valid values listed above

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE

    Return type:
    CUresult
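
    A sketch (illustrative; producer_stream, consumer_stream and a current context are assumed) of ordering one stream behind another with an event:

        from cuda import cuda

        err, event = cuda.cuEventCreate(
            cuda.CUevent_flags.CU_EVENT_DISABLE_TIMING.value)

        # Record what producer_stream has done so far...
        err, = cuda.cuEventRecord(event, producer_stream)
        # ...and make consumer_stream's future work wait for it.
        err, = cuda.cuStreamWaitEvent(consumer_stream, event,
                                      0)  # CU_EVENT_WAIT_DEFAULT
        err, = cuda.cuEventDestroy(event)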
    cuda.cuda.cuStreamAddCallback(hStream, callback, userData, unsigned int flags)

    Add a callback to a compute stream.

    Adds a callback to be called on the host after all currently enqueued items in the stream have completed. For each cuStreamAddCallback call, the callback will be executed exactly once. The callback will block later work in the stream until it is finished.

    The callback may be passed CUDA_SUCCESS or an error code. In the event of a device error, all subsequently executed callbacks will receive an appropriate CUresult.

    Callbacks must not make any CUDA API calls. Attempting to use a CUDA API will result in CUDA_ERROR_NOT_PERMITTED. Callbacks must not perform any synchronization that may depend on outstanding device work or other callbacks that are not mandated to run earlier. Callbacks without a mandated order (in independent streams) execute in undefined order and may be serialized.

    For the purposes of Unified Memory, callback execution makes a number of guarantees:

    • The callback stream is considered idle for the duration of the callback. Thus, for example, a callback may always use memory attached to the callback stream.

    • The start of execution of a callback has the same effect as synchronizing an event recorded in the same stream immediately prior to the callback. It thus synchronizes streams which have been “joined” prior to the callback.

    • Adding device work to any stream does not have the effect of making the stream active until all preceding host functions and stream callbacks have executed. Thus, for example, a callback might use global attached memory even if work has been added to another stream, if the work has been ordered behind the callback with an event.

    • Completion of a callback does not cause a stream to become active except as described above. The callback stream will remain idle if no device work follows the callback, and will remain idle across consecutive callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a callback at the end of the stream.

    Parameters:
    • hStream (CUstream or cudaStream_t) – Stream to add callback to
    • callback (CUstreamCallback) – The function to call once preceding stream operations are complete
    • userData (Any) – User specified data to be passed to the callback function
    • flags (unsigned int) – Reserved for future use, must be 0

    Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

    Return type:
    CUresult

    Notes

    This function is slated for eventual deprecation and removal. If you do not require the callback to execute in case of a device error, consider using cuLaunchHostFunc. Additionally, this function is not supported with cuStreamBeginCapture and cuStreamEndCapture, unlike cuLaunchHostFunc.
    -
cuda.cuda.cuStreamBeginCapture(hStream, mode: CUstreamCaptureMode)#

Begins graph capture on a stream.

Begin graph capture on hStream. When a stream is in capture mode, all operations pushed into the stream will not be executed, but will instead be captured into a graph, which will be returned via cuStreamEndCapture. Capture may not be initiated if stream is CU_STREAM_LEGACY. Capture must be ended on the same stream in which it was initiated, and it may only be initiated if the stream is not already in capture mode. The capture mode may be queried via cuStreamIsCapturing. A unique id representing the capture sequence may be queried via cuStreamGetCaptureInfo.

If mode is not CU_STREAM_CAPTURE_MODE_RELAXED, cuStreamEndCapture must be called on this stream from the same thread.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult

Notes

Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.

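The following is a minimal capture sketch using these Python bindings; it assumes a CUDA-capable device, and the check() helper is a hypothetical convenience, not part of the library. Error handling is reduced to asserts for brevity.

from cuda import cuda

def check(result):
    # Each binding call returns a tuple whose first element is a CUresult.
    err, *rest = result
    assert err == cuda.CUresult.CUDA_SUCCESS, err
    return rest

check(cuda.cuInit(0))
dev, = check(cuda.cuDeviceGet(0))
ctx, = check(cuda.cuDevicePrimaryCtxRetain(dev))
check(cuda.cuCtxSetCurrent(ctx))

stream, = check(cuda.cuStreamCreate(0))
check(cuda.cuStreamBeginCapture(
    stream, cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL))

status, = check(cuda.cuStreamIsCapturing(stream))
assert status == cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE

# ... enqueue async work (kernel launches, async memcpys) into `stream` here ...

graph, = check(cuda.cuStreamEndCapture(stream))  # returns the captured CUgraph
check(cuda.cuGraphDestroy(graph))
check(cuda.cuStreamDestroy(stream))
check(cuda.cuDevicePrimaryCtxRelease(dev))

Nothing submitted between the begin/end pair executes at submission time; it is recorded into the returned graph instead.
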
cuda.cuda.cuStreamBeginCaptureToGraph(hStream, hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], dependencyData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies, mode: CUstreamCaptureMode)#

Begins graph capture on a stream to an existing graph.

Begin graph capture on hStream, placing new nodes into an existing graph. When a stream is in capture mode, all operations pushed into the stream will not be executed, but will instead be captured into hGraph. The graph will not be instantiable until the user calls cuStreamEndCapture.

Capture may not be initiated if stream is CU_STREAM_LEGACY. Capture must be ended on the same stream in which it was initiated, and it may only be initiated if the stream is not already in capture mode. The capture mode may be queried via cuStreamIsCapturing. A unique id representing the capture sequence may be queried via cuStreamGetCaptureInfo.

If mode is not CU_STREAM_CAPTURE_MODE_RELAXED, cuStreamEndCapture must be called on this stream from the same thread.

Parameters:

• hStream (CUstream or cudaStream_t) – Stream in which to initiate capture.

• hGraph (CUgraph or cudaGraph_t) – Graph to capture into.

• dependencies (List[CUgraphNode]) – Dependencies of the first node captured in the stream. Can be NULL if numDependencies is 0.

• dependencyData (List[CUgraphEdgeData]) – Optional array of data associated with each dependency.

• numDependencies (size_t) – Number of dependencies.

• mode (CUstreamCaptureMode) – Controls the interaction of this capture sequence with other API calls that are potentially unsafe. For more details see cuThreadExchangeStreamCaptureMode.

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult

Notes

Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.

cuda.cuda.cuThreadExchangeStreamCaptureMode(mode: CUstreamCaptureMode)#

Swaps the stream capture interaction mode for a thread.

Sets the calling thread’s stream capture interaction mode to the value contained in *mode, and overwrites *mode with the previous mode for the thread. To facilitate deterministic behavior across function or module boundaries, callers are encouraged to use this API in a push-pop fashion:

View CUDA Toolkit Documentation for a C++ code example

During stream capture (see cuStreamBeginCapture), some actions, such as a call to cudaMalloc, may be unsafe. In the case of cudaMalloc, the operation is not enqueued asynchronously to a stream, and is not observed by stream capture. Therefore, if the sequence of operations captured via cuStreamBeginCapture depended on the allocation being replayed whenever the graph is launched, the captured graph would be invalid.

Therefore, stream capture places restrictions on API calls that can be made within or concurrently to a cuStreamBeginCapture-cuStreamEndCapture sequence. This behavior can be controlled via this API and flags to cuStreamBeginCapture.

A thread’s mode is one of the following:

• CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the local thread has an ongoing capture sequence that was not initiated with CU_STREAM_CAPTURE_MODE_RELAXED at cuStreamBeginCapture, or if any other thread has a concurrent capture sequence initiated with CU_STREAM_CAPTURE_MODE_GLOBAL, this thread is prohibited from potentially unsafe API calls.

• CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an ongoing capture sequence not initiated with CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited from potentially unsafe API calls. Concurrent capture sequences in other threads are ignored.

• CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited from potentially unsafe API calls. Note that the thread is still prohibited from API calls which necessarily conflict with stream capture, for example, attempting cuEventQuery on an event that was last recorded inside a capture sequence.

Parameters:

mode (CUstreamCaptureMode) – Pointer to mode value to swap with the current mode

Returns:

See also

cuStreamBeginCapture

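A Python equivalent of the push-pop pattern encouraged above can be written as a context manager. This is a sketch, assuming the binding returns the previous mode as the second element of the result tuple; capture_mode() is a hypothetical helper, not part of the library.

from contextlib import contextmanager
from cuda import cuda

@contextmanager
def capture_mode(mode):
    # "Push": install the requested mode, remembering the old one.
    err, old_mode = cuda.cuThreadExchangeStreamCaptureMode(mode)
    assert err == cuda.CUresult.CUDA_SUCCESS
    try:
        yield
    finally:
        # "Pop": restore the previous mode on the way out.
        err, _ = cuda.cuThreadExchangeStreamCaptureMode(old_mode)
        assert err == cuda.CUresult.CUDA_SUCCESS

# Usage: allow potentially unsafe calls in this thread only for this block.
# with capture_mode(cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_RELAXED):
#     ...
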
cuda.cuda.cuStreamEndCapture(hStream)#

Ends capture on a stream, returning the captured graph.

End capture on hStream, returning the captured graph via phGraph. Capture must have been initiated on hStream via a call to cuStreamBeginCapture. If capture was invalidated, due to a violation of the rules of stream capture, then a NULL graph will be returned.

If the mode argument to cuStreamBeginCapture was not CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the same thread as cuStreamBeginCapture.

Parameters:

hStream (CUstream or cudaStream_t) – Stream to query

Returns:

cuda.cuda.cuStreamIsCapturing(hStream)#

Returns a stream’s capture status.

Return the capture status of hStream via captureStatus. After a successful call, *captureStatus will contain one of the following:

Note that, if this is called on CU_STREAM_LEGACY (the “null stream”) while a blocking stream in the same context is capturing, it will return CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and *captureStatus is unspecified after the call. The blocking stream capture is not invalidated.

When a blocking stream is capturing, the legacy stream is in an unusable state until the blocking stream capture is terminated. The legacy stream is not supported for stream capture, but attempted use would have an implicit dependency on the capturing stream(s).

Parameters:

hStream (CUstream or cudaStream_t) – Stream to query

Returns:

cuda.cuda.cuStreamGetCaptureInfo(hStream)#

Query a stream’s capture state.

Query stream state related to stream capture.

If called on CU_STREAM_LEGACY (the “null stream”) while a stream not created with CU_STREAM_NON_BLOCKING is capturing, returns CUDA_ERROR_STREAM_CAPTURE_IMPLICIT.

Valid data (other than capture status) is returned only if both of the following are true:

Parameters:

hStream (CUstream or cudaStream_t) – The stream to query

Returns:

• CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_STREAM_CAPTURE_IMPLICIT

• captureStatus_out (CUstreamCaptureStatus) – Location to return the capture status of the stream; required

• id_out (cuuint64_t) – Optional location to return an id for the capture sequence, which is unique over the lifetime of the process

• graph_out (CUgraph) – Optional location to return the graph being captured into. All operations other than destroy and node removal are permitted on the graph while the capture sequence is in progress. This API does not transfer ownership of the graph, which is transferred or destroyed at cuStreamEndCapture. Note that the graph handle may be invalidated before end of capture for certain errors. Nodes that are or become unreachable from the original stream at cuStreamEndCapture due to direct actions on the graph do not trigger CUDA_ERROR_STREAM_CAPTURE_UNJOINED.

• dependencies_out (List[CUgraphNode]) – Optional location to store a pointer to an array of nodes. The next node to be captured in the stream will depend on this set of nodes, absent operations such as event wait which modify this set. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated. The node handles may be copied out and are valid until they or the graph is destroyed. The driver-owned array may also be passed directly to APIs that operate on the graph (not the stream) without copying.

• numDependencies_out (int) – Optional location to store the size of the array returned in dependencies_out.

See also

cuStreamGetCaptureInfo_v3, cuStreamBeginCapture, cuStreamIsCapturing, cuStreamUpdateCaptureDependencies

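As a sketch of querying capture state mid-capture with these bindings: `stream` is assumed to be a CUstream currently in capture mode, and the unpacking below assumes the six return values arrive in the documented order.

from cuda import cuda

err, status, seq_id, graph, deps, num_deps = cuda.cuStreamGetCaptureInfo(stream)
assert err == cuda.CUresult.CUDA_SUCCESS

if status == cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE:
    # seq_id is unique per capture sequence over the process lifetime;
    # deps lists the nodes the next captured node will depend on.
    print(f"capture {int(seq_id)}: {num_deps} pending dependencies")
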
cuda.cuda.cuStreamGetCaptureInfo_v3(hStream)#

Query a stream’s capture state (12.3+)

Query stream state related to stream capture.

If called on CU_STREAM_LEGACY (the “null stream”) while a stream not created with CU_STREAM_NON_BLOCKING is capturing, returns CUDA_ERROR_STREAM_CAPTURE_IMPLICIT.

Valid data (other than capture status) is returned only if both of the following are true:

If edgeData_out is non-NULL then dependencies_out must be as well. If dependencies_out is non-NULL and edgeData_out is NULL, but there is non-zero edge data for one or more of the current stream dependencies, the call will return CUDA_ERROR_LOSSY_QUERY.

Parameters:

hStream (CUstream or cudaStream_t) – The stream to query

Returns:

• CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_STREAM_CAPTURE_IMPLICIT, CUDA_ERROR_LOSSY_QUERY

• captureStatus_out (CUstreamCaptureStatus) – Location to return the capture status of the stream; required

• id_out (cuuint64_t) – Optional location to return an id for the capture sequence, which is unique over the lifetime of the process

• graph_out (CUgraph) – Optional location to return the graph being captured into. All operations other than destroy and node removal are permitted on the graph while the capture sequence is in progress. This API does not transfer ownership of the graph, which is transferred or destroyed at cuStreamEndCapture. Note that the graph handle may be invalidated before end of capture for certain errors. Nodes that are or become unreachable from the original stream at cuStreamEndCapture due to direct actions on the graph do not trigger CUDA_ERROR_STREAM_CAPTURE_UNJOINED.

• dependencies_out (List[CUgraphNode]) – Optional location to store a pointer to an array of nodes. The next node to be captured in the stream will depend on this set of nodes, absent operations such as event wait which modify this set. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated. The node handles may be copied out and are valid until they or the graph is destroyed. The driver-owned array may also be passed directly to APIs that operate on the graph (not the stream) without copying.

• edgeData_out (List[CUgraphEdgeData]) – Optional location to store a pointer to an array of graph edge data. This array parallels dependencies_out; the next node to be added has an edge to dependencies_out[i] with annotation edgeData_out[i] for each i. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated.

• numDependencies_out (int) – Optional location to store the size of the array returned in dependencies_out.

See also

cuStreamGetCaptureInfo, cuStreamBeginCapture, cuStreamIsCapturing, cuStreamUpdateCaptureDependencies

cuda.cuda.cuStreamUpdateCaptureDependencies(hStream, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, unsigned int flags)#

Update the set of dependencies in a capturing stream (11.3+)

Modifies the dependency set of a capturing stream. The dependency set is the set of nodes that the next captured node in the stream will depend on.

Valid flags are CU_STREAM_ADD_CAPTURE_DEPENDENCIES and CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether the set passed to the API is added to the existing set or replaces it. A flags value of 0 defaults to CU_STREAM_ADD_CAPTURE_DEPENDENCIES.

Nodes that are removed from the dependency set via this API do not result in CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are unreachable from the stream at cuStreamEndCapture.

Returns CUDA_ERROR_ILLEGAL_STATE if the stream is not capturing.

This API is new in CUDA 11.3. Developers requiring compatibility across minor versions to CUDA 11.0 should not use this API or provide a fallback.

Parameters:

• hStream (CUstream or cudaStream_t) – The stream to update

• dependencies (List[CUgraphNode]) – The set of dependencies to add

• numDependencies (size_t) – The size of the dependencies array

• flags (unsigned int) – See above

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_ILLEGAL_STATE

Return type:

CUresult

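A sketch of pairing this call with cuStreamGetCaptureInfo: re-submit the currently reported node set in "set" mode, replacing rather than extending the dependency set. It assumes `stream` is capturing and that the flag enum is exposed as CUstreamUpdateCaptureDependencies_flags; both are assumptions, not verified against a particular binding version.

from cuda import cuda

err, status, _, _, deps, num_deps = cuda.cuStreamGetCaptureInfo(stream)
assert err == cuda.CUresult.CUDA_SUCCESS

# Replace (rather than extend) the dependency set with the same nodes.
err, = cuda.cuStreamUpdateCaptureDependencies(
    stream, deps, num_deps,
    cuda.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_SET_CAPTURE_DEPENDENCIES.value)
assert err == cuda.CUresult.CUDA_SUCCESS
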
cuda.cuda.cuStreamUpdateCaptureDependencies_v2(hStream, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], dependencyData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies, unsigned int flags)#

Update the set of dependencies in a capturing stream (12.3+)

Modifies the dependency set of a capturing stream. The dependency set is the set of nodes that the next captured node in the stream will depend on, along with the edge data for those dependencies.

Valid flags are CU_STREAM_ADD_CAPTURE_DEPENDENCIES and CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether the set passed to the API is added to the existing set or replaces it. A flags value of 0 defaults to CU_STREAM_ADD_CAPTURE_DEPENDENCIES.

Nodes that are removed from the dependency set via this API do not result in CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are unreachable from the stream at cuStreamEndCapture.

Returns CUDA_ERROR_ILLEGAL_STATE if the stream is not capturing.

Parameters:

• hStream (CUstream or cudaStream_t) – The stream to update

• dependencies (List[CUgraphNode]) – The set of dependencies to add

• dependencyData (List[CUgraphEdgeData]) – Optional array of data associated with each dependency.

• numDependencies (size_t) – The size of the dependencies array

• flags (unsigned int) – See above

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_ILLEGAL_STATE

Return type:

CUresult

cuda.cuda.cuStreamAttachMemAsync(hStream, dptr, size_t length, unsigned int flags)#

Attach memory to a stream asynchronously.

Enqueues an operation in hStream to specify stream association of length bytes of memory starting from dptr. This function is a stream-ordered operation, meaning that it is dependent on, and will only take effect when, previous work in stream has completed. Any previous association is automatically replaced.

dptr must point to one of the following types of memories:

• managed memory declared using the managed keyword or allocated with cuMemAllocManaged.

• a valid host-accessible region of system-allocated pageable memory. This type of memory may only be specified if the device associated with the stream reports a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

For managed allocations, length must be either zero or the entire allocation’s size. Both indicate that the entire allocation’s stream association is being changed. Currently, it is not possible to change stream association for a portion of a managed allocation.

For pageable host allocations, length must be non-zero.

The stream association is specified using flags which must be one of CUmemAttach_flags. If the CU_MEM_ATTACH_GLOBAL flag is specified, the memory can be accessed by any stream on any device. If the CU_MEM_ATTACH_HOST flag is specified, the program makes a guarantee that it won’t access the memory on the device from any stream on a device that has a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If the CU_MEM_ATTACH_SINGLE flag is specified and hStream is associated with a device that has a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, the program makes a guarantee that it will only access the memory on the device from hStream. It is illegal to attach singly to the NULL stream, because the NULL stream is a virtual global stream and not a specific stream. An error will be returned in this case.

When memory is associated with a single stream, the Unified Memory system will allow CPU access to this memory region so long as all operations in hStream have completed, regardless of whether other streams are active. In effect, this constrains exclusive ownership of the managed memory region by an active GPU to per-stream activity instead of whole-GPU activity.

Accessing memory on the device from streams that are not associated with it will produce undefined results. No error checking is performed by the Unified Memory system to ensure that kernels launched into other streams do not access this region.

It is a program’s responsibility to order calls to cuStreamAttachMemAsync via events, synchronization or other means to ensure legal access to memory at all times. Data visibility and coherency will be changed appropriately for all kernels which follow a stream-association change.

If hStream is destroyed while data is associated with it, the association is removed and the association reverts to the default visibility of the allocation as specified at cuMemAllocManaged. For managed variables, the default association is always CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won’t happen until all work in the stream has completed.

Parameters:

• hStream (CUstream or cudaStream_t) – Stream in which to enqueue the attach operation

• dptr (CUdeviceptr) – Pointer to memory (must be a pointer to managed memory or to a valid host-accessible region of system-allocated pageable memory)

• length (size_t) – Length of memory

• flags (unsigned int) – Must be one of CUmemAttach_flags

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult

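A minimal sketch of the single-stream attach pattern described above, assuming an active context on a device without full concurrent managed access; error handling is elided.

from cuda import cuda

nbytes = 1 << 20
err, dptr = cuda.cuMemAllocManaged(
    nbytes, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value)
assert err == cuda.CUresult.CUDA_SUCCESS

err, stream = cuda.cuStreamCreate(0)

# length == 0 means "the entire allocation"; CU_MEM_ATTACH_SINGLE restricts
# device access to work submitted in `stream`.
err, = cuda.cuStreamAttachMemAsync(
    stream, dptr, 0, cuda.CUmemAttach_flags.CU_MEM_ATTACH_SINGLE.value)
err, = cuda.cuStreamSynchronize(stream)  # after this, CPU access is legal
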
cuda.cuda.cuStreamQuery(hStream)#

Determine status of a compute stream.

Returns CUDA_SUCCESS if all operations in the stream specified by hStream have completed, or CUDA_ERROR_NOT_READY if not.

For the purposes of Unified Memory, a return value of CUDA_SUCCESS is equivalent to having called cuStreamSynchronize().

Parameters:

hStream (CUstream or cudaStream_t) – Stream to query status of

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_READY

Return type:

CUresult

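A small polling sketch: CUDA_ERROR_NOT_READY is an expected status here, not a failure to raise on. `stream` is assumed to hold previously enqueued async work.

import time
from cuda import cuda

while True:
    err, = cuda.cuStreamQuery(stream)
    if err == cuda.CUresult.CUDA_SUCCESS:
        break                      # all enqueued work is done
    assert err == cuda.CUresult.CUDA_ERROR_NOT_READY, err
    time.sleep(0.001)              # do other host work instead of blocking
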
cuda.cuda.cuStreamSynchronize(hStream)#

Wait until a stream’s tasks are completed.

Waits until the device has completed all operations in the stream specified by hStream. If the context was created with the CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the stream is finished with all of its tasks.

Notes

This function uses standard default (NULL) stream semantics; see the note on the NULL stream in the CUDA Toolkit documentation.

cuda.cuda.cuStreamDestroy(hStream)#

Destroys a stream.

Destroys the stream specified by hStream.

In case the device is still doing work in the stream hStream when cuStreamDestroy() is called, the function will return immediately and the resources associated with hStream will be released automatically once the device has completed all work in hStream.

Parameters:

hStream (CUstream or cudaStream_t) – Stream to destroy

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:

CUresult

cuda.cuda.cuStreamCopyAttributes(dst, src)#

Copies attributes from source stream to destination stream.

Copies attributes from source stream src to destination stream dst. Both streams must have the same context.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult

See also

CUaccessPolicyWindow

cuda.cuda.cuStreamGetAttribute(hStream, attr: CUstreamAttrID)#

Queries stream attribute.

Queries attribute attr from hStream and stores it in corresponding member of value_out.

Parameters:

Returns:

See also

CUaccessPolicyWindow

cuda.cuda.cuStreamSetAttribute(hStream, attr: CUstreamAttrID, CUstreamAttrValue value: Optional[CUstreamAttrValue])#

Sets stream attribute.

Sets attribute attr on hStream from corresponding attribute of value. The updated attribute will be applied to subsequent work submitted to the stream. It will not affect previously submitted work.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:

CUresult

See also

CUaccessPolicyWindow

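A hedged sketch of the get/modify/set round trip for a stream attribute. It assumes the CUstreamAttrValue union exposes the field name syncPolicy in the Python bindings; treat the field spelling as an assumption.

from cuda import cuda

attr_id = cuda.CUstreamAttrID.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY
err, value = cuda.cuStreamGetAttribute(stream, attr_id)
assert err == cuda.CUresult.CUDA_SUCCESS

# Ask the scheduler to favor spinning for work submitted after this point.
value.syncPolicy = cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_SPIN
err, = cuda.cuStreamSetAttribute(stream, attr_id, value)
assert err == cuda.CUresult.CUDA_SUCCESS
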

Event Management#

This section describes the event management functions of the low-level CUDA driver application programming interface.

cuda.cuda.cuEventCreate(unsigned int Flags)#

Creates an event.

Creates an event *phEvent for the current context with the flags specified via Flags. Valid flags include:

Parameters:

Flags (unsigned int) – Event creation flags

Returns:

cuda.cuda.cuEventRecord(hEvent, hStream)#

Records an event.

Captures in hEvent the contents of hStream at the time of this call. hEvent and hStream must be from the same context otherwise CUDA_ERROR_INVALID_HANDLE is returned. Calls such as cuEventQuery() or cuStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hStream after this call do not modify hEvent. See note on default stream behavior for what is captured in the default case.

cuEventRecord() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cuStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cuEventRecord(). Before the first call to cuEventRecord(), an event represents an empty set of work, so for example cuEventQuery() would return CUDA_SUCCESS.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult

cuda.cuda.cuEventRecordWithFlags(hEvent, hStream, unsigned int flags)#

Records an event.

Captures in hEvent the contents of hStream at the time of this call. hEvent and hStream must be from the same context otherwise CUDA_ERROR_INVALID_HANDLE is returned. Calls such as cuEventQuery() or cuStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hStream after this call do not modify hEvent. See note on default stream behavior for what is captured in the default case.

cuEventRecordWithFlags() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cuStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cuEventRecordWithFlags(). Before the first call to cuEventRecordWithFlags(), an event represents an empty set of work, so for example cuEventQuery() would return CUDA_SUCCESS.

flags include:

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult

cuda.cuda.cuEventQuery(hEvent)#

Queries an event’s status.

Queries the status of all work currently captured by hEvent. See cuEventRecord() for details on what is captured by an event.

Returns CUDA_SUCCESS if all captured work has been completed, or CUDA_ERROR_NOT_READY if any captured work is incomplete.

For the purposes of Unified Memory, a return value of CUDA_SUCCESS is equivalent to having called cuEventSynchronize().

Parameters:

hEvent (CUevent or cudaEvent_t) – Event to query

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_READY

Return type:

CUresult

cuda.cuda.cuEventSynchronize(hEvent)#

Waits for an event to complete.

Waits until the completion of all work currently captured in hEvent. See cuEventRecord() for details on what is captured by an event.

Waiting for an event that was created with the CU_EVENT_BLOCKING_SYNC flag will cause the calling CPU thread to block until the event has been completed by the device. If the CU_EVENT_BLOCKING_SYNC flag has not been set, then the CPU thread will busy-wait until the event has been completed by the device.

Parameters:

hEvent (CUevent or cudaEvent_t) – Event to wait for

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE

Return type:

CUresult

cuda.cuda.cuEventDestroy(hEvent)#

Destroys an event.

Destroys the event specified by hEvent.

An event may be destroyed before it is complete (i.e., while cuEventQuery() would return CUDA_ERROR_NOT_READY). In this case, the call does not block on completion of the event, and any associated resources will automatically be released asynchronously at completion.

Parameters:

hEvent (CUevent or cudaEvent_t) – Event to destroy

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE

Return type:

CUresult

cuda.cuda.cuEventElapsedTime(hStart, hEnd)#

Computes the elapsed time between two events.

Computes the elapsed time between two events (in milliseconds with a resolution of around 0.5 microseconds).

If either event was last recorded in a non-NULL stream, the resulting time may be greater than expected (even if both used the same stream handle). This happens because the cuEventRecord() operation takes place asynchronously and there is no guarantee that the measured latency is actually just between the two events. Any number of other different stream operations could execute in between the two measured events, thus altering the timing in a significant way.

If cuEventRecord() has not been called on either event then CUDA_ERROR_INVALID_HANDLE is returned. If cuEventRecord() has been called on both events but one or both of them has not yet been completed (that is, cuEventQuery() would return CUDA_ERROR_NOT_READY on at least one of the events), CUDA_ERROR_NOT_READY is returned. If either event was created with the CU_EVENT_DISABLE_TIMING flag, then this function will return CUDA_ERROR_INVALID_HANDLE.

Parameters:

Returns:

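A timing sketch using the event APIs above; it assumes an active context and a `stream` with some async work between the two records. Error checks are shortened to asserts.

from cuda import cuda

err, start = cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT.value)
err, stop = cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT.value)

err, = cuda.cuEventRecord(start, stream)
# ... enqueue kernels / async copies into `stream` here ...
err, = cuda.cuEventRecord(stop, stream)

err, = cuda.cuEventSynchronize(stop)     # wait for the captured work
err, ms = cuda.cuEventElapsedTime(start, stop)
print(f"elapsed: {ms:.3f} ms")

cuda.cuEventDestroy(start)
cuda.cuEventDestroy(stop)

Note that creating either event with CU_EVENT_DISABLE_TIMING would make the elapsed-time call fail, as described above.
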
External Resource Interoperability#

This section describes the external resource interoperability functions of the low-level CUDA driver application programming interface.

cuda.cuda.cuImportExternalMemory(CUDA_EXTERNAL_MEMORY_HANDLE_DESC memHandleDesc: Optional[CUDA_EXTERNAL_MEMORY_HANDLE_DESC])#

Imports an external memory object.

Imports an externally allocated memory object and returns a handle to that in extMem_out.

The properties of the handle being imported must be described in memHandleDesc. The CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where type specifies the type of handle being imported. CUexternalMemoryHandleType is defined as:

View CUDA Toolkit Documentation for a C++ code example

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid file descriptor referencing a memory object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one of CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a memory object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a memory object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must be non-NULL and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the memory object are destroyed.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one of CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Heap object. This handle holds a reference to the underlying object. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D12Heap object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one of CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Resource object. This handle holds a reference to the underlying object. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D12Resource object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must represent a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to a ID3D11Resource object. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D11Resource object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must represent a valid shared KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to a ID3D11Resource object and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must be NULL.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::nvSciBufObject must be non-NULL and reference a valid NvSciBuf object. If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the application must use cuWaitExternalSemaphoresAsync or cuSignalExternalSemaphoresAsync as appropriate barriers to maintain coherence between CUDA and the other drivers. See CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC and CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC for memory synchronization.

The size of the memory object must be specified in size.

Specifying the flag CUDA_EXTERNAL_MEMORY_DEDICATED in flags indicates that the resource is a dedicated resource. The definition of what a dedicated resource is lies outside the scope of this extension. This flag must be set if type is one of the following: CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT

Parameters:

memHandleDesc (CUDA_EXTERNAL_MEMORY_HANDLE_DESC) – Memory import handle descriptor

Returns:

Notes

If the Vulkan memory imported into CUDA is mapped on the CPU then the application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges as well as appropriate Vulkan pipeline barriers to maintain coherence between CPU and GPU. For more information on these APIs, please refer to the “Synchronization and Cache Control” chapter of the Vulkan specification.

cuda.cuda.cuExternalMemoryGetMappedBuffer(extMem, CUDA_EXTERNAL_MEMORY_BUFFER_DESC bufferDesc: Optional[CUDA_EXTERNAL_MEMORY_BUFFER_DESC])#

Maps a buffer onto an imported memory object.

Maps a buffer onto an imported memory object and returns a device pointer in devPtr.

The properties of the buffer being mapped must be described in bufferDesc. The CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where offset is the offset in the memory object where the buffer’s base address is. size is the size of the buffer. flags must be zero.

The offset and size have to be suitably aligned to match the requirements of the external API. Mapping two buffers whose ranges overlap may or may not result in the same virtual address being returned for the overlapped portion. In such cases, the application must ensure that all accesses to that region from the GPU are volatile. Otherwise writes made via one address are not guaranteed to be visible via the other address, even if they’re issued by the same thread. It is recommended that applications map the combined range instead of mapping separate buffers and then apply the appropriate offsets to the returned pointer to derive the individual buffers.

The returned pointer devPtr must be freed using cuMemFree.

Parameters:

Returns:

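The following hedged sketch chains cuImportExternalMemory and cuExternalMemoryGetMappedBuffer for an opaque POSIX file descriptor. Here `fd` and `nbytes` are assumptions supplied by the exporting API (e.g. Vulkan), and the nested struct field spellings follow the descriptors documented above.

from cuda import cuda

mem_desc = cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC()
mem_desc.type = cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD
mem_desc.handle.fd = fd          # ownership of fd passes to the CUDA driver
mem_desc.size = nbytes

err, ext_mem = cuda.cuImportExternalMemory(mem_desc)
assert err == cuda.CUresult.CUDA_SUCCESS

buf_desc = cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC()
buf_desc.offset = 0
buf_desc.size = nbytes
buf_desc.flags = 0               # must be zero

err, dptr = cuda.cuExternalMemoryGetMappedBuffer(ext_mem, buf_desc)
assert err == cuda.CUresult.CUDA_SUCCESS

# ... use dptr in kernels / copies ...

cuda.cuMemFree(dptr)             # mapped buffers are freed with cuMemFree
cuda.cuDestroyExternalMemory(ext_mem)
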
cuda.cuda.cuExternalMemoryGetMappedMipmappedArray(extMem, CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC mipmapDesc: Optional[CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC])#

Maps a CUDA mipmapped array onto an external memory object.

Maps a CUDA mipmapped array onto an external object and returns a handle to it in mipmap.

The properties of the CUDA mipmapped array being mapped must be described in mipmapDesc. The structure CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where offset is the offset in the memory object where the base level of the mipmap chain is. arrayDesc describes the format, dimensions and type of the base level of the mipmap chain. For further details on these parameters, please refer to the documentation for cuMipmappedArrayCreate. Note that if the mipmapped array is bound as a color target in the graphics API, then the flag CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags. numLevels specifies the total number of levels in the mipmap chain.

If extMem was imported from a handle of type CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then numLevels must be equal to 1.

The returned CUDA mipmapped array must be freed using cuMipmappedArrayDestroy.

Parameters:

Returns:

cuda.cuda.cuDestroyExternalMemory(extMem)#

Destroys an external memory object.

Destroys the specified external memory object. Any existing buffers and CUDA mipmapped arrays mapped onto this object must no longer be used and must be explicitly freed using cuMemFree and cuMipmappedArrayDestroy respectively.

Parameters:

extMem (CUexternalMemory) – External memory object to be destroyed

Returns:

CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE

Return type:

CUresult

cuda.cuda.cuImportExternalSemaphore(CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC semHandleDesc: Optional[CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC])#

Imports an external semaphore.

Imports an externally allocated synchronization object and returns a handle to that in extSem_out.

The properties of the handle being imported must be described in semHandleDesc. The CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where type specifies the type of handle being imported. CUexternalSemaphoreHandleType is defined as:

View CUDA Toolkit Documentation for a C++ code example

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one of CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must be non-NULL and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the synchronization object are destroyed.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one of CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Fence object. This handle holds a reference to the underlying object. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D12Fence object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle represents a valid shared NT handle that is returned by ID3D11Fence::CreateSharedHandle. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D11Fence object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::nvSciSyncObj represents a valid NvSciSyncObj.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle represents a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to a IDXGIKeyedMutex object. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid IDXGIKeyedMutex object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle represents a valid shared KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to a IDXGIKeyedMutex object and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32, then exactly one of CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object.

Parameters:

semHandleDesc (CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC) – Semaphore import handle descriptor

Returns:

cuda.cuda.cuSignalExternalSemaphoresAsync(extSemArray: Optional[Tuple[CUexternalSemaphore] | List[CUexternalSemaphore]], paramsArray: Optional[Tuple[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS] | List[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS]], unsigned int numExtSems, stream)#

Signals a set of external semaphore objects.

Enqueues a signal operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

The exact semantics of signaling a semaphore depends on the type of the object.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT then signaling the semaphore will set it to the signaled state.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 then the semaphore will be set to the value specified in CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value.

If the semaphore object is of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC this API sets CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence to a value that can be used by subsequent waiters of the same NvSciSync object to order operations with those currently submitted in stream. Such an update will overwrite previous contents of CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence. By default, signaling such an external semaphore object causes appropriate memory synchronization operations to be performed over all external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for semaphore object of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_SIGNAL, this API will return CUDA_ERROR_NOT_SUPPORTED. NvSciSyncFence associated with semaphore object of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC can be deterministic. For this the NvSciSyncAttrList used to create the semaphore object must have value of NvSciSyncAttrKey_RequireDeterministicFences key set to true. Deterministic fences allow users to enqueue a wait over the semaphore object even before corresponding signal is enqueued. For such a semaphore object, CUDA guarantees that each signal operation will increment the fence value by ‘1’. Users are expected to track count of signals enqueued on the semaphore object and insert waits accordingly. When such a semaphore object is signaled from multiple streams, due to concurrent stream execution, it is possible that the order in which the semaphore gets signaled is indeterministic. This could lead to waiters of the semaphore getting unblocked incorrectly. Users are expected to handle such situations, either by not using the same semaphore object with deterministic fence support enabled in different streams or by adding explicit dependency amongst such streams so that the semaphore is signaled in order.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT then the keyed mutex will be released with the key specified in CUDA_EXTERNAL_SEMAPHORE_PARAMS::params::keyedmutex::key.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult

cuda.cuda.cuWaitExternalSemaphoresAsync(extSemArray: Optional[Tuple[CUexternalSemaphore] | List[CUexternalSemaphore]], paramsArray: Optional[Tuple[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS] | List[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS]], unsigned int numExtSems, stream)#

Waits on a set of external semaphore objects.

Enqueues a wait operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

The exact semantics of waiting on a semaphore depends on the type of the object.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT then waiting on the semaphore will wait until the semaphore reaches the signaled state. The semaphore will then be reset to the unsignaled state. Therefore for every signal operation, there can only be one wait operation.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 then waiting on the semaphore will wait until the value of the semaphore is greater than or equal to CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value.

If the semaphore object is of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC then, waiting on the semaphore will wait until the CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence is signaled by the signaler of the NvSciSyncObj that was associated with this semaphore object. By default, waiting on such an external semaphore object causes appropriate memory synchronization operations to be performed over all external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for semaphore object of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_WAIT, this API will return CUDA_ERROR_NOT_SUPPORTED.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT then the keyed mutex will be acquired when it is released with the key specified in CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::key or until the timeout specified by CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::timeoutMs has lapsed. The timeout interval can either be a finite value specified in milliseconds or an infinite value. In case an infinite value is specified the timeout never elapses. The windows INFINITE macro must be used to specify infinite timeout.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_TIMEOUT

Return type:

CUresult

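A hedged signal/wait sketch for an imported timeline semaphore; `ext_sem` is assumed to come from cuImportExternalSemaphore and `stream` is a CUstream. Arrays are passed as Python lists, per the signatures above.

from cuda import cuda

sig = cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS()
sig.params.fence.value = 1       # timeline value to signal

err, = cuda.cuSignalExternalSemaphoresAsync([ext_sem], [sig], 1, stream)
assert err == cuda.CUresult.CUDA_SUCCESS

wait = cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS()
wait.params.fence.value = 1      # block the stream until the value is reached

err, = cuda.cuWaitExternalSemaphoresAsync([ext_sem], [wait], 1, stream)
assert err == cuda.CUresult.CUDA_SUCCESS
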
cuda.cuda.cuDestroyExternalSemaphore(extSem)#

Destroys an external semaphore.

Destroys an external semaphore object and releases any references to the underlying resource. Any outstanding signals or waits must have completed before the semaphore is destroyed.

Parameters:

extSem (CUexternalSemaphore) – External semaphore to be destroyed

Returns:

CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE

Return type:

CUresult


Stream Memory Operations

This section describes the stream memory operations of the low-level CUDA driver application programming interface.

Support for the CU_STREAM_WAIT_VALUE_NOR flag can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2.

Support for the cuStreamWriteValue64() and cuStreamWaitValue64() functions, as well as for the CU_STREAM_MEM_OP_WAIT_VALUE_64 and CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.

Support for both CU_STREAM_WAIT_VALUE_FLUSH and CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform hardware features and can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.

Note that all memory pointers passed as parameters to these operations are device pointers. Where necessary, a device pointer should be obtained, for example with cuMemHostGetDevicePointer().

None of the operations accepts pointers to managed memory buffers (cuMemAllocManaged).

Warning: Improper use of these APIs may deadlock the application. Synchronization ordering established through these APIs is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by these APIs should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order.
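As a quick illustration of these capability queries through the Python bindings, a minimal sketch (assuming cuInit has already succeeded; each binding call returns a tuple with the CUresult first):

    from cuda import cuda

    err, dev = cuda.cuDeviceGet(0)
    # Each attribute query returns (CUresult, int); non-zero means supported.
    err, can_nor = cuda.cuDeviceGetAttribute(
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2, dev)
    err, can_64bit = cuda.cuDeviceGetAttribute(
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS, dev)
    err, can_flush = cuda.cuDeviceGetAttribute(
        cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES, dev)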
cuda.cuda.cuStreamWaitValue32(stream, addr, value, unsigned int flags)

Wait on a memory location.

Enqueues a synchronization of the stream on the given memory location. Work ordered after the operation will block until the given condition on the memory is satisfied. By default, the condition is to wait for (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal. Other condition types can be specified via flags.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer(). This function cannot be used with managed memory (cuMemAllocManaged).

Support for CU_STREAM_WAIT_VALUE_NOR can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult

Notes

Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order.
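A minimal sketch of the cuMemHostRegister()/cuMemHostGetDevicePointer() pattern described above, assuming a current context and using numpy for the host allocation (error checking elided for brevity):

    import numpy as np
    from cuda import cuda

    flag = np.zeros(1, dtype=np.uint32)  # host word the stream will wait on
    err, = cuda.cuMemHostRegister(flag.ctypes.data, flag.nbytes, 0)
    err, dptr = cuda.cuMemHostGetDevicePointer(flag.ctypes.data, 0)

    err, stream = cuda.cuStreamCreate(0)
    # Default condition (flags=0): block the stream until (int32_t)(*addr - 1) >= 0.
    err, = cuda.cuStreamWaitValue32(stream, dptr, cuda.cuuint32_t(1), 0)
    # ... enqueue dependent work on `stream`, then release it from the host:
    flag[0] = 1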
cuda.cuda.cuStreamWaitValue64(stream, addr, value, unsigned int flags)

Wait on a memory location.

Enqueues a synchronization of the stream on the given memory location. Work ordered after the operation will block until the given condition on the memory is satisfied. By default, the condition is to wait for (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal. Other condition types can be specified via flags.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer().

Support for this can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult

Notes

Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order.
cuda.cuda.cuStreamWriteValue32(stream, addr, value, unsigned int flags)

Write a value to memory.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer(). This function cannot be used with managed memory (cuMemAllocManaged).

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult
cuda.cuda.cuStreamWriteValue64(stream, addr, value, unsigned int flags)

Write a value to memory.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer().

Support for this can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult
cuda.cuda.cuStreamBatchMemOp(stream, unsigned int count, paramArray: Optional[Tuple[CUstreamBatchMemOpParams] | List[CUstreamBatchMemOpParams]], unsigned int flags)

Batch operations to synchronize the stream via memory operations.

This is a batch version of cuStreamWaitValue32() and cuStreamWriteValue32(). Batching operations may avoid some performance overhead in both the API call and the device execution versus adding them to the stream in separate API calls. The operations are enqueued in the order they appear in the array.

See CUstreamBatchMemOpType for the full set of supported operations, and cuStreamWaitValue32(), cuStreamWaitValue64(), cuStreamWriteValue32(), and cuStreamWriteValue64() for details of specific operations.

See related APIs for details on querying support for specific operations.

Parameters:

• stream (CUstream or cudaStream_t) – The stream to enqueue the operations in.
• count (unsigned int) – The number of operations in the array. Must be less than 256.
• paramArray (List[CUstreamBatchMemOpParams]) – The types and parameters of the individual operations.
• flags (unsigned int) – Reserved for future expansion; must be 0.

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult

Notes

Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. For more information, see the Stream Memory Operations section in the programming guide (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html).
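A hedged sketch of batching a wait and a write, reusing dptr and stream from the cuStreamWaitValue32 sketch above and assuming the binding exposes the CUstreamBatchMemOpParams union members as attributes (dptr2 is a hypothetical second location):

    wait_op = cuda.CUstreamBatchMemOpParams()
    wait_op.waitValue.operation = cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32
    wait_op.waitValue.address = dptr
    wait_op.waitValue.value = cuda.cuuint32_t(1)
    wait_op.waitValue.flags = cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ

    write_op = cuda.CUstreamBatchMemOpParams()
    write_op.writeValue.operation = cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_32
    write_op.writeValue.address = dptr2
    write_op.writeValue.value = cuda.cuuint32_t(7)
    write_op.writeValue.flags = cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT

    # Operations are enqueued in array order; flags must be 0.
    err, = cuda.cuStreamBatchMemOp(stream, 2, [wait_op, write_op], 0)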

Execution Control

This section describes the execution control functions of the low-level CUDA driver application programming interface.

class cuda.cuda.CUfunctionLoadingState(value)

    CU_FUNCTION_LOADING_STATE_UNLOADED = 0

    CU_FUNCTION_LOADING_STATE_LOADED = 1

    CU_FUNCTION_LOADING_STATE_MAX = 2
cuda.cuda.cuFuncGetAttribute(attrib: CUfunction_attribute, hfunc)

Returns information about a function.

Returns in *pi the integer value of the attribute attrib on the kernel given by hfunc. The supported attributes are:

• CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded.
• CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of statically-allocated shared memory per block required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime.
• CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated constant memory required by this function.
• CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory used by each thread of this function.
• CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread of this function.
• CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0.
• CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version.
• CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: The attribute to indicate whether the function has been compiled with the user-specified option "-Xptxas --dlcm=ca" set.
• CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of dynamically-allocated shared memory.
• CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 cache split ratio in percent of total shared memory.
• CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the kernel must launch with a valid cluster size specified.
• CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in blocks.
• CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in blocks.
• CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in blocks.
• CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether the function can be launched with a non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform. The CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device. A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities. The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable.
• CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy.

With a few exceptions, function attributes may also be queried on unloaded function handles returned from cuModuleEnumerateFunctions. CUDA_ERROR_FUNCTION_NOT_LOADED is returned if the attribute requires a fully loaded function but the function is not loaded. The loading state of a function may be queried using cuFuncIsLoaded. cuFuncLoad may be called to explicitly load a function before querying the following attributes that require the function to be loaded:

Parameters:

Returns:
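For instance, a sketch of reading two of the attributes listed above, assuming kernel is a CUfunction obtained from cuModuleGetFunction:

    attr = cuda.CUfunction_attribute
    err, max_threads = cuda.cuFuncGetAttribute(
        attr.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel)
    err, num_regs = cuda.cuFuncGetAttribute(attr.CU_FUNC_ATTRIBUTE_NUM_REGS, kernel)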
cuda.cuda.cuFuncSetAttribute(hfunc, attrib: CUfunction_attribute, int value)

Sets information about a function.

This call sets the value of a specified attribute attrib on the kernel given by hfunc to the integer value specified by value. This function returns CUDA_SUCCESS if the new value of the attribute could be successfully set. If the set fails, this call will return an error. Not all attributes can have values set. Attempting to set a value on a read-only attribute will result in an error (CUDA_ERROR_INVALID_VALUE).

Supported attributes for the cuFuncSetAttribute call are:

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult
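A sketch of the common case of raising the dynamic shared memory limit, assuming kernel is a loaded CUfunction; the 64 KiB figure is illustrative and device-dependent:

    err, = cuda.cuFuncSetAttribute(
        kernel,
        cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
        64 * 1024)  # opt in to 64 KiB of dynamic shared memory per block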
cuda.cuda.cuFuncSetCacheConfig(hfunc, config: CUfunc_cache)

Sets the preferred cache configuration for a device function.

On devices where the L1 cache and shared memory use the same hardware resources, this sets through config the preferred cache configuration for the device function hfunc. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute hfunc. Any context-wide preference set via cuCtxSetCacheConfig() will be overridden by this per-function setting unless the per-function setting is CU_FUNC_CACHE_PREFER_NONE. In that case, the current context-wide setting will be used.

This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

The supported cache configurations are:

Parameters:

• hfunc (CUfunction) – Kernel to configure cache for
• config (CUfunc_cache) – Requested cache configuration

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT

Return type:

CUresult
cuda.cuda.cuFuncGetModule(hfunc)

Returns a module handle.

Returns in *hmod the handle of the module that function hfunc is located in. The lifetime of the module corresponds to the lifetime of the context it was loaded in, or until the module is explicitly unloaded.

The CUDA runtime manages its own modules loaded into the primary context. If the handle returned by this API refers to a module loaded by the CUDA runtime, calling cuModuleUnload() on that module will result in undefined behavior.

Parameters:

hfunc (CUfunction) – Function to retrieve module for

Returns:
cuda.cuda.cuFuncGetName(hfunc)

Returns the function name for a CUfunction handle.

Returns in **name the function name associated with the function handle hfunc. The function name is returned as a null-terminated string. The returned name is only valid when the function handle is valid. If the module is unloaded or reloaded, one must call the API again to get the updated name. This API may return a mangled name if the function is not declared as having C linkage. If either **name or hfunc is NULL, CUDA_ERROR_INVALID_VALUE is returned.

Parameters:

hfunc (CUfunction) – The function handle to retrieve the name for

Returns:
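A sketch, assuming kernel is a valid CUfunction; the bindings return the name as a bytes object:

    err, name = cuda.cuFuncGetName(kernel)
    print(name.decode())  # may be mangled if the kernel lacks C linkage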
cuda.cuda.cuFuncGetParamInfo(func, size_t paramIndex)

Returns the offset and size of a kernel parameter in the device-side parameter layout.

Queries the kernel parameter at paramIndex into func's list of parameters, and returns in paramOffset and paramSize the offset and size, respectively, where the parameter will reside in the device-side parameter layout. This information can be used to update kernel node parameters from the device via cudaGraphKernelNodeSetParam() and cudaGraphKernelNodeUpdatesApply(). paramIndex must be less than the number of parameters that func takes. paramSize can be set to NULL if only the parameter offset is desired.

Parameters:

• func (CUfunction) – The function to query
• paramIndex (size_t) – The parameter index to query

Returns:

• CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
• paramOffset (int) – Returns the offset into the device-side parameter layout at which the parameter resides
• paramSize (int) – Optionally returns the size of the parameter in the device-side parameter layout

See also

cuKernelGetParamInfo
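A sketch of querying the first parameter's placement, assuming kernel is a valid CUfunction:

    err, offset, size = cuda.cuFuncGetParamInfo(kernel, 0)
    # offset and size locate parameter 0 in the device-side parameter layout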
cuda.cuda.cuFuncIsLoaded(function)

Returns if the function is loaded.

Returns in state the loading state of function.

Parameters:

function (CUfunction) – the function to check

Returns:
cuda.cuda.cuFuncLoad(function)

Loads a function.

Finalizes function loading for function. Calling this API with a fully loaded function has no effect.

Parameters:

function (CUfunction) – the function to load

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult
cuda.cuda.cuLaunchKernel(f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hStream, kernelParams, void_ptr extra)

Launches a CUDA function CUfunction or a CUDA kernel CUkernel.

Invokes the function CUfunction or the kernel CUkernel f on a gridDimX x gridDimY x gridDimZ grid of blocks. Each block contains blockDimX x blockDimY x blockDimZ threads.

sharedMemBytes sets the amount of dynamic shared memory that will be available to each thread block.

Kernel parameters to f can be specified in one of two ways:

1) Kernel parameters can be specified via kernelParams. If f has N parameters, then kernelParams needs to be an array of N pointers. Each of kernelParams[0] through kernelParams[N-1] must point to a region of memory from which the actual kernel parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified, as that information is retrieved directly from the kernel's image.

2) Kernel parameters can also be packaged by the application into a single buffer that is passed in via the extra parameter. This places the burden on the application of knowing each kernel parameter's size and alignment/padding within the buffer. Here is an example of using the extra parameter in this manner:

View CUDA Toolkit Documentation for a C++ code example

The extra parameter exists to allow cuLaunchKernel to take additional less commonly used arguments. extra specifies a list of names of extra settings and their corresponding values. Each extra setting name is immediately followed by the corresponding value. The list must be terminated with either NULL or CU_LAUNCH_PARAM_END.

The error CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both kernelParams and extra (i.e. both kernelParams and extra are non-NULL).

Calling cuLaunchKernel() invalidates the persistent function state set through the following deprecated APIs: cuFuncSetBlockShape(), cuFuncSetSharedSize(), cuParamSetSize(), cuParamSeti(), cuParamSetf(), cuParamSetv().

Note that to use cuLaunchKernel(), the kernel f must either have been compiled with toolchain version 3.2 or later so that it will contain kernel parameter information, or have no kernel parameters. If either of these conditions is not met, then cuLaunchKernel() will return CUDA_ERROR_INVALID_IMAGE.

Note that the API can also be used to launch a context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to launch the kernel on will either be taken from the specified stream hStream or the current context in case of a NULL stream.

Parameters:

• f (CUfunction) – Function CUfunction or Kernel CUkernel to launch
• gridDimX (unsigned int) – Width of grid in blocks
• gridDimY (unsigned int) – Height of grid in blocks
• gridDimZ (unsigned int) – Depth of grid in blocks
• blockDimX (unsigned int) – X dimension of each thread block
• blockDimY (unsigned int) – Y dimension of each thread block
• blockDimZ (unsigned int) – Z dimension of each thread block
• sharedMemBytes (unsigned int) – Dynamic shared-memory size per thread block in bytes
• hStream (CUstream or cudaStream_t) – Stream identifier
• kernelParams (Any) – Array of pointers to kernel parameters
• extra (List[Any]) – Extra options

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, CUDA_ERROR_NOT_FOUND

Return type:

CUresult
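A sketch of the kernelParams pointer-array convention through these bindings, assuming kernel takes (float *out, size_t n), dOut is a CUdeviceptr, and stream is a CUstream; the packing mirrors the pattern used in the bundled vectorAdd example:

    import numpy as np
    from cuda import cuda

    n = np.array(1024, dtype=np.uint64)             # size_t argument, by value
    d_out = np.array([int(dOut)], dtype=np.uint64)  # device-pointer argument

    # kernelParams: an array of N pointers, one per kernel parameter.
    params = np.array([d_out.ctypes.data, n.ctypes.data], dtype=np.uint64)

    err, = cuda.cuLaunchKernel(
        kernel,
        4, 1, 1,             # grid dimensions in blocks
        256, 1, 1,           # block dimensions in threads
        0,                   # sharedMemBytes
        stream,
        params.ctypes.data,  # pointer to the array of argument pointers
        0)                   # extra (unused here)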
cuda.cuda.cuLaunchKernelEx(CUlaunchConfig config: Optional[CUlaunchConfig], f, kernelParams, void_ptr extra)

Launches a CUDA function CUfunction or a CUDA kernel CUkernel with launch-time configuration.

Invokes the function CUfunction or the kernel CUkernel f with the specified launch-time configuration config.

The CUlaunchConfig structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• gridDimX is the width of the grid in blocks.
• gridDimY is the height of the grid in blocks.
• gridDimZ is the depth of the grid in blocks.
• blockDimX is the X dimension of each thread block.
• blockDimY is the Y dimension of each thread block.
• blockDimZ is the Z dimension of each thread block.
• sharedMemBytes is the dynamic shared-memory size per thread block in bytes.
• hStream is the handle to the stream to perform the launch in. The CUDA context associated with this stream must match that associated with function f.
• attrs is an array of numAttrs contiguous CUlaunchAttribute elements. The value of this pointer is not considered if numAttrs is zero. However, in that case, it is recommended to set the pointer to NULL.
• numAttrs is the number of attributes populating the first numAttrs positions of the attrs array.

Launch-time configuration is specified by adding entries to attrs. Each entry is an attribute ID and a corresponding attribute value.

The CUlaunchAttribute structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• id is a unique enum identifying the attribute.
• value is a union that holds the attribute value.

An example of using the config parameter:

View CUDA Toolkit Documentation for a C++ code example

The CUlaunchAttributeID enum is defined as:

View CUDA Toolkit Documentation for a C++ code example

and the corresponding CUlaunchAttributeValue union as:

View CUDA Toolkit Documentation for a C++ code example

Setting CU_LAUNCH_ATTRIBUTE_COOPERATIVE to a non-zero value causes the kernel launch to be a cooperative launch, with exactly the same usage and semantics as cuLaunchCooperativeKernel.

Setting CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION to a non-zero value causes the kernel to use programmatic means to resolve its stream dependency, enabling the CUDA runtime to opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap.

CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT records an event along with the kernel launch. An event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or the CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. Note that dependents (including the CPU thread calling cuEventSynchronize()) are not guaranteed to observe the release precisely when it is released. For example, cuEventSynchronize() may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. created with the CU_EVENT_DISABLE_TIMING flag set).

CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT records an event along with the kernel launch. Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example:

• If B can claim execution resources unavailable to A, for example if they run on different GPUs.
• If B has a higher priority than A.

Exercise caution if such an ordering inversion could lead to deadlock. The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the CU_EVENT_DISABLE_TIMING flag set).

Setting CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE to 1 on a captured launch causes the resulting kernel node to be device-updatable. This attribute is specific to graphs, and passing it to a launch in a non-capturing stream results in an error. Passing a value other than 0 or 1 is not allowed.

On success, a handle will be returned via CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see cudaGraphKernelNodeUpdatesApply.

Kernel nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via cuGraphDestroyNode. Additionally, once opted in to this functionality, a node cannot opt out, and any attempt to set the attribute to 0 will result in an error. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation.

The effect of other attributes is consistent with their effect when set via persistent APIs.

See cuStreamSetAttribute for:

See cuFuncSetAttribute for:

Kernel parameters to f can be specified in the same ways that they can be using cuLaunchKernel.

Note that the API can also be used to launch a context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to launch the kernel on will either be taken from the specified stream hStream or the current context in case of a NULL stream.

Parameters:

• config (CUlaunchConfig) – Config to launch
• f (CUfunction) – Function CUfunction or Kernel CUkernel to launch
• kernelParams (Any) – Array of pointers to kernel parameters
• extra (List[Any]) – Extra options

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, CUDA_ERROR_NOT_FOUND

Return type:

CUresult
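A sketch of the same launch expressed through a CUlaunchConfig, reusing kernel, stream, and params from the cuLaunchKernel sketch above and assuming the binding exposes the struct fields as attributes:

    cfg = cuda.CUlaunchConfig()
    cfg.gridDimX, cfg.gridDimY, cfg.gridDimZ = 4, 1, 1
    cfg.blockDimX, cfg.blockDimY, cfg.blockDimZ = 256, 1, 1
    cfg.sharedMemBytes = 0
    cfg.hStream = stream
    cfg.attrs = []   # no launch-time attributes
    cfg.numAttrs = 0

    err, = cuda.cuLaunchKernelEx(cfg, kernel, params.ctypes.data, 0)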
cuda.cuda.cuLaunchCooperativeKernel(f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hStream, kernelParams)

Launches a CUDA function CUfunction or a CUDA kernel CUkernel where thread blocks can cooperate and synchronize as they execute.

Invokes the function CUfunction or the kernel CUkernel f on a gridDimX x gridDimY x gridDimZ grid of blocks. Each block contains blockDimX x blockDimY x blockDimZ threads.

sharedMemBytes sets the amount of dynamic shared memory that will be available to each thread block.

The device on which this kernel is invoked must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH.

The total number of blocks launched cannot exceed the maximum number of blocks per multiprocessor as returned by cuOccupancyMaxActiveBlocksPerMultiprocessor (or cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors as specified by the device attribute CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.

The kernel cannot make use of CUDA dynamic parallelism.

Kernel parameters must be specified via kernelParams. If f has N parameters, then kernelParams needs to be an array of N pointers. Each of kernelParams[0] through kernelParams[N-1] must point to a region of memory from which the actual kernel parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified, as that information is retrieved directly from the kernel's image.

Calling cuLaunchCooperativeKernel() sets persistent function state that is the same as function state set through the cuLaunchKernel API.

When the kernel f is launched via cuLaunchCooperativeKernel(), the previous block shape, shared size and parameter info associated with f is overwritten.

Note that to use cuLaunchCooperativeKernel(), the kernel f must either have been compiled with toolchain version 3.2 or later so that it will contain kernel parameter information, or have no kernel parameters. If either of these conditions is not met, then cuLaunchCooperativeKernel() will return CUDA_ERROR_INVALID_IMAGE.

Note that the API can also be used to launch a context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to launch the kernel on will either be taken from the specified stream hStream or the current context in case of a NULL stream.

Parameters:

• f (CUfunction) – Function CUfunction or Kernel CUkernel to launch
• gridDimX (unsigned int) – Width of grid in blocks
• gridDimY (unsigned int) – Height of grid in blocks
• gridDimZ (unsigned int) – Depth of grid in blocks
• blockDimX (unsigned int) – X dimension of each thread block
• blockDimY (unsigned int) – Y dimension of each thread block
• blockDimZ (unsigned int) – Z dimension of each thread block
• sharedMemBytes (unsigned int) – Dynamic shared-memory size per thread block in bytes
• hStream (CUstream or cudaStream_t) – Stream identifier
• kernelParams (Any) – Array of pointers to kernel parameters

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, CUDA_ERROR_NOT_FOUND

Return type:

CUresult
cuda.cuda.cuLaunchCooperativeKernelMultiDevice(launchParamsList: Optional[Tuple[CUDA_LAUNCH_PARAMS] | List[CUDA_LAUNCH_PARAMS]], unsigned int numDevices, unsigned int flags)

Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute.

[Deprecated]

Invokes kernels as specified in the launchParamsList array, where each element of the array specifies all the parameters required to perform a single kernel launch. These kernels can cooperate and synchronize as they execute. The size of the array is specified by numDevices.

No two kernels can be launched on the same device. All the devices targeted by this multi-device launch must be identical. All devices must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH.

All kernels launched must be identical with respect to the compiled code. Note that any device, constant or managed variables present in the module that owns the kernel launched on each device are independently instantiated on every device. It is the application's responsibility to ensure these variables are initialized and used appropriately.

The size of the grids as specified in blocks, the size of the blocks themselves, and the amount of shared memory used by each thread block must also match across all launched kernels.

The streams used to launch these kernels must have been created via either cuStreamCreate or cuStreamCreateWithPriority. The NULL stream or CU_STREAM_LEGACY or CU_STREAM_PER_THREAD cannot be used.

The total number of blocks launched per kernel cannot exceed the maximum number of blocks per multiprocessor as returned by cuOccupancyMaxActiveBlocksPerMultiprocessor (or cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors as specified by the device attribute CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. Since the total number of blocks launched per device has to match across all devices, the maximum number of blocks that can be launched per device will be limited by the device with the least number of multiprocessors.

The kernels cannot make use of CUDA dynamic parallelism.

The CUDA_LAUNCH_PARAMS structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• function specifies the kernel to be launched. All functions must be identical with respect to the compiled code. Note that you can also specify a context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then casting to CUfunction. In this case, the context to launch the kernel on will be taken from the specified stream hStream.
• gridDimX is the width of the grid in blocks. This must match across all kernels launched.
• gridDimY is the height of the grid in blocks. This must match across all kernels launched.
• gridDimZ is the depth of the grid in blocks. This must match across all kernels launched.
• blockDimX is the X dimension of each thread block. This must match across all kernels launched.
• blockDimY is the Y dimension of each thread block. This must match across all kernels launched.
• blockDimZ is the Z dimension of each thread block. This must match across all kernels launched.
• sharedMemBytes is the dynamic shared-memory size per thread block in bytes. This must match across all kernels launched.
• hStream is the handle to the stream to perform the launch in. This cannot be the NULL stream or CU_STREAM_LEGACY or CU_STREAM_PER_THREAD. The CUDA context associated with this stream must match that associated with function.
• kernelParams is an array of pointers to kernel parameters. If function has N parameters, then kernelParams needs to be an array of N pointers. Each of kernelParams[0] through kernelParams[N-1] must point to a region of memory from which the actual kernel parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified, as that information is retrieved directly from the kernel's image.

By default, the kernel won't begin execution on any GPU until all prior work in all the specified streams has completed. This behavior can be overridden by specifying the flag CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC. When this flag is specified, each kernel will only wait for prior work in the stream corresponding to that GPU to complete before it begins execution.

Similarly, by default, any subsequent work pushed in any of the specified streams will not begin execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying the flag CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified, any subsequent work pushed in any of the specified streams will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution.

Calling cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is the same as function state set through the cuLaunchKernel API when called individually for each element in launchParamsList.

When kernels are launched via cuLaunchCooperativeKernelMultiDevice(), the previous block shape, shared size and parameter info associated with each function in launchParamsList is overwritten.

Note that to use cuLaunchCooperativeKernelMultiDevice(), the kernels must either have been compiled with toolchain version 3.2 or later so that they will contain kernel parameter information, or have no kernel parameters. If either of these conditions is not met, then cuLaunchCooperativeKernelMultiDevice() will return CUDA_ERROR_INVALID_IMAGE.

Parameters:

• launchParamsList (List[CUDA_LAUNCH_PARAMS]) – List of launch parameters, one per device
• numDevices (unsigned int) – Size of the launchParamsList array
• flags (unsigned int) – Flags to control launch behavior

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED

Return type:

CUresult
cuda.cuda.cuLaunchHostFunc(hStream, fn, userData)

Enqueues a host function call in a stream.

Enqueues a host function to run in a stream. The function will be called after currently enqueued work and will block work added after it.

The host function must not make any CUDA API calls. Attempting to use a CUDA API may result in CUDA_ERROR_NOT_PERMITTED, but this is not required. The host function must not perform any synchronization that may depend on outstanding CUDA work not mandated to run earlier. Host functions without a mandated order (such as in independent streams) execute in undefined order and may be serialized.

For the purposes of Unified Memory, execution makes a number of guarantees:

• The stream is considered idle for the duration of the function's execution. Thus, for example, the function may always use memory attached to the stream it was enqueued in.
• The start of execution of the function has the same effect as synchronizing an event recorded in the same stream immediately prior to the function. It thus synchronizes streams which have been "joined" prior to the function.
• Adding device work to any stream does not have the effect of making the stream active until all preceding host functions and stream callbacks have executed. Thus, for example, a function might use global attached memory even if work has been added to another stream, if the work has been ordered behind the function call with an event.
• Completion of the function does not cause a stream to become active except as described above. The stream will remain idle if no device work follows the function, and will remain idle across consecutive host functions or stream callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a host function at the end of the stream.

Note that, in contrast to cuStreamAddCallback, the function will not be called in the event of an error in the CUDA context.

Parameters:

• hStream (CUstream or cudaStream_t) – Stream to enqueue function call in
• fn (CUhostFn) – The function to call once preceding stream operations are complete
• userData (Any) – User-specified data to be passed to the function

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

Return type:

CUresult

Graph Management

This section describes the graph management functions of the low-level CUDA driver application programming interface.

cuda.cuda.cuGraphCreate(unsigned int flags)

Creates a graph.

Creates an empty graph, which is returned via phGraph.

Parameters:

flags (unsigned int) – Graph creation flags, must be 0

Returns:
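A sketch of the typical lifecycle around cuGraphCreate, assuming a current context and the cuGraphInstantiate signature exposed by these bindings:

    err, graph = cuda.cuGraphCreate(0)
    err, node = cuda.cuGraphAddEmptyNode(graph, None, 0)  # see cuGraphAddEmptyNode below
    err, graph_exec = cuda.cuGraphInstantiate(graph, 0)
    err, stream = cuda.cuStreamCreate(0)
    err, = cuda.cuGraphLaunch(graph_exec, stream)
    err, = cuda.cuStreamSynchronize(stream)
    err, = cuda.cuGraphExecDestroy(graph_exec)
    err, = cuda.cuGraphDestroy(graph)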
cuda.cuda.cuGraphAddKernelNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_KERNEL_NODE_PARAMS nodeParams: Optional[CUDA_KERNEL_NODE_PARAMS])

Creates a kernel execution node and adds it to a graph.

Creates a new kernel execution node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

The CUDA_KERNEL_NODE_PARAMS structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

When the graph is launched, the node will invoke kernel func on a (gridDimX x gridDimY x gridDimZ) grid of blocks. Each block contains (blockDimX x blockDimY x blockDimZ) threads.

sharedMemBytes sets the amount of dynamic shared memory that will be available to each thread block.

Kernel parameters to func can be specified in one of two ways:

1) Kernel parameters can be specified via kernelParams. If the kernel has N parameters, then kernelParams needs to be an array of N pointers. Each pointer, from kernelParams[0] to kernelParams[N-1], points to the region of memory from which the actual parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified, as that information is retrieved directly from the kernel's image.

2) Kernel parameters for non-cooperative kernels can also be packaged by the application into a single buffer that is passed in via extra. This places the burden on the application of knowing each kernel parameter's size and alignment/padding within the buffer. The extra parameter exists to allow this function to take additional less commonly used arguments. extra specifies a list of names of extra settings and their corresponding values. Each extra setting name is immediately followed by the corresponding value. The list must be terminated with either NULL or CU_LAUNCH_PARAM_END.

The error CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both kernelParams and extra (i.e. both kernelParams and extra are non-NULL). CUDA_ERROR_INVALID_VALUE will be returned if extra is used for a cooperative kernel.

The kernelParams or extra array, as well as the argument values it points to, are copied during this call.

Parameters:

• hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
• dependencies (List[CUgraphNode]) – Dependencies of the node
• numDependencies (size_t) – Number of dependencies
• nodeParams (CUDA_KERNEL_NODE_PARAMS) – Parameters for the GPU execution node

Returns:

Notes

Kernels launched using graphs must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.
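A sketch of filling CUDA_KERNEL_NODE_PARAMS, reusing graph from the cuGraphCreate sketch and kernel plus the params pointer array from the cuLaunchKernel sketch; field names mirror the C struct, and the kernelParams array is copied during the call:

    node_params = cuda.CUDA_KERNEL_NODE_PARAMS()
    node_params.func = kernel
    node_params.gridDimX = 4
    node_params.gridDimY = node_params.gridDimZ = 1
    node_params.blockDimX = 256
    node_params.blockDimY = node_params.blockDimZ = 1
    node_params.sharedMemBytes = 0
    node_params.kernelParams = params.ctypes.data

    err, knode = cuda.cuGraphAddKernelNode(graph, None, 0, node_params)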
cuda.cuda.cuGraphKernelNodeGetParams(hNode)

Returns a kernel node's parameters.

Returns the parameters of kernel node hNode in nodeParams. The kernelParams or extra array returned in nodeParams, as well as the argument values it points to, are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphKernelNodeSetParams to update the parameters of this node.

The params will contain either kernelParams or extra, according to which of these was most recently set on the node.

Parameters:

hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

Returns:
cuda.cuda.cuGraphKernelNodeSetParams(hNode, CUDA_KERNEL_NODE_PARAMS nodeParams: Optional[CUDA_KERNEL_NODE_PARAMS])

Sets a kernel node's parameters.

Sets the parameters of kernel node hNode to nodeParams.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

Return type:

CUresult
cuda.cuda.cuGraphAddMemcpyNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_MEMCPY3D copyParams: Optional[CUDA_MEMCPY3D], ctx)

Creates a memcpy node and adds it to a graph.

Creates a new memcpy node and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

When the graph is launched, the node will perform the memcpy described by copyParams. See cuMemcpy3D() for a description of the structure and its restrictions.

Memcpy nodes have some additional restrictions with regards to managed memory, if the system contains at least one device which has a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer to managed memory, then using the memory type CU_MEMORYTYPE_UNIFIED is disallowed for those operand(s). The managed memory will be treated as residing on either the host or the device, depending on which memory type is specified.

Parameters:

• hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
• dependencies (List[CUgraphNode]) – Dependencies of the node
• numDependencies (size_t) – Number of dependencies
• copyParams (CUDA_MEMCPY3D) – Parameters for the memory copy
• ctx (CUcontext) – Context on which to run the node

Returns:
cuda.cuda.cuGraphMemcpyNodeGetParams(hNode)

Returns a memcpy node's parameters.

Returns the parameters of memcpy node hNode in nodeParams.

Parameters:

hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

Returns:
cuda.cuda.cuGraphMemcpyNodeSetParams(hNode, CUDA_MEMCPY3D nodeParams: Optional[CUDA_MEMCPY3D])

Sets a memcpy node's parameters.

Sets the parameters of memcpy node hNode to nodeParams.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult
cuda.cuda.cuGraphAddMemsetNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_MEMSET_NODE_PARAMS memsetParams: Optional[CUDA_MEMSET_NODE_PARAMS], ctx)

Creates a memset node and adds it to a graph.

Creates a new memset node and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

The element size must be 1, 2, or 4 bytes. When the graph is launched, the node will perform the memset described by memsetParams.

Parameters:

• hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
• dependencies (List[CUgraphNode]) – Dependencies of the node
• numDependencies (size_t) – Number of dependencies
• memsetParams (CUDA_MEMSET_NODE_PARAMS) – Parameters for the memory set
• ctx (CUcontext) – Context on which to run the node

Returns:
cuda.cuda.cuGraphMemsetNodeGetParams(hNode)

Returns a memset node's parameters.

Returns the parameters of memset node hNode in nodeParams.

Parameters:

hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

Returns:
cuda.cuda.cuGraphMemsetNodeSetParams(hNode, CUDA_MEMSET_NODE_PARAMS nodeParams: Optional[CUDA_MEMSET_NODE_PARAMS])

Sets a memset node's parameters.

Sets the parameters of memset node hNode to nodeParams.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult
cuda.cuda.cuGraphAddHostNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_HOST_NODE_PARAMS nodeParams: Optional[CUDA_HOST_NODE_PARAMS])

Creates a host execution node and adds it to a graph.

Creates a new CPU execution node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

When the graph is launched, the node will invoke the specified CPU function. Host nodes are not supported under MPS with pre-Volta GPUs.

Parameters:

• hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
• dependencies (List[CUgraphNode]) – Dependencies of the node
• numDependencies (size_t) – Number of dependencies
• nodeParams (CUDA_HOST_NODE_PARAMS) – Parameters for the host node

Returns:
cuda.cuda.cuGraphHostNodeGetParams(hNode)

Returns a host node's parameters.

Returns the parameters of host node hNode in nodeParams.

Parameters:

hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

Returns:
cuda.cuda.cuGraphHostNodeSetParams(hNode, CUDA_HOST_NODE_PARAMS nodeParams: Optional[CUDA_HOST_NODE_PARAMS])

Sets a host node's parameters.

Sets the parameters of host node hNode to nodeParams.

Parameters:

Returns:

CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

Return type:

CUresult
cuda.cuda.cuGraphAddChildGraphNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, childGraph)

Creates a child graph node and adds it to a graph.

Creates a new node which executes an embedded graph, and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

If hGraph contains allocation or free nodes, this call will return an error.

The node executes an embedded child graph. The child graph is cloned in this call.

Parameters:

• hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
• dependencies (List[CUgraphNode]) – Dependencies of the node
• numDependencies (size_t) – Number of dependencies
• childGraph (CUgraph or cudaGraph_t) – The graph to clone into this node

Returns:
cuda.cuda.cuGraphChildGraphNodeGetGraph(hNode)

Gets a handle to the embedded graph of a child graph node.

Gets a handle to the embedded graph in a child graph node. This call does not clone the graph. Changes to the graph will be reflected in the node, and the node retains ownership of the graph.

Allocation and free nodes cannot be added to the returned graph. Attempting to do so will return an error.

Parameters:

hNode (CUgraphNode or cudaGraphNode_t) – Node to get the embedded graph for

Returns:
cuda.cuda.cuGraphAddEmptyNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies)

Creates an empty node and adds it to a graph.

Creates a new node which performs no operation, and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

An empty node performs no operation during execution, but can be used for transitive ordering. For example, a phased execution graph with 2 groups of n nodes with a barrier between them can be represented using an empty node and 2*n dependency edges, rather than no empty node and n^2 dependency edges. A sketch of this pattern follows the parameter list below.

Parameters:

• hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
• dependencies (List[CUgraphNode]) – Dependencies of the node
• numDependencies (size_t) – Number of dependencies

Returns:
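A sketch of the barrier pattern just described, built here from empty nodes only (any node type works), assuming graph from the sketches above:

    n = 4
    group_a = []
    for _ in range(n):
        err, node = cuda.cuGraphAddEmptyNode(graph, None, 0)
        group_a.append(node)

    # One barrier node depending on all of group A: n edges in.
    err, barrier = cuda.cuGraphAddEmptyNode(graph, group_a, len(group_a))

    # Each node of group B depends only on the barrier: n edges out.
    group_b = []
    for _ in range(n):
        err, node = cuda.cuGraphAddEmptyNode(graph, [barrier], 1)
        group_b.append(node)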
cuda.cuda.cuGraphAddEventRecordNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, event)

Creates an event record node and adds it to a graph.

Creates a new event record node and adds it to hGraph with numDependencies dependencies specified via dependencies and event specified in event. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

Each launch of the graph will record event to capture execution of the node's dependencies.

Parameters:

• hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
• dependencies (List[CUgraphNode]) – Dependencies of the node
• numDependencies (size_t) – Number of dependencies
• event (CUevent or cudaEvent_t) – Event for the node

Returns:
    -cuda.cuda.cuGraphEventRecordNodeGetEvent(hNode)#
    -

    Returns the event associated with an event record node.

    -

    Returns the event of event record node hNode in event_out.

    -
    -
    Parameters:
    -

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    -
    -
    Returns:
    -

    -

    -
    -
    - -
    - -
    -
    -cuda.cuda.cuGraphEventRecordNodeSetEvent(hNode, event)#
    -

    Sets an event record node’s event.

    -

    Sets the event of event record node hNode to event.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    -
    -
    Return type:
    -

    CUresult

    -
    -
    - -
    - -
    -
    cuda.cuda.cuGraphAddEventWaitNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, event)

    Creates an event wait node and adds it to a graph.

    Creates a new event wait node and adds it to hGraph with numDependencies dependencies specified via dependencies and event specified in event. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    The graph node will wait for all work captured in event. See cuEventRecord() for details on what is captured by an event. event may be from a different context or device than the launch stream.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • dependencies (List[CUgraphNode]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • event (CUevent or cudaEvent_t) – Event for the node

    Returns:

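    A hedged sketch pairing an event record node with an event wait node in the same graph (same assumptions as the earlier sketches; a real graph would put work nodes on both sides).

        from cuda import cuda

        err, event = cuda.cuEventCreate(0)
        err, graph = cuda.cuGraphCreate(0)

        # Each launch records `event` once the record node's dependencies finish ...
        err, recNode = cuda.cuGraphAddEventRecordNode(graph, [], 0, event)

        # ... and the wait node holds back its dependents until all work
        # captured in `event` has completed.
        err, waitNode = cuda.cuGraphAddEventWaitNode(graph, [recNode], 1, event)
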
    cuda.cuda.cuGraphEventWaitNodeGetEvent(hNode)

    Returns the event associated with an event wait node.

    Returns the event of event wait node hNode in event_out.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    Returns:

    cuda.cuda.cuGraphEventWaitNodeSetEvent(hNode, event)

    Sets an event wait node’s event.

    Sets the event of event wait node hNode to event.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:
        CUresult

    cuda.cuda.cuGraphAddExternalSemaphoresSignalNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS])

    Creates an external semaphore signal node and adds it to a graph.

    Creates a new external semaphore signal node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    Performs a signal operation on a set of externally allocated semaphore objects when the node is launched. The operation(s) will occur after all of the node’s dependencies have completed.

    Parameters:

    Returns:

    cuda.cuda.cuGraphExternalSemaphoresSignalNodeGetParams(hNode)

    Returns an external semaphore signal node’s parameters.

    Returns the parameters of an external semaphore signal node hNode in params_out. The extSemArray and paramsArray returned in params_out are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphExternalSemaphoresSignalNodeSetParams to update the parameters of this node.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cuda.cuGraphExternalSemaphoresSignalNodeSetParams(hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS])

    Sets an external semaphore signal node’s parameters.

    Sets the parameters of an external semaphore signal node hNode to nodeParams.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:
        CUresult

    cuda.cuda.cuGraphAddExternalSemaphoresWaitNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_EXT_SEM_WAIT_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_WAIT_NODE_PARAMS])

    Creates an external semaphore wait node and adds it to a graph.

    Creates a new external semaphore wait node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    Performs a wait operation on a set of externally allocated semaphore objects when the node is launched. The node’s dependencies will not be launched until the wait operation has completed.

    Parameters:

    Returns:

    cuda.cuda.cuGraphExternalSemaphoresWaitNodeGetParams(hNode)

    Returns an external semaphore wait node’s parameters.

    Returns the parameters of an external semaphore wait node hNode in params_out. The extSemArray and paramsArray returned in params_out are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphExternalSemaphoresWaitNodeSetParams to update the parameters of this node.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cuda.cuGraphExternalSemaphoresWaitNodeSetParams(hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_WAIT_NODE_PARAMS])

    Sets an external semaphore wait node’s parameters.

    Sets the parameters of an external semaphore wait node hNode to nodeParams.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:
        CUresult

    cuda.cuda.cuGraphAddBatchMemOpNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_BATCH_MEM_OP_NODE_PARAMS nodeParams: Optional[CUDA_BATCH_MEM_OP_NODE_PARAMS])

    Creates a batch memory operation node and adds it to a graph.

    Creates a new batch memory operation node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    When the node is added, the paramArray inside nodeParams is copied and therefore it can be freed after the call returns.

    Parameters:

    Returns:

    Notes

    Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. For more information, see the Stream Memory Operations section in the programming guide (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html).

    cuda.cuda.cuGraphBatchMemOpNodeGetParams(hNode)

    Returns a batch mem op node’s parameters.

    Returns the parameters of batch mem op node hNode in nodeParams_out. The paramArray returned in nodeParams_out is owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphBatchMemOpNodeSetParams to update the parameters of this node.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cuda.cuGraphBatchMemOpNodeSetParams(hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS nodeParams: Optional[CUDA_BATCH_MEM_OP_NODE_PARAMS])

    Sets a batch mem op node’s parameters.

    Sets the parameters of batch mem op node hNode to nodeParams.

    The paramArray inside nodeParams is copied and therefore it can be freed after the call returns.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:
        CUresult

    cuda.cuda.cuGraphExecBatchMemOpNodeSetParams(hGraphExec, hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS nodeParams: Optional[CUDA_BATCH_MEM_OP_NODE_PARAMS])

    Sets the parameters for a batch mem op node in the given graphExec.

    Sets the parameters of a batch mem op node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    The following fields on operations may be modified on an executable graph:

    • op.waitValue.address
    • op.waitValue.value[64]
    • op.waitValue.flags bits corresponding to wait type (i.e. CU_STREAM_WAIT_VALUE_FLUSH bit cannot be modified)
    • op.writeValue.address
    • op.writeValue.value[64]

    Other fields, such as the context, count or type of operations, and other types of operations such as membars, may not be modified.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    The paramArray inside nodeParams is copied and therefore it can be freed after the call returns.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphAddMemAllocNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS nodeParams: Optional[CUDA_MEM_ALLOC_NODE_PARAMS])

    Creates an allocation node and adds it to a graph.

    Creates a new allocation node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    When cuGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in nodeParams.dptr. The allocation’s address remains fixed across instantiations and launches.

    If the allocation is freed in the same graph, by creating a free node using cuGraphAddMemFreeNode, the allocation can be accessed by nodes ordered after the allocation node but before the free node. These allocations cannot be freed outside the owning graph, and they can only be freed once in the owning graph.

    If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the graph which are ordered after the allocation node, but also by stream operations ordered after the graph’s execution but before the allocation is freed.

    Allocations which are not freed in the same graph can be freed by:

    • adding a free node to another graph,
    • freeing it with cuMemFreeAsync, or
    • freeing it with cuMemFree.

    It is not possible to free an allocation in both the owning graph and another graph. If the allocation is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed in another graph, a free node can no longer be added to the owning graph.

    The following restrictions apply to graphs which contain allocation and/or memory free nodes:

    • Nodes and edges of the graph cannot be deleted.
    • The graph cannot be used in a child node.
    • Only one instantiation of the graph may exist at any point in time.
    • The graph cannot be cloned.

    Parameters:

    Returns:

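    A sketch of an allocation created and freed inside one graph. The CUDA_MEM_ALLOC_NODE_PARAMS field names used here (poolProps, bytesize, dptr) follow the driver struct; treat the exact Python spelling as an assumption to verify against the bindings.

        from cuda import cuda

        err, graph = cuda.cuGraphCreate(0)

        # Describe a 1 MiB device allocation on device 0.
        params = cuda.CUDA_MEM_ALLOC_NODE_PARAMS()
        params.poolProps.allocType = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        params.poolProps.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        params.poolProps.location.id = 0
        params.bytesize = 1 << 20

        err, allocNode = cuda.cuGraphAddMemAllocNode(graph, [], 0, params)
        dptr = params.dptr  # filled in by the call; fixed across launches

        # Free in the same graph: the free node must be ordered after the
        # allocation node (see cuGraphAddMemFreeNode below).
        err, freeNode = cuda.cuGraphAddMemFreeNode(graph, [allocNode], 1, dptr)
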
    cuda.cuda.cuGraphMemAllocNodeGetParams(hNode)

    Returns a memory alloc node’s parameters.

    Returns the parameters of a memory alloc node hNode in params_out. The poolProps and accessDescs returned in params_out are owned by the node. This memory remains valid until the node is destroyed. The returned parameters must not be modified.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cuda.cuGraphAddMemFreeNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, dptr)

    Creates a memory free node and adds it to a graph.

    Creates a new memory free node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    cuGraphAddMemFreeNode will return CUDA_ERROR_INVALID_VALUE if the user attempts to free:

    • an allocation twice in the same graph.
    • an address that was not returned by an allocation node.
    • an invalid address.

    The following restrictions apply to graphs which contain allocation and/or memory free nodes:

    • Nodes and edges of the graph cannot be deleted.
    • The graph cannot be used in a child node.
    • Only one instantiation of the graph may exist at any point in time.
    • The graph cannot be cloned.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • dependencies (List[CUgraphNode]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • dptr (CUdeviceptr) – Address of memory to free

    Returns:

    cuda.cuda.cuGraphMemFreeNodeGetParams(hNode)

    Returns a memory free node’s parameters.

    Returns the address of a memory free node hNode in dptr_out.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cuda.cuDeviceGraphMemTrim(device)

    Free unused memory that was cached on the specified device for use with graphs back to the OS.

    Blocks which are not in use by a graph that is either currently executing or scheduled to execute are freed back to the operating system.

    Parameters:
        device (CUdevice) – The device for which cached memory should be freed.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_DEVICE

    Return type:
        CUresult

    cuda.cuda.cuDeviceGetGraphMemAttribute(device, attr: CUgraphMem_attribute)

    Query asynchronous allocation attributes related to graphs.

    Valid attributes are:

    • CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: Amount of memory, in bytes, currently associated with graphs
    • CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the last time it was reset
    • CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator
    • CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator

    Parameters:

    Returns:

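    A small sketch of querying and trimming graph memory; it assumes the attribute enum member is spelled as shown, and the other CUgraphMem_attribute members listed above are queried the same way.

        from cuda import cuda

        err, = cuda.cuInit(0)
        err, dev = cuda.cuDeviceGet(0)

        # Bytes currently backing graph allocations on this device.
        attr = cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT
        err, used = cuda.cuDeviceGetGraphMemAttribute(dev, attr)

        # Return unused cached graph memory to the OS (cuDeviceGraphMemTrim above).
        err, = cuda.cuDeviceGraphMemTrim(dev)
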
    cuda.cuda.cuDeviceSetGraphMemAttribute(device, attr: CUgraphMem_attribute, value)

    Set asynchronous allocation attributes related to graphs.

    Valid attributes are:

    • CU_GRAPH_MEM_ATTR_USED_MEM_HIGH
    • CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH

    Parameters:
    • device (CUdevice) – Specifies the scope of the query
    • attr (CUgraphMem_attribute) – attribute to set
    • value (Any) – pointer to value to set

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_DEVICE

    Return type:
        CUresult

    cuda.cuda.cuGraphClone(originalGraph)

    Clones a graph.

    This function creates a copy of originalGraph and returns it in phGraphClone. All parameters are copied into the cloned graph. The original graph may be modified after this call without affecting the clone.

    Child graph nodes in the original graph are recursively copied into the clone.

    Parameters:
        originalGraph (CUgraph or cudaGraph_t) – Graph to clone

    Returns:

    cuda.cuda.cuGraphNodeFindInClone(hOriginalNode, hClonedGraph)

    Finds a cloned version of a node.

    This function returns the node in hClonedGraph corresponding to hOriginalNode in the original graph.

    hClonedGraph must have been cloned from hOriginalGraph via cuGraphClone. hOriginalNode must have been in hOriginalGraph at the time of the call to cuGraphClone, and the corresponding cloned node in hClonedGraph must not have been removed. The cloned node is then returned via phClonedNode.

    Parameters:

    Returns:

    See also

    cuGraphClone

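    A sketch of cloning a graph and recovering a node’s counterpart in the clone (same cuda-python tuple-return convention assumed).

        from cuda import cuda

        err, graph = cuda.cuGraphCreate(0)
        err, node = cuda.cuGraphAddEmptyNode(graph, [], 0)

        # The clone is independent: later edits to `graph` do not affect it.
        err, clone = cuda.cuGraphClone(graph)

        # Map the original handle to the corresponding node in the clone.
        err, clonedNode = cuda.cuGraphNodeFindInClone(node, clone)
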
    cuda.cuda.cuGraphNodeGetType(hNode)

    Returns a node’s type.

    Returns the node type of hNode in typename.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to query

    Returns:

    cuda.cuda.cuGraphGetNodes(hGraph, size_t numNodes=0)

    Returns a graph’s nodes.

    Returns a list of hGraph’s nodes. nodes may be NULL, in which case this function will return the number of nodes in numNodes. Otherwise, numNodes entries will be filled in. If numNodes is higher than the actual number of nodes, the remaining entries in nodes will be set to NULL, and the number of nodes actually obtained will be returned in numNodes.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to query
    • numNodes (int) – See description

    Returns:

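    A sketch of the count-then-fetch idiom. The return shape used here — (CUresult, nodes, numNodes), mirroring the C out-parameters — is an assumption to verify against the bindings.

        from cuda import cuda

        err, graph = cuda.cuGraphCreate(0)
        err, _ = cuda.cuGraphAddEmptyNode(graph, [], 0)

        # First call with numNodes=0 to learn the count ...
        err, _, count = cuda.cuGraphGetNodes(graph, 0)

        # ... then fetch exactly that many node handles.
        err, nodes, count = cuda.cuGraphGetNodes(graph, count)
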
    cuda.cuda.cuGraphGetRootNodes(hGraph, size_t numRootNodes=0)

    Returns a graph’s root nodes.

    Returns a list of hGraph’s root nodes. rootNodes may be NULL, in which case this function will return the number of root nodes in numRootNodes. Otherwise, numRootNodes entries will be filled in. If numRootNodes is higher than the actual number of root nodes, the remaining entries in rootNodes will be set to NULL, and the number of nodes actually obtained will be returned in numRootNodes.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to query
    • numRootNodes (int) – See description

    Returns:

    cuda.cuda.cuGraphGetEdges(hGraph, size_t numEdges=0)

    Returns a graph’s dependency edges.

    Returns a list of hGraph’s dependency edges. Edges are returned via corresponding indices in from and to; that is, the node in to[i] has a dependency on the node in from[i]. from and to may both be NULL, in which case this function only returns the number of edges in numEdges. Otherwise, numEdges entries will be filled in. If numEdges is higher than the actual number of edges, the remaining entries in from and to will be set to NULL, and the number of edges actually returned will be written to numEdges.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to get the edges from
    • numEdges (int) – See description

    Returns:

    cuda.cuda.cuGraphGetEdges_v2(hGraph, size_t numEdges=0)

    Returns a graph’s dependency edges (12.3+)

    Returns a list of hGraph’s dependency edges. Edges are returned via corresponding indices in from, to and edgeData; that is, the node in to[i] has a dependency on the node in from[i] with data edgeData[i]. from and to may both be NULL, in which case this function only returns the number of edges in numEdges. Otherwise, numEdges entries will be filled in. If numEdges is higher than the actual number of edges, the remaining entries in from and to will be set to NULL, and the number of edges actually returned will be written to numEdges. edgeData may alone be NULL, in which case the edges must all have default (zeroed) edge data. Attempting a lossy query via NULL edgeData will result in CUDA_ERROR_LOSSY_QUERY. If edgeData is non-NULL then from and to must be as well.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to get the edges from
    • numEdges (int) – See description

    Returns:

    cuda.cuda.cuGraphNodeGetDependencies(hNode, size_t numDependencies=0)

    Returns a node’s dependencies.

    Returns a list of node’s dependencies. dependencies may be NULL, in which case this function will return the number of dependencies in numDependencies. Otherwise, numDependencies entries will be filled in. If numDependencies is higher than the actual number of dependencies, the remaining entries in dependencies will be set to NULL, and the number of nodes actually obtained will be returned in numDependencies.

    Parameters:

    Returns:

    cuda.cuda.cuGraphNodeGetDependencies_v2(hNode, size_t numDependencies=0)

    Returns a node’s dependencies (12.3+)

    Returns a list of node’s dependencies. dependencies may be NULL, in which case this function will return the number of dependencies in numDependencies. Otherwise, numDependencies entries will be filled in. If numDependencies is higher than the actual number of dependencies, the remaining entries in dependencies will be set to NULL, and the number of nodes actually obtained will be returned in numDependencies.

    Note that if an edge has non-zero (non-default) edge data and edgeData is NULL, this API will return CUDA_ERROR_LOSSY_QUERY. If edgeData is non-NULL, then dependencies must be as well.

    Parameters:

    Returns:

    cuda.cuda.cuGraphNodeGetDependentNodes(hNode, size_t numDependentNodes=0)

    Returns a node’s dependent nodes.

    Returns a list of node’s dependent nodes. dependentNodes may be NULL, in which case this function will return the number of dependent nodes in numDependentNodes. Otherwise, numDependentNodes entries will be filled in. If numDependentNodes is higher than the actual number of dependent nodes, the remaining entries in dependentNodes will be set to NULL, and the number of nodes actually obtained will be returned in numDependentNodes.

    Parameters:

    Returns:

    cuda.cuda.cuGraphNodeGetDependentNodes_v2(hNode, size_t numDependentNodes=0)

    Returns a node’s dependent nodes (12.3+)

    Returns a list of node’s dependent nodes. dependentNodes may be NULL, in which case this function will return the number of dependent nodes in numDependentNodes. Otherwise, numDependentNodes entries will be filled in. If numDependentNodes is higher than the actual number of dependent nodes, the remaining entries in dependentNodes will be set to NULL, and the number of nodes actually obtained will be returned in numDependentNodes.

    Note that if an edge has non-zero (non-default) edge data and edgeData is NULL, this API will return CUDA_ERROR_LOSSY_QUERY. If edgeData is non-NULL, then dependentNodes must be as well.

    Parameters:

    Returns:

    cuda.cuda.cuGraphAddDependencies(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies)

    Adds dependency edges to a graph.

    The number of dependencies to be added is defined by numDependencies. Elements in from and to at corresponding indices define a dependency. Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored. Specifying an existing dependency will return an error.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to which dependencies are added
    • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
    • to (List[CUgraphNode]) – Array of dependent nodes
    • numDependencies (size_t) – Number of dependencies to be added

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

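    A sketch of adding a single edge after both nodes already exist (tuple-return convention assumed, as in the earlier sketches).

        from cuda import cuda

        err, graph = cuda.cuGraphCreate(0)
        err, a = cuda.cuGraphAddEmptyNode(graph, [], 0)
        err, b = cuda.cuGraphAddEmptyNode(graph, [], 0)

        # from_ and to are index-matched arrays: this adds the one edge a -> b,
        # making b depend on a. Adding the same edge again would be an error.
        err, = cuda.cuGraphAddDependencies(graph, [a], [b], 1)
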
    cuda.cuda.cuGraphAddDependencies_v2(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], edgeData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies)

    Adds dependency edges to a graph (12.3+)

    The number of dependencies to be added is defined by numDependencies. Elements in from and to at corresponding indices define a dependency. Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored. Specifying an existing dependency will return an error.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph to which dependencies are added
    • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
    • to (List[CUgraphNode]) – Array of dependent nodes
    • edgeData (List[CUgraphEdgeData]) – Optional array of edge data. If NULL, default (zeroed) edge data is assumed.
    • numDependencies (size_t) – Number of dependencies to be added

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphRemoveDependencies(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies)

    Removes dependency edges from a graph.

    The number of dependencies to be removed is defined by numDependencies. Elements in from and to at corresponding indices define a dependency. Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored. Specifying a non-existing dependency will return an error.

    Dependencies cannot be removed from graphs which contain allocation or free nodes. Any attempt to do so will return an error.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies
    • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
    • to (List[CUgraphNode]) – Array of dependent nodes
    • numDependencies (size_t) – Number of dependencies to be removed

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphRemoveDependencies_v2(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], edgeData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies)

    Removes dependency edges from a graph (12.3+)

    The number of dependencies to be removed is defined by numDependencies. Elements in from and to at corresponding indices define a dependency. Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored. Specifying an edge that does not exist in the graph, with data matching edgeData, results in an error. edgeData is nullable, which is equivalent to passing default (zeroed) data for each edge.

    Dependencies cannot be removed from graphs which contain allocation or free nodes. Any attempt to do so will return an error.

    Parameters:
    • hGraph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies
    • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
    • to (List[CUgraphNode]) – Array of dependent nodes
    • edgeData (List[CUgraphEdgeData]) – Optional array of edge data. If NULL, edge data is assumed to be default (zeroed).
    • numDependencies (size_t) – Number of dependencies to be removed

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphDestroyNode(hNode)

    Remove a node from the graph.

    Removes hNode from its graph. This operation also severs any dependencies of other nodes on hNode and vice versa.

    Nodes which belong to a graph which contains allocation or free nodes cannot be destroyed. Any attempt to do so will return an error.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to remove

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphInstantiate(hGraph, unsigned long long flags)

    Creates an executable graph from a graph.

    Instantiates hGraph as an executable graph. The graph is validated for any structural constraints or intra-node constraints which were not previously validated. If instantiation is successful, a handle to the instantiated graph is returned in phGraphExec.

    The flags parameter controls the behavior of instantiation and subsequent graph launches. Valid flags are:

    • CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a graph containing memory allocation nodes to automatically free any unfreed memory allocations before the graph is relaunched.
    • CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph for launch from the device. If this flag is passed, the executable graph handle returned can be used to launch the graph from both the host and device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH.
    • CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph to use the priorities from the per-node attributes rather than the priority of the launch stream during execution. Note that priorities are only available on kernel nodes, and are copied from stream priority during stream capture.

    If hGraph contains any allocation or free nodes, there can be at most one executable graph in existence for that graph at a time. An attempt to instantiate a second executable graph before destroying the first with cuGraphExecDestroy will result in an error. The same also applies if hGraph contains any device-updatable kernel nodes.

    If hGraph contains kernels which call device-side cudaGraphLaunch() from multiple contexts, this will result in an error.

    Graphs instantiated for launch on the device have additional restrictions which do not apply to host graphs:

    • The graph’s nodes must reside on a single context.
    • The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes.
    • The graph cannot be empty and must contain at least one kernel, memcpy, or memset node. Operation-specific restrictions are outlined below.
    • Kernel nodes:
        • Use of CUDA Dynamic Parallelism is not permitted.
        • Cooperative launches are permitted as long as MPS is not in use.
    • Memcpy nodes:
        • Only copies involving device memory and/or pinned device-mapped host memory are permitted.
        • Copies involving CUDA arrays are not permitted.
        • Both operands must be accessible from the current context, and the current context must match the context of other nodes in the graph.

    Parameters:

    Returns:

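    An end-to-end sketch: build, instantiate with default flags, and launch (cuGraphLaunch and stream calls are covered further below; the tuple-return convention is assumed as before).

        from cuda import cuda

        err, = cuda.cuInit(0)
        err, dev = cuda.cuDeviceGet(0)
        err, ctx = cuda.cuCtxCreate(0, dev)

        err, graph = cuda.cuGraphCreate(0)
        err, _ = cuda.cuGraphAddEmptyNode(graph, [], 0)

        # flags=0: host launch, no auto-free, stream priorities.
        err, graphExec = cuda.cuGraphInstantiate(graph, 0)

        err, stream = cuda.cuStreamCreate(0)
        err, = cuda.cuGraphLaunch(graphExec, stream)
        err, = cuda.cuStreamSynchronize(stream)
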
    cuda.cuda.cuGraphInstantiateWithParams(hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS instantiateParams: Optional[CUDA_GRAPH_INSTANTIATE_PARAMS])

    Creates an executable graph from a graph.

    Instantiates hGraph as an executable graph according to the instantiateParams structure. The graph is validated for any structural constraints or intra-node constraints which were not previously validated. If instantiation is successful, a handle to the instantiated graph is returned in phGraphExec.

    instantiateParams controls the behavior of instantiation and subsequent graph launches, as well as returning more detailed information in the event of an error. CUDA_GRAPH_INSTANTIATE_PARAMS is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    The flags field controls the behavior of instantiation and subsequent graph launches. Valid flags are CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD, CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH and CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY (see cuGraphInstantiate above for their semantics).

    If hGraph contains any allocation or free nodes, there can be at most one executable graph in existence for that graph at a time. An attempt to instantiate a second executable graph before destroying the first with cuGraphExecDestroy will result in an error. The same also applies if hGraph contains any device-updatable kernel nodes.

    If hGraph contains kernels which call device-side cudaGraphLaunch() from multiple contexts, this will result in an error.

    Graphs instantiated for launch on the device have additional restrictions which do not apply to host graphs:

    • The graph’s nodes must reside on a single context.
    • The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes.
    • The graph cannot be empty and must contain at least one kernel, memcpy, or memset node. Operation-specific restrictions are outlined below.
    • Kernel nodes:
        • Use of CUDA Dynamic Parallelism is not permitted.
        • Cooperative launches are permitted as long as MPS is not in use.
    • Memcpy nodes:
        • Only copies involving device memory and/or pinned device-mapped host memory are permitted.
        • Copies involving CUDA arrays are not permitted.
        • Both operands must be accessible from the current context, and the current context must match the context of other nodes in the graph.

    In the event of an error, the result_out and hErrNode_out fields will contain more information about the nature of the error. Possible error reporting includes:

    • CUDA_GRAPH_INSTANTIATE_ERROR, if passed an invalid value or if an unexpected error occurred which is described by the return value of the function. hErrNode_out will be set to NULL.
    • CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE, if the graph structure is invalid. hErrNode_out will be set to one of the offending nodes.
    • CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED, if the graph is instantiated for device launch but contains a node of an unsupported node type, or a node which performs unsupported operations, such as use of CUDA dynamic parallelism within a kernel node. hErrNode_out will be set to this node.
    • CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED, if the graph is instantiated for device launch but a node’s context differs from that of another node. This error can also be returned if a graph is not instantiated for device launch and it contains kernels which call device-side cudaGraphLaunch() from multiple contexts. hErrNode_out will be set to this node.

    If instantiation is successful, result_out will be set to CUDA_GRAPH_INSTANTIATE_SUCCESS, and hErrNode_out will be set to NULL.

    Parameters:

    Returns:

    cuda.cuda.cuGraphExecGetFlags(hGraphExec)

    Query the instantiation flags of an executable graph.

    Returns the flags that were passed to instantiation for the given executable graph. CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD will not be returned by this API as it does not affect the resulting executable graph.

    Parameters:
        hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph to query

    Returns:

    cuda.cuda.cuGraphExecKernelNodeSetParams(hGraphExec, hNode, CUDA_KERNEL_NODE_PARAMS nodeParams: Optional[CUDA_KERNEL_NODE_PARAMS])

    Sets the parameters for a kernel node in the given graphExec.

    Sets the parameters of a kernel node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph. All nodeParams fields may change, but the following restrictions apply to func updates:

    • The owning context of the function cannot change.
    • A node whose function originally did not use CUDA dynamic parallelism cannot be updated to a function which uses CDP.
    • A node whose function originally did not make device-side update calls cannot be updated to a function which makes device-side update calls.
    • If hGraphExec was not instantiated for device launch, a node whose function originally did not use device-side cudaGraphLaunch() cannot be updated to a function which uses device-side cudaGraphLaunch() unless the node resides on the same context as nodes which contained such calls at instantiate-time. If no such calls were present at instantiation, these updates cannot be performed at all.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    If hNode is a device-updatable kernel node, the next upload/launch of hGraphExec will overwrite any previous device-side updates. Additionally, applying host updates to a device-updatable kernel node while it is being updated from the device will result in undefined behavior.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphExecMemcpyNodeSetParams(hGraphExec, hNode, CUDA_MEMCPY3D copyParams: Optional[CUDA_MEMCPY3D], ctx)

    Sets the parameters for a memcpy node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though hNode had contained copyParams at instantiation. hNode must remain in the graph which was used to instantiate hGraphExec. Changed edges to and from hNode are ignored.

    The source and destination memory in copyParams must be allocated from the same contexts as the original source and destination memory. Both the instantiation-time memory operands and the memory operands in copyParams must be 1-dimensional. Zero-length operations are not supported.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Returns CUDA_ERROR_INVALID_VALUE if the memory operands’ mappings changed or either the original or new memory operands are multidimensional.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphExecMemsetNodeSetParams(hGraphExec, hNode, CUDA_MEMSET_NODE_PARAMS memsetParams: Optional[CUDA_MEMSET_NODE_PARAMS], ctx)

    Sets the parameters for a memset node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though hNode had contained memsetParams at instantiation. hNode must remain in the graph which was used to instantiate hGraphExec. Changed edges to and from hNode are ignored.

    Zero sized operations are not supported.

    The new destination pointer in memsetParams must be to the same kind of allocation as the original destination pointer and have the same context association and device mapping as the original destination pointer.

    Both the value and pointer address may be updated. Changing other aspects of the memset (width, height, element size or pitch) may cause the update to be rejected. Specifically, for 2d memsets, all dimension changes are rejected. For 1d memsets, changes in height are explicitly rejected and other changes are opportunistically allowed if the resulting work maps onto the work resources already allocated for the node.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphExecHostNodeSetParams(hGraphExec, hNode, CUDA_HOST_NODE_PARAMS nodeParams: Optional[CUDA_HOST_NODE_PARAMS])

    Sets the parameters for a host node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though hNode had contained nodeParams at instantiation. hNode must remain in the graph which was used to instantiate hGraphExec. Changed edges to and from hNode are ignored.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphExecChildGraphNodeSetParams(hGraphExec, hNode, childGraph)

    Updates node parameters in the child graph node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though the nodes contained in hNode’s graph had the parameters contained in childGraph’s nodes at instantiation. hNode must remain in the graph which was used to instantiate hGraphExec. Changed edges to and from hNode are ignored.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    The topology of childGraph, as well as the node insertion order, must match that of the graph contained in hNode. See cuGraphExecUpdate() for a list of restrictions on what can be updated in an instantiated graph. The update is recursive, so child graph nodes contained within the top level child graph will also be updated.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event)

    Sets the event for an event record node in the given graphExec.

    Sets the event of an event record node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

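    A sketch of retargeting an instantiated record node at a new event without re-instantiating (cuda-python tuple returns and a current context assumed).

        from cuda import cuda

        err, event = cuda.cuEventCreate(0)
        err, graph = cuda.cuGraphCreate(0)
        err, recNode = cuda.cuGraphAddEventRecordNode(graph, [], 0, event)
        err, graphExec = cuda.cuGraphInstantiate(graph, 0)

        # Future launches of graphExec record newEvent; the source graph and
        # its node are left untouched.
        err, newEvent = cuda.cuEventCreate(0)
        err, = cuda.cuGraphExecEventRecordNodeSetEvent(graphExec, recNode, newEvent)
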
    cuda.cuda.cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event)

    Sets the event for an event wait node in the given graphExec.

    Sets the event of an event wait node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS])

    Sets the parameters for an external semaphore signal node in the given graphExec.

    Sets the parameters of an external semaphore signal node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Changing nodeParams->numExtSems is not supported.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_WAIT_NODE_PARAMS])

    Sets the parameters for an external semaphore wait node in the given graphExec.

    Sets the parameters of an external semaphore wait node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Changing nodeParams->numExtSems is not supported.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphNodeSetEnabled(hGraphExec, hNode, unsigned int isEnabled)

    Enables or disables the specified node in the given graphExec.

    Sets hNode to be either enabled or disabled. Disabled nodes are functionally equivalent to empty nodes until they are reenabled. Existing node parameters are not affected by disabling/enabling the node.

    The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    If hNode is a device-updatable kernel node, the next upload/launch of hGraphExec will overwrite any previous device-side updates. Additionally, applying host updates to a device-updatable kernel node while it is being updated from the device will result in undefined behavior.

    Parameters:
    • hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph in which to set the specified node
    • hNode (CUgraphNode or cudaGraphNode_t) – Node from the graph from which graphExec was instantiated
    • isEnabled (unsigned int) – Node is enabled if != 0, otherwise the node is disabled

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    Notes

    Currently only kernel, memset and memcpy nodes are supported.

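    A sketch of toggling a node in an instantiated graph. Here `graphExec` and `node` stand for handles obtained as in the instantiation example above, with `node` of one of the supported types (kernel, memset or memcpy); the fragment is not self-contained.

        from cuda import cuda

        # Disable: the node behaves like an empty node on future launches.
        err, = cuda.cuGraphNodeSetEnabled(graphExec, node, 0)
        err, enabled = cuda.cuGraphNodeGetEnabled(graphExec, node)  # enabled == 0

        # Re-enable; the node's parameters were preserved while disabled.
        err, = cuda.cuGraphNodeSetEnabled(graphExec, node, 1)
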
    cuda.cuda.cuGraphNodeGetEnabled(hGraphExec, hNode)

    Query whether a node in the given graphExec is enabled.

    Sets isEnabled to 1 if hNode is enabled, or 0 if hNode is disabled.

    The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph.

    Parameters:

    Returns:

    Notes

    Currently only kernel, memset and memcpy nodes are supported.

    This function will not reflect device-side updates for device-updatable kernel nodes.

    cuda.cuda.cuGraphUpload(hGraphExec, hStream)

    Uploads an executable graph in a stream.

    Uploads hGraphExec to the device in hStream without executing it. Uploads of the same hGraphExec will be serialized. Each upload is ordered behind both any previous work in hStream and any previous launches of hGraphExec. Uses memory cached by stream to back the allocations owned by hGraphExec.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphLaunch(hGraphExec, hStream)

    Launches an executable graph in a stream.

    Executes hGraphExec in hStream. Only one instance of hGraphExec may be executing at a time. Each launch is ordered behind both any previous work in hStream and any previous launches of hGraphExec. To execute a graph concurrently, it must be instantiated multiple times into multiple executable graphs.

    If any allocations created by hGraphExec remain unfreed (from a previous launch) and hGraphExec was not instantiated with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, the launch will fail with CUDA_ERROR_INVALID_VALUE.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

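    A sketch separating upload from launch so the first launch does not pay the transfer cost; `graphExec` is an executable graph built as in the instantiation example above.

        from cuda import cuda

        err, stream = cuda.cuStreamCreate(0)

        # The upload is ordered in the stream but executes nothing ...
        err, = cuda.cuGraphUpload(graphExec, stream)

        # ... so this launch, ordered behind the upload, starts without it.
        err, = cuda.cuGraphLaunch(graphExec, stream)
        err, = cuda.cuStreamSynchronize(stream)
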
    cuda.cuda.cuGraphExecDestroy(hGraphExec)

    Destroys an executable graph.

    Destroys the executable graph specified by hGraphExec, as well as all of its executable nodes. If the executable graph is in-flight, it will not be terminated, but rather freed asynchronously on completion.

    Parameters:
        hGraphExec (CUgraphExec or cudaGraphExec_t) – Executable graph to destroy

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    cuda.cuda.cuGraphDestroy(hGraph)

    Destroys a graph.

    Destroys the graph specified by hGraph, as well as all of its nodes.

    Parameters:
        hGraph (CUgraph or cudaGraph_t) – Graph to destroy

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    See also

    cuGraphCreate

    cuda.cuda.cuGraphExecUpdate(hGraphExec, hGraph)

    Check whether an executable graph can be updated with a graph and perform the update if possible.

    Updates the node parameters in the instantiated graph specified by hGraphExec with the node parameters in a topologically identical graph specified by hGraph.

    Limitations:

    • Kernel nodes:

        • The owning context of the function cannot change.

        • A node whose function originally did not use CUDA dynamic parallelism cannot be updated to a function which uses CDP.

        • A node whose function originally did not make device-side update calls cannot be updated to a function which makes device-side update calls.

        • A cooperative node cannot be updated to a non-cooperative node, and vice-versa.

        • If the graph was instantiated with CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, the priority attribute cannot change. Equality is checked on the originally requested priority values, before they are clamped to the device’s supported range.

        • If hGraphExec was not instantiated for device launch, a node whose function originally did not use device-side cudaGraphLaunch() cannot be updated to a function which uses device-side cudaGraphLaunch() unless the node resides on the same context as nodes which contained such calls at instantiate-time. If no such calls were present at instantiation, these updates cannot be performed at all.

        • Neither hGraph nor hGraphExec may contain device-updatable kernel nodes.

    • Memset and memcpy nodes:

        • The CUDA device(s) to which the operand(s) was allocated/mapped cannot change.

        • The source/destination memory must be allocated from the same contexts as the original source/destination memory.

        • For 2d memsets, only address and assigned value may be updated.

        • For 1d memsets, updating dimensions is also allowed, but may fail if the resulting operation doesn’t map onto the work resources already allocated for the node.

    • Additional memcpy node restrictions:

        • Changing either the source or destination memory type (i.e. CU_MEMORYTYPE_DEVICE, CU_MEMORYTYPE_ARRAY, etc.) is not supported.

    • External semaphore wait nodes and record nodes:

        • Changing the number of semaphores is not supported.

    • Conditional nodes:

        • Changing node parameters is not supported.

        • Changing parameters of nodes within the conditional body graph is subject to the rules above.

        • Conditional handle flags and default values are updated as part of the graph update.

    Note: The API may add further restrictions in future releases. The return code should always be checked.

    cuGraphExecUpdate sets the result member of resultInfo to CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED under the following conditions:

    • The count of nodes directly in hGraphExec and hGraph differ, in which case resultInfo->errorNode is set to NULL.

    • hGraph has more exit nodes than hGraphExec, in which case resultInfo->errorNode is set to one of the exit nodes in hGraph.

    • A node in hGraph has a different number of dependencies than the node from hGraphExec it is paired with, in which case resultInfo->errorNode is set to the node from hGraph.

    • A node in hGraph has a dependency that does not match with the corresponding dependency of the paired node from hGraphExec. resultInfo->errorNode will be set to the node from hGraph. resultInfo->errorFromNode will be set to the mismatched dependency. The dependencies are paired based on edge order and a dependency does not match when the nodes are already paired based on other edges examined in the graph.

    cuGraphExecUpdate sets the result member of resultInfo to:

    • CU_GRAPH_EXEC_UPDATE_ERROR if passed an invalid value.

    • CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED if the graph topology changed.

    • CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED if the type of a node changed, in which case hErrorNode_out is set to the node from hGraph.

    • CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE if the function changed in an unsupported way (see note above), in which case hErrorNode_out is set to the node from hGraph.

    • CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED if any parameters to a node changed in a way that is not supported, in which case hErrorNode_out is set to the node from hGraph.

    • CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED if any attributes of a node changed in a way that is not supported, in which case hErrorNode_out is set to the node from hGraph.

    • CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED if something about a node is unsupported, like the node’s type or configuration, in which case hErrorNode_out is set to the node from hGraph.

    If the update fails for a reason not listed above, the result member of resultInfo will be set to CU_GRAPH_EXEC_UPDATE_ERROR. If the update succeeds, the result member will be set to CU_GRAPH_EXEC_UPDATE_SUCCESS.

    cuGraphExecUpdate returns CUDA_SUCCESS when the update was performed successfully. It returns CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE if the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.

    Parameters:

    Returns:

    See also

        cuGraphInstantiate

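    A minimal sketch of the try-update-else-reinstantiate pattern this call enables; graph_exec is assumed to have been instantiated earlier from a topologically identical graph, and resultInfo is assumed to come back as the second tuple element, following these bindings' convention:

        from cuda import cuda

        err, result_info = cuda.cuGraphExecUpdate(graph_exec, graph)
        if err == cuda.CUresult.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE:
            # The in-place update violated a constraint listed above;
            # inspect why, then fall back to destroying graph_exec and
            # re-instantiating it from graph.
            print(result_info.result)
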
    cuda.cuda.cuGraphKernelNodeCopyAttributes(dst, src)

    Copies attributes from source node to destination node.

    Copies attributes from source node src to destination node dst. Both nodes must have the same context.

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    See also

        CUaccessPolicyWindow

    cuda.cuda.cuGraphKernelNodeGetAttribute(hNode, attr: CUkernelNodeAttrID)

    Queries node attribute.

    Queries attribute attr from node hNode and stores it in the corresponding member of value_out.

    Parameters:

    Returns:

    See also

        CUaccessPolicyWindow

    cuda.cuda.cuGraphKernelNodeSetAttribute(hNode, attr: CUkernelNodeAttrID, CUkernelNodeAttrValue value: Optional[CUkernelNodeAttrValue])

    Sets node attribute.

    Sets attribute attr on node hNode from corresponding attribute of value.

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:

        CUresult

    See also

        CUaccessPolicyWindow

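    A hedged sketch of round-tripping one attribute through the get/set pair above; node is assumed to be a kernel node added earlier, the priority attribute is used purely as an illustration, and the union member name on the Python wrapper is an assumption:

        from cuda import cuda

        attr_id = cuda.CUkernelNodeAttrID.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY
        err, value = cuda.cuGraphKernelNodeGetAttribute(node, attr_id)
        value.priority = 1  # assumed union member for the priority attribute
        err, = cuda.cuGraphKernelNodeSetAttribute(node, attr_id, value)
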
    cuda.cuda.cuGraphDebugDotPrint(hGraph, char *path, unsigned int flags)

    Write a DOT file describing graph structure.

    Using the provided hGraph, write to path a DOT formatted description of the graph. By default this includes the graph topology, node types, node id, kernel names and memcpy direction. flags can be specified to write more detailed information about each node type such as parameter values, kernel attributes, node and function handles.

    Parameters:

        • hGraph (CUgraph or cudaGraph_t) – The graph to create a DOT file from

        • path (bytes) – The path to write the DOT file to

        • flags (unsigned int) – Flags from CUgraphDebugDot_flags for specifying which additional node information to write

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OPERATING_SYSTEM

    Return type:

        CUresult

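    For instance, dumping a graph for offline inspection might look like this sketch (flags of 0 keeps the default level of detail; the path is a bytes object, as documented above):

        from cuda import cuda

        err, = cuda.cuGraphDebugDotPrint(graph, b"/tmp/graph.dot", 0)
        assert err == cuda.CUresult.CUDA_SUCCESS, err
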
    cuda.cuda.cuUserObjectCreate(ptr, destroy, unsigned int initialRefcount, unsigned int flags)

    Create a user object.

    Create a user object with the specified destructor callback and initial reference count. The initial references are owned by the caller.

    Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they are executed by a shared internal thread. Another thread may be signaled to perform such actions, if it does not block forward progress of tasks scheduled through CUDA.

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    Parameters:

        • ptr (Any) – The pointer to pass to the destroy function

        • destroy (CUhostFn) – Callback to free the user object when it is no longer in use

        • initialRefcount (unsigned int) – The initial refcount to create the object with, typically 1. The initial references are owned by the calling thread.

        • flags (unsigned int) – Currently it is required to pass CU_USER_OBJECT_NO_DESTRUCTOR_SYNC, which is the only defined flag. This indicates that the destroy callback cannot be waited on by any CUDA API. Users requiring synchronization of the callback should signal its completion manually.

    Returns:

    cuda.cuda.cuUserObjectRetain(object, unsigned int count)

    Retain a reference to a user object.

    Retains new references to a user object. The new references are owned by the caller.

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    Parameters:

        • object (CUuserObject) – The object to retain

        • count (unsigned int) – The number of references to retain, typically 1. Must be nonzero and not larger than INT_MAX.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    cuda.cuda.cuUserObjectRelease(object, unsigned int count)

    Release a reference to a user object.

    Releases user object references owned by the caller. The object’s destructor is invoked if the reference count reaches zero.

    It is undefined behavior to release references not owned by the caller, or to use a user object handle after all references are released.

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    Parameters:

        • object (CUuserObject) – The object to release

        • count (unsigned int) – The number of references to release, typically 1. Must be nonzero and not larger than INT_MAX.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    cuda.cuda.cuGraphRetainUserObject(graph, object, unsigned int count, unsigned int flags)

    Retain a reference to a user object from a graph.

    Creates or moves user object references that will be owned by a CUDA graph.

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    Parameters:

        • graph (CUgraph or cudaGraph_t) – The graph to associate the reference with

        • object (CUuserObject) – The user object to retain a reference for

        • count (unsigned int) – The number of references to add to the graph, typically 1. Must be nonzero and not larger than INT_MAX.

        • flags (unsigned int) – The optional flag CU_GRAPH_USER_OBJECT_MOVE transfers references from the calling thread, rather than creating new references. Pass 0 to create new references.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

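    A sketch of the ownership handoff that CU_GRAPH_USER_OBJECT_MOVE enables: the calling thread's reference moves into the graph, so the caller no longer releases it. user_obj is assumed to come from an earlier cuUserObjectCreate call, and the Python enum path for the flag is an assumption:

        from cuda import cuda

        # Move one reference from the calling thread into the graph.
        move = cuda.CUuserObjectRetain_flags.CU_GRAPH_USER_OBJECT_MOVE
        err, = cuda.cuGraphRetainUserObject(graph, user_obj, 1, move)
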
    cuda.cuda.cuGraphReleaseUserObject(graph, object, unsigned int count)

    Release a user object reference from a graph.

    Releases user object references owned by a graph.

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    Parameters:

        • graph (CUgraph or cudaGraph_t) – The graph that will release the reference

        • object (CUuserObject) – The user object to release a reference for

        • count (unsigned int) – The number of references to release, typically 1. Must be nonzero and not larger than INT_MAX.

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    cuda.cuda.cuGraphAddNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])

    Adds a node of arbitrary type to a graph.

    Creates a new node in hGraph described by nodeParams with numDependencies dependencies specified via dependencies. numDependencies may be 0. dependencies may be null if numDependencies is 0. dependencies may not have any duplicate entries.

    nodeParams is a tagged union. The node type should be specified in the typename field, and type-specific parameters in the corresponding union member. All unused bytes - that is, reserved0 and all bytes past the utilized union member - must be set to zero. It is recommended to use brace initialization or memset to ensure all bytes are initialized.

    Note that for some node types, nodeParams may contain “out parameters” which are modified during the call, such as nodeParams->alloc.dptr.

    A handle to the new node will be returned in phGraphNode.

    Parameters:

        • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

        • dependencies (List[CUgraphNode]) – Dependencies of the node

        • numDependencies (size_t) – Number of dependencies

        • nodeParams (CUgraphNodeParams) – Specification of the node

    Returns:

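    As a deliberately trivial sketch of the tagged union described above, the following adds a dependency-free empty node; the typename field name follows the text above (the underlying C struct calls this member type), and the enum spelling is an assumption:

        from cuda import cuda

        params = cuda.CUgraphNodeParams()
        # Tag the union: an empty node needs no type-specific members,
        # and the wrapper zero-initializes the unused bytes.
        params.typename = cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EMPTY
        err, node = cuda.cuGraphAddNode(graph, None, 0, params)
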
    cuda.cuda.cuGraphAddNode_v2(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], dependencyData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])

    Adds a node of arbitrary type to a graph (12.3+)

    Creates a new node in hGraph described by nodeParams with numDependencies dependencies specified via dependencies. numDependencies may be 0. dependencies may be null if numDependencies is 0. dependencies may not have any duplicate entries.

    nodeParams is a tagged union. The node type should be specified in the typename field, and type-specific parameters in the corresponding union member. All unused bytes - that is, reserved0 and all bytes past the utilized union member - must be set to zero. It is recommended to use brace initialization or memset to ensure all bytes are initialized.

    Note that for some node types, nodeParams may contain “out parameters” which are modified during the call, such as nodeParams->alloc.dptr.

    A handle to the new node will be returned in phGraphNode.

    Parameters:

        • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

        • dependencies (List[CUgraphNode]) – Dependencies of the node

        • dependencyData (List[CUgraphEdgeData]) – Optional edge data for the dependencies. If NULL, the data is assumed to be default (zeroed) for all dependencies.

        • numDependencies (size_t) – Number of dependencies

        • nodeParams (CUgraphNodeParams) – Specification of the node

    Returns:

    cuda.cuda.cuGraphNodeSetParams(hNode, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])

    Updates a graph node’s parameters.

    Sets the parameters of graph node hNode to nodeParams. The node type specified by nodeParams->type must match the type of hNode. nodeParams must be fully initialized and all unused bytes (reserved, padding) zeroed.

    Modifying parameters is not supported for node types CU_GRAPH_NODE_TYPE_MEM_ALLOC and CU_GRAPH_NODE_TYPE_MEM_FREE.

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    cuda.cuda.cuGraphExecNodeSetParams(hGraphExec, hNode, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])

    Updates a graph node’s parameters in an instantiated graph.

    Sets the parameters of a node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph from which the executable graph was instantiated. hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    Allowed changes to parameters on executable graphs are as follows:

    View CUDA Toolkit Documentation for a table example

    Parameters:

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

    Return type:

        CUresult

    cuda.cuda.cuGraphConditionalHandleCreate(hGraph, ctx, unsigned int defaultLaunchValue, unsigned int flags)

    Create a conditional handle.

    Creates a conditional handle associated with hGraph.

    The conditional handle must be associated with a conditional node in this graph or one of its children.

    Handles not associated with a conditional node may cause graph instantiation to fail.

    Handles can only be set from the context with which they are associated.

    Parameters:

        • hGraph (CUgraph or cudaGraph_t) – Graph which will contain the conditional node using this handle.

        • ctx (CUcontext) – Context for the handle and associated conditional node.

        • defaultLaunchValue (unsigned int) – Optional initial value for the conditional variable.

        • flags (unsigned int) – Currently must be CU_GRAPH_COND_ASSIGN_DEFAULT or 0.

    Returns:

    See also

        cuGraphAddNode


    Occupancy

    This section describes the occupancy calculation functions of the low-level CUDA driver application programming interface.

    cuda.cuda.cuOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dynamicSMemSize)

    Returns occupancy of a function.

    Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor.

    Note that the API can also be used with context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to use for calculations will be the current context.

    Parameters:

        • func (CUfunction) – Kernel for which occupancy is calculated

        • blockSize (int) – Block size the kernel is intended to be launched with

        • dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes

    Returns:

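    A sketch of a basic occupancy query; kernel is assumed to be a CUfunction obtained earlier (for example via cuModuleGetFunction or, as noted above, a CUkernel handle cast to CUfunction):

        from cuda import cuda

        # 256 threads per block, no dynamic shared memory.
        err, num_blocks = cuda.cuOccupancyMaxActiveBlocksPerMultiprocessor(
            kernel, 256, 0)
        print("max active blocks per SM:", num_blocks)
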
    cuda.cuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, size_t dynamicSMemSize, unsigned int flags)

    Returns occupancy of a function.

    Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor.

    The Flags parameter controls how special cases are handled. The valid flags are CU_OCCUPANCY_DEFAULT and CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, described under cuOccupancyMaxPotentialBlockSizeWithFlags below.

    Note that the API can also be used with context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to use for calculations will be the current context.

    Parameters:

        • func (CUfunction) – Kernel for which occupancy is calculated

        • blockSize (int) – Block size the kernel is intended to be launched with

        • dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes

        • flags (unsigned int) – Requested behavior for the occupancy calculator

    Returns:

    cuda.cuda.cuOccupancyMaxPotentialBlockSize(func, blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit)

    Suggest a launch configuration with reasonable occupancy.

    Returns in *blockSize a reasonable block size that can achieve the maximum occupancy (or, the maximum number of active warps with the fewest blocks per multiprocessor), and in *minGridSize the minimum grid size to achieve the maximum occupancy.

    If blockSizeLimit is 0, the configurator will use the maximum block size permitted by the device / function instead.

    If per-block dynamic shared memory allocation is not needed, the user should leave both blockSizeToDynamicSMemSize and dynamicSMemSize as 0.

    If per-block dynamic shared memory allocation is needed, then if the dynamic shared memory size is constant regardless of block size, the size should be passed through dynamicSMemSize, and blockSizeToDynamicSMemSize should be NULL.

    Otherwise, if the per-block dynamic shared memory size varies with different block sizes, the user needs to provide a unary function through blockSizeToDynamicSMemSize that computes the dynamic shared memory needed by func for any given block size. dynamicSMemSize is ignored. An example signature is:

    View CUDA Toolkit Documentation for a C++ code example

    Note that the API can also be used with context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to use for calculations will be the current context.

    Parameters:

        • func (CUfunction) – Kernel for which launch configuration is calculated

        • blockSizeToDynamicSMemSize (CUoccupancyB2DSize) – A function that calculates how much per-block dynamic shared memory func uses based on the block size

        • dynamicSMemSize (size_t) – Dynamic shared memory usage intended, in bytes

        • blockSizeLimit (int) – The maximum block size func is designed to handle

    Returns:

    See also

        cudaOccupancyMaxPotentialBlockSize

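    A sketch of sizing a launch from this query for a kernel with no dynamic shared memory (both the callback and the size left as 0, per the text above); n is an assumed element count:

        from cuda import cuda

        err, min_grid, block_size = cuda.cuOccupancyMaxPotentialBlockSize(
            kernel, 0, 0, 0)
        grid = (n + block_size - 1) // block_size  # blocks needed to cover n
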
    cuda.cuda.cuOccupancyMaxPotentialBlockSizeWithFlags(func, blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags)

    Suggest a launch configuration with reasonable occupancy.

    An extended version of cuOccupancyMaxPotentialBlockSize. In addition to arguments passed to cuOccupancyMaxPotentialBlockSize, cuOccupancyMaxPotentialBlockSizeWithFlags also takes a Flags parameter.

    The Flags parameter controls how special cases are handled. The valid flags are:

    • CU_OCCUPANCY_DEFAULT, which maintains the default behavior as cuOccupancyMaxPotentialBlockSize;

    • CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the default behavior on platforms where global caching affects occupancy. On such platforms, the launch configuration that produces maximal occupancy might not support global caching. Setting CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE guarantees that the produced launch configuration is global caching compatible at a potential cost of occupancy. More information can be found about this feature in the “Unified L1/Texture Cache” section of the Maxwell tuning guide.

    Note that the API can also be used with context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to use for calculations will be the current context.

    Parameters:

        • func (CUfunction) – Kernel for which launch configuration is calculated

        • blockSizeToDynamicSMemSize (CUoccupancyB2DSize) – A function that calculates how much per-block dynamic shared memory func uses based on the block size

        • dynamicSMemSize (size_t) – Dynamic shared memory usage intended, in bytes

        • blockSizeLimit (int) – The maximum block size func is designed to handle

        • flags (unsigned int) – Options

    Returns:

    See also

        cudaOccupancyMaxPotentialBlockSizeWithFlags

    cuda.cuda.cuOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize)

    Returns dynamic shared memory available per block when launching numBlocks blocks on SM.

    Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow numBlocks blocks per SM.

    Note that the API can also be used with context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to use for calculations will be the current context.

    Parameters:

        • func (CUfunction) – Kernel function for which occupancy is calculated

        • numBlocks (int) – Number of blocks to fit on SM

        • blockSize (int) – Size of the blocks

    Returns:

    cuda.cuda.cuOccupancyMaxPotentialClusterSize(func, CUlaunchConfig config: Optional[CUlaunchConfig])

    Given the kernel function (func) and launch configuration (config), return the maximum cluster size in *clusterSize.

    The cluster dimensions in config are ignored. If func has a required cluster size set (see cudaFuncGetAttributes / cuFuncGetAttribute), *clusterSize will reflect the required cluster size.

    By default this function will always return a value that’s portable on future hardware. A higher value may be returned if the kernel function allows non-portable cluster sizes.

    This function will respect the compile time launch bounds.

    Note that the API can also be used with context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to use for calculations will either be taken from the specified stream config->hStream or the current context in case of NULL stream.

    Parameters:

        • func (CUfunction) – Kernel function for which maximum cluster size is calculated

        • config (CUlaunchConfig) – Launch configuration for the given kernel function

    Returns:

    cuda.cuda.cuOccupancyMaxActiveClusters(func, CUlaunchConfig config: Optional[CUlaunchConfig])

    Given the kernel function (func) and launch configuration (config), return the maximum number of clusters that could co-exist on the target device in *numClusters.

    If the function has required cluster size already set (see cudaFuncGetAttributes / cuFuncGetAttribute), the cluster size from config must either be unspecified or match the required size. Without required sizes, the cluster size must be specified in config, else the function will return an error.

    Note that various attributes of the kernel function may affect occupancy calculation. The runtime environment may affect how the hardware schedules the clusters, so the calculated occupancy is not guaranteed to be achievable.

    Note that the API can also be used with context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to use for calculations will either be taken from the specified stream config->hStream or the current context in case of NULL stream.

    Parameters:

        • func (CUfunction) – Kernel function for which maximum number of clusters are calculated

        • config (CUlaunchConfig) – Launch configuration for the given kernel function

    Returns:


    Texture Object Management

    This section describes the texture object management functions of the low-level CUDA driver application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher.

    cuda.cuda.cuTexObjectCreate(CUDA_RESOURCE_DESC pResDesc: Optional[CUDA_RESOURCE_DESC], CUDA_TEXTURE_DESC pTexDesc: Optional[CUDA_TEXTURE_DESC], CUDA_RESOURCE_VIEW_DESC pResViewDesc: Optional[CUDA_RESOURCE_VIEW_DESC])

    Creates a texture object.

    Creates a texture object and returns it in pTexObject. pResDesc describes the data to texture from. pTexDesc describes how the data should be sampled. pResViewDesc is an optional argument that specifies an alternate format for the data described by pResDesc, and also describes the subresource region to restrict access to when texturing. pResViewDesc can only be specified if the type of resource is a CUDA array or a CUDA mipmapped array not in a block compressed format.

    Texture objects are only supported on devices of compute capability 3.0 or higher. Additionally, a texture object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    The CUDA_RESOURCE_DESC structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:

    • resType specifies the type of resource to texture from. CUresourceType is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    If resType is set to CU_RESOURCE_TYPE_ARRAY, CUDA_RESOURCE_DESC::res::array::hArray must be set to a valid CUDA array handle.

    If resType is set to CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray must be set to a valid CUDA mipmapped array handle.

    If resType is set to CU_RESOURCE_TYPE_LINEAR, CUDA_RESOURCE_DESC::res::linear::devPtr must be set to a valid device pointer, that is aligned to CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. CUDA_RESOURCE_DESC::res::linear::format and CUDA_RESOURCE_DESC::res::linear::numChannels describe the format of each component and the number of components per array element. CUDA_RESOURCE_DESC::res::linear::sizeInBytes specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels)).

    If resType is set to CU_RESOURCE_TYPE_PITCH2D, CUDA_RESOURCE_DESC::res::pitch2D::devPtr must be set to a valid device pointer, that is aligned to CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. CUDA_RESOURCE_DESC::res::pitch2D::format and CUDA_RESOURCE_DESC::res::pitch2D::numChannels describe the format of each component and the number of components per array element. CUDA_RESOURCE_DESC::res::pitch2D::width and CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH.

    • flags must be set to zero.

    The CUDA_TEXTURE_DESC struct is defined as

    View CUDA Toolkit Documentation for a C++ code example

    where

    • addressMode specifies the addressing mode for each dimension of the texture data. CUaddress_mode is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • This is ignored if resType is CU_RESOURCE_TYPE_LINEAR. Also, if the flag CU_TRSF_NORMALIZED_COORDINATES is not set, the only supported address mode is CU_TR_ADDRESS_MODE_CLAMP.

    • filterMode specifies the filtering mode to be used when fetching from the texture. CUfilter_mode is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • This is ignored if resType is CU_RESOURCE_TYPE_LINEAR.

    • flags can be any combination of the following:

        • CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of having the texture promote integer data to floating point data in the range [0, 1]. Note that texture with 32-bit integer format would not be promoted, regardless of whether or not this flag is specified.

        • CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior of having the texture coordinates range from [0, Dim) where Dim is the width or height of the CUDA array. Instead, the texture coordinates [0, 1.0) reference the entire breadth of the array dimension. Note that for CUDA mipmapped arrays, this flag has to be set.

        • CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear filtering optimizations. Trilinear optimizations improve texture filtering performance by allowing bilinear filtering on textures in scenarios where it can closely approximate the expected results.

        • CU_TRSF_SEAMLESS_CUBEMAP, which enables seamless cube map filtering. This flag can only be specified if the underlying resource is a CUDA array or a CUDA mipmapped array that was created with the flag CUDA_ARRAY3D_CUBEMAP. When seamless cube map filtering is enabled, texture address modes specified by addressMode are ignored. Instead, if the filterMode is set to CU_TR_FILTER_MODE_POINT the address mode CU_TR_ADDRESS_MODE_CLAMP will be applied for all dimensions. If the filterMode is set to CU_TR_FILTER_MODE_LINEAR seamless cube map filtering will be performed when sampling along the cube face borders.

    • maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be clamped to the range [1,16].

    • mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels.

    • mipmapLevelBias specifies the offset to be applied to the calculated mipmap level.

    • minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to.

    • maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to.

    The CUDA_RESOURCE_VIEW_DESC struct is defined as

    View CUDA Toolkit Documentation for a C++ code example

    where:

    • format specifies how the data contained in the CUDA array or CUDA mipmapped array should be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base format of CU_AD_FORMAT_UNSIGNED_INT32 with 2 or 4 channels, depending on the block compressed format. For example, BC1 and BC4 require the underlying CUDA array to have a format of CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base format but with 4 channels.

    • width specifies the new width of the texture data. If the resource view format is a block compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, this value has to be equal to that of the original resource.

    • height specifies the new height of the texture data. If the resource view format is a block compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats, this value has to be equal to that of the original resource.

    • depth specifies the new depth of the texture data. This value has to be equal to that of the original resource.

    • firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. For non-mipmapped resources, this value has to be zero. minMipmapLevelClamp and maxMipmapLevelClamp will be relative to this value. For example, if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, then the actual minimum mipmap level clamp will be 3.2.

    • lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value has to be zero.

    • firstLayer specifies the first layer index for layered textures. This will be the new layer zero. For non-layered resources, this value has to be zero.

    • lastLayer specifies the last layer index for layered textures. For non-layered resources, this value has to be zero.

    Parameters:

    Returns:

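    To make the descriptor plumbing concrete, here is a hedged sketch of creating a texture object from an existing CUDA array handle cu_array; the struct and field spellings follow this module's descriptor types, and the optional resource view descriptor is omitted by passing None:

        from cuda import cuda

        res_desc = cuda.CUDA_RESOURCE_DESC()
        res_desc.resType = cuda.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
        res_desc.res.array.hArray = cu_array  # flags defaults to zero

        tex_desc = cuda.CUDA_TEXTURE_DESC()
        tex_desc.filterMode = cuda.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR

        err, tex_obj = cuda.cuTexObjectCreate(res_desc, tex_desc, None)
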
    cuda.cuda.cuTexObjectDestroy(texObject)

    Destroys a texture object.

    Destroys the texture object specified by texObject.

    Parameters:

        texObject (CUtexObject) – Texture object to destroy

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    cuda.cuda.cuTexObjectGetResourceDesc(texObject)

    Returns a texture object’s resource descriptor.

    Returns the resource descriptor for the texture object specified by texObject.

    Parameters:

        texObject (CUtexObject) – Texture object

    Returns:

    cuda.cuda.cuTexObjectGetTextureDesc(texObject)

    Returns a texture object’s texture descriptor.

    Returns the texture descriptor for the texture object specified by texObject.

    Parameters:

        texObject (CUtexObject) – Texture object

    Returns:

    cuda.cuda.cuTexObjectGetResourceViewDesc(texObject)

    Returns a texture object’s resource view descriptor.

    Returns the resource view descriptor for the texture object specified by texObject. If no resource view was set for texObject, CUDA_ERROR_INVALID_VALUE is returned.

    Parameters:

        texObject (CUtexObject) – Texture object

    Returns:


    Surface Object Management

    This section describes the surface object management functions of the low-level CUDA driver application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher.

    cuda.cuda.cuSurfObjectCreate(CUDA_RESOURCE_DESC pResDesc: Optional[CUDA_RESOURCE_DESC])

    Creates a surface object.

    Creates a surface object and returns it in pSurfObject. pResDesc describes the data to perform surface load/stores on. resType must be CU_RESOURCE_TYPE_ARRAY and CUDA_RESOURCE_DESC::res::array::hArray must be set to a valid CUDA array handle. flags must be set to zero.

    Surface objects are only supported on devices of compute capability 3.0 or higher. Additionally, a surface object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    Parameters:

        pResDesc (CUDA_RESOURCE_DESC) – Resource descriptor

    Returns:

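    Analogous to the texture sketch above, a surface object needs only the resource descriptor; cu_array is assumed to be a CUDA array created with the CUDA_ARRAY3D_SURFACE_LDST flag so that surface load/store is permitted:

        from cuda import cuda

        res_desc = cuda.CUDA_RESOURCE_DESC()
        res_desc.resType = cuda.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
        res_desc.res.array.hArray = cu_array  # flags left at zero

        err, surf_obj = cuda.cuSurfObjectCreate(res_desc)
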
    cuda.cuda.cuSurfObjectDestroy(surfObject)

    Destroys a surface object.

    Destroys the surface object specified by surfObject.

    Parameters:

        surfObject (CUsurfObject) – Surface object to destroy

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult

    cuda.cuda.cuSurfObjectGetResourceDesc(surfObject)

    Returns a surface object’s resource descriptor.

    Returns the resource descriptor for the surface object specified by surfObject.

    Parameters:

        surfObject (CUsurfObject) – Surface object

    Returns:


    Tensor Map Object Management

    This section describes the tensor map object management functions of the low-level CUDA driver application programming interface. The tensor map API is only supported on devices of compute capability 9.0 or higher.

    cuda.cuda.cuTensorMapEncodeTiled(tensorDataType: CUtensorMapDataType, tensorRank, globalAddress, globalDim: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], globalStrides: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], boxDim: Optional[Tuple[cuuint32_t] | List[cuuint32_t]], elementStrides: Optional[Tuple[cuuint32_t] | List[cuuint32_t]], interleave: CUtensorMapInterleave, swizzle: CUtensorMapSwizzle, l2Promotion: CUtensorMapL2promotion, oobFill: CUtensorMapFloatOOBfill)

    Create a tensor map descriptor object representing tiled memory region.

    Creates a descriptor for a Tensor Memory Access (TMA) object specified by the parameters describing a tiled region and returns it in tensorMap.

    Tensor map objects are only supported on devices of compute capability 9.0 or higher. Additionally, a tensor map object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    The parameters passed are bound to the following requirements:

    • tensorMap address must be aligned to 64 bytes.

    • tensorDataType has to be an enum from CUtensorMapDataType which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • tensorRank must be non-zero and less than or equal to the maximum supported dimensionality of 5. If interleave is not CU_TENSOR_MAP_INTERLEAVE_NONE, then tensorRank must additionally be greater than or equal to 3.

    • globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when interleave is CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise.

    • globalDim array, which specifies tensor size of each of the tensorRank dimensions, must be non-zero and less than or equal to 2^32.

    • globalStrides array, which specifies tensor stride of each of the lower tensorRank - 1 dimensions in bytes, must be a multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when interleave is CU_TENSOR_MAP_INTERLEAVE_32B. Each following dimension specified includes previous dimension stride:

    • View CUDA Toolkit Documentation for a C++ code example

    • boxDim array, which specifies number of elements to be traversed along each of the tensorRank dimensions, must be non-zero and less than or equal to 256. When interleave is CU_TENSOR_MAP_INTERLEAVE_NONE, { boxDim[0] * elementSizeInBytes(tensorDataType) } must be a multiple of 16 bytes.

    • elementStrides array, which specifies the iteration step along each of the tensorRank dimensions, must be non-zero and less than or equal to 8. Note that when interleave is CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since TMA doesn’t support the stride for dimension zero. When all elements of the elementStrides array are one, boxDim specifies the number of elements to load. However, if elementStrides[i] is not equal to one, then TMA loads ceil(boxDim[i] / elementStrides[i]) number of elements along the i-th dimension. To load N elements along the i-th dimension, boxDim[i] must be set to N * elementStrides[i].

    • interleave specifies the interleaved layout of type CUtensorMapInterleave, which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 bytes per channel, or NC/16HWC16 where C16 uses 32 bytes. When interleave is CU_TENSOR_MAP_INTERLEAVE_NONE and swizzle is not CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension (computed as boxDim[0] multiplied by the element size derived from tensorDataType) must be less than or equal to the swizzle size.

        • CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32.

        • CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64.

        • CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128.

    • swizzle, which specifies the shared memory bank swizzling pattern, has to be of type CUtensorMapSwizzle which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • Data are organized in a specific order in global memory; however, this may not match the order in which the application accesses data in shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this problem, data can be loaded to shared memory with shuffling across shared memory banks. When interleave is CU_TENSOR_MAP_INTERLEAVE_32B, swizzle must be CU_TENSOR_MAP_SWIZZLE_32B. Other interleave modes can have any swizzling pattern.

    • l2Promotion specifies the L2 fetch size, which indicates the byte granularity at which L2 requests are filled from DRAM. It must be of type CUtensorMapL2promotion, which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type CUtensorMapFloatOOBfill which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • Note that CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when tensorDataType represents a floating-point data type.

    Parameters:

        • tensorDataType (CUtensorMapDataType) – Tensor data type

        • tensorRank (Any) – Dimensionality of tensor

        • globalAddress (Any) – Starting address of memory region described by tensor

        • globalDim (List[cuuint64_t]) – Array containing tensor size (number of elements) along each of the tensorRank dimensions

        • globalStrides (List[cuuint64_t]) – Array containing stride size (in bytes) along each of the tensorRank - 1 dimensions

        • boxDim (List[cuuint32_t]) – Array containing traversal box size (number of elements) along each of the tensorRank dimensions. Specifies how many elements to be traversed along each tensor dimension.

        • elementStrides (List[cuuint32_t]) – Array containing traversal stride in each of the tensorRank dimensions

        • interleave (CUtensorMapInterleave) – Type of interleaved layout the tensor addresses

        • swizzle (CUtensorMapSwizzle) – Bank swizzling pattern inside shared memory

        • l2Promotion (CUtensorMapL2promotion) – L2 promotion size

        • oobFill (CUtensorMapFloatOOBfill) – Indicate whether zero or special NaN constant must be used to fill out-of-bound elements

    Returns:

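    As a sketch of how the constraints above fit together for a simple row-major 2-D float tensor (rank 2, no interleave or swizzle): dptr is an assumed 16-byte-aligned device allocation, width and height its element dimensions, and pitch_bytes its row stride (a multiple of 16), with the list arguments wrapped in the cuuint types named in the signature:

        from cuda import cuda

        err, tensor_map = cuda.cuTensorMapEncodeTiled(
            cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32,
            2,                                              # tensorRank
            dptr,                                           # globalAddress
            [cuda.cuuint64_t(width), cuda.cuuint64_t(height)],  # globalDim
            [cuda.cuuint64_t(pitch_bytes)],     # globalStrides: rank - 1 entries
            [cuda.cuuint32_t(64), cuda.cuuint32_t(64)],     # boxDim: 64*4 B is a multiple of 16
            [cuda.cuuint32_t(1), cuda.cuuint32_t(1)],       # elementStrides
            cuda.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_NONE,
            cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_NONE,
            cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_NONE,
            cuda.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE)
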
    cuda.cuda.cuTensorMapEncodeIm2col(tensorDataType: CUtensorMapDataType, tensorRank, globalAddress, globalDim: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], globalStrides: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], pixelBoxLowerCorner: Optional[Tuple[int] | List[int]], pixelBoxUpperCorner: Optional[Tuple[int] | List[int]], channelsPerPixel, pixelsPerColumn, elementStrides: Optional[Tuple[cuuint32_t] | List[cuuint32_t]], interleave: CUtensorMapInterleave, swizzle: CUtensorMapSwizzle, l2Promotion: CUtensorMapL2promotion, oobFill: CUtensorMapFloatOOBfill)

    Create a tensor map descriptor object representing im2col memory region.

    Creates a descriptor for a Tensor Memory Access (TMA) object specified by the parameters describing an im2col memory layout and returns it in tensorMap.

    Tensor map objects are only supported on devices of compute capability 9.0 or higher. Additionally, a tensor map object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    The parameters passed are bound to the following requirements:

    • tensorMap address must be aligned to 64 bytes.

    • tensorDataType has to be an enum from CUtensorMapDataType which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • tensorRank, which specifies the number of tensor dimensions, must be 3, 4, or 5.

    • globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when interleave is CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise.

    • globalDim array, which specifies tensor size of each of the tensorRank dimensions, must be non-zero and less than or equal to 2^32.

    • globalStrides array, which specifies tensor stride of each of the lower tensorRank - 1 dimensions in bytes, must be a multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when interleave is CU_TENSOR_MAP_INTERLEAVE_32B. Each following dimension specified includes previous dimension stride:

    • View CUDA Toolkit Documentation for a C++ code example

    • pixelBoxLowerCorner array specifies the coordinate offsets {D, H, W} of the bounding box from the top/left/front corner. The number of offsets and their precision depend on the tensor dimensionality:

        • When tensorRank is 3, one signed offset within range [-32768, 32767] is supported.

        • When tensorRank is 4, two signed offsets each within range [-128, 127] are supported.

        • When tensorRank is 5, three offsets each within range [-16, 15] are supported.

    • pixelBoxUpperCorner array specifies the coordinate offsets {D, H, W} of the bounding box from the bottom/right/back corner. The number of offsets and their precision depend on the tensor dimensionality:

        • When tensorRank is 3, one signed offset within range [-32768, 32767] is supported.

        • When tensorRank is 4, two signed offsets each within range [-128, 127] are supported.

        • When tensorRank is 5, three offsets each within range [-16, 15] are supported. The bounding box specified by pixelBoxLowerCorner and pixelBoxUpperCorner must have non-zero area.

    • channelsPerPixel, which specifies the number of elements which must be accessed along the C dimension, must be less than or equal to 256.

    • pixelsPerColumn, which specifies the number of elements that must be accessed along the {N, D, H, W} dimensions, must be less than or equal to 1024.

    • elementStrides array, which specifies the iteration step along each of the tensorRank dimensions, must be non-zero and less than or equal to 8. Note that when interleave is CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since TMA doesn’t support the stride for dimension zero. When all elements of the elementStrides array are one, boxDim specifies the number of elements to load. However, if elementStrides[i] is not equal to one for some i, then TMA loads ceil(boxDim[i] / elementStrides[i]) number of elements along the i-th dimension. To load N elements along the i-th dimension, boxDim[i] must be set to N * elementStrides[i].

    • interleave specifies the interleaved layout of type CUtensorMapInterleave, which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 bytes per channel, or NC/16HWC16 where C16 uses 32 bytes. When interleave is CU_TENSOR_MAP_INTERLEAVE_NONE and swizzle is not CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension (computed as boxDim[0] multiplied by the element size derived from tensorDataType) must be less than or equal to the swizzle size.

        • CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32.

        • CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64.

        • CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128.

    • swizzle, which specifies the shared memory bank swizzling pattern, has to be of type CUtensorMapSwizzle which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • Data are organized in a specific order in global memory; however, this may not match the order in which the application accesses data in shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this problem, data can be loaded to shared memory with shuffling across shared memory banks. When interleave is CU_TENSOR_MAP_INTERLEAVE_32B, swizzle must be CU_TENSOR_MAP_SWIZZLE_32B. Other interleave modes can have any swizzling pattern.

    • l2Promotion specifies the L2 fetch size, which indicates the byte granularity at which L2 requests are filled from DRAM. It must be of type CUtensorMapL2promotion, which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type CUtensorMapFloatOOBfill which is defined as:

    • View CUDA Toolkit Documentation for a C++ code example

    • Note that CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when tensorDataType represents a floating-point data type.

    Parameters:

        • tensorDataType (CUtensorMapDataType) – Tensor data type

        • tensorRank (Any) – Dimensionality of tensor; must be at least 3

        • globalAddress (Any) – Starting address of memory region described by tensor

        • globalDim (List[cuuint64_t]) – Array containing tensor size (number of elements) along each of the tensorRank dimensions

        • globalStrides (List[cuuint64_t]) – Array containing stride size (in bytes) along each of the tensorRank - 1 dimensions

        • pixelBoxLowerCorner (List[int]) – Array containing DHW dimensions of lower box corner

        • pixelBoxUpperCorner (List[int]) – Array containing DHW dimensions of upper box corner

        • channelsPerPixel (Any) – Number of channels per pixel

        • pixelsPerColumn (Any) – Number of pixels per column

        • elementStrides (List[cuuint32_t]) – Array containing traversal stride in each of the tensorRank dimensions

        • interleave (CUtensorMapInterleave) – Type of interleaved layout the tensor addresses

        • swizzle (CUtensorMapSwizzle) – Bank swizzling pattern inside shared memory

        • l2Promotion (CUtensorMapL2promotion) – L2 promotion size

        • oobFill (CUtensorMapFloatOOBfill) – Indicate whether zero or special NaN constant will be used to fill out-of-bound elements

    Returns:

    cuda.cuda.cuTensorMapReplaceAddress(CUtensorMap tensorMap: Optional[CUtensorMap], globalAddress)

    Modify an existing tensor map descriptor with an updated global address.

    Modifies the descriptor for the Tensor Memory Access (TMA) object passed in tensorMap with an updated globalAddress.

    Tensor map objects are only supported on devices of compute capability 9.0 or higher. Additionally, a tensor map object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    Parameters:

        • tensorMap (CUtensorMap) – Tensor map object to modify

        • globalAddress (Any) – Starting address of memory region described by tensor, must follow previous alignment requirements

    Returns:

        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

        CUresult


Peer Context Memory Access#

This section describes the direct peer context memory access functions of the low-level CUDA driver application programming interface.

cuda.cuda.cuDeviceCanAccessPeer(dev, peerDev)#

    Queries if a device may directly access a peer device’s memory.

    Returns in *canAccessPeer a value of 1 if contexts on dev are capable of directly accessing memory from contexts on peerDev and 0 otherwise. If direct access of peerDev from dev is possible, then access may be enabled on two specific contexts by calling cuCtxEnablePeerAccess().

    Parameters:

    • dev (CUdevice) – Device from which allocations on peerDev are to be directly accessed.

    • peerDev (CUdevice) – Device on which the allocations to be directly accessed by dev reside.

    Returns:

cuda.cuda.cuCtxEnablePeerAccess(peerContext, unsigned int Flags)#

    Enables direct access to memory allocations in a peer context.

    If both the current context and peerContext are on devices which support unified addressing (as may be queried using CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and the same major compute capability, then on success all allocations from peerContext will immediately be accessible by the current context. See Unified Addressing for additional details.

    Note that access granted by this call is unidirectional and that in order to access memory from the current context in peerContext, a separate symmetric call to cuCtxEnablePeerAccess() is required.

    Note that there are both device-wide and system-wide limitations per system configuration, as noted in the CUDA Programming Guide under the section “Peer-to-Peer Memory Access”.

    Returns CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if cuDeviceCanAccessPeer() indicates that the CUdevice of the current context cannot directly access memory from the CUdevice of peerContext.

    Returns CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct access of peerContext from the current context has already been enabled.

    Returns CUDA_ERROR_TOO_MANY_PEERS if direct peer access is not possible because hardware resources required for peer access have been exhausted.

    Returns CUDA_ERROR_INVALID_CONTEXT if there is no current context, peerContext is not a valid context, or if the current context is peerContext.

    Returns CUDA_ERROR_INVALID_VALUE if Flags is not 0.

    Parameters:

    • peerContext (CUcontext) – Peer context to enable direct access to from the current context

    • Flags (unsigned int) – Reserved for future use and must be set to 0

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED, CUDA_ERROR_TOO_MANY_PEERS, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult
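
    The check-then-enable pattern above is symmetric. The following is a minimal sketch (not part of the original reference) using the Python bindings; the names dev0/dev1 and ctx0/ctx1 are assumed to be devices and contexts already obtained via cuDeviceGet and cuCtxCreate after cuInit:

        from cuda import cuda

        # Minimal sketch: enable peer access in both directions between two
        # devices. dev0/dev1 (CUdevice) and ctx0/ctx1 (CUcontext) are assumed.
        err, can01 = cuda.cuDeviceCanAccessPeer(dev0, dev1)
        err, can10 = cuda.cuDeviceCanAccessPeer(dev1, dev0)
        if can01 and can10:
            # Access granted by cuCtxEnablePeerAccess is unidirectional, so a
            # symmetric call is made from each context.
            err, = cuda.cuCtxSetCurrent(ctx0)
            err, = cuda.cuCtxEnablePeerAccess(ctx1, 0)  # Flags must be 0
            err, = cuda.cuCtxSetCurrent(ctx1)
            err, = cuda.cuCtxEnablePeerAccess(ctx0, 0)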

cuda.cuda.cuCtxDisablePeerAccess(peerContext)#

    Disables direct access to memory allocations in a peer context and unregisters any registered allocations.

    Returns CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has not yet been enabled from peerContext to the current context.

    Returns CUDA_ERROR_INVALID_CONTEXT if there is no current context, or if peerContext is not a valid context.

    Parameters:

    peerContext (CUcontext) – Peer context to disable direct access to

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_PEER_ACCESS_NOT_ENABLED, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult
cuda.cuda.cuDeviceGetP2PAttribute(attrib: CUdevice_P2PAttribute, srcDevice, dstDevice)#

    Queries attributes of the link between two devices.

    Returns in *value the value of the requested attribute attrib of the link between srcDevice and dstDevice. The supported attributes are the values of CUdevice_P2PAttribute.

    Returns CUDA_ERROR_INVALID_DEVICE if srcDevice or dstDevice are not valid or if they represent the same device.

    Returns CUDA_ERROR_INVALID_VALUE if attrib is not valid or if value is a null pointer.

    Parameters:

    • attrib (CUdevice_P2PAttribute) – The requested attribute of the link between srcDevice and dstDevice.

    • srcDevice (CUdevice) – The source device of the target link.

    • dstDevice (CUdevice) – The destination device of the target link.

    Returns:
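
    As a minimal sketch (dev0 and dev1 are assumed CUdevice handles), querying two of these attributes with the Python bindings looks like:

        from cuda import cuda
        from cuda.cuda import CUdevice_P2PAttribute as P2P

        # Minimal sketch: query properties of the dev0 -> dev1 link.
        err, supported = cuda.cuDeviceGetP2PAttribute(
            P2P.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED, dev0, dev1)
        err, rank = cuda.cuDeviceGetP2PAttribute(
            P2P.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK, dev0, dev1)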

Graphics Interoperability#

This section describes the graphics interoperability functions of the low-level CUDA driver application programming interface.

cuda.cuda.cuGraphicsUnregisterResource(resource)#

    Unregisters a graphics resource for access by CUDA.

    Unregisters the graphics resource resource so it is not accessible by CUDA unless registered again.

    If resource is invalid then CUDA_ERROR_INVALID_HANDLE is returned.

    Parameters:

    resource (CUgraphicsResource) – Resource to unregister

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_UNKNOWN

    Return type:

    CUresult

    See also

    cuGraphicsD3D9RegisterResource, cuGraphicsD3D10RegisterResource, cuGraphicsD3D11RegisterResource, cuGraphicsGLRegisterBuffer, cuGraphicsGLRegisterImage, cudaGraphicsUnregisterResource
cuda.cuda.cuGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsigned int mipLevel)#

    Get an array through which to access a subresource of a mapped graphics resource.

    Returns in *pArray an array through which the subresource of the mapped graphics resource resource which corresponds to array index arrayIndex and mipmap level mipLevel may be accessed. The value set in *pArray may change every time that resource is mapped.

    If resource is not a texture then it cannot be accessed via an array and CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. If arrayIndex is not a valid array index for resource then CUDA_ERROR_INVALID_VALUE is returned. If mipLevel is not a valid mipmap level for resource then CUDA_ERROR_INVALID_VALUE is returned. If resource is not mapped then CUDA_ERROR_NOT_MAPPED is returned.

    Parameters:

    • resource (CUgraphicsResource) – Mapped resource to access

    • arrayIndex (unsigned int) – Array index for array textures or cubemap face index as defined by CUarray_cubemap_face for cubemap textures for the subresource to access

    • mipLevel (unsigned int) – Mipmap level for the subresource to access

    Returns:

cuda.cuda.cuGraphicsResourceGetMappedMipmappedArray(resource)#

    Get a mipmapped array through which to access a mapped graphics resource.

    Returns in *pMipmappedArray a mipmapped array through which the mapped graphics resource resource may be accessed. The value set in *pMipmappedArray may change every time that resource is mapped.

    If resource is not a texture then it cannot be accessed via a mipmapped array and CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. If resource is not mapped then CUDA_ERROR_NOT_MAPPED is returned.

    Parameters:

    resource (CUgraphicsResource) – Mapped resource to access

    Returns:

cuda.cuda.cuGraphicsResourceGetMappedPointer(resource)#

    Get a device pointer through which to access a mapped graphics resource.

    Returns in *pDevPtr a pointer through which the mapped graphics resource resource may be accessed. Returns in pSize the size of the memory in bytes which may be accessed from that pointer. The value set in pPointer may change every time that resource is mapped.

    If resource is not a buffer then it cannot be accessed via a pointer and CUDA_ERROR_NOT_MAPPED_AS_POINTER is returned. If resource is not mapped then CUDA_ERROR_NOT_MAPPED is returned.

    Parameters:

    resource (CUgraphicsResource) – None

    Returns:

    • CUresult

    • pDevPtr (CUdeviceptr) – None

    • pSize (int) – None

cuda.cuda.cuGraphicsResourceSetMapFlags(resource, unsigned int flags)#

    Set usage flags for mapping a graphics resource.

    Set flags for mapping the graphics resource resource.

    Changes to flags will take effect the next time resource is mapped. The flags argument may be any of the following:

    • CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA kernels. This is the default value.

    • CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY: Specifies that CUDA kernels which access this resource will not write to this resource.

    • CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITEDISCARD: Specifies that CUDA kernels which access this resource will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    If resource is presently mapped for access by CUDA then CUDA_ERROR_ALREADY_MAPPED is returned. If flags is not one of the above values then CUDA_ERROR_INVALID_VALUE is returned.

    Parameters:

    • resource (CUgraphicsResource) – Registered resource to set flags for

    • flags (unsigned int) – Parameters for resource mapping

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ALREADY_MAPPED

    Return type:

    CUresult

cuda.cuda.cuGraphicsMapResources(unsigned int count, resources, hStream)#

    Map graphics resources for access by CUDA.

    Maps the count graphics resources in resources for access by CUDA.

    The resources in resources may be accessed by CUDA until they are unmapped. The graphics API from which resources were registered should not access any resources while they are mapped by CUDA. If an application does so, the results are undefined.

    This function provides the synchronization guarantee that any graphics calls issued before cuGraphicsMapResources() will complete before any subsequent CUDA work issued in stream begins.

    If resources includes any duplicate entries then CUDA_ERROR_INVALID_HANDLE is returned. If any of resources are presently mapped for access by CUDA then CUDA_ERROR_ALREADY_MAPPED is returned.

    Parameters:

    • count (unsigned int) – Number of resources to map

    • resources (CUgraphicsResource) – Resources to map for CUDA usage

    • hStream (CUstream or cudaStream_t) – Stream with which to synchronize

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ALREADY_MAPPED, CUDA_ERROR_UNKNOWN

    Return type:

    CUresult

cuda.cuda.cuGraphicsUnmapResources(unsigned int count, resources, hStream)#

    Unmap graphics resources.

    Unmaps the count graphics resources in resources.

    Once unmapped, the resources in resources may not be accessed by CUDA until they are mapped again.

    This function provides the synchronization guarantee that any CUDA work issued in stream before cuGraphicsUnmapResources() will complete before any subsequently issued graphics work begins.

    If resources includes any duplicate entries then CUDA_ERROR_INVALID_HANDLE is returned. If any of resources are not presently mapped for access by CUDA then CUDA_ERROR_NOT_MAPPED is returned.

    Parameters:

    • count (unsigned int) – Number of resources to unmap

    • resources (CUgraphicsResource) – Resources to unmap

    • hStream (CUstream or cudaStream_t) – Stream with which to synchronize

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_MAPPED, CUDA_ERROR_UNKNOWN

    Return type:

    CUresult
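
    Combining the map/unmap entries above, a minimal sketch of the usual workflow (resource and stream are assumed to be an already-registered CUgraphicsResource and a CUstream; whether a single resource or a list is expected for count > 1 should be checked against the bindings):

        from cuda import cuda

        # Minimal sketch: map a registered buffer resource, read its device
        # pointer, do CUDA work, then unmap so the graphics API may use it again.
        err, = cuda.cuGraphicsMapResources(1, resource, stream)
        err, devPtr, size = cuda.cuGraphicsResourceGetMappedPointer(resource)
        # ... launch kernels in 'stream' that read/write devPtr ...
        err, = cuda.cuGraphicsUnmapResources(1, resource, stream)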

Driver Entry Point Access#

This section describes the driver entry point access functions of the low-level CUDA driver application programming interface.

cuda.cuda.cuGetProcAddress(char *symbol, int cudaVersion, flags)#

    Returns the requested driver API function pointer.

    Returns in **pfn the address of the CUDA driver function for the requested CUDA version and flags.

    The CUDA version is specified as (1000 * major + 10 * minor), so CUDA 11.2 should be specified as 11020. For a requested driver symbol, if the specified CUDA version is greater than or equal to the CUDA version in which the driver symbol was introduced, this API will return the function pointer to the corresponding versioned function.

    The pointer returned by the API should be cast to a function pointer matching the requested driver function’s definition in the API header file. The function pointer typedef can be picked up from the corresponding typedefs header file. For example, cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h.

    The API will return CUDA_SUCCESS and set the returned pfn to NULL if the requested driver function is not supported on the platform, no ABI compatible driver function exists for the specified cudaVersion or if the driver symbol is invalid.

    It will also set the optional symbolStatus to one of the values in CUdriverProcAddressQueryResult with the following meanings:

    The requested flags can be:

    Parameters:

    • symbol (bytes) – The base name of the driver API function to look for. As an example, for the driver API cuMemAlloc_v2, symbol would be cuMemAlloc and cudaVersion would be the ABI compatible CUDA version for the _v2 variant.

    • cudaVersion (int) – The CUDA version to look for the requested driver symbol

    • flags (Any) – Flags to specify search options.

    Returns:
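
    As a minimal sketch (the symbolStatus element of the output tuple is assumed from the prose above), resolving cuMemAlloc for CUDA 12.0 looks like:

        from cuda import cuda

        # Minimal sketch: resolve the cuMemAlloc driver entry point for CUDA 12.0
        # (12000 = 1000 * 12 + 10 * 0). The returned address would be cast to the
        # matching function pointer type, e.g. via ctypes, before being called.
        err, pfn, status = cuda.cuGetProcAddress(
            b"cuMemAlloc", 12000,
            cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT)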

Coredump Attributes Control API#

This section describes the coredump attribute control functions of the low-level CUDA driver application programming interface.

class cuda.cuda.CUcoredumpSettings(value)#

    Flags for choosing a coredump attribute to get/set

    CU_COREDUMP_ENABLE_ON_EXCEPTION = 1#

    CU_COREDUMP_TRIGGER_HOST = 2#

    CU_COREDUMP_LIGHTWEIGHT = 3#

    CU_COREDUMP_ENABLE_USER_TRIGGER = 4#

    CU_COREDUMP_FILE = 5#

    CU_COREDUMP_PIPE = 6#

    CU_COREDUMP_GENERATION_FLAGS = 7#

    CU_COREDUMP_MAX = 8#

class cuda.cuda.CUCoredumpGenerationFlags(value)#

    Flags for controlling coredump contents

    CU_COREDUMP_DEFAULT_FLAGS = 0#

    CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES = 1#

    CU_COREDUMP_SKIP_GLOBAL_MEMORY = 2#

    CU_COREDUMP_SKIP_SHARED_MEMORY = 4#

    CU_COREDUMP_SKIP_LOCAL_MEMORY = 8#

    CU_COREDUMP_SKIP_ABORT = 16#

    CU_COREDUMP_SKIP_CONSTBANK_MEMORY = 32#

    CU_COREDUMP_LIGHTWEIGHT_FLAGS = 47#

cuda.cuda.cuCoredumpGetAttribute(attrib: CUcoredumpSettings)#

    Allows the caller to fetch a coredump attribute value for the current context.

    Returns in *value the requested value specified by attrib. It is up to the caller to ensure that the data type and size of *value matches the request.

    If the caller calls this function with *value equal to NULL, the size of the memory region (in bytes) expected for attrib will be placed in size.

    The supported attributes are:

    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where true means that GPU exceptions from this context will create a coredump at the location specified by CU_COREDUMP_FILE. The default value is false unless set to true globally or locally, or the CU_CTX_USER_COREDUMP_ENABLE flag was set during context creation.

    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5 - raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • CU_COREDUMP_LIGHTWEIGHT: Bool where true means that any resulting coredumps will not have a dump of GPU memory or non-reloc ELF images. The default value is false unless set to true globally or locally. This attribute is deprecated as of CUDA 12.5, please use CU_COREDUMP_GENERATION_FLAGS instead.

    • CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where true means that a coredump can be created by writing to the system pipe specified by CU_COREDUMP_PIPE. The default value is false unless set to true globally or locally.

    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe that will be monitored if user-triggered coredumps are enabled. The default value is corepipe.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_GENERATION_FLAGS: An integer with values to allow granular control over the data contained in a coredump, specified as a bitwise OR combination of the following values:

    Parameters:

    • attrib (CUcoredumpSettings) – The enum defining which value to fetch.

    • size (int) – The size of the memory region value points to.

    Returns:

cuda.cuda.cuCoredumpGetAttributeGlobal(attrib: CUcoredumpSettings)#

    Allows the caller to fetch a coredump attribute value for the entire application.

    Returns in *value the requested value specified by attrib. It is up to the caller to ensure that the data type and size of *value matches the request.

    If the caller calls this function with *value equal to NULL, the size of the memory region (in bytes) expected for attrib will be placed in size.

    The supported attributes are:

    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where true means that GPU exceptions from this context will create a coredump at the location specified by CU_COREDUMP_FILE. The default value is false.

    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5 - raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • CU_COREDUMP_LIGHTWEIGHT: Bool where true means that any resulting coredumps will not have a dump of GPU memory or non-reloc ELF images. The default value is false. This attribute is deprecated as of CUDA 12.5, please use CU_COREDUMP_GENERATION_FLAGS instead.

    • CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where true means that a coredump can be created by writing to the system pipe specified by CU_COREDUMP_PIPE. The default value is false.

    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe that will be monitored if user-triggered coredumps are enabled. The default value is corepipe.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_GENERATION_FLAGS: An integer with values to allow granular control over the data contained in a coredump, specified as a bitwise OR combination of the following values:

    Parameters:

    • attrib (CUcoredumpSettings) – The enum defining which value to fetch.

    • size (int) – The size of the memory region value points to.

    Returns:

cuda.cuda.cuCoredumpSetAttribute(attrib: CUcoredumpSettings, value)#

    Allows the caller to set a coredump attribute value for the current context.

    This function should be considered an alternate interface to the CUDA-GDB environment variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump

    An important design decision to note is that any coredump environment variable values set before CUDA initializes will take permanent precedence over any values set with this function. This decision was made to ensure no change in behavior for any users that may be currently using these variables to get coredumps.

    *value shall contain the requested value specified by set. It is up to the caller to ensure that the data type and size of *value matches the request.

    If the caller calls this function with *value equal to NULL, the size of the memory region (in bytes) expected for set will be placed in size.

    Note: This function will return CUDA_ERROR_NOT_SUPPORTED if the caller attempts to set CU_COREDUMP_ENABLE_ON_EXCEPTION on a GPU with compute capability < 6.0. cuCoredumpSetAttributeGlobal works on those platforms as an alternative.

    Note: CU_COREDUMP_ENABLE_USER_TRIGGER and CU_COREDUMP_PIPE cannot be set on a per-context basis.

    The supported attributes are:

    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where true means that GPU exceptions from this context will create a coredump at the location specified by CU_COREDUMP_FILE. The default value is false.

    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5 - raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • CU_COREDUMP_LIGHTWEIGHT: Bool where true means that any resulting coredumps will not have a dump of GPU memory or non-reloc ELF images. The default value is false. This attribute is deprecated as of CUDA 12.5, please use CU_COREDUMP_GENERATION_FLAGS instead.

    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_GENERATION_FLAGS: An integer with values to allow granular control over the data contained in a coredump, specified as a bitwise OR combination of the following values:

    Parameters:

    • attrib (CUcoredumpSettings) – The enum defining which value to set.

    • value (Any) – void* containing the requested data.

    • size (int) – The size of the memory region value points to.

    Returns:
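
    As a minimal sketch (how the bool is marshalled through the void*-style value argument is an assumption here; the signature above only types value as Any, so check the bindings for the exact convention), enabling exception coredumps for the current context might look like:

        from cuda import cuda
        from cuda.cuda import CUcoredumpSettings

        # Minimal sketch: request coredumps on GPU exceptions for the current
        # context. Marshalling of 'value' is assumed, not taken from this doc.
        err, = cuda.cuCoredumpSetAttribute(
            CUcoredumpSettings.CU_COREDUMP_ENABLE_ON_EXCEPTION, True)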

cuda.cuda.cuCoredumpSetAttributeGlobal(attrib: CUcoredumpSettings, value)#

    Allows the caller to set a coredump attribute value globally.

    This function should be considered an alternate interface to the CUDA-GDB environment variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump

    An important design decision to note is that any coredump environment variable values set before CUDA initializes will take permanent precedence over any values set with this function. This decision was made to ensure no change in behavior for any users that may be currently using these variables to get coredumps.

    *value shall contain the requested value specified by set. It is up to the caller to ensure that the data type and size of *value matches the request.

    If the caller calls this function with *value equal to NULL, the size of the memory region (in bytes) expected for set will be placed in size.

    The supported attributes are:

    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where true means that GPU exceptions from this context will create a coredump at the location specified by CU_COREDUMP_FILE. The default value is false.

    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5 - raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • CU_COREDUMP_LIGHTWEIGHT: Bool where true means that any resulting coredumps will not have a dump of GPU memory or non-reloc ELF images. The default value is false. This attribute is deprecated as of CUDA 12.5, please use CU_COREDUMP_GENERATION_FLAGS instead.

    • CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where true means that a coredump can be created by writing to the system pipe specified by CU_COREDUMP_PIPE. The default value is false.

    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe that will be monitored if user-triggered coredumps are enabled. This value may not be changed after CU_COREDUMP_ENABLE_USER_TRIGGER is set to true. The default value is corepipe.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_GENERATION_FLAGS: An integer with values to allow granular control over the data contained in a coredump, specified as a bitwise OR combination of the following values:

    Parameters:

    • attrib (CUcoredumpSettings) – The enum defining which value to set.

    • value (Any) – void* containing the requested data.

    • size (int) – The size of the memory region value points to.

    Returns:

Green Contexts#

This section describes the APIs for creation and manipulation of green contexts in the CUDA driver. Green contexts are a lightweight alternative to traditional contexts, with the ability to pass in a set of resources that they should be initialized with. This allows the developer to represent distinct spatial partitions of the GPU, provision resources for them, and target them via the same programming model that CUDA exposes (streams, kernel launches, etc.).

There are 4 main steps to using this new set of APIs; a minimal Python sketch of the full sequence appears at the end of this introduction.

  1. Start with an initial set of resources, for example via cuDeviceGetDevResource. Only the SM type is supported today.

  2. Partition this set of resources by providing them as input to a partition API, for example: cuDevSmResourceSplitByCount.

  3. Finalize the specification of resources by creating a descriptor via cuDevResourceGenerateDesc.

  4. Provision the resources and create a green context via cuGreenCtxCreate.

For CU_DEV_RESOURCE_TYPE_SM, the partitions created have minimum SM count requirements, often rounding up and aligning the minCount provided to cuDevSmResourceSplitByCount. The following is a guideline for each architecture and may be subject to change:

  • On Compute Architecture 6.X: The minimum count is 1 SM.

  • On Compute Architecture 7.X: The minimum count is 2 SMs and must be a multiple of 2.

  • On Compute Architecture 8.X: The minimum count is 4 SMs and must be a multiple of 2.

  • On Compute Architecture 9.0+: The minimum count is 8 SMs and must be a multiple of 8.

In the future, flags can be provided to trade off functional and performance characteristics versus finer-grained SM partitions.

Even if the green contexts have disjoint SM partitions, it is not guaranteed that the kernels launched in them will run concurrently or have forward progress guarantees. This is due to other resources (like HW connections, see CUDA_DEVICE_MAX_CONNECTIONS) that could cause a dependency. Additionally, in certain scenarios, it is possible for the workload to run on more SMs than were provisioned (but never fewer). The following are two scenarios which can exhibit this behavior:

  • On Volta+ MPS: When CUDA_MPS_ACTIVE_THREAD_PERCENTAGE is used, the set of SMs that are used for running kernels can be scaled up to the value of SMs used for the MPS client.

  • On Compute Architecture 9.x: When a module with dynamic parallelism (CDP) is loaded, all future kernels running under green contexts may use and share an additional set of 2 SMs.
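
The following is the promised end-to-end sketch of the four steps with the Python bindings. It assumes cuInit(0) has run and dev is a CUdevice; the output-tuple shapes are inferred from the per-function signatures documented below and should be verified against the bindings:

    from cuda import cuda
    from cuda.cuda import CUdevResourceType

    # Step 1: get the device's SM resource.
    err, sm = cuda.cuDeviceGetDevResource(
        dev, CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM)

    # Step 2: split into partitions of at least 8 SMs. The first call (with
    # nbGroups=0, i.e. no result array) queries how many groups a split
    # would create; the second call performs the split.
    err, _, nbGroups, _ = cuda.cuDevSmResourceSplitByCount(0, sm, 0, 8)
    err, groups, nbGroups, remaining = cuda.cuDevSmResourceSplitByCount(
        nbGroups, sm, 0, 8)

    # Step 3: wrap one partition in a descriptor.
    err, desc = cuda.cuDevResourceGenerateDesc([groups[0]], 1)

    # Step 4: provision the resources as a green context.
    err, gctx = cuda.cuGreenCtxCreate(
        desc, dev, cuda.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM)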

class cuda.cuda.CUdevSmResource_st(void_ptr _ptr=0)#

    smCount#

    The amount of streaming multiprocessors available in this resource. This is an output parameter only; do not write to this field.

    Type:

    unsigned int

    getPtr()#

    Get memory address of class instance

class cuda.cuda.CUdevResource_st(void_ptr _ptr=0)#

    type#

    Type of resource, dictates which union field was last set

    Type:

    CUdevResourceType

    _internal_padding#

    Type:

    bytes

    sm#

    Resource corresponding to the CU_DEV_RESOURCE_TYPE_SM type.

    Type:

    CUdevSmResource

    _oversize#

    Type:

    bytes

    getPtr()#

    Get memory address of class instance
class cuda.cuda.CUdevSmResource#

    smCount#

    The amount of streaming multiprocessors available in this resource. This is an output parameter only; do not write to this field.

    Type:

    unsigned int

    getPtr()#

    Get memory address of class instance

class cuda.cuda.CUdevResource#

    type#

    Type of resource, dictates which union field was last set

    Type:

    CUdevResourceType

    _internal_padding#

    Type:

    bytes

    sm#

    Resource corresponding to the CU_DEV_RESOURCE_TYPE_SM type.

    Type:

    CUdevSmResource

    _oversize#

    Type:

    bytes

    getPtr()#

    Get memory address of class instance

class cuda.cuda.CUgreenCtxCreate_flags(value)#

    CU_GREEN_CTX_DEFAULT_STREAM = 1#

    Required. Creates a default stream to use inside the green context

class cuda.cuda.CUdevSmResourceSplit_flags(value)#

    CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING = 1#

    CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE = 2#

class cuda.cuda.CUdevResourceType(value)#

    Type of resource

    CU_DEV_RESOURCE_TYPE_INVALID = 0#

    CU_DEV_RESOURCE_TYPE_SM = 1#

    Streaming multiprocessors related information

class cuda.cuda.CUdevResourceDesc(*args, **kwargs)#

    An opaque descriptor handle. The descriptor encapsulates multiple created and configured resources. Created via cuDevResourceGenerateDesc

    getPtr()#

    Get memory address of class instance

cuda.cuda.cuGreenCtxCreate(desc, dev, unsigned int flags)#

    Creates a green context with a specified set of resources.

    This API creates a green context with the resources specified in the descriptor desc and returns it in the handle represented by phCtx. This API will retain the primary context on device dev, which is released when the green context is destroyed. It is advised to have the primary context active before calling this API to avoid the heavy cost of triggering primary context initialization and deinitialization multiple times.

    The API does not set the green context current. In order to set it current, you need to explicitly set it current by first converting the green context to a CUcontext using cuCtxFromGreenCtx and subsequently calling cuCtxSetCurrent / cuCtxPushCurrent. It should be noted that a green context can be current to only one thread at a time. There is no internal synchronization to make API calls accessing the same green context from multiple threads work.

    Note: The API is not supported on 32-bit platforms.

    The supported flags are:

    • CU_GREEN_CTX_DEFAULT_STREAM : Creates a default stream to use inside the green context. Required.

    Parameters:

    • desc (CUdevResourceDesc) – Descriptor generated via cuDevResourceGenerateDesc which contains the set of resources to be used

    • dev (CUdevice) – Device on which to create the green context.

    • flags (unsigned int) – One of the supported green context creation flags. CU_GREEN_CTX_DEFAULT_STREAM is required.

    Returns:

cuda.cuda.cuGreenCtxDestroy(hCtx)#

    Destroys a green context.

    Destroys the green context, releasing the primary context of the device that this green context was created for. Any resources provisioned for this green context (that were initially available via the resource descriptor) are released as well.

    Parameters:

    hCtx (CUgreenCtx) – Green context to be destroyed

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_CONTEXT_IS_DESTROYED

    Return type:

    CUresult

cuda.cuda.cuCtxFromGreenCtx(hCtx)#

    Converts a green context into the primary context.

    The API converts a green context into the primary context returned in pContext. It is important to note that the converted context pContext is a normal primary context but with the resources of the specified green context hCtx. Once converted, it can then be used to set the context current with cuCtxSetCurrent or with any of the CUDA APIs that accept a CUcontext parameter.

    Users are expected to call this API before calling any CUDA APIs that accept a CUcontext. Failing to do so will result in the APIs returning CUDA_ERROR_INVALID_CONTEXT.

    Parameters:

    hCtx (CUgreenCtx) – Green context to convert

    Returns:

    See also

    cuGreenCtxCreate
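
    A minimal sketch of the conversion described above (gctx is an assumed CUgreenCtx created via cuGreenCtxCreate):

        from cuda import cuda

        # Minimal sketch: make a green context current on this thread by
        # converting it to a CUcontext first, as the entry above describes.
        err, ctx = cuda.cuCtxFromGreenCtx(gctx)
        err, = cuda.cuCtxSetCurrent(ctx)
        # ... streams and kernel launches now target the green context's SMs ...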

cuda.cuda.cuDeviceGetDevResource(device, typename: CUdevResourceType)#

    Get device resources.

    Get the typename resources available to the device. This may often be the starting point for further partitioning or configuring of resources.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    • device (CUdevice) – Device to get resource for

    • typename (CUdevResourceType) – Type of resource to retrieve

    Returns:

cuda.cuda.cuCtxGetDevResource(hCtx, typename: CUdevResourceType)#

    Get context resources.

    Get the typename resources available to the context represented by hCtx.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    • hCtx (CUcontext) – Context to get resource for

    • typename (CUdevResourceType) – Type of resource to retrieve

    Returns:

cuda.cuda.cuGreenCtxGetDevResource(hCtx, typename: CUdevResourceType)#

    Get green context resources.

    Get the typename resources available to the green context represented by hCtx.

    Parameters:

    • hCtx (CUgreenCtx) – Green context to get resource for

    • typename (CUdevResourceType) – Type of resource to retrieve

    Returns:

cuda.cuda.cuDevSmResourceSplitByCount(unsigned int nbGroups, CUdevResource input_: Optional[CUdevResource], unsigned int useFlags, unsigned int minCount)#

    Splits CU_DEV_RESOURCE_TYPE_SM resources.

    Splits CU_DEV_RESOURCE_TYPE_SM resources into nbGroups, adhering to the minimum SM count specified in minCount and the usage flags in useFlags. If result is NULL, the API simulates a split and provides the number of groups that would be created in nbGroups. Otherwise, nbGroups must point to the number of elements in result and on return, the API will overwrite nbGroups with the number actually created. The groups are written to the array in result. nbGroups can be less than the total number if a smaller number of groups is needed.

    This API is used to spatially partition the input resource. The input resource needs to come from one of cuDeviceGetDevResource, cuCtxGetDevResource, or cuGreenCtxGetDevResource. A limitation of the API is that the output results cannot be split again without first creating a descriptor and a green context with that descriptor.

    When creating the groups, the API will take into account the performance and functional characteristics of the input resource, and guarantee a split that will create a disjoint set of symmetrical partitions. This may lead to fewer groups created than purely dividing the total SM count by the minCount due to cluster requirements or alignment and granularity requirements for the minCount.

    The remainder set does not have the same functional or performance guarantees as the groups in result. Its use should be carefully planned and future partitions of the remainder set are discouraged.

    The following flags are supported:

    • CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING : Lower the minimum SM count and alignment, and treat each SM independent of its hierarchy. This allows more fine-grained partitions but at the cost of advanced features (such as large clusters on compute capability 9.0+).

    • CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE : Compute Capability 9.0+ only. Attempt to create groups that may allow for maximally sized thread clusters. This can be queried post green context creation using cuOccupancyMaxPotentialClusterSize.

    A successful API call must either have:

    • A valid array of result pointers of size passed in nbGroups, with input of type CU_DEV_RESOURCE_TYPE_SM. Value of minCount must be between 0 and the SM count specified in input. remaining may be NULL.

    • NULL passed in for result, with a valid integer pointer in nbGroups and input of type CU_DEV_RESOURCE_TYPE_SM. Value of minCount must be between 0 and the SM count specified in input. remaining may be NULL. This queries the number of groups that would be created by the API.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    • nbGroups (unsigned int) – This is a pointer, specifying the number of groups that would be or should be created as described below.

    • input (CUdevResource) – Input SM resource to be split. Must be a valid CU_DEV_RESOURCE_TYPE_SM resource.

    • useFlags (unsigned int) – Flags specifying how these partitions are used or which constraints to abide by when splitting the input. Zero is valid for default behavior.

    • minCount (unsigned int) – Minimum number of SMs required

    Returns:

cuda.cuda.cuDevResourceGenerateDesc(resources: Optional[Tuple[CUdevResource] | List[CUdevResource]], unsigned int nbResources)#

    Generate a resource descriptor.

    Generates a single resource descriptor with the set of resources specified in resources. The generated resource descriptor is necessary for the creation of green contexts via the cuGreenCtxCreate API. Resources of the same type can be passed in, provided they meet the requirements as noted below.

    A successful API call must have:

    • A valid output pointer for the phDesc descriptor as well as a valid array of resources pointers, with the array size passed in nbResources. If multiple resources are provided in resources, the device they came from must be the same, otherwise CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION is returned. If multiple resources are provided in resources and they are of type CU_DEV_RESOURCE_TYPE_SM, they must be outputs (whether result or remaining) from the same split API instance, otherwise CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION is returned.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    • resources (List[CUdevResource]) – Array of resources to be included in the descriptor

    • nbResources (unsigned int) – Number of resources passed in resources

    Returns:

cuda.cuda.cuGreenCtxRecordEvent(hCtx, hEvent)#

    Records an event.

    Captures in hEvent all the activities of the green context of hCtx at the time of this call. hEvent and hCtx must be from the same primary context, otherwise CUDA_ERROR_INVALID_HANDLE is returned. Calls such as cuEventQuery() or cuGreenCtxWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hCtx after this call do not modify hEvent.

    Parameters:

    • hCtx (CUgreenCtx) – Green context to record the event for

    • hEvent (CUevent) – Event to record

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED if the specified green context hCtx has a stream in the capture mode. In such a case, the call will invalidate all the conflicting captures.

cuda.cuda.cuGreenCtxWaitEvent(hCtx, hEvent)#

    Make a green context wait on an event.

    Makes all future work submitted to green context hCtx wait for all work captured in hEvent. The synchronization will be performed on the device and will not block the calling CPU thread. See cuGreenCtxRecordEvent() or cuEventRecord() for details on what is captured by an event.

    Parameters:

    • hCtx (CUgreenCtx) – Green context to make wait on the event

    • hEvent (CUevent) – Event to wait on

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    hEvent may be from a different context or device than hCtx.

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED and invalidate the capture if the specified event hEvent is part of an ongoing capture sequence or if the specified green context hCtx has a stream in the capture mode.
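
    The record/wait pair orders work across green contexts. A minimal sketch (gctxA/gctxB are assumed CUgreenCtx handles on the same device and hEvent an event from cuEventCreate):

        from cuda import cuda

        # Minimal sketch: make future work in gctxB wait for the work of
        # gctxA captured at this point in time.
        err, = cuda.cuGreenCtxRecordEvent(gctxA, hEvent)
        err, = cuda.cuGreenCtxWaitEvent(gctxB, hEvent)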

cuda.cuda.cuStreamGetGreenCtx(hStream)#

    Query the green context associated with a stream.

    Returns the CUDA green context that the stream is associated with, or NULL if the stream is not associated with any green context.

    The stream handle hStream can refer to any of the following:

    Passing an invalid handle will result in undefined behavior.

    Parameters:

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:

cuda.cuda.cuGreenCtxStreamCreate(greenCtx, unsigned int flags, int priority)#

    Create a stream for use in the green context.

    Creates a stream for use in the specified green context greenCtx and returns a handle in phStream. The stream can be destroyed by calling cuStreamDestroy(). Note that the API ignores the context that is current to the calling thread and creates a stream in the specified green context greenCtx.

    The supported values for flags are:

    • CU_STREAM_NON_BLOCKING: This must be specified. It indicates that work running in the created stream may run concurrently with work in the default stream, and that the created stream should perform no implicit synchronization with the default stream.

    Specifying priority affects the scheduling priority of work in the stream. Priorities provide a hint to preferentially run work with higher priority when possible, but do not preempt already-running work or provide any other functional guarantee on execution order. priority follows a convention where lower numbers represent higher priorities. ‘0’ represents default priority. The range of meaningful numerical priorities can be queried using cuCtxGetStreamPriorityRange. If the specified priority is outside the numerical range returned by cuCtxGetStreamPriorityRange, it will automatically be clamped to the lowest or the highest number in the range.

    Parameters:

    • greenCtx (CUgreenCtx) – Green context for which to create the stream

    • flags (unsigned int) – Flags for stream creation. CU_STREAM_NON_BLOCKING must be specified.

    • priority (int) – Stream priority. Lower numbers represent higher priorities. See cuCtxGetStreamPriorityRange for more information about meaningful stream priorities that can be passed.

    Returns:

    Notes

    In the current implementation, only compute kernels launched in priority streams are affected by the stream’s priority. Stream priorities have no effect on host-to-device and device-to-host memory operations.
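
    A minimal sketch (gctx is an assumed CUgreenCtx) of creating and destroying such a stream:

        from cuda import cuda

        # Minimal sketch: create a non-blocking, default-priority stream inside
        # a green context, launch work on it, then destroy it.
        err, stream = cuda.cuGreenCtxStreamCreate(
            gctx, cuda.CUstream_flags.CU_STREAM_NON_BLOCKING, 0)
        # ... enqueue kernels/copies on 'stream' ...
        err, = cuda.cuStreamDestroy(stream)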

cuda.RESOURCE_ABI_VERSION = 1#

cuda.RESOURCE_ABI_EXTERNAL_BYTES = 48#

EGL Interoperability#

This section describes the EGL interoperability functions of the low-level CUDA driver application programming interface.

cuda.cuda.cuGraphicsEGLRegisterImage(image, unsigned int flags)#

    Registers an EGL image.

    Registers the EGLImageKHR specified by image for access by CUDA. A handle to the registered object is returned as pCudaResource. Additional Mapping/Unmapping is not required for the registered resource and cuGraphicsResourceGetMappedEglFrame can be directly called on the pCudaResource.

    The application will be responsible for synchronizing access to shared objects. The application must ensure that any pending operation which accesses the objects has completed before passing control to CUDA. This may be accomplished by issuing and waiting for glFinish command on all GLcontexts (for OpenGL and likewise for other APIs). The application will also be responsible for ensuring that any pending operation on the registered CUDA resource has completed prior to executing subsequent commands in other APIs accessing the same memory objects. This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).

    The surface’s intended usage is specified using flags, as follows:

    The EGLImageKHR is an object which can be used to create an EGLImage target resource. It is defined as a void pointer: typedef void* EGLImageKHR

    Parameters:

    • image (EGLImageKHR) – An EGLImageKHR image which can be used to create target resource.

    • flags (unsigned int) – Map flags

    Returns:

cuda.cuda.cuEGLStreamConsumerConnect(stream)#

    Connect CUDA to EGLStream as a consumer.

    Connect CUDA as a consumer to EGLStreamKHR specified by stream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:

    stream (EGLStreamKHR) – EGLStreamKHR handle

    Returns:

cuda.cuda.cuEGLStreamConsumerConnectWithFlags(stream, unsigned int flags)#

    Connect CUDA to EGLStream as a consumer with given flags.

    Connect CUDA as a consumer to EGLStreamKHR specified by stream with specified flags defined by CUeglResourceLocationFlags.

    The flags specify whether the consumer wants to access frames from system memory or video memory. Default is CU_EGL_RESOURCE_LOCATION_VIDMEM.

    Parameters:

    • stream (EGLStreamKHR) – EGLStreamKHR handle

    • flags (unsigned int) – Flags denote intended location - system or video.

    Returns:

cuda.cuda.cuEGLStreamConsumerDisconnect(conn)#

    Disconnect CUDA as a consumer to EGLStream.

    Disconnect CUDA as a consumer to EGLStreamKHR.

    Parameters:

    conn (CUeglStreamConnection) – Connection to disconnect.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult

cuda.cuda.cuEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, unsigned int timeout)#

    Acquire an image frame from the EGLStream with CUDA as a consumer.

    Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented by the producer unless explicitly disabled by setting the EGL_SUPPORT_REUSE_NV flag to EGL_FALSE during stream initialization. By default, EGLStream is created with this flag set to EGL_TRUE. cuGraphicsResourceGetMappedEglFrame can be called on pCudaResource to get CUeglFrame.

    Parameters:

    • conn (CUeglStreamConnection) – Connection on which to acquire

    • pCudaResource (CUgraphicsResource) – CUDA resource on which the stream frame will be mapped for use.

    • pStream (CUstream) – CUDA stream for synchronization and any data migrations implied by CUeglResourceLocationFlags.

    • timeout (unsigned int) – Desired timeout in usec for a new frame to be acquired. If set as CUDA_EGL_INFINITE_TIMEOUT, acquire waits infinitely. After timeout occurs CUDA consumer tries to acquire an old frame if available and the EGL_SUPPORT_REUSE_NV flag is set.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_LAUNCH_TIMEOUT

    Return type:

    CUresult

cuda.cuda.cuEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream)#

    Releases the last frame acquired from the EGLStream.

    Release the acquired image frame specified by pCudaResource to EGLStreamKHR. If the EGL_SUPPORT_REUSE_NV flag was set to EGL_TRUE at the time of EGL creation, this API doesn’t release the last frame acquired on the EGLStream. By default, EGLStream is created with this flag set to EGL_TRUE.

    Parameters:

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE

    Return type:

    CUresult
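
    Putting the consumer-side calls together, the following is a minimal sketch; eglStream (EGLStreamKHR) and custream (CUstream) are assumed handles, and the exact output-tuple shapes of the acquire/release calls are an assumption inferred from the signatures above:

        from cuda import cuda

        # Minimal sketch of an EGLStream consumer (shapes are assumptions).
        err, conn = cuda.cuEGLStreamConsumerConnect(eglStream)
        err, resource, s = cuda.cuEGLStreamConsumerAcquireFrame(
            conn, None, custream, 16000)  # wait up to 16 ms for a frame
        err, frame = cuda.cuGraphicsResourceGetMappedEglFrame(resource, 0, 0)
        # ... process 'frame' on 'custream' ...
        err, resource, s = cuda.cuEGLStreamConsumerReleaseFrame(
            conn, resource, custream)
        err, = cuda.cuEGLStreamConsumerDisconnect(conn)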

cuda.cuda.cuEGLStreamProducerConnect(stream, width, height)#

    Connect CUDA to EGLStream as a producer.

    Connect CUDA as a producer to EGLStreamKHR specified by stream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:

    • stream (EGLStreamKHR) – EGLStreamKHR handle

    • width (EGLint) – width of the image to be submitted to the stream

    • height (EGLint) – height of the image to be submitted to the stream

    Returns:

cuda.cuda.cuEGLStreamProducerDisconnect(conn)#

    Disconnect CUDA as a producer to EGLStream.

    Disconnect CUDA as a producer to EGLStreamKHR.

    Parameters:

    conn (CUeglStreamConnection) – Connection to disconnect.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult

cuda.cuda.cuEGLStreamProducerPresentFrame(conn, CUeglFrame eglframe: CUeglFrame, pStream)#

    Present a CUDA eglFrame to the EGLStream with CUDA as a producer.

    When a frame is presented by the producer, it gets associated with the EGLStream and thus it is illegal to free the frame before the producer is disconnected. If a frame is freed and reused it may lead to undefined behavior.

    If producer and consumer are on different GPUs (iGPU and dGPU) then frametype CU_EGL_FRAME_TYPE_ARRAY is not supported. CU_EGL_FRAME_TYPE_PITCH can be used for such cross-device applications.

    The CUeglFrame is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    For CUeglFrame of type CU_EGL_FRAME_TYPE_PITCH, the application may present a sub-region of a memory allocation. In that case, the pitched pointer will specify the start address of the sub-region in the allocation and corresponding CUeglFrame fields will specify the dimensions of the sub-region.

    Parameters:

    • conn (CUeglStreamConnection) – Connection on which to present the CUDA array

    • eglframe (CUeglFrame) – CUDA EGLStream Producer Frame handle to be sent to the consumer over EGLStream.

    • pStream (CUstream) – CUDA stream on which to present the frame.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE

    Return type:

    CUresult
    -
cuda.cuda.cuEGLStreamProducerReturnFrame(conn, CUeglFrame eglframe: Optional[CUeglFrame], pStream)

    Return the CUDA eglFrame to the EGLStream released by the consumer.

    This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not returned a frame to the EGL stream. If a timeout is returned, the application can retry.

    Parameters:
        • conn (CUeglStreamConnection) – Connection on which to return
        • eglframe (CUeglFrame) – CUDA EGLStream producer frame handle returned from the consumer over EGLStream.
        • pStream (CUstream) – CUDA stream on which to return the frame.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_LAUNCH_TIMEOUT

    Return type:
        CUresult
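    Taken together, the producer-side calls above form a connect/present/return loop. The following is a minimal Python sketch of that shape; the EGL stream handle, frame dimensions, CUstream, and populated CUeglFrame are assumptions standing in for platform-specific EGL setup that is not shown here.

        from cuda import cuda

        # Assumptions (hypothetical, produced by EGL/driver setup not shown):
        #   egl_stream - an EGLStreamKHR handle, width/height - frame size,
        #   stream     - a CUstream, frame - a populated cuda.CUeglFrame.
        err, conn = cuda.cuEGLStreamProducerConnect(egl_stream, width, height)
        assert err == cuda.CUresult.CUDA_SUCCESS

        # Hand the frame to the consumer, then try to take a released frame back.
        err, = cuda.cuEGLStreamProducerPresentFrame(conn, frame, stream)
        ret = cuda.cuEGLStreamProducerReturnFrame(conn, frame, stream)
        if ret[0] == cuda.CUresult.CUDA_ERROR_LAUNCH_TIMEOUT:
            pass  # consumer has not released a frame yet; retry later

        err, = cuda.cuEGLStreamProducerDisconnect(conn)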
cuda.cuda.cuGraphicsResourceGetMappedEglFrame(resource, unsigned int index, unsigned int mipLevel)

    Get an eglFrame through which to access a registered EGL graphics resource.

    Returns in *eglFrame an eglFrame pointer through which the registered graphics resource resource may be accessed. This API can only be called for registered EGL graphics resources.

    The CUeglFrame is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If resource is not registered, then CUDA_ERROR_NOT_MAPPED is returned.

    Parameters:
        • resource (CUgraphicsResource) – Registered resource to access.
        • index (unsigned int) – Index for cubemap surfaces.
        • mipLevel (unsigned int) – Mipmap level for the subresource to access.

    Returns:
        • CUresult
        • eglFrame (CUeglFrame) – Returned eglFrame
cuda.cuda.cuEventCreateFromEGLSync(eglSync, unsigned int flags)

    Creates an event from an EGLSync object.

    Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified via flags. Valid flags include:

        • CU_EVENT_DEFAULT: Default event creation flag.
        • CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking synchronization. A CPU thread that uses cuEventSynchronize() to wait on an event created with this flag will block until the event has actually been completed.

    Once the eglSync gets destroyed, cuEventDestroy is the only API that can be invoked on the event.

    cuEventRecord and TimingData are not supported for events created from EGLSync.

    The EGLSyncKHR is an opaque handle to an EGL sync object. typedef void* EGLSyncKHR

    Parameters:
        • eglSync (EGLSyncKHR) – Opaque handle to EGLSync object
        • flags (unsigned int) – Event creation flags

    Returns:
        • CUresult
        • phEvent (CUevent) – Returned event

OpenGL Interoperability

    This section describes the OpenGL interoperability functions of the low-level CUDA driver application programming interface. Note that mapping of OpenGL resources is performed with the graphics-API-agnostic resource mapping interface described in Graphics Interoperability.

class cuda.cuda.CUGLDeviceList(value)

    CUDA devices corresponding to an OpenGL device

    CU_GL_DEVICE_LIST_ALL = 1
        The CUDA devices for all GPUs used by the current OpenGL context

    CU_GL_DEVICE_LIST_CURRENT_FRAME = 2
        The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame

    CU_GL_DEVICE_LIST_NEXT_FRAME = 3
        The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame
cuda.cuda.cuGraphicsGLRegisterBuffer(buffer, unsigned int Flags)

    Registers an OpenGL buffer object.

    Registers the buffer object specified by buffer for access by CUDA. A handle to the registered object is returned as pCudaResource. The register flags Flags specify the intended usage, as follows:

        • CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA.
        • CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA will not write to this resource.
        • CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that CUDA will not read from this resource and will write over every byte of it.

    Parameters:
        • buffer (GLuint) – name of buffer object to be registered
        • Flags (unsigned int) – Register flags

    Returns:
        • CUresult
        • pCudaResource (CUgraphicsResource) – Returned graphics resource handle
cuda.cuda.cuGraphicsGLRegisterImage(image, target, unsigned int Flags)

    Register an OpenGL texture or renderbuffer object.

    Registers the texture or renderbuffer object specified by image for access by CUDA. A handle to the registered object is returned as pCudaResource.

    target must match the type of the object, and must be one of GL_TEXTURE_2D, GL_TEXTURE_RECTANGLE, GL_TEXTURE_CUBE_MAP, GL_TEXTURE_3D, GL_TEXTURE_2D_ARRAY, or GL_RENDERBUFFER.

    The register flags Flags specify the intended usage, as follows:

        • CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this resource will be used.
        • CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA will not write to this resource.
        • CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that CUDA will not read from this resource and will write over every byte of it.
        • CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: Specifies that CUDA will bind this resource to a surface reference.
        • CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: Specifies that CUDA will perform texture gather operations on this resource.

    The following image formats are supported. For brevity's sake, the list is abbreviated. For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats {GL_R8, GL_R16, GL_RG8, GL_RG16}:

        • GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
        • {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
        • {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}

    The following image classes are currently disallowed:

        • Textures with borders
        • Multisampled renderbuffers

    Parameters:
        • image (GLuint) – name of texture or renderbuffer object to be registered
        • target (GLenum) – Identifies the type of object specified by image
        • Flags (unsigned int) – Register flags

    Returns:
        • CUresult
        • pCudaResource (CUgraphicsResource) – Returned graphics resource handle
cuda.cuda.cuGLGetDevices(unsigned int cudaDeviceCount, deviceList: CUGLDeviceList)

    Gets the CUDA devices associated with the current OpenGL context.

    Returns in *pCudaDeviceCount the number of CUDA-compatible devices corresponding to the current OpenGL context. Also returns in *pCudaDevices at most cudaDeviceCount of the CUDA-compatible devices corresponding to the current OpenGL context. If any of the GPUs being used by the current OpenGL context are not CUDA capable, then the call will return CUDA_ERROR_NO_DEVICE.

    The deviceList argument may be any of the following:

        • CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context.
        • CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to render the current frame (in SLI).
        • CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to render the next frame (in SLI). Note that this is a prediction; it cannot be guaranteed to be correct in all cases.

    Parameters:
        • cudaDeviceCount (unsigned int) – The size of the output device array pCudaDevices.
        • deviceList (CUGLDeviceList) – The set of devices to return.

    Returns:
        • CUresult – CUDA_SUCCESS, CUDA_ERROR_NO_DEVICE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_GRAPHICS_CONTEXT
        • pCudaDeviceCount (unsigned int) – Returned number of CUDA devices.
        • pCudaDevices (List[CUdevice]) – Returned CUDA devices.

    See also

    cudaGLGetDevices

    Notes

    This function is not supported on Mac OS X.
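    As a sketch of the call shape in Python, assuming windowing code (not shown) has already made an OpenGL context current on this thread:

        from cuda import cuda

        # Query up to 8 CUDA devices backing the current OpenGL context.
        err, count, devices = cuda.cuGLGetDevices(
            8, cuda.CUGLDeviceList.CU_GL_DEVICE_LIST_ALL)
        if err == cuda.CUresult.CUDA_SUCCESS:
            print(f"{count} CUDA device(s) for this GL context:", devices[:count])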
Profiler Control

    This section describes the profiler control functions of the low-level CUDA driver application programming interface.
cuda.cuda.cuProfilerStart()

    Enable profiling.

    Enables profile collection by the active profiling tool for the current context. If profiling is already enabled, then cuProfilerStart() has no effect.

    The cuProfilerStart and cuProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selected pieces of code.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_CONTEXT

    Return type:
        CUresult

    See also

    cuProfilerInitialize, cuProfilerStop, cudaProfilerStart
cuda.cuda.cuProfilerStop()

    Disable profiling.

    Disables profile collection by the active profiling tool for the current context. If profiling is already disabled, then cuProfilerStop() has no effect.

    The cuProfilerStart and cuProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selected pieces of code.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_CONTEXT

    Return type:
        CUresult

    See also

    cuProfilerInitialize, cuProfilerStart, cudaProfilerStop
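    A minimal sketch of the intended bracketing pattern, assuming a CUDA context is already current and run_region_of_interest() is a hypothetical stand-in for the code being profiled:

        from cuda import cuda

        # Restrict collection to one region: enable just before, disable just after.
        err, = cuda.cuProfilerStart()
        run_region_of_interest()  # hypothetical workload
        err, = cuda.cuProfilerStop()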
VDPAU Interoperability

    This section describes the VDPAU interoperability functions of the low-level CUDA driver application programming interface.
cuda.cuda.cuVDPAUGetDevice(vdpDevice, vdpGetProcAddress)

    Gets the CUDA device associated with a VDPAU device.

    Returns in *pDevice the CUDA device associated with a vdpDevice, if applicable.

    Parameters:
        • vdpDevice (VdpDevice) – A VdpDevice handle
        • vdpGetProcAddress (VdpGetProcAddress) – VDPAU's VdpGetProcAddress function pointer

    Returns:
        • CUresult
        • pDevice (CUdevice) – Returned device associated with vdpDevice
cuda.cuda.cuVDPAUCtxCreate(unsigned int flags, device, vdpDevice, vdpGetProcAddress)

    Create a CUDA context for interoperability with VDPAU.

    Creates a new CUDA context, initializes VDPAU interoperability, and associates the CUDA context with the calling thread. It must be called before performing any other VDPAU interoperability operations. It may fail if the needed VDPAU driver facilities are not available. For usage of the flags parameter, see cuCtxCreate().

    Parameters:
        • flags (unsigned int) – Options for CUDA context creation
        • device (CUdevice) – Device on which to create the context
        • vdpDevice (VdpDevice) – The VdpDevice to interop with
        • vdpGetProcAddress (VdpGetProcAddress) – VDPAU's VdpGetProcAddress function pointer

    Returns:
        • CUresult
        • pCtx (CUcontext) – Returned CUDA context
cuda.cuda.cuGraphicsVDPAURegisterVideoSurface(vdpSurface, unsigned int flags)

    Registers a VDPAU VdpVideoSurface object.

    Registers the VdpVideoSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as pCudaResource. The surface's intended usage is specified using flags, as follows:

        • CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this resource will be used.
        • CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA will not write to this resource.
        • CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA will not read from this resource and will write over every byte of it.

    The VdpVideoSurface is presented as an array of subresources that may be accessed using pointers returned by cuGraphicsSubResourceGetMappedArray. The exact number of valid arrayIndex values depends on the VDPAU surface format; view CUDA Toolkit Documentation for the mapping table. mipLevel must be 0.

    Parameters:
        • vdpSurface (VdpVideoSurface) – The VdpVideoSurface to be registered
        • flags (unsigned int) – Map flags

    Returns:
        • CUresult
        • pCudaResource (CUgraphicsResource) – Returned graphics resource handle
cuda.cuda.cuGraphicsVDPAURegisterOutputSurface(vdpSurface, unsigned int flags)

    Registers a VDPAU VdpOutputSurface object.

    Registers the VdpOutputSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as pCudaResource. The surface's intended usage is specified using flags, as follows:

        • CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this resource will be used.
        • CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA will not write to this resource.
        • CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA will not read from this resource and will write over every byte of it.

    The VdpOutputSurface is presented as an array of subresources that may be accessed using pointers returned by cuGraphicsSubResourceGetMappedArray. The exact number of valid arrayIndex values depends on the VDPAU surface format; view CUDA Toolkit Documentation for the mapping table. mipLevel must be 0.

    Parameters:
        • vdpSurface (VdpOutputSurface) – The VdpOutputSurface to be registered
        • flags (unsigned int) – Map flags

    Returns:
        • CUresult
        • pCudaResource (CUgraphicsResource) – Returned graphics resource handle
diff --git a/docs/module/cudart.html b/docs/module/cudart.html
deleted file mode 100644
index 1e104416..00000000
--- a/docs/module/cudart.html
+++ /dev/null

cudart

Profiler Control

    This section describes the profiler control functions of the CUDA runtime application programming interface.
cuda.cudart.cudaProfilerStart()

    Enable profiling.

    Enables profile collection by the active profiling tool for the current context. If profiling is already enabled, then cudaProfilerStart() has no effect.

    The cudaProfilerStart and cudaProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selected pieces of code.

    Returns:
        cudaSuccess

    Return type:
        cudaError_t
cuda.cudart.cudaProfilerStop()

    Disable profiling.

    Disables profile collection by the active profiling tool for the current context. If profiling is already disabled, then cudaProfilerStop() has no effect.

    The cudaProfilerStart and cudaProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selected pieces of code.

    Returns:
        cudaSuccess

    Return type:
        cudaError_t

Device Management

    This section describes the device management functions of the CUDA runtime application programming interface.
cuda.cudart.cudaDeviceReset()

    Destroy all allocations and reset all state on the current device in the current process.

    Explicitly destroys and cleans up all resources associated with the current device in the current process. It is the caller's responsibility to ensure that the resources are not accessed or passed in subsequent API calls; doing so will result in undefined behavior. These resources include CUDA types cudaStream_t, cudaEvent_t, cudaArray_t, cudaMipmappedArray_t, cudaPitchedPtr, cudaTextureObject_t, cudaSurfaceObject_t, textureReference, surfaceReference, cudaExternalMemory_t, cudaExternalSemaphore_t and cudaGraphicsResource_t. These resources also include memory allocations by cudaMalloc, cudaMallocHost, cudaMallocManaged and cudaMallocPitch. Any subsequent API call to this device will reinitialize the device.

    Note that this function will reset the device immediately. It is the caller's responsibility to ensure that the device is not being accessed by any other host threads from the process when this function is called.

    Returns:
        cudaSuccess

    Return type:
        cudaError_t

    See also

    cudaDeviceSynchronize

    Notes

    cudaDeviceReset() will not destroy memory allocations by cudaMallocAsync() and cudaMallocFromPoolAsync(). These memory allocations need to be destroyed explicitly.

    If a non-primary CUcontext is current to the thread, cudaDeviceReset() will destroy only the internal CUDA RT state for that CUcontext.
cuda.cudart.cudaDeviceSynchronize()

    Wait for compute device to finish.

    Blocks until the device has completed all preceding requested tasks. cudaDeviceSynchronize() returns an error if one of the preceding tasks has failed. If the cudaDeviceScheduleBlockingSync flag was set for this device, the host thread will block until the device has finished its work.

    Returns:
        cudaSuccess

    Return type:
        cudaError_t
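    For example, a typical end-of-process teardown pairs the two calls above: drain outstanding work, then reset the device (a minimal sketch):

        from cuda import cudart

        err, = cudart.cudaDeviceSynchronize()   # wait for all queued work
        assert err == cudart.cudaError_t.cudaSuccess
        err, = cudart.cudaDeviceReset()         # destroy allocations and state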
cuda.cudart.cudaDeviceSetLimit(limit: cudaLimit, size_t value)

    Set resource limits.

    Setting limit to value is a request by the application to update the current limit maintained by the device. The driver is free to modify the requested value to meet hardware requirements (this could be clamping to minimum or maximum values, rounding up to the nearest element size, etc). The application can use cudaDeviceGetLimit() to find out exactly what the limit has been set to.

    Setting each cudaLimit has its own specific restrictions, so each is discussed here.

        • cudaLimitStackSize controls the stack size in bytes of each GPU thread.
        • cudaLimitPrintfFifoSize controls the size in bytes of the shared FIFO used by the printf() device system call. Setting cudaLimitPrintfFifoSize must not be performed after launching any kernel that uses the printf() device system call; in such a case cudaErrorInvalidValue will be returned.
        • cudaLimitMallocHeapSize controls the size in bytes of the heap used by the malloc() and free() device system calls. Setting cudaLimitMallocHeapSize must not be performed after launching any kernel that uses the malloc() or free() device system calls; in such a case cudaErrorInvalidValue will be returned.
        • cudaLimitDevRuntimeSyncDepth controls the maximum nesting depth of a grid at which a thread can safely call cudaDeviceSynchronize(). Setting this limit must be performed before any launch of a kernel that uses the device runtime and calls cudaDeviceSynchronize() above the default sync depth, two levels of grids. Calls to cudaDeviceSynchronize() will fail with error code cudaErrorSyncDepthExceeded if the limitation is violated. This limit can be set smaller than the default or up to the maximum launch depth of 24. When setting this limit, keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory which can no longer be used for user allocations. If these reservations of device memory fail, cudaDeviceSetLimit will return cudaErrorMemoryAllocation, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability < 9.0. Attempting to set this limit on devices of other compute capability will result in the error cudaErrorUnsupportedLimit being returned.
        • cudaLimitDevRuntimePendingLaunchCount controls the maximum number of outstanding device runtime launches that can be made from the current device. A grid is outstanding from the point of launch up until the grid is known to have been completed. Device runtime launches which violate this limitation fail and return cudaErrorLaunchPendingCountExceeded when cudaGetLastError() is called after launch. If more pending launches than the default (2048 launches) are needed for a module using the device runtime, this limit can be increased. Keep in mind that being able to sustain additional pending launches will require the runtime to reserve larger amounts of device memory upfront which can no longer be used for allocations. If these reservations fail, cudaDeviceSetLimit will return cudaErrorMemoryAllocation, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability 3.5 and higher. Attempting to set this limit on devices of compute capability less than 3.5 will result in the error cudaErrorUnsupportedLimit being returned.
        • cudaLimitMaxL2FetchGranularity controls the L2 cache fetch granularity. Values can range from 0B to 128B. This is purely a performance hint and it can be ignored or clamped depending on the platform.
        • cudaLimitPersistingL2CacheSize controls the size in bytes available for persisting L2 cache. This is purely a performance hint and it can be ignored or clamped depending on the platform.

    Parameters:
        • limit (cudaLimit) – Limit to set
        • value (size_t) – Size of limit

    Returns:
        cudaSuccess, cudaErrorUnsupportedLimit, cudaErrorInvalidValue, cudaErrorMemoryAllocation

    Return type:
        cudaError_t
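    A short sketch of the request-then-verify pattern described above, using cudaDeviceGetLimit (documented next) to read back the value the driver actually chose:

        from cuda import cudart

        # Request a 64 KiB per-thread stack; the driver may clamp or round this.
        err, = cudart.cudaDeviceSetLimit(cudart.cudaLimit.cudaLimitStackSize, 64 * 1024)
        err, actual = cudart.cudaDeviceGetLimit(cudart.cudaLimit.cudaLimitStackSize)
        print("effective stack size limit:", actual)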
cuda.cudart.cudaDeviceGetLimit(limit: cudaLimit)

    Return resource limits.

    Returns in *pValue the current size of limit. The supported cudaLimit values are those listed for cudaDeviceSetLimit above: cudaLimitStackSize, cudaLimitPrintfFifoSize, cudaLimitMallocHeapSize, cudaLimitDevRuntimeSyncDepth, cudaLimitDevRuntimePendingLaunchCount, cudaLimitMaxL2FetchGranularity, and cudaLimitPersistingL2CacheSize.

    Parameters:
        limit (cudaLimit) – Limit to query

    Returns:
        • cudaError_t
        • pValue (int) – Returned size of the limit
cuda.cudart.cudaDeviceGetTexture1DLinearMaxWidth(cudaChannelFormatDesc fmtDesc: Optional[cudaChannelFormatDesc], int device)

    Returns the maximum number of elements allocatable in a 1D linear texture for a given element size.

    Returns in maxWidthInElements the maximum number of elements allocatable in a 1D linear texture for the given format descriptor fmtDesc.

    Parameters:
        • fmtDesc (cudaChannelFormatDesc) – Texture format description.
        • device (int) – Device to query.

    Returns:
        • cudaError_t
        • maxWidthInElements (int) – Returned maximum number of elements
cuda.cudart.cudaDeviceGetCacheConfig()

    Returns the preferred cache configuration for the current device.

    On devices where the L1 cache and shared memory use the same hardware resources, this returns through pCacheConfig the preferred cache configuration for the current device. This is only a preference. The runtime will use the requested configuration if possible, but it is free to choose a different configuration if required to execute functions.

    This will return a pCacheConfig of cudaFuncCachePreferNone on devices where the size of the L1 cache and shared memory are fixed.

    The supported cache configurations are:

        • cudaFuncCachePreferNone: no preference for shared memory or L1 (default)
        • cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
        • cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
        • cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory

    Returns:
        • cudaError_t
        • pCacheConfig (cudaFuncCache) – Returned cache configuration

    See also

    cudaDeviceSetCacheConfig, cudaFuncSetCacheConfig (C API), cudaFuncSetCacheConfig (C++ API), cuCtxGetCacheConfig
cuda.cudart.cudaDeviceGetStreamPriorityRange()

    Returns numerical values that correspond to the least and greatest stream priorities.

    Returns in *leastPriority and *greatestPriority the numerical values that correspond to the least and greatest stream priorities respectively. Stream priorities follow a convention where lower numbers imply greater priorities. The range of meaningful stream priorities is given by [*greatestPriority, *leastPriority]. If the user attempts to create a stream with a priority value that is outside the meaningful range as specified by this API, the priority is automatically clamped down or up to either *leastPriority or *greatestPriority respectively. See cudaStreamCreateWithPriority for details on creating a priority stream. A NULL may be passed in for *leastPriority or *greatestPriority if the value is not desired.

    This function will return '0' in both *leastPriority and *greatestPriority if the current context's device does not support stream priorities (see cudaDeviceGetAttribute).

    Returns:
        • cudaError_t – cudaSuccess
        • leastPriority (int) – Pointer to an int in which the numerical value for least stream priority is returned
        • greatestPriority (int) – Pointer to an int in which the numerical value for greatest stream priority is returned
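    For instance, the returned range can feed directly into cudaStreamCreateWithPriority; a minimal sketch (remember that lower numbers mean higher priority):

        from cuda import cudart

        err, least, greatest = cudart.cudaDeviceGetStreamPriorityRange()
        # greatest is the numerically smallest value, i.e. the highest priority.
        err, stream = cudart.cudaStreamCreateWithPriority(
            cudart.cudaStreamNonBlocking, greatest)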
cuda.cudart.cudaDeviceSetCacheConfig(cacheConfig: cudaFuncCache)

    Sets the preferred cache configuration for the current device.

    On devices where the L1 cache and shared memory use the same hardware resources, this sets through cacheConfig the preferred cache configuration for the current device. This is only a preference. The runtime will use the requested configuration if possible, but it is free to choose a different configuration if required to execute the function. Any function preference set via cudaFuncSetCacheConfig (C API) or cudaFuncSetCacheConfig (C++ API) will be preferred over this device-wide setting. Setting the device-wide cache configuration to cudaFuncCachePreferNone will cause subsequent kernel launches to prefer to not change the cache configuration unless required to launch the kernel.

    This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

    Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

    The supported cache configurations are the same as those listed for cudaDeviceGetCacheConfig above.

    Parameters:
        cacheConfig (cudaFuncCache) – Requested cache configuration

    Returns:
        cudaSuccess

    Return type:
        cudaError_t

    See also

    cudaDeviceGetCacheConfig, cudaFuncSetCacheConfig (C API), cudaFuncSetCacheConfig (C++ API), cuCtxSetCacheConfig
cuda.cudart.cudaDeviceGetByPCIBusId(char *pciBusId)

    Returns a handle to a compute device.

    Returns in *device a device ordinal given a PCI bus ID string.

    Parameters:
        pciBusId (bytes) – String in one of the following forms: [domain]:[bus]:[device].[function], [domain]:[bus]:[device], [bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values

    Returns:
        • cudaError_t
        • device (int) – Returned device ordinal
cuda.cudart.cudaDeviceGetPCIBusId(int length, int device)

    Returns a PCI Bus Id string for the device.

    Returns an ASCII string identifying the device dev in the NULL-terminated string pointed to by pciBusId. length specifies the maximum length of the string that may be returned.

    The string is of the form [domain]:[bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values. pciBusId should be large enough to store 13 characters including the NULL-terminator.

    Parameters:
        • length (int) – Maximum length of string to store in pciBusId
        • device (int) – Device to get identifier string for

    Returns:
        • cudaError_t
        • pciBusId (bytes) – Returned identifier string for the device
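    The two PCI bus-ID functions above invert each other; a quick round-trip sketch:

        from cuda import cudart

        # 13 bytes is enough for the ID string including its NUL terminator.
        err, bus_id = cudart.cudaDeviceGetPCIBusId(13, 0)
        err, dev = cudart.cudaDeviceGetByPCIBusId(bus_id)
        print(bus_id, "->", dev)   # dev should be 0 again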
cuda.cudart.cudaIpcGetEventHandle(event)

    Gets an interprocess handle for a previously allocated event.

    Takes as input a previously allocated event. This event must have been created with the cudaEventInterprocess and cudaEventDisableTiming flags set. This opaque handle may be copied into other processes and opened with cudaIpcOpenEventHandle to allow efficient hardware synchronization between GPU work in different processes.

    After the event has been opened in the importing process, cudaEventRecord, cudaEventSynchronize, cudaStreamWaitEvent and cudaEventQuery may be used in either process. Performing operations on the imported event after the exported event has been freed with cudaEventDestroy will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
        event (CUevent or cudaEvent_t) – Event allocated with the cudaEventInterprocess and cudaEventDisableTiming flags.

    Returns:
        • cudaError_t
        • handle (cudaIpcEventHandle_t) – Returned event handle
cuda.cudart.cudaIpcOpenEventHandle(cudaIpcEventHandle_t handle: cudaIpcEventHandle_t)

    Opens an interprocess event handle for use in the current process.

    Opens an interprocess event handle exported from another process with cudaIpcGetEventHandle. This function returns a cudaEvent_t that behaves like a locally created event with the cudaEventDisableTiming flag specified. This event must be freed with cudaEventDestroy.

    Performing operations on the imported event after the exported event has been freed with cudaEventDestroy will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
        handle (cudaIpcEventHandle_t) – Interprocess handle to open

    Returns:
        • cudaError_t
        • event (cudaEvent_t) – Returned event
cuda.cudart.cudaIpcGetMemHandle(devPtr)

    Gets an interprocess memory handle for an existing device memory allocation.

    Takes a pointer to the base of an existing device memory allocation created with cudaMalloc and exports it for use in another process. This is a lightweight operation and may be called multiple times on an allocation without adverse effects.

    If a region of memory is freed with cudaFree and a subsequent call to cudaMalloc returns memory with the same device address, cudaIpcGetMemHandle will return a unique handle for the new memory.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
        devPtr (Any) – Base pointer to previously allocated device memory

    Returns:
        • cudaError_t
        • handle (cudaIpcMemHandle_t) – Returned memory handle
cuda.cudart.cudaIpcOpenMemHandle(cudaIpcMemHandle_t handle: cudaIpcMemHandle_t, unsigned int flags)

    Opens an interprocess memory handle exported from another process and returns a device pointer usable in the local process.

    Maps memory exported from another process with cudaIpcGetMemHandle into the current device address space. For contexts on different devices, cudaIpcOpenMemHandle can attempt to enable peer access between the devices as if the user called cudaDeviceEnablePeerAccess. This behavior is controlled by the cudaIpcMemLazyEnablePeerAccess flag. cudaDeviceCanAccessPeer can determine if a mapping is possible.

    cudaIpcOpenMemHandle can open handles to devices that may not be visible in the process calling the API.

    Contexts that may open cudaIpcMemHandles are restricted in the following way: cudaIpcMemHandles from each device in a given process may only be opened by one context per device per other process.

    If the memory handle has already been opened by the current context, the reference count on the handle is incremented by 1 and the existing device pointer is returned.

    Memory returned from cudaIpcOpenMemHandle must be freed with cudaIpcCloseMemHandle.

    Calling cudaFree on an exported memory region before calling cudaIpcCloseMemHandle in the importing context will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
        • handle (cudaIpcMemHandle_t) – Interprocess memory handle to open
        • flags (unsigned int) – Flags for this operation; must be specified as cudaIpcMemLazyEnablePeerAccess

    Returns:
        • cudaError_t
        • devPtr (Any) – Returned device pointer

    Notes

    No guarantees are made about the address returned in *devPtr. In particular, multiple processes may not receive the same address for the same handle.
cuda.cudart.cudaIpcCloseMemHandle(devPtr)

    Attempts to close memory mapped with cudaIpcOpenMemHandle.

    Decrements the reference count of the memory returned by cudaIpcOpenMemHandle by 1. When the reference count reaches 0, this API unmaps the memory. The original allocation in the exporting process as well as imported mappings in other processes will be unaffected.

    Any resources used to enable peer access will be freed if this is the last mapping using them.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
        devPtr (Any) – Device pointer returned by cudaIpcOpenMemHandle

    Returns:
        cudaSuccess, cudaErrorMapBufferObjectFailed, cudaErrorNotSupported, cudaErrorInvalidValue

    Return type:
        cudaError_t
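    A compressed sketch of the memory-IPC flow across these functions. The two halves would run in different processes, with the opaque handle bytes transferred over a pipe or socket of your choice (transport not shown):

        from cuda import cudart

        # --- exporting process ---
        err, dptr = cudart.cudaMalloc(1 << 20)
        err, handle = cudart.cudaIpcGetMemHandle(dptr)
        # ... send the opaque handle bytes to the other process ...

        # --- importing process ---
        err, peer_ptr = cudart.cudaIpcOpenMemHandle(
            handle, cudart.cudaIpcMemLazyEnablePeerAccess)
        # ... use peer_ptr in kernels or copies ...
        err, = cudart.cudaIpcCloseMemHandle(peer_ptr)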
cuda.cudart.cudaDeviceFlushGPUDirectRDMAWrites(target: cudaFlushGPUDirectRDMAWritesTarget, scope: cudaFlushGPUDirectRDMAWritesScope)

    Blocks until remote writes are visible to the specified scope.

    Blocks until remote writes to the target context via mappings created through GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information), are visible to the specified scope.

    If the scope equals or lies within the scope indicated by cudaDevAttrGPUDirectRDMAWritesOrdering, the call will be a no-op and can be safely omitted for performance. This can be determined by comparing the numerical values between the two enums, with smaller scopes having smaller values.

    Users may query support for this API via cudaDevAttrGPUDirectRDMAFlushWritesOptions.

    Parameters:
        • target (cudaFlushGPUDirectRDMAWritesTarget) – The target of the operation
        • scope (cudaFlushGPUDirectRDMAWritesScope) – The scope of the operation

    Returns:
        cudaSuccess, cudaErrorNotSupported

    Return type:
        cudaError_t
cuda.cudart.cudaDeviceRegisterAsyncNotification(int device, callbackFunc, userData)

    Registers a callback function to receive async notifications.

    Registers callbackFunc to receive async notifications.

    The userData parameter is passed to the callback function at async notification time. Likewise, callback is also passed to the callback function to distinguish between multiple registered callbacks.

    The callback function being registered should be designed to return quickly (~10ms). Any long-running tasks should be queued for execution on an application thread.

    Callbacks may not call cudaDeviceRegisterAsyncNotification or cudaDeviceUnregisterAsyncNotification. Doing so will result in cudaErrorNotPermitted. Async notification callbacks execute in an undefined order and may be serialized.

    Returns in *callback a handle representing the registered callback instance.

    Parameters:
        • device (int) – The device on which to register the callback
        • callbackFunc (cudaAsyncCallback) – The function to register as a callback
        • userData (Any) – A generic pointer to user data. This is passed into the callback function.

    Returns:
        • cudaError_t
        • callback (cudaAsyncCallbackHandle_t) – A handle representing the registered callback instance
cuda.cudart.cudaDeviceUnregisterAsyncNotification(int device, callback)

    Unregisters an async notification callback.

    Unregisters callback so that the corresponding callback function will stop receiving async notifications.

    Parameters:
        • device (int) – The device from which to remove callback.
        • callback (cudaAsyncCallbackHandle_t) – The callback instance to unregister from receiving async notifications.

    Returns:
        cudaSuccess, cudaErrorNotSupported, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorNotPermitted, cudaErrorUnknown

    Return type:
        cudaError_t
cuda.cudart.cudaGetDeviceCount()

    Returns the number of compute-capable devices.

    Returns in *count the number of devices with compute capability greater than or equal to 2.0 that are available for execution.

    Returns:
        • cudaError_t – cudaSuccess
        • count (int) – Returns the number of devices with compute capability greater than or equal to 2.0
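    A small enumeration sketch combining this call with cudaGetDeviceProperties (documented next):

        from cuda import cudart

        err, count = cudart.cudaGetDeviceCount()
        for dev in range(count):
            err, prop = cudart.cudaGetDeviceProperties(dev)
            # prop.name is a fixed-size byte field; trim at the NUL terminator.
            name = prop.name.split(b"\x00")[0].decode()
            print(f"device {dev}: {name} (SM {prop.major}.{prop.minor})")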
cuda.cudart.cudaGetDeviceProperties(int device)

    Returns information about the compute-device.

    Returns in *prop the properties of device device. The cudaDeviceProp structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:

        • name[256] is an ASCII string identifying the device.
        • uuid is a 16-byte unique identifier.
        • totalGlobalMem is the total amount of global memory available on the device in bytes.
        • sharedMemPerBlock is the maximum amount of shared memory available to a thread block in bytes.
        • regsPerBlock is the maximum number of 32-bit registers available to a thread block.
        • warpSize is the warp size in threads.
        • memPitch is the maximum pitch in bytes allowed by the memory copy functions that involve memory regions allocated through cudaMallocPitch().
        • maxThreadsPerBlock is the maximum number of threads per block.
        • maxThreadsDim[3] contains the maximum size of each dimension of a block.
        • maxGridSize[3] contains the maximum size of each dimension of a grid.
        • clockRate is the clock frequency in kilohertz.
        • totalConstMem is the total amount of constant memory available on the device in bytes.
        • major, minor are the major and minor revision numbers defining the device's compute capability.
        • textureAlignment is the alignment requirement; texture base addresses that are aligned to textureAlignment bytes do not need an offset applied to texture fetches.
        • texturePitchAlignment is the pitch alignment requirement for 2D texture references that are bound to pitched memory.
        • deviceOverlap is 1 if the device can concurrently copy memory between host and device while executing a kernel, or 0 if not. Deprecated; use asyncEngineCount instead.
        • multiProcessorCount is the number of multiprocessors on the device.
        • kernelExecTimeoutEnabled is 1 if there is a run time limit for kernels executed on the device, or 0 if not.
        • integrated is 1 if the device is an integrated (motherboard) GPU and 0 if it is a discrete (card) component.
        • canMapHostMemory is 1 if the device can map host memory into the CUDA address space for use with cudaHostAlloc()/cudaHostGetDevicePointer(), or 0 if not.
        • computeMode is the compute mode that the device is currently in. Available modes are as follows:
            • cudaComputeModeDefault: Default mode - Device is not restricted and multiple threads can use cudaSetDevice() with this device.
            • cudaComputeModeProhibited: Compute-prohibited mode - No threads can use cudaSetDevice() with this device.
            • cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many threads in one process will be able to use cudaSetDevice() with this device. When an occupied exclusive mode device is chosen with cudaSetDevice, all subsequent non-device management runtime functions will return cudaErrorDevicesUnavailable.
        • maxTexture1D is the maximum 1D texture size.
        • maxTexture1DMipmap is the maximum 1D mipmapped texture size.
        • maxTexture1DLinear is the maximum 1D texture size for textures bound to linear memory.
        • maxTexture2D[2] contains the maximum 2D texture dimensions.
        • maxTexture2DMipmap[2] contains the maximum 2D mipmapped texture dimensions.
        • maxTexture2DLinear[3] contains the maximum 2D texture dimensions for 2D textures bound to pitch linear memory.
        • maxTexture2DGather[2] contains the maximum 2D texture dimensions if texture gather operations have to be performed.
        • maxTexture3D[3] contains the maximum 3D texture dimensions.
        • maxTexture3DAlt[3] contains the maximum alternate 3D texture dimensions.
        • maxTextureCubemap is the maximum cubemap texture width or height.
        • maxTexture1DLayered[2] contains the maximum 1D layered texture dimensions.
        • maxTexture2DLayered[3] contains the maximum 2D layered texture dimensions.
        • maxTextureCubemapLayered[2] contains the maximum cubemap layered texture dimensions.
        • maxSurface1D is the maximum 1D surface size.
        • maxSurface2D[2] contains the maximum 2D surface dimensions.
        • maxSurface3D[3] contains the maximum 3D surface dimensions.
        • maxSurface1DLayered[2] contains the maximum 1D layered surface dimensions.
        • maxSurface2DLayered[3] contains the maximum 2D layered surface dimensions.
        • maxSurfaceCubemap is the maximum cubemap surface width or height.
        • maxSurfaceCubemapLayered[2] contains the maximum cubemap layered surface dimensions.
        • surfaceAlignment specifies the alignment requirements for surfaces.
        • concurrentKernels is 1 if the device supports executing multiple kernels within the same context simultaneously, or 0 if not. It is not guaranteed that multiple kernels will be resident on the device concurrently, so this feature should not be relied upon for correctness.
        • ECCEnabled is 1 if the device has ECC support turned on, or 0 if not.
        • pciBusID is the PCI bus identifier of the device.
        • pciDeviceID is the PCI device (sometimes called slot) identifier of the device.
        • pciDomainID is the PCI domain identifier of the device.
        • tccDriver is 1 if the device is using a TCC driver or 0 if not.
        • asyncEngineCount is 1 when the device can concurrently copy memory between host and device while executing a kernel. It is 2 when the device can concurrently copy memory between host and device in both directions and execute a kernel at the same time. It is 0 if neither of these is supported.
        • unifiedAddressing is 1 if the device shares a unified address space with the host and 0 otherwise.
        • memoryClockRate is the peak memory clock frequency in kilohertz.
        • memoryBusWidth is the memory bus width in bits.
        • l2CacheSize is the L2 cache size in bytes.
        • persistingL2CacheMaxSize is the L2 cache's maximum persisting lines size in bytes.
        • maxThreadsPerMultiProcessor is the maximum number of resident threads per multiprocessor.
        • streamPrioritiesSupported is 1 if the device supports stream priorities, or 0 if it is not supported.
        • globalL1CacheSupported is 1 if the device supports caching of globals in L1 cache, or 0 if it is not supported.
        • localL1CacheSupported is 1 if the device supports caching of locals in L1 cache, or 0 if it is not supported.
        • sharedMemPerMultiprocessor is the maximum amount of shared memory available to a multiprocessor in bytes; this amount is shared by all thread blocks simultaneously resident on a multiprocessor.
        • regsPerMultiprocessor is the maximum number of 32-bit registers available to a multiprocessor; this number is shared by all thread blocks simultaneously resident on a multiprocessor.
        • managedMemory is 1 if the device supports allocating managed memory on this system, or 0 if it is not supported.
        • isMultiGpuBoard is 1 if the device is on a multi-GPU board (e.g. Gemini cards), and 0 if not.
        • multiGpuBoardGroupID is a unique identifier for a group of devices associated with the same board. Devices on the same multi-GPU board will share the same identifier.
        • hostNativeAtomicSupported is 1 if the link between the device and the host supports native atomic operations, or 0 if it is not supported.
        • singleToDoublePrecisionPerfRatio is the ratio of single precision performance (in floating-point operations per second) to double precision performance.
        • pageableMemoryAccess is 1 if the device supports coherently accessing pageable memory without calling cudaHostRegister on it, and 0 otherwise.
        • concurrentManagedAccess is 1 if the device can coherently access managed memory concurrently with the CPU, and 0 otherwise.
        • computePreemptionSupported is 1 if the device supports Compute Preemption, and 0 otherwise.
        • canUseHostPointerForRegisteredMem is 1 if the device can access host registered memory at the same virtual address as the CPU, and 0 otherwise.
        • cooperativeLaunch is 1 if the device supports launching cooperative kernels via cudaLaunchCooperativeKernel, and 0 otherwise.
        • cooperativeMultiDeviceLaunch is 1 if the device supports launching cooperative kernels via cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise.
        • sharedMemPerBlockOptin is the per-device maximum shared memory per block usable by special opt-in.
        • pageableMemoryAccessUsesHostPageTables is 1 if the device accesses pageable memory via the host's page tables, and 0 otherwise.
        • directManagedMemAccessFromHost is 1 if the host can directly access managed memory on the device without migration, and 0 otherwise.
        • maxBlocksPerMultiProcessor is the maximum number of thread blocks that can reside on a multiprocessor.
        • accessPolicyMaxWindowSize is the maximum value of num_bytes.
        • reservedSharedMemPerBlock is the shared memory reserved by the CUDA driver per block in bytes.
        • hostRegisterSupported is 1 if the device supports host memory registration via cudaHostRegister, and 0 otherwise.
        • sparseCudaArraySupported is 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise.
        • hostRegisterReadOnlySupported is 1 if the device supports using the cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU.
        • timelineSemaphoreInteropSupported is 1 if external timeline semaphore interop is supported on the device, 0 otherwise.
        • memoryPoolsSupported is 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise.
        • gpuDirectRDMASupported is 1 if the device supports GPUDirect RDMA APIs, 0 otherwise.
        • gpuDirectRDMAFlushWritesOptions is a bitmask to be interpreted according to the cudaFlushGPUDirectRDMAWritesOptions enum.
        • gpuDirectRDMAWritesOrdering – see the cudaGPUDirectRDMAWritesOrdering enum for numerical values.
        • memoryPoolSupportedHandleTypes is a bitmask of handle types supported with mempool-based IPC.
        • deferredMappingCudaArraySupported is 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays.
        • ipcEventSupported is 1 if the device supports IPC Events, and 0 otherwise.
        • unifiedFunctionPointers is 1 if the device supports unified pointers, and 0 otherwise.

    Parameters:
        device (int) – Device number to get properties for

    Returns:
        • cudaError_t
        • prop (cudaDeviceProp) – Properties for the specified device
cuda.cudart.cudaDeviceGetAttribute(attr: cudaDeviceAttr, int device)

    Returns information about the device.

    Returns in *value the integer value of the attribute attr on device device. The supported attributes are the cudaDeviceAttr values; view CUDA Toolkit Documentation for the full list.

    Parameters:
        • attr (cudaDeviceAttr) – Device attribute to query
        • device (int) – Device number to query

    Returns:
        • cudaError_t
        • value (int) – Returned device attribute value
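    Querying a single attribute is cheaper than fetching the whole cudaDeviceProp struct; a one-line sketch:

        from cuda import cudart

        err, sm_count = cudart.cudaDeviceGetAttribute(
            cudart.cudaDeviceAttr.cudaDevAttrMultiProcessorCount, 0)
        print("multiprocessors on device 0:", sm_count)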
cuda.cudart.cudaDeviceGetDefaultMemPool(int device)

    Returns the default mempool of a device.

    The default mempool of a device contains device memory from that device.

    Parameters:
        device (int) – The device to get the default memory pool for

    Returns:
        • cudaError_t
        • memPool (cudaMemPool_t) – The default memory pool of the device
cuda.cudart.cudaDeviceSetMemPool(int device, memPool)

    Sets the current memory pool of a device.

    The memory pool must be local to the specified device. Unless a mempool is specified in the cudaMallocAsync call, cudaMallocAsync allocates from the current mempool of the provided stream's device. By default, a device's current memory pool is its default memory pool.

    Parameters:
        • device (int) – The device to set the memory pool for
        • memPool (cudaMemPool_t) – The memory pool to make current

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice, cudaErrorNotSupported

    Return type:
        cudaError_t

    Notes

    Use cudaMallocFromPoolAsync to specify asynchronous allocations from a device different than the one the stream runs on.
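    Combining the two mempool entries above with stream-ordered allocation, a minimal sketch (it explicitly re-installs the default pool, which is already current by default, purely to show the call shape):

        from cuda import cudart

        err, pool = cudart.cudaDeviceGetDefaultMemPool(0)
        err, = cudart.cudaDeviceSetMemPool(0, pool)   # a no-op here; shown for shape

        err, stream = cudart.cudaStreamCreate()
        err, dptr = cudart.cudaMallocAsync(1 << 20, stream)  # draws from the pool
        err, = cudart.cudaFreeAsync(dptr, stream)
        err, = cudart.cudaStreamSynchronize(stream)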
cuda.cudart.cudaDeviceGetMemPool(int device)

    Gets the current mempool for a device.

    Returns the last pool provided to cudaDeviceSetMemPool for this device, or the device's default memory pool if cudaDeviceSetMemPool has never been called. By default the current mempool is the default mempool for a device; otherwise the returned pool must have been set with cuDeviceSetMemPool or cudaDeviceSetMemPool.

    Parameters:
        device (int) – The device to query

    Returns:
        • cudaError_t
        • memPool (cudaMemPool_t) – The current memory pool of the device
cuda.cudart.cudaDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, int device, int flags)

    Return NvSciSync attributes that this device can support.

    Returns in nvSciSyncAttrList the properties of NvSciSync that this CUDA device can support. The returned nvSciSyncAttrList can be used to create an NvSciSync that matches this device's capabilities.

    If the NvSciSyncAttrKey_RequiredPerm field in nvSciSyncAttrList is already set, this API will return cudaErrorInvalidValue.

    The application should set nvSciSyncAttrList to a valid NvSciSyncAttrList, failing which this API will return cudaErrorInvalidHandle.

    The flags parameter controls how the application intends to use the NvSciSync created from the nvSciSyncAttrList. The valid flags are:

        • cudaNvSciSyncAttrSignal – specifies that the application intends to signal an NvSciSync on this CUDA device.
        • cudaNvSciSyncAttrWait – specifies that the application intends to wait on an NvSciSync on this CUDA device.

    At least one of these flags must be set, failing which the API returns cudaErrorInvalidValue. The flags are orthogonal to one another: a developer may set both, which allows setting both wait- and signal-specific attributes in the same nvSciSyncAttrList.

    Note that this API updates the input nvSciSyncAttrList with values equivalent to the following public attribute key-values. NvSciSyncAttrKey_RequiredPerm is set to

        • NvSciSyncAccessPerm_SignalOnly if cudaNvSciSyncAttrSignal is set in flags.
        • NvSciSyncAccessPerm_WaitOnly if cudaNvSciSyncAttrWait is set in flags.
        • NvSciSyncAccessPerm_WaitSignal if both cudaNvSciSyncAttrWait and cudaNvSciSyncAttrSignal are set in flags.

    NvSciSyncAttrKey_PrimitiveInfo is set to

        • NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid device.
        • NvSciSyncAttrValPrimitiveType_Syncpoint if device is a Tegra device.
        • NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if device is GA10X+.

    NvSciSyncAttrKey_GpuId is set to the same UUID that is returned for this device by cudaGetDeviceProperties.

    Parameters:
        • nvSciSyncAttrList (Any) – Return NvSciSync attributes supported.
        • device (int) – Valid CUDA device to get NvSciSync attributes for.
        • flags (int) – Flags describing NvSciSync usage.

    Returns:
        cudaSuccess, cudaErrorDeviceUninitialized, cudaErrorInvalidValue, cudaErrorInvalidHandle, cudaErrorInvalidDevice, cudaErrorNotSupported, cudaErrorMemoryAllocation

    Return type:
        cudaError_t
cuda.cudart.cudaDeviceGetP2PAttribute(attr: cudaDeviceP2PAttr, int srcDevice, int dstDevice)

    Queries attributes of the link between two devices.

    Returns in *value the value of the requested attribute attr of the link between srcDevice and dstDevice. The supported attributes are:

        • cudaDevP2PAttrPerformanceRank: A relative value indicating the performance of the link between two devices. Lower values indicate better performance.
        • cudaDevP2PAttrAccessSupported: 1 if peer access is enabled.
        • cudaDevP2PAttrNativeAtomicSupported: 1 if native atomic operations over the link are supported.
        • cudaDevP2PAttrCudaArrayAccessSupported: 1 if accessing CUDA arrays over the link is supported.

    Returns cudaErrorInvalidDevice if srcDevice or dstDevice are not valid or if they represent the same device.

    Returns cudaErrorInvalidValue if attr is not valid or if value is a null pointer.

    Parameters:
        • attr (cudaDeviceP2PAttr) – The requested attribute of the link between srcDevice and dstDevice.
        • srcDevice (int) – The source device of the target link.
        • dstDevice (int) – The destination device of the target link.

    Returns:
        • cudaError_t
        • value (int) – Returned value of the requested attribute
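    For example, checking whether direct peer access is available between devices 0 and 1 (a sketch assuming the machine has at least two devices):

        from cuda import cudart

        err, ok = cudart.cudaDeviceGetP2PAttribute(
            cudart.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported, 0, 1)
        print("peer access 0 -> 1 supported:", bool(ok))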
    cuda.cudart.cudaChooseDevice(cudaDeviceProp prop: Optional[cudaDeviceProp])#

    Select the compute device which best matches the given criteria.

    Returns in *device the device which has properties that best match *prop.

    Parameters:

    prop (cudaDeviceProp) – Desired device properties

    Returns:
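    A minimal sketch of calling this from the Python binding; the assignable prop fields and the preference for a compute capability 8.x device are assumptions for illustration:

        from cuda import cudart

        prop = cudart.cudaDeviceProp()
        prop.major = 8  # assumed preference: a compute capability 8.x device
        err, device = cudart.cudaChooseDevice(prop)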
    cuda.cudart.cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags)#

    Initialize a device to be used for GPU executions.

    This function will initialize the CUDA Runtime structures and primary context on device when called, but the context will not be made current to device.

    When cudaInitDeviceFlagsAreValid is set in flags, deviceFlags are applied to the requested device. The values of deviceFlags match those of the flags parameter in cudaSetDeviceFlags. The effect may be verified by cudaGetDeviceFlags.

    This function will return an error if the device is in cudaComputeModeExclusiveProcess and is occupied by another process or if the device is in cudaComputeModeProhibited.

    Parameters:
    • device (int) – Device on which the runtime will initialize itself.

    • deviceFlags (unsigned int) – Parameters for device operation.

    • flags (unsigned int) – Flags for controlling the device initialization.

    Returns:

    cudaSuccess, cudaErrorInvalidDevice

    Return type:

    cudaError_t
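    A minimal sketch, assuming the binding exposes the cudaInitDeviceFlagsAreValid and cudaDeviceScheduleBlockingSync constants at module level:

        from cuda import cudart

        # Initialize device 0 up front (without making it current) and apply
        # blocking-sync scheduling as its device flags.
        err, = cudart.cudaInitDevice(0,
                                     cudart.cudaDeviceScheduleBlockingSync,
                                     cudart.cudaInitDeviceFlagsAreValid)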
    cuda.cudart.cudaSetDevice(int device)#

    Set device to be used for GPU executions.

    Sets device as the current device for the calling host thread. Valid device ids are 0 to (cudaGetDeviceCount() - 1).

    Any device memory subsequently allocated from this host thread using cudaMalloc(), cudaMallocPitch() or cudaMallocArray() will be physically resident on device. Any host memory allocated from this host thread using cudaMallocHost() or cudaHostAlloc() or cudaHostRegister() will have its lifetime associated with device. Any streams or events created from this host thread will be associated with device. Any kernels launched from this host thread using the <<<>>> operator or cudaLaunchKernel() will be executed on device.

    This call may be made from any host thread, to any device, and at any time. This function will do no synchronization with the previous or new device, and should only take significant time when it initializes the runtime’s context state. This call will bind the primary context of the specified device to the calling thread and all the subsequent memory allocations, stream and event creations, and kernel launches will be associated with the primary context. This function will also immediately initialize the runtime state on the primary context, and the context will be current on device immediately. This function will return an error if the device is in cudaComputeModeExclusiveProcess and is occupied by another process or if the device is in cudaComputeModeProhibited.

    It is not required to call cudaInitDevice before using this function.

    Parameters:

    device (int) – Device on which the active host thread should execute the device code.

    Returns:

    cudaSuccess, cudaErrorInvalidDevice, cudaErrorDeviceUnavailable

    Return type:

    cudaError_t

    cuda.cudart.cudaGetDevice()#

    Returns which device is currently being used.

    Returns in *device the current device for the calling host thread.

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorDeviceUnavailable

    • device (int) – Returns the device on which the active host thread executes the device code.
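    A minimal sketch pairing cudaSetDevice with cudaGetDevice:

        from cuda import cudart

        err, = cudart.cudaSetDevice(0)      # bind device 0 to this host thread
        err, dev = cudart.cudaGetDevice()   # read the current device back
        assert dev == 0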
    cuda.cudart.cudaSetDeviceFlags(unsigned int flags)#

    Sets flags to be used for device executions.

    Records flags as the flags for the current device. If the current device has been set and that device has already been initialized, the previous flags are overwritten. If the current device has not been initialized, it is initialized with the provided flags. If no device has been made current to the calling thread, a default device is selected and initialized with the provided flags.

    The three LSBs of the flags parameter can be used to control how the CPU thread interacts with the OS scheduler when waiting for results from the device.

    • cudaDeviceScheduleAuto: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the device, otherwise CUDA will not yield while waiting for results and actively spin on the processor. Additionally, on Tegra devices, cudaDeviceScheduleAuto uses a heuristic based on the power profile of the platform and may choose cudaDeviceScheduleBlockingSync for low-powered devices.

    • cudaDeviceScheduleSpin: Instruct CUDA to actively spin when waiting for results from the device. This can decrease latency when waiting for the device, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.

    • cudaDeviceScheduleYield: Instruct CUDA to yield its thread when waiting for results from the device. This can increase latency when waiting for the device, but can increase the performance of CPU threads performing work in parallel with the device.

    • cudaDeviceScheduleBlockingSync: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the device to finish work.

    • cudaDeviceBlockingSync: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the device to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and replaced with cudaDeviceScheduleBlockingSync.

    • cudaDeviceMapHost: This flag enables allocating pinned host memory that is accessible to the device. It is implicit for the runtime but may be absent if a context is created using the driver API. If this flag is not set, cudaHostGetDevicePointer() will always return a failure code.

    • cudaDeviceLmemResizeToMax: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled.

    • cudaDeviceSyncMemops: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled “API Synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    Parameters:

    flags (unsigned int) – Parameters for device operation

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGetDeviceFlags()#

    Gets the flags for the current device.

    Returns in flags the flags for the current device. If there is a current device for the calling thread, the flags for the device are returned. If there is no current device, the flags for the first device are returned, which may be the default flags. Compare to the behavior of cudaSetDeviceFlags.

    Typically, the flags returned should match the behavior that will be seen if the calling thread uses a device after this call, without any change to the flags or current device in between by this or another thread. Note that if the device is not initialized, it is possible for another thread to change the flags for the current device before it is initialized. Additionally, when using exclusive mode, if this thread has not requested a specific device, it may use a device other than the first device, contrary to the assumption made by this function.

    If a context has been created via the driver API and is current to the calling thread, the flags for that context are always returned.

    Flags returned by this function may specifically include cudaDeviceMapHost even though it is not accepted by cudaSetDeviceFlags because it is implicit in runtime API flags. The reason for this is that the current context may have been created via the driver API in which case the flag is not implicit and may be unset.

    Returns:
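    A minimal sketch pairing cudaSetDeviceFlags with cudaGetDeviceFlags; the module-level cudaDeviceScheduleBlockingSync constant is assumed to be exposed by the binding:

        from cuda import cudart

        err, = cudart.cudaSetDeviceFlags(cudart.cudaDeviceScheduleBlockingSync)
        err, flags = cudart.cudaGetDeviceFlags()  # verify the recorded flags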

    Error Handling#

    This section describes the error handling functions of the CUDA runtime application programming interface.
    cuda.cudart.cudaGetLastError()#

    Returns the last error from a runtime call.

    Returns the last error that has been produced by any of the runtime calls in the same instance of the CUDA Runtime library in the host thread and resets it to cudaSuccess.

    Note: Multiple instances of the CUDA Runtime library can be present in an application when using a library that statically links the CUDA Runtime.

    Returns:

    cudaSuccess, cudaErrorMissingConfiguration, cudaErrorMemoryAllocation, cudaErrorInitializationError, cudaErrorLaunchFailure, cudaErrorLaunchTimeout, cudaErrorLaunchOutOfResources, cudaErrorInvalidDeviceFunction, cudaErrorInvalidConfiguration, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidSymbol, cudaErrorUnmapBufferObjectFailed, cudaErrorInvalidDevicePointer, cudaErrorInvalidTexture, cudaErrorInvalidTextureBinding, cudaErrorInvalidChannelDescriptor, cudaErrorInvalidMemcpyDirection, cudaErrorInvalidFilterSetting, cudaErrorInvalidNormSetting, cudaErrorUnknown, cudaErrorInvalidResourceHandle, cudaErrorInsufficientDriver, cudaErrorNoDevice, cudaErrorSetOnActiveProcess, cudaErrorStartupFailure, cudaErrorInvalidPtx, cudaErrorUnsupportedPtxVersion, cudaErrorNoKernelImageForDevice, cudaErrorJitCompilerNotFound, cudaErrorJitCompilationDisabled

    Return type:

    cudaError_t

    cuda.cudart.cudaPeekAtLastError()#

    Returns the last error from a runtime call.

    Returns the last error that has been produced by any of the runtime calls in the same instance of the CUDA Runtime library in the host thread. This call does not reset the error to cudaSuccess like cudaGetLastError().

    Note: Multiple instances of the CUDA Runtime library can be present in an application when using a library that statically links the CUDA Runtime.

    Returns:

    cudaSuccess, cudaErrorMissingConfiguration, cudaErrorMemoryAllocation, cudaErrorInitializationError, cudaErrorLaunchFailure, cudaErrorLaunchTimeout, cudaErrorLaunchOutOfResources, cudaErrorInvalidDeviceFunction, cudaErrorInvalidConfiguration, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidSymbol, cudaErrorUnmapBufferObjectFailed, cudaErrorInvalidDevicePointer, cudaErrorInvalidTexture, cudaErrorInvalidTextureBinding, cudaErrorInvalidChannelDescriptor, cudaErrorInvalidMemcpyDirection, cudaErrorInvalidFilterSetting, cudaErrorInvalidNormSetting, cudaErrorUnknown, cudaErrorInvalidResourceHandle, cudaErrorInsufficientDriver, cudaErrorNoDevice, cudaErrorSetOnActiveProcess, cudaErrorStartupFailure, cudaErrorInvalidPtx, cudaErrorUnsupportedPtxVersion, cudaErrorNoKernelImageForDevice, cudaErrorJitCompilerNotFound, cudaErrorJitCompilationDisabled

    Return type:

    cudaError_t
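    A minimal sketch of the peek/get distinction, deliberately provoking an error with an invalid device ordinal:

        from cuda import cudart

        err, = cudart.cudaSetDevice(-1)         # provokes cudaErrorInvalidDevice
        peeked, = cudart.cudaPeekAtLastError()  # error is still set after peeking
        got, = cudart.cudaGetLastError()        # returns the error and resets it
        cleared, = cudart.cudaGetLastError()    # cudaSuccess again
        assert peeked == got and cleared == cudart.cudaError_t.cudaSuccess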
    cuda.cudart.cudaGetErrorName(error: cudaError_t)#

    Returns the string representation of an error code enum name.

    Returns a string containing the name of an error code in the enum. If the error code is not recognized, “unrecognized error code” is returned.

    Parameters:

    error (cudaError_t) – Error code to convert to string

    Returns:
    • cudaError_t.cudaSuccess – cudaError_t.cudaSuccess

    • bytes – char* pointer to a NULL-terminated string

    cuda.cudart.cudaGetErrorString(error: cudaError_t)#

    Returns the description string for an error code.

    Returns the description string for an error code. If the error code is not recognized, “unrecognized error code” is returned.

    Parameters:

    error (cudaError_t) – Error code to convert to string

    Returns:
    • cudaError_t.cudaSuccess – cudaError_t.cudaSuccess

    • bytes – char* pointer to a NULL-terminated string
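    A minimal sketch converting an error code to its name and description (both returned as bytes by the binding):

        from cuda import cudart

        code = cudart.cudaError_t.cudaErrorInvalidValue
        _, name = cudart.cudaGetErrorName(code)    # e.g. b'cudaErrorInvalidValue'
        _, desc = cudart.cudaGetErrorString(code)  # e.g. b'invalid argument'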
    Stream Management#

    This section describes the stream management functions of the CUDA runtime application programming interface.

    class cuda.cudart.cudaStreamCallback_t(*args, **kwargs)#

    getPtr()#

    Get memory address of class instance.
    cuda.cudart.cudaStreamCreate()#

    Create an asynchronous stream.

    Creates a new asynchronous stream on the context that is current to the calling host thread. If no context is current to the calling host thread, then the primary context for a device is selected, made current to the calling thread, and initialized before creating a stream on it.

    Returns:
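    A minimal sketch of a stream's lifecycle: create it, queue stream-ordered work, synchronize, and destroy it:

        from cuda import cudart

        nbytes = 1 << 20
        err, stream = cudart.cudaStreamCreate()
        err, dptr = cudart.cudaMalloc(nbytes)
        err, = cudart.cudaMemsetAsync(dptr, 0, nbytes, stream)  # stream-ordered work
        err, = cudart.cudaStreamSynchronize(stream)
        err, = cudart.cudaFree(dptr)
        err, = cudart.cudaStreamDestroy(stream)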
    cuda.cudart.cudaStreamCreateWithFlags(unsigned int flags)#

    Create an asynchronous stream.

    Creates a new asynchronous stream on the context that is current to the calling host thread. If no context is current to the calling host thread, then the primary context for a device is selected, made current to the calling thread, and initialized before creating a stream on it. The flags argument determines the behaviors of the stream. Valid values for flags are

    • cudaStreamDefault: Default stream creation flag.

    • cudaStreamNonBlocking: Specifies that work running in the created stream may run concurrently with work in stream 0 (the NULL stream), and that the created stream should perform no implicit synchronization with stream 0.

    Parameters:

    flags (unsigned int) – Parameters for stream creation

    Returns:

    cuda.cudart.cudaStreamCreateWithPriority(unsigned int flags, int priority)#

    Create an asynchronous stream with the specified priority.

    Creates a stream with the specified priority and returns a handle in pStream. The stream is created on the context that is current to the calling host thread. If no context is current to the calling host thread, then the primary context for a device is selected, made current to the calling thread, and initialized before creating a stream on it. This affects the scheduling priority of work in the stream. Priorities provide a hint to preferentially run work with higher priority when possible, but do not preempt already-running work or provide any other functional guarantee on execution order.

    priority follows a convention where lower numbers represent higher priorities. ‘0’ represents default priority. The range of meaningful numerical priorities can be queried using cudaDeviceGetStreamPriorityRange. If the specified priority is outside the numerical range returned by cudaDeviceGetStreamPriorityRange, it will automatically be clamped to the lowest or the highest number in the range.

    Parameters:
    • flags (unsigned int) – Flags for stream creation. See cudaStreamCreateWithFlags for a list of valid flags that can be passed

    • priority (int) – Priority of the stream. Lower numbers represent higher priorities. See cudaDeviceGetStreamPriorityRange for more information about the meaningful stream priorities that can be passed.

    Returns:

    Notes

    Stream priorities are supported only on GPUs with compute capability 3.5 or higher.

    In the current implementation, only compute kernels launched in priority streams are affected by the stream’s priority. Stream priorities have no effect on host-to-device and device-to-host memory operations.
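    A minimal sketch: query the meaningful priority range, then create a non-blocking stream at the highest (numerically lowest) priority:

        from cuda import cudart

        err, least, greatest = cudart.cudaDeviceGetStreamPriorityRange()
        err, stream = cudart.cudaStreamCreateWithPriority(
            cudart.cudaStreamNonBlocking, greatest)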
    cuda.cudart.cudaStreamGetPriority(hStream)#

    Query the priority of a stream.

    Query the priority of a stream. The priority is returned in priority. Note that if the stream was created with a priority outside the meaningful numerical range returned by cudaDeviceGetStreamPriorityRange, this function returns the clamped priority. See cudaStreamCreateWithPriority for details about priority clamping.

    Parameters:

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:

    cuda.cudart.cudaStreamGetFlags(hStream)#

    Query the flags of a stream.

    Query the flags of a stream. The flags are returned in flags. See cudaStreamCreateWithFlags for a list of valid flags.

    Parameters:

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:

    cuda.cudart.cudaStreamGetId(hStream)#

    Query the Id of a stream.

    Query the Id of a stream. The Id is returned in streamId. The Id is unique for the life of the program.

    The stream handle hStream can refer to any stream created via the runtime or driver APIs, or to one of the special streams such as cudaStreamLegacy or cudaStreamPerThread.

    Parameters:

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
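    A minimal sketch querying the three per-stream properties described above:

        from cuda import cudart

        err, stream = cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)
        err, priority = cudart.cudaStreamGetPriority(stream)
        err, flags = cudart.cudaStreamGetFlags(stream)
        err, stream_id = cudart.cudaStreamGetId(stream)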
    cuda.cudart.cudaCtxResetPersistingL2Cache()#

    Resets all persisting lines in cache to normal status.

    Resets all persisting lines in cache to normal status. Takes effect on function return.

    Returns:

    cudaSuccess

    Return type:

    cudaError_t

    cuda.cudart.cudaStreamCopyAttributes(dst, src)#

    Copies attributes from source stream to destination stream.

    Copies attributes from source stream src to destination stream dst. Both streams must have the same context.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorNotSupported

    Return type:

    cudaError_t

    cuda.cudart.cudaStreamGetAttribute(hStream, attr: cudaStreamAttrID)#

    Queries stream attribute.

    Queries attribute attr from hStream and stores it in corresponding member of value_out.

    Parameters:

    Returns:

    cuda.cudart.cudaStreamSetAttribute(hStream, attr: cudaStreamAttrID, cudaStreamAttrValue value: Optional[cudaStreamAttrValue])#

    Sets stream attribute.

    Sets attribute attr on hStream from corresponding attribute of value. The updated attribute will be applied to subsequent work submitted to the stream. It will not affect previously submitted work.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t

    cuda.cudart.cudaStreamDestroy(stream)#

    Destroys and cleans up an asynchronous stream.

    Destroys and cleans up the asynchronous stream specified by stream.

    In case the device is still doing work in the stream stream when cudaStreamDestroy() is called, the function will return immediately and the resources associated with stream will be released automatically once the device has completed all work in stream.

    Parameters:

    stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t
    cuda.cudart.cudaStreamWaitEvent(stream, event, unsigned int flags)#

    Make a compute stream wait on an event.

    Makes all future work submitted to stream wait for all work captured in event. See cudaEventRecord() for details on what is captured by an event. The synchronization will be performed efficiently on the device when applicable. event may be from a different device than stream.

    flags include cudaEventWaitDefault and cudaEventWaitExternal.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t
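    A minimal sketch of cross-stream ordering: the consumer stream waits for the work captured by an event recorded on the producer stream (flags=0 for the default wait):

        from cuda import cudart

        err, producer = cudart.cudaStreamCreate()
        err, consumer = cudart.cudaStreamCreate()
        err, event = cudart.cudaEventCreate()

        # ... enqueue producer work on `producer` here ...
        err, = cudart.cudaEventRecord(event, producer)
        err, = cudart.cudaStreamWaitEvent(consumer, event, 0)
        # work submitted to `consumer` after this point waits for the event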
    cuda.cudart.cudaStreamAddCallback(stream, callback, userData, unsigned int flags)#

    Add a callback to a compute stream.

    Adds a callback to be called on the host after all currently enqueued items in the stream have completed. For each cudaStreamAddCallback call, a callback will be executed exactly once. The callback will block later work in the stream until it is finished.

    The callback may be passed cudaSuccess or an error code. In the event of a device error, all subsequently executed callbacks will receive an appropriate cudaError_t.

    Callbacks must not make any CUDA API calls. Attempting to use CUDA APIs may result in cudaErrorNotPermitted. Callbacks must not perform any synchronization that may depend on outstanding device work or other callbacks that are not mandated to run earlier. Callbacks without a mandated order (in independent streams) execute in undefined order and may be serialized.

    For the purposes of Unified Memory, callback execution makes a number of guarantees:

    • The callback stream is considered idle for the duration of the callback. Thus, for example, a callback may always use memory attached to the callback stream.

    • The start of execution of a callback has the same effect as synchronizing an event recorded in the same stream immediately prior to the callback. It thus synchronizes streams which have been “joined” prior to the callback.

    • Adding device work to any stream does not have the effect of making the stream active until all preceding callbacks have executed. Thus, for example, a callback might use global attached memory even if work has been added to another stream, if it has been properly ordered with an event.

    • Completion of a callback does not cause a stream to become active except as described above. The callback stream will remain idle if no device work follows the callback, and will remain idle across consecutive callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a callback at the end of the stream.

    Parameters:
    • stream (CUstream or cudaStream_t) – Stream to add callback to

    • callback (cudaStreamCallback_t) – The function to call once preceding stream operations are complete

    • userData (Any) – User specified data to be passed to the callback function

    • flags (unsigned int) – Reserved for future use, must be 0

    Returns:

    cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorInvalidValue, cudaErrorNotSupported

    Return type:

    cudaError_t

    Notes

    This function is slated for eventual deprecation and removal. If you do not require the callback to execute in case of a device error, consider using cudaLaunchHostFunc. Additionally, this function is not supported with cudaStreamBeginCapture and cudaStreamEndCapture, unlike cudaLaunchHostFunc.

    cuda.cudart.cudaStreamSynchronize(stream)#

    Waits for stream tasks to complete.

    Blocks until stream has completed all operations. If the cudaDeviceScheduleBlockingSync flag was set for this device, the host thread will block until the stream is finished with all of its tasks.

    Parameters:

    stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t

    cuda.cudart.cudaStreamQuery(stream)#

    Queries an asynchronous stream for completion status.

    Returns cudaSuccess if all operations in stream have completed, or cudaErrorNotReady if not.

    For the purposes of Unified Memory, a return value of cudaSuccess is equivalent to having called cudaStreamSynchronize().

    Parameters:

    stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorNotReady, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t
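    A minimal sketch polling a stream instead of blocking on it:

        from cuda import cudart

        err, stream = cudart.cudaStreamCreate()
        # ... enqueue asynchronous work on `stream` here ...
        while True:
            err, = cudart.cudaStreamQuery(stream)
            if err == cudart.cudaError_t.cudaSuccess:
                break                                  # all enqueued work finished
            assert err == cudart.cudaError_t.cudaErrorNotReady
            # ... overlap useful host-side work here instead of spinning ...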
    cuda.cudart.cudaStreamAttachMemAsync(stream, devPtr, size_t length, unsigned int flags)#

    Attach memory to a stream asynchronously.

    Enqueues an operation in stream to specify stream association of length bytes of memory starting from devPtr. This function is a stream-ordered operation, meaning that it is dependent on, and will only take effect when, previous work in stream has completed. Any previous association is automatically replaced.

    devPtr must point to one of the following types of memories:

    • managed memory declared using the managed keyword or allocated with cudaMallocManaged.

    • a valid host-accessible region of system-allocated pageable memory. This type of memory may only be specified if the device associated with the stream reports a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess.

    For managed allocations, length must be either zero or the entire allocation’s size. Both indicate that the entire allocation’s stream association is being changed. Currently, it is not possible to change stream association for a portion of a managed allocation.

    For pageable allocations, length must be non-zero.

    The stream association is specified using flags which must be one of cudaMemAttachGlobal, cudaMemAttachHost or cudaMemAttachSingle. The default value for flags is cudaMemAttachSingle. If the cudaMemAttachGlobal flag is specified, the memory can be accessed by any stream on any device. If the cudaMemAttachHost flag is specified, the program makes a guarantee that it won’t access the memory on the device from any stream on a device that has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess. If the cudaMemAttachSingle flag is specified and stream is associated with a device that has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess, the program makes a guarantee that it will only access the memory on the device from stream. It is illegal to attach singly to the NULL stream, because the NULL stream is a virtual global stream and not a specific stream. An error will be returned in this case.

    When memory is associated with a single stream, the Unified Memory system will allow CPU access to this memory region so long as all operations in stream have completed, regardless of whether other streams are active. In effect, this constrains exclusive ownership of the managed memory region by an active GPU to per-stream activity instead of whole-GPU activity.

    Accessing memory on the device from streams that are not associated with it will produce undefined results. No error checking is performed by the Unified Memory system to ensure that kernels launched into other streams do not access this region.

    It is a program’s responsibility to order calls to cudaStreamAttachMemAsync via events, synchronization or other means to ensure legal access to memory at all times. Data visibility and coherency will be changed appropriately for all kernels which follow a stream-association change.

    If stream is destroyed while data is associated with it, the association is removed and the association reverts to the default visibility of the allocation as specified at cudaMallocManaged. For managed variables, the default association is always cudaMemAttachGlobal. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won’t happen until all work in the stream has completed.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorNotReady, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t
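    A hypothetical sketch associating an entire managed allocation with a single stream; the cudaMemAttachGlobal/cudaMemAttachSingle module constants are assumed to be exposed by the binding:

        from cuda import cudart

        nbytes = 1 << 20
        err, stream = cudart.cudaStreamCreate()
        err, dptr = cudart.cudaMallocManaged(nbytes, cudart.cudaMemAttachGlobal)
        # length == 0 selects the whole allocation (see above)
        err, = cudart.cudaStreamAttachMemAsync(stream, dptr, 0,
                                               cudart.cudaMemAttachSingle)
        err, = cudart.cudaStreamSynchronize(stream)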
    cuda.cudart.cudaStreamBeginCapture(stream, mode: cudaStreamCaptureMode)#

    Begins graph capture on a stream.

    Begin graph capture on stream. When a stream is in capture mode, all operations pushed into the stream will not be executed, but will instead be captured into a graph, which will be returned via cudaStreamEndCapture. Capture may not be initiated if stream is cudaStreamLegacy. Capture must be ended on the same stream in which it was initiated, and it may only be initiated if the stream is not already in capture mode. The capture mode may be queried via cudaStreamIsCapturing. A unique id representing the capture sequence may be queried via cudaStreamGetCaptureInfo.

    If mode is not cudaStreamCaptureModeRelaxed, cudaStreamEndCapture must be called on this stream from the same thread.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    Notes

    Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.

    cuda.cudart.cudaStreamBeginCaptureToGraph(stream, graph, dependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], dependencyData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies, mode: cudaStreamCaptureMode)#

    Begins graph capture on a stream to an existing graph.

    Begin graph capture on stream. When a stream is in capture mode, all operations pushed into the stream will not be executed, but will instead be captured into graph, which will be returned via cudaStreamEndCapture.

    Capture may not be initiated if stream is cudaStreamLegacy. Capture must be ended on the same stream in which it was initiated, and it may only be initiated if the stream is not already in capture mode. The capture mode may be queried via cudaStreamIsCapturing. A unique id representing the capture sequence may be queried via cudaStreamGetCaptureInfo.

    If mode is not cudaStreamCaptureModeRelaxed, cudaStreamEndCapture must be called on this stream from the same thread.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    Notes

    Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.

    cuda.cudart.cudaThreadExchangeStreamCaptureMode(mode: cudaStreamCaptureMode)#

    Swaps the stream capture interaction mode for a thread.

    Sets the calling thread’s stream capture interaction mode to the value contained in *mode, and overwrites *mode with the previous mode for the thread. To facilitate deterministic behavior across function or module boundaries, callers are encouraged to use this API in a push-pop fashion:

    View CUDA Toolkit Documentation for a C++ code example

    During stream capture (see cudaStreamBeginCapture), some actions, such as a call to cudaMalloc, may be unsafe. In the case of cudaMalloc, the operation is not enqueued asynchronously to a stream, and is not observed by stream capture. Therefore, if the sequence of operations captured via cudaStreamBeginCapture depended on the allocation being replayed whenever the graph is launched, the captured graph would be invalid.

    Therefore, stream capture places restrictions on API calls that can be made within or concurrently to a cudaStreamBeginCapture-cudaStreamEndCapture sequence. This behavior can be controlled via this API and flags to cudaStreamBeginCapture.

    A thread’s mode is one of the following:

    • cudaStreamCaptureModeGlobal: This is the default mode. If the local thread has an ongoing capture sequence that was not initiated with cudaStreamCaptureModeRelaxed at cuStreamBeginCapture, or if any other thread has a concurrent capture sequence initiated with cudaStreamCaptureModeGlobal, this thread is prohibited from potentially unsafe API calls.

    • cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture sequence not initiated with cudaStreamCaptureModeRelaxed, it is prohibited from potentially unsafe API calls. Concurrent capture sequences in other threads are ignored.

    • cudaStreamCaptureModeRelaxed: The local thread is not prohibited from potentially unsafe API calls. Note that the thread is still prohibited from API calls which necessarily conflict with stream capture, for example, attempting cudaEventQuery on an event that was last recorded inside a capture sequence.

    Parameters:

    mode (cudaStreamCaptureMode) – Pointer to mode value to swap with the current mode

    Returns:
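    A minimal push-pop sketch of the pattern described above, using the relaxed mode:

        from cuda import cudart

        relaxed = cudart.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed
        err, old_mode = cudart.cudaThreadExchangeStreamCaptureMode(relaxed)  # push
        # ... calls that must tolerate concurrent capture ...
        err, _ = cudart.cudaThreadExchangeStreamCaptureMode(old_mode)        # pop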
    cuda.cudart.cudaStreamEndCapture(stream)#

    Ends capture on a stream, returning the captured graph.

    End capture on stream, returning the captured graph via pGraph. Capture must have been initiated on stream via a call to cudaStreamBeginCapture. If capture was invalidated, due to a violation of the rules of stream capture, then a NULL graph will be returned.

    If the mode argument to cudaStreamBeginCapture was not cudaStreamCaptureModeRelaxed, this call must be from the same thread as cudaStreamBeginCapture.

    Parameters:

    stream (CUstream or cudaStream_t) – Stream to query

    Returns:
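    A minimal sketch of a begin/end capture round trip: capture a stream-ordered memset into a graph, then instantiate and launch the graph:

        from cuda import cudart

        nbytes = 1 << 20
        err, stream = cudart.cudaStreamCreate()
        err, dptr = cudart.cudaMalloc(nbytes)

        err, = cudart.cudaStreamBeginCapture(
            stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
        err, = cudart.cudaMemsetAsync(dptr, 0, nbytes, stream)  # captured, not executed
        err, graph = cudart.cudaStreamEndCapture(stream)

        err, graph_exec = cudart.cudaGraphInstantiate(graph, 0)
        err, = cudart.cudaGraphLaunch(graph_exec, stream)
        err, = cudart.cudaStreamSynchronize(stream)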
    cuda.cudart.cudaStreamIsCapturing(stream)#

    Returns a stream’s capture status.

    Return the capture status of stream via pCaptureStatus. After a successful call, *pCaptureStatus will contain one of the following:

    • cudaStreamCaptureStatusNone: the stream is not capturing.

    • cudaStreamCaptureStatusActive: the stream is capturing.

    • cudaStreamCaptureStatusInvalidated: the stream was capturing but an error has invalidated the capture sequence.

    Note that, if this is called on cudaStreamLegacy (the “null stream”) while a blocking stream on the same device is capturing, it will return cudaErrorStreamCaptureImplicit and *pCaptureStatus is unspecified after the call. The blocking stream capture is not invalidated.

    When a blocking stream is capturing, the legacy stream is in an unusable state until the blocking stream capture is terminated. The legacy stream is not supported for stream capture, but attempted use would have an implicit dependency on the capturing stream(s).

    Parameters:

    stream (CUstream or cudaStream_t) – Stream to query

    Returns:

    cuda.cudart.cudaStreamGetCaptureInfo(stream)#

    Query a stream’s capture state.

    Query stream state related to stream capture.

    If called on cudaStreamLegacy (the “null stream”) while a stream not created with cudaStreamNonBlocking is capturing, returns cudaErrorStreamCaptureImplicit.

    Valid data (other than capture status) is returned only if both of the following are true:

    • the call returns cudaSuccess

    • the returned capture status is cudaStreamCaptureStatusActive

    Parameters:

    stream (CUstream or cudaStream_t) – The stream to query

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorStreamCaptureImplicit

    • captureStatus_out (cudaStreamCaptureStatus) – Location to return the capture status of the stream; required

    • id_out (unsigned long long) – Optional location to return an id for the capture sequence, which is unique over the lifetime of the process

    • graph_out (cudaGraph_t) – Optional location to return the graph being captured into. All operations other than destroy and node removal are permitted on the graph while the capture sequence is in progress. This API does not transfer ownership of the graph, which is transferred or destroyed at cudaStreamEndCapture. Note that the graph handle may be invalidated before end of capture for certain errors. Nodes that are or become unreachable from the original stream at cudaStreamEndCapture due to direct actions on the graph do not trigger cudaErrorStreamCaptureUnjoined.

    • dependencies_out (List[cudaGraphNode_t]) – Optional location to store a pointer to an array of nodes. The next node to be captured in the stream will depend on this set of nodes, absent operations such as event wait which modify this set. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated. The node handles may be copied out and are valid until they or the graph is destroyed. The driver-owned array may also be passed directly to APIs that operate on the graph (not the stream) without copying.

    • numDependencies_out (int) – Optional location to store the size of the array returned in dependencies_out.

    cuda.cudart.cudaStreamGetCaptureInfo_v3(stream)#

    Query a stream’s capture state (12.3+)

    Query stream state related to stream capture.

    If called on cudaStreamLegacy (the “null stream”) while a stream not created with cudaStreamNonBlocking is capturing, returns cudaErrorStreamCaptureImplicit.

    Valid data (other than capture status) is returned only if both of the following are true:

    • the call returns cudaSuccess

    • the returned capture status is cudaStreamCaptureStatusActive

    If edgeData_out is non-NULL then dependencies_out must be as well. If dependencies_out is non-NULL and edgeData_out is NULL, but there is non-zero edge data for one or more of the current stream dependencies, the call will return cudaErrorLossyQuery.

    Parameters:

    stream (CUstream or cudaStream_t) – The stream to query

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorStreamCaptureImplicit, cudaErrorLossyQuery

    • captureStatus_out (cudaStreamCaptureStatus) – Location to return the capture status of the stream; required

    • id_out (unsigned long long) – Optional location to return an id for the capture sequence, which is unique over the lifetime of the process

    • graph_out (cudaGraph_t) – Optional location to return the graph being captured into. All operations other than destroy and node removal are permitted on the graph while the capture sequence is in progress. This API does not transfer ownership of the graph, which is transferred or destroyed at cudaStreamEndCapture. Note that the graph handle may be invalidated before end of capture for certain errors. Nodes that are or become unreachable from the original stream at cudaStreamEndCapture due to direct actions on the graph do not trigger cudaErrorStreamCaptureUnjoined.

    • dependencies_out (List[cudaGraphNode_t]) – Optional location to store a pointer to an array of nodes. The next node to be captured in the stream will depend on this set of nodes, absent operations such as event wait which modify this set. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated. The node handles may be copied out and are valid until they or the graph is destroyed. The driver-owned array may also be passed directly to APIs that operate on the graph (not the stream) without copying.

    • edgeData_out (List[cudaGraphEdgeData]) – Optional location to store a pointer to an array of graph edge data. This array parallels dependencies_out; the next node to be added has an edge to dependencies_out[i] with annotation edgeData_out[i] for each i. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated.

    • numDependencies_out (int) – Optional location to store the size of the array returned in dependencies_out.

    cuda.cudart.cudaStreamUpdateCaptureDependencies(stream, dependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, unsigned int flags)#

    Update the set of dependencies in a capturing stream (11.3+)

    Modifies the dependency set of a capturing stream. The dependency set is the set of nodes that the next captured node in the stream will depend on.

    Valid flags are cudaStreamAddCaptureDependencies and cudaStreamSetCaptureDependencies. These control whether the set passed to the API is added to the existing set or replaces it. A flags value of 0 defaults to cudaStreamAddCaptureDependencies.

    Nodes that are removed from the dependency set via this API do not result in cudaErrorStreamCaptureUnjoined if they are unreachable from the stream at cudaStreamEndCapture.

    Returns cudaErrorIllegalState if the stream is not capturing.

    This API is new in CUDA 11.3. Developers requiring compatibility across minor versions of the CUDA driver to 11.0 should not use this API or provide a fallback.

    Parameters:
    • stream (CUstream or cudaStream_t) – The stream to update

    • dependencies (List[cudaGraphNode_t]) – The set of dependencies to add

    • numDependencies (size_t) – The size of the dependencies array

    • flags (unsigned int) – See above

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorIllegalState

    Return type:

    cudaError_t

    cuda.cudart.cudaStreamUpdateCaptureDependencies_v2(stream, dependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], dependencyData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies, unsigned int flags)#

    Update the set of dependencies in a capturing stream (12.3+)

    Modifies the dependency set of a capturing stream. The dependency set is the set of nodes that the next captured node in the stream will depend on.

    Valid flags are cudaStreamAddCaptureDependencies and cudaStreamSetCaptureDependencies. These control whether the set passed to the API is added to the existing set or replaces it. A flags value of 0 defaults to cudaStreamAddCaptureDependencies.

    Nodes that are removed from the dependency set via this API do not result in cudaErrorStreamCaptureUnjoined if they are unreachable from the stream at cudaStreamEndCapture.

    Returns cudaErrorIllegalState if the stream is not capturing.

    Parameters:
    • stream (CUstream or cudaStream_t) – The stream to update

    • dependencies (List[cudaGraphNode_t]) – The set of dependencies to add

    • dependencyData (List[cudaGraphEdgeData]) – Optional array of data associated with each dependency.

    • numDependencies (size_t) – The size of the dependencies array

    • flags (unsigned int) – See above

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorIllegalState

    Return type:

    cudaError_t
    Event Management#

    This section describes the event management functions of the CUDA runtime application programming interface.
    cuda.cudart.cudaEventCreate()#

    Creates an event object.

    Creates an event object for the current device using cudaEventDefault.

    Returns:

    cuda.cudart.cudaEventCreateWithFlags(unsigned int flags)#

    Creates an event object with the specified flags.

    Creates an event object for the current device with the specified flags. Valid flags include:

    • cudaEventDefault: Default event creation flag.

    • cudaEventBlockingSync: Specifies that the event should use blocking synchronization. A host thread that calls cudaEventSynchronize() on an event created with this flag will block until the event actually completes.

    • cudaEventDisableTiming: Specifies that the created event does not need to record timing data. Events created with this flag and cudaEventBlockingSync not set provide the best performance when used with cudaStreamWaitEvent() and cudaEventQuery().

    • cudaEventInterprocess: Specifies that the created event may be used as an interprocess event. cudaEventInterprocess must be specified along with cudaEventDisableTiming.

    Parameters:

    flags (unsigned int) – Flags for new event

    Returns:
    cuda.cudart.cudaEventRecord(event, stream)#

    Records an event.

    Captures in event the contents of stream at the time of this call. event and stream must be on the same CUDA context. Calls such as cudaEventQuery() or cudaStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of stream after this call do not modify event. See note on default stream behavior for what is captured in the default case.

    cudaEventRecord() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cudaStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cudaEventRecord(). Before the first call to cudaEventRecord(), an event represents an empty set of work, so for example cudaEventQuery() would return cudaSuccess.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:

    cudaError_t

    cuda.cudart.cudaEventRecordWithFlags(event, stream, unsigned int flags)#

    Records an event.

    Captures in event the contents of stream at the time of this call. event and stream must be on the same CUDA context. Calls such as cudaEventQuery() or cudaStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of stream after this call do not modify event. See note on default stream behavior for what is captured in the default case.

    cudaEventRecordWithFlags() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cudaStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cudaEventRecordWithFlags(). Before the first call to cudaEventRecordWithFlags(), an event represents an empty set of work, so for example cudaEventQuery() would return cudaSuccess.

    flags include cudaEventRecordDefault and cudaEventRecordExternal.

    Parameters:
    • event (CUevent or cudaEvent_t) – Event to record

    • stream (CUstream or cudaStream_t) – Stream in which to record event

    • flags (unsigned int) – Parameters for the operation (see above)

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:

    cudaError_t
    cuda.cudart.cudaEventQuery(event)#

    Queries an event’s status.

    Queries the status of all work currently captured by event. See cudaEventRecord() for details on what is captured by an event.

    Returns cudaSuccess if all captured work has been completed, or cudaErrorNotReady if any captured work is incomplete.

    For the purposes of Unified Memory, a return value of cudaSuccess is equivalent to having called cudaEventSynchronize().

    Parameters:

    event (CUevent or cudaEvent_t) – Event to query

    Returns:

    cudaSuccess, cudaErrorNotReady, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:

    cudaError_t

    cuda.cudart.cudaEventSynchronize(event)#

    Waits for an event to complete.

    Waits until the completion of all work currently captured in event. See cudaEventRecord() for details on what is captured by an event.

    Waiting for an event that was created with the cudaEventBlockingSync flag will cause the calling CPU thread to block until the event has been completed by the device. If the cudaEventBlockingSync flag has not been set, then the CPU thread will busy-wait until the event has been completed by the device.

    Parameters:

    event (CUevent or cudaEvent_t) – Event to wait for

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:

    cudaError_t

    cuda.cudart.cudaEventDestroy(event)#

    Destroys an event object.

    Destroys the event specified by event.

    An event may be destroyed before it is complete (i.e., while cudaEventQuery() would return cudaErrorNotReady). In this case, the call does not block on completion of the event, and any associated resources will automatically be released asynchronously at completion.

    Parameters:

    event (CUevent or cudaEvent_t) – Event to destroy

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:

    cudaError_t

    cuda.cudart.cudaEventElapsedTime(start, end)#

    Computes the elapsed time between events.

    Computes the elapsed time between two events (in milliseconds with a resolution of around 0.5 microseconds).

    If either event was last recorded in a non-NULL stream, the resulting time may be greater than expected (even if both used the same stream handle). This happens because the cudaEventRecord() operation takes place asynchronously and there is no guarantee that the measured latency is actually just between the two events. Any number of other different stream operations could execute in between the two measured events, thus altering the timing in a significant way.

    If cudaEventRecord() has not been called on either event, then cudaErrorInvalidResourceHandle is returned. If cudaEventRecord() has been called on both events but one or both of them has not yet been completed (that is, cudaEventQuery() would return cudaErrorNotReady on at least one of the events), cudaErrorNotReady is returned. If either event was created with the cudaEventDisableTiming flag, then this function will return cudaErrorInvalidResourceHandle.

    Parameters:

    Returns:
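    A minimal sketch timing a stream-ordered operation with an event pair:

        from cuda import cudart

        nbytes = 1 << 24
        err, stream = cudart.cudaStreamCreate()
        err, dptr = cudart.cudaMalloc(nbytes)
        err, start = cudart.cudaEventCreate()
        err, stop = cudart.cudaEventCreate()

        err, = cudart.cudaEventRecord(start, stream)
        err, = cudart.cudaMemsetAsync(dptr, 0, nbytes, stream)
        err, = cudart.cudaEventRecord(stop, stream)
        err, = cudart.cudaEventSynchronize(stop)
        err, ms = cudart.cudaEventElapsedTime(start, stop)  # milliseconds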
    External Resource Interoperability#

    This section describes the external resource interoperability functions of the CUDA runtime application programming interface.
    -cuda.cudart.cudaImportExternalMemory(cudaExternalMemoryHandleDesc memHandleDesc: Optional[cudaExternalMemoryHandleDesc])#
    -

    Imports an external memory object.

    -

    Imports an externally allocated memory object and returns a handle to -that in extMem_out.

    -

    The properties of the handle being imported must be described in -memHandleDesc. The cudaExternalMemoryHandleDesc structure -is defined as follows:

    -

    View CUDA Toolkit Documentation for a C++ code example

    -

    where type specifies the type -of handle being imported. cudaExternalMemoryHandleType is -defined as:

    -

    View CUDA Toolkit Documentation for a C++ code example

    -

    If type is -cudaExternalMemoryHandleTypeOpaqueFd, then -cudaExternalMemoryHandleDesc::handle::fd must be a valid -file descriptor referencing a memory object. Ownership of the file -descriptor is transferred to the CUDA driver when the handle is -imported successfully. Performing any operations on the file descriptor -after it is imported results in undefined behavior.

    -

    • If type is cudaExternalMemoryHandleTypeOpaqueWin32, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a memory object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a memory object.

    • If type is cudaExternalMemoryHandleTypeOpaqueWin32Kmt, then cudaExternalMemoryHandleDesc::handle::win32::handle must be non-NULL and cudaExternalMemoryHandleDesc::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the memory object are destroyed.

    • If type is cudaExternalMemoryHandleTypeD3D12Heap, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Heap object. This handle holds a reference to the underlying object. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D12Heap object.

    • If type is cudaExternalMemoryHandleTypeD3D12Resource, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Resource object. This handle holds a reference to the underlying object. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D12Resource object.

    • If type is cudaExternalMemoryHandleTypeD3D11Resource, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to a ID3D11Resource object. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D11Resource object.

    • If type is cudaExternalMemoryHandleTypeD3D11ResourceKmt, then cudaExternalMemoryHandleDesc::handle::win32::handle must be non-NULL and cudaExternalMemoryHandleDesc::handle::win32::name must be NULL. The handle specified must be a valid shared KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to a ID3D11Resource object.

    • If type is cudaExternalMemoryHandleTypeNvSciBuf, then cudaExternalMemoryHandleDesc::handle::nvSciBufObject must be non-NULL and reference a valid NvSciBuf object. If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the application must use cudaWaitExternalSemaphoresAsync or cudaSignalExternalSemaphoresAsync as appropriate barriers to maintain coherence between CUDA and the other drivers. See cudaExternalSemaphoreWaitSkipNvSciBufMemSync and cudaExternalSemaphoreSignalSkipNvSciBufMemSync for memory synchronization.

    • The size of the memory object must be specified in size.

    • Specifying the flag cudaExternalMemoryDedicated in flags indicates that the resource is a dedicated resource. What constitutes a dedicated resource is outside the scope of this extension. This flag must be set if type is one of the following: cudaExternalMemoryHandleTypeD3D12Resource, cudaExternalMemoryHandleTypeD3D11Resource, cudaExternalMemoryHandleTypeD3D11ResourceKmt.

    Parameters:

    memHandleDesc (cudaExternalMemoryHandleDesc) – Memory import handle descriptor

    Returns:

    Notes

    If the Vulkan memory imported into CUDA is mapped on the CPU, then the application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges as well as appropriate Vulkan pipeline barriers to maintain coherence between CPU and GPU. For more information on these APIs, please refer to the “Synchronization and Cache Control” chapter of the Vulkan specification.
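    The import itself is a single bindings call once the descriptor is filled in. Below is a minimal, hypothetical sketch with the cuda-python bindings: fd and nbytes are placeholders for a POSIX file descriptor and size obtained from the exporting API (for example Vulkan), and the nested descriptor fields are assumed to be settable attribute-by-attribute as with other binding structs.

        from cuda import cudart

        # Hypothetical: fd and nbytes come from the exporting API.
        memDesc = cudart.cudaExternalMemoryHandleDesc()
        memDesc.type = cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd
        memDesc.handle.fd = fd    # ownership of fd transfers to CUDA on success
        memDesc.size = nbytes     # size of the memory object in bytes

        err, extMem = cudart.cudaImportExternalMemory(memDesc)
        assert err == cudart.cudaError_t.cudaSuccess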
    cuda.cudart.cudaExternalMemoryGetMappedBuffer(extMem, cudaExternalMemoryBufferDesc bufferDesc: Optional[cudaExternalMemoryBufferDesc])

    Maps a buffer onto an imported memory object.

    Maps a buffer onto an imported memory object and returns a device pointer in devPtr.

    The properties of the buffer being mapped must be described in bufferDesc. The cudaExternalMemoryBufferDesc structure is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where offset is the offset in the memory object where the buffer’s base address is. size is the size of the buffer. flags must be zero.

    The offset and size have to be suitably aligned to match the requirements of the external API. Mapping two buffers whose ranges overlap may or may not result in the same virtual address being returned for the overlapped portion. In such cases, the application must ensure that all accesses to that region from the GPU are volatile. Otherwise writes made via one address are not guaranteed to be visible via the other address, even if they’re issued by the same thread. It is recommended that applications map the combined range instead of mapping separate buffers and then apply the appropriate offsets to the returned pointer to derive the individual buffers.

    The returned pointer devPtr must be freed using cudaFree.

    Parameters:

    Returns:
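    Continuing the import sketch above (extMem and nbytes remain placeholders), mapping the whole object from offset zero might look like this; the attribute paths are assumptions mirroring the C struct:

        bufDesc = cudart.cudaExternalMemoryBufferDesc()
        bufDesc.offset = 0       # byte offset of the buffer within the memory object
        bufDesc.size = nbytes    # size of the buffer to map
        bufDesc.flags = 0        # must be zero

        err, devPtr = cudart.cudaExternalMemoryGetMappedBuffer(extMem, bufDesc)
        # ... use devPtr on the device ...
        err, = cudart.cudaFree(devPtr)   # mapped buffers are freed with cudaFree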
    cuda.cudart.cudaExternalMemoryGetMappedMipmappedArray(extMem, cudaExternalMemoryMipmappedArrayDesc mipmapDesc: Optional[cudaExternalMemoryMipmappedArrayDesc])

    Maps a CUDA mipmapped array onto an external memory object.

    Maps a CUDA mipmapped array onto an external object and returns a handle to it in mipmap.

    The properties of the CUDA mipmapped array being mapped must be described in mipmapDesc. The structure cudaExternalMemoryMipmappedArrayDesc is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where offset is the offset in the memory object where the base level of the mipmap chain is. formatDesc describes the format of the data. extent specifies the dimensions of the base level of the mipmap chain. flags are flags associated with CUDA mipmapped arrays. For further details, please refer to the documentation for cudaMalloc3DArray. Note that if the mipmapped array is bound as a color target in the graphics API, then the flag cudaArrayColorAttachment must be specified in flags. numLevels specifies the total number of levels in the mipmap chain.

    The returned CUDA mipmapped array must be freed using cudaFreeMipmappedArray.

    Parameters:

    Returns:

    Notes

    If type is cudaExternalMemoryHandleTypeNvSciBuf, then numLevels must not be greater than 1.
    cuda.cudart.cudaDestroyExternalMemory(extMem)

    Destroys an external memory object.

    Destroys the specified external memory object. Any existing buffers and CUDA mipmapped arrays mapped onto this object must no longer be used and must be explicitly freed using cudaFree and cudaFreeMipmappedArray respectively.

    Parameters:

    extMem (cudaExternalMemory_t) – External memory object to be destroyed

    Returns:

    cudaSuccess, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t
    cuda.cudart.cudaImportExternalSemaphore(cudaExternalSemaphoreHandleDesc semHandleDesc: Optional[cudaExternalSemaphoreHandleDesc])

    Imports an external semaphore.

    Imports an externally allocated synchronization object and returns a handle to that in extSem_out.

    The properties of the handle being imported must be described in semHandleDesc. The cudaExternalSemaphoreHandleDesc is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where type specifies the type of handle being imported. cudaExternalSemaphoreHandleType is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If type is cudaExternalSemaphoreHandleTypeOpaqueFd, then cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

    If type is cudaExternalSemaphoreHandleTypeOpaqueWin32, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object.

    If type is cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then cudaExternalSemaphoreHandleDesc::handle::win32::handle must be non-NULL and cudaExternalSemaphoreHandleDesc::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the synchronization object are destroyed.

    If type is cudaExternalSemaphoreHandleTypeD3D12Fence, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Fence object. This handle holds a reference to the underlying object. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D12Fence object.

    If type is cudaExternalSemaphoreHandleTypeD3D11Fence, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D11Fence::CreateSharedHandle. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D11Fence object.

    If type is cudaExternalSemaphoreHandleTypeNvSciSync, then cudaExternalSemaphoreHandleDesc::handle::nvSciSyncObj represents a valid NvSciSyncObj.

    If type is cudaExternalSemaphoreHandleTypeKeyedMutex, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to a IDXGIKeyedMutex object.

    If type is cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then cudaExternalSemaphoreHandleDesc::handle::win32::handle must be non-NULL and cudaExternalSemaphoreHandleDesc::handle::win32::name must be NULL. The handle specified must represent a valid KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to a IDXGIKeyedMutex object.

    If type is cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, then cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

    If type is cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object.

    Parameters:

    semHandleDesc (cudaExternalSemaphoreHandleDesc) – Semaphore import handle descriptor

    Returns:
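    A minimal sketch mirroring the memory-import example above (fd is again a placeholder for a descriptor exported by the other API, and the attribute paths are assumed to mirror the C struct):

        semDesc = cudart.cudaExternalSemaphoreHandleDesc()
        semDesc.type = cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd
        semDesc.handle.fd = fd   # ownership transfers to CUDA on success

        err, extSem = cudart.cudaImportExternalSemaphore(semDesc)
        assert err == cudart.cudaError_t.cudaSuccess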
    cuda.cudart.cudaSignalExternalSemaphoresAsync(extSemArray: Optional[Tuple[cudaExternalSemaphore_t] | List[cudaExternalSemaphore_t]], paramsArray: Optional[Tuple[cudaExternalSemaphoreSignalParams] | List[cudaExternalSemaphoreSignalParams]], unsigned int numExtSems, stream)

    Signals a set of external semaphore objects.

    Enqueues a signal operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

    The exact semantics of signaling a semaphore depends on the type of the object.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeOpaqueFd, cudaExternalSemaphoreHandleTypeOpaqueWin32, cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then signaling the semaphore will set it to the signaled state.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeD3D12Fence, cudaExternalSemaphoreHandleTypeD3D11Fence, cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32, then the semaphore will be set to the value specified in cudaExternalSemaphoreSignalParams::params::fence::value.

    If the semaphore object is of the type cudaExternalSemaphoreHandleTypeNvSciSync, this API sets cudaExternalSemaphoreSignalParams::params::nvSciSync::fence to a value that can be used by subsequent waiters of the same NvSciSync object to order operations with those currently submitted in stream. Such an update will overwrite previous contents of cudaExternalSemaphoreSignalParams::params::nvSciSync::fence. By default, signaling such an external semaphore object causes appropriate memory synchronization operations to be performed over all the external memory objects that are imported as cudaExternalMemoryHandleTypeNvSciBuf. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag cudaExternalSemaphoreSignalSkipNvSciBufMemSync, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for a semaphore object of the type cudaExternalSemaphoreHandleTypeNvSciSync, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrSignal, this API will return cudaErrorNotSupported.

    cudaExternalSemaphoreSignalParams::params::nvSciSync::fence associated with a semaphore object of the type cudaExternalSemaphoreHandleTypeNvSciSync can be deterministic. For this the NvSciSyncAttrList used to create the semaphore object must have the value of the NvSciSyncAttrKey_RequireDeterministicFences key set to true. Deterministic fences allow users to enqueue a wait over the semaphore object even before the corresponding signal is enqueued. For such a semaphore object, CUDA guarantees that each signal operation will increment the fence value by ‘1’. Users are expected to track the count of signals enqueued on the semaphore object and insert waits accordingly. When such a semaphore object is signaled from multiple streams, due to concurrent stream execution, it is possible that the order in which the semaphore gets signaled is non-deterministic. This could lead to waiters of the semaphore getting unblocked incorrectly. Users are expected to handle such situations, either by not using the same semaphore object with deterministic fence support enabled in different streams or by adding explicit dependencies amongst such streams so that the semaphore is signaled in order.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeKeyedMutex, cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then the keyed mutex will be released with the key specified in cudaExternalSemaphoreSignalParams::params::keyedmutex::key.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t
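    Continuing the semaphore-import sketch (extSem is the placeholder handle from above, here assumed to be a fence-type semaphore; the nested field path is an assumption mirroring the C struct):

        sigParams = cudart.cudaExternalSemaphoreSignalParams()
        sigParams.params.fence.value = 1   # target fence value for fence-type semaphores

        err, stream = cudart.cudaStreamCreate()
        err, = cudart.cudaSignalExternalSemaphoresAsync([extSem], [sigParams], 1, stream)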
    cuda.cudart.cudaWaitExternalSemaphoresAsync(extSemArray: Optional[Tuple[cudaExternalSemaphore_t] | List[cudaExternalSemaphore_t]], paramsArray: Optional[Tuple[cudaExternalSemaphoreWaitParams] | List[cudaExternalSemaphoreWaitParams]], unsigned int numExtSems, stream)

    Waits on a set of external semaphore objects.

    Enqueues a wait operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

    The exact semantics of waiting on a semaphore depends on the type of the object.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeOpaqueFd, cudaExternalSemaphoreHandleTypeOpaqueWin32, cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then waiting on the semaphore will wait until the semaphore reaches the signaled state. The semaphore will then be reset to the unsignaled state. Therefore for every signal operation, there can only be one wait operation.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeD3D12Fence, cudaExternalSemaphoreHandleTypeD3D11Fence, cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32, then waiting on the semaphore will wait until the value of the semaphore is greater than or equal to cudaExternalSemaphoreWaitParams::params::fence::value.

    If the semaphore object is of the type cudaExternalSemaphoreHandleTypeNvSciSync, then waiting on the semaphore will wait until the cudaExternalSemaphoreSignalParams::params::nvSciSync::fence is signaled by the signaler of the NvSciSyncObj that was associated with this semaphore object. By default, waiting on such an external semaphore object causes appropriate memory synchronization operations to be performed over all external memory objects that are imported as cudaExternalMemoryHandleTypeNvSciBuf. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag cudaExternalSemaphoreWaitSkipNvSciBufMemSync, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for a semaphore object of the type cudaExternalSemaphoreHandleTypeNvSciSync, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrWait, this API will return cudaErrorNotSupported.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeKeyedMutex, cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then the keyed mutex will be acquired when it is released with the key specified in cudaExternalSemaphoreWaitParams::params::keyedmutex::key, or until the timeout specified by cudaExternalSemaphoreWaitParams::params::keyedmutex::timeoutMs has elapsed. The timeout interval can either be a finite value specified in milliseconds or an infinite value. In case an infinite value is specified, the timeout never elapses. The Windows INFINITE macro must be used to specify an infinite timeout.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorTimeout

    Return type:

    cudaError_t
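    Symmetrically, a hedged wait sketch using the same placeholder extSem and stream (again assuming the nested field path mirrors the C struct):

        waitParams = cudart.cudaExternalSemaphoreWaitParams()
        waitParams.params.fence.value = 1  # wait until the fence reaches this value

        err, = cudart.cudaWaitExternalSemaphoresAsync([extSem], [waitParams], 1, stream)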
    cuda.cudart.cudaDestroyExternalSemaphore(extSem)

    Destroys an external semaphore.

    Destroys an external semaphore object and releases any references to the underlying resource. Any outstanding signals or waits must have completed before the semaphore is destroyed.

    Parameters:

    extSem (cudaExternalSemaphore_t) – External semaphore to be destroyed

    Returns:

    cudaSuccess, cudaErrorInvalidResourceHandle

    Return type:

    cudaError_t
    Execution Control

    This section describes the execution control functions of the CUDA runtime application programming interface.

    Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module.
    cuda.cudart.cudaFuncSetCacheConfig(func, cacheConfig: cudaFuncCache)

    Sets the preferred cache configuration for a device function.

    On devices where the L1 cache and shared memory use the same hardware resources, this sets through cacheConfig the preferred cache configuration for the function specified via func. This is only a preference. The runtime will use the requested configuration if possible, but it is free to choose a different configuration if required to execute func.

    func is a device function symbol and must be declared as a __global__ function. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned. For templated functions, pass the function symbol as follows: func_name<template_arg_0,…,template_arg_N>

    This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

    Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

    The supported cache configurations are:

    • cudaFuncCachePreferNone: no preference for shared memory or L1 (default)

    • cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache

    • cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory

    • cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory
    Parameters:

    • func (Any) – Device function symbol

    • cacheConfig (cudaFuncCache) – Requested cache configuration

    Returns:

    cudaSuccess, cudaErrorInvalidDeviceFunction

    Return type:

    cudaError_t

    See also

    cudaFuncSetCacheConfig (C++ API), cudaFuncGetAttributes (C API), cudaLaunchKernel (C API), cuFuncSetCacheConfig
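    For illustration only: assuming kernel is a placeholder for a device function handle already loaded through the bindings (for example compiled with NVRTC and loaded via the driver API), the call is a one-liner:

        # kernel is a hypothetical, previously loaded device function handle
        err, = cudart.cudaFuncSetCacheConfig(
            kernel, cudart.cudaFuncCache.cudaFuncCachePreferShared)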
    cuda.cudart.cudaFuncGetAttributes(func)

    Find out attributes for a given function.

    This function obtains the attributes of a function specified via func. func is a device function symbol and must be declared as a __global__ function. The fetched attributes are placed in attr. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned. For templated functions, pass the function symbol as follows: func_name<template_arg_0,…,template_arg_N>

    Note that some function attributes such as maxThreadsPerBlock may vary based on the device that is currently being used.

    Parameters:

    func (Any) – Device function symbol

    Returns:

    See also

    cudaFuncSetCacheConfig (C API), cudaFuncGetAttributes (C++ API), cudaLaunchKernel (C API), cuFuncGetAttribute
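    A short sketch with the same hypothetical kernel handle; the returned object is assumed to expose the fields of the C cudaFuncAttributes struct:

        err, attrs = cudart.cudaFuncGetAttributes(kernel)
        # e.g. inspect register and shared-memory usage per launch
        print(attrs.maxThreadsPerBlock, attrs.numRegs, attrs.sharedSizeBytes)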
    cuda.cudart.cudaFuncSetAttribute(func, attr: cudaFuncAttribute, int value)

    Set attributes for a given function.

    This function sets the attributes of a function specified via func. The parameter func must be a pointer to a function that executes on the device. The parameter specified by func must be declared as a __global__ function. The enumeration defined by attr is set to the value defined by value. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned. If the specified attribute cannot be written, or if the value is incorrect, then cudaErrorInvalidValue is returned.

    Valid values for attr are:

    • cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. The sum of this value and the function attribute sharedSizeBytes cannot exceed the device attribute cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture.

    • cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. See cudaDevAttrMaxSharedMemoryPerMultiprocessor. This is only a hint, and the driver can choose a different ratio if required to execute the function.

    • cudaFuncAttributeRequiredClusterWidth: The required cluster width in blocks. The width, height, and depth values must either all be 0 or all be positive. The validity of the cluster dimensions is checked at launch time. If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return cudaErrorNotPermitted.

    • cudaFuncAttributeRequiredClusterHeight: The required cluster height in blocks. The width, height, and depth values must either all be 0 or all be positive. The validity of the cluster dimensions is checked at launch time. If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return cudaErrorNotPermitted.

    • cudaFuncAttributeRequiredClusterDepth: The required cluster depth in blocks. The width, height, and depth values must either all be 0 or all be positive. The validity of the cluster dimensions is checked at launch time. If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return cudaErrorNotPermitted.

    • cudaFuncAttributeNonPortableClusterSizeAllowed: Indicates whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed.

    • cudaFuncAttributeClusterSchedulingPolicyPreference: The block scheduling policy of a function. The value type is cudaClusterSchedulingPolicy.

    See also

    cudaLaunchKernel (C++ API), cudaFuncSetCacheConfig (C++ API), cudaFuncGetAttributes (C API)

    Parameters:

    • func (Any) – Function to set attributes for

    • attr (cudaFuncAttribute) – Attribute to set

    • value (int) – Value to set

    Returns:

    cudaSuccess, cudaErrorInvalidDeviceFunction, cudaErrorInvalidValue

    Return type:

    cudaError_t
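    A common use is opting a kernel in to more dynamic shared memory than the default limit. A hedged sketch with the same hypothetical kernel handle:

        # Hypothetical kernel handle; request up to 64 KiB of dynamic shared memory.
        err, = cudart.cudaFuncSetAttribute(
            kernel,
            cudart.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize,
            64 * 1024)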
    cuda.cudart.cudaLaunchHostFunc(stream, fn, userData)

    Enqueues a host function call in a stream.

    Enqueues a host function to run in a stream. The function will be called after currently enqueued work and will block work added after it.

    The host function must not make any CUDA API calls. Attempting to use a CUDA API may result in cudaErrorNotPermitted, but this is not required. The host function must not perform any synchronization that may depend on outstanding CUDA work not mandated to run earlier. Host functions without a mandated order (such as in independent streams) execute in undefined order and may be serialized.

    For the purposes of Unified Memory, execution makes a number of guarantees:

    • The stream is considered idle for the duration of the function’s execution. Thus, for example, the function may always use memory attached to the stream it was enqueued in.

    • The start of execution of the function has the same effect as synchronizing an event recorded in the same stream immediately prior to the function. It thus synchronizes streams which have been “joined” prior to the function.

    • Adding device work to any stream does not have the effect of making the stream active until all preceding host functions and stream callbacks have executed. Thus, for example, a function might use global attached memory even if work has been added to another stream, if the work has been ordered behind the function call with an event.

    • Completion of the function does not cause a stream to become active except as described above. The stream will remain idle if no device work follows the function, and will remain idle across consecutive host functions or stream callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a host function at the end of the stream.

    Note that, in contrast to cuStreamAddCallback, the function will not be called in the event of an error in the CUDA context.

    Parameters:

    • stream (CUstream or cudaStream_t) – Stream to enqueue function call in

    • fn (cudaHostFn_t) – The function to call once preceding stream operations are complete

    • userData (Any) – User-specified data to be passed to the function

    Returns:

    cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorInvalidValue, cudaErrorNotSupported

    Return type:

    cudaError_t
    Occupancy

    This section describes the occupancy calculation functions of the CUDA runtime application programming interface.

    Besides the occupancy calculator functions (cudaOccupancyMaxActiveBlocksPerMultiprocessor and cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags), there are also C++ only occupancy-based launch configuration functions documented in the C++ API Routines module.

    See cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), and cudaOccupancyAvailableDynamicSMemPerBlock (C++ API).

    cuda.cudart.cudaOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dynamicSMemSize)

    Returns occupancy for a device function.

    Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor for the device function.

    Parameters:

    • func (Any) – Kernel function for which occupancy is calculated

    • blockSize (int) – Block size the kernel is intended to be launched with

    • dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes

    Returns:

    See also

    cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, cudaOccupancyMaxPotentialBlockSize, cudaOccupancyMaxPotentialBlockSizeWithFlags, cudaOccupancyMaxPotentialBlockSizeVariableSMem, cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags, cudaOccupancyAvailableDynamicSMemPerBlock, cuOccupancyMaxActiveBlocksPerMultiprocessor
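    A hedged sketch, reusing the hypothetical kernel handle from the execution-control examples:

        # 256 threads per block, no dynamic shared memory
        err, numBlocks = cudart.cudaOccupancyMaxActiveBlocksPerMultiprocessor(kernel, 256, 0)
        print(f"max active blocks per SM: {numBlocks}")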
    cuda.cudart.cudaOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize)

    Returns dynamic shared memory available per block when launching numBlocks blocks on an SM.

    Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow numBlocks blocks per SM.

    Parameters:

    • func (Any) – Kernel function for which occupancy is calculated

    • numBlocks (int) – Number of blocks to fit on the SM

    • blockSize (int) – Size of the block

    Returns:

    See also

    cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, cudaOccupancyMaxPotentialBlockSize, cudaOccupancyMaxPotentialBlockSizeWithFlags, cudaOccupancyMaxPotentialBlockSizeVariableSMem, cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags, cudaOccupancyAvailableDynamicSMemPerBlock
    cuda.cudart.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, size_t dynamicSMemSize, unsigned int flags)

    Returns occupancy for a device function with the specified flags.

    Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor for the device function.

    The flags parameter controls how special cases are handled. Valid flags include:

    • cudaOccupancyDefault: keeps the default behavior as cudaOccupancyMaxActiveBlocksPerMultiprocessor

    • cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior on platforms where global caching affects occupancy. On such platforms, if caching is enabled, but per-block SM resource usage would result in zero occupancy, the occupancy calculator will calculate the occupancy as if caching is disabled. Setting this flag causes the occupancy calculator to return 0 in such cases. More information about this feature can be found in the “Unified L1/Texture Cache” section of the Maxwell tuning guide.

    Parameters:

    • func (Any) – Kernel function for which occupancy is calculated

    • blockSize (int) – Block size the kernel is intended to be launched with

    • dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes

    • flags (unsigned int) – Requested behavior for the occupancy calculator

    Returns:

    See also

    cudaOccupancyMaxActiveBlocksPerMultiprocessor, cudaOccupancyMaxPotentialBlockSize, cudaOccupancyMaxPotentialBlockSizeWithFlags, cudaOccupancyMaxPotentialBlockSizeVariableSMem, cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags, cudaOccupancyAvailableDynamicSMemPerBlock, cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
    Memory Management

    This section describes the memory management functions of the CUDA runtime application programming interface.

    Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module.
    cuda.cudart.cudaMallocManaged(size_t size, unsigned int flags)

    Allocates memory that will be automatically managed by the Unified Memory system.

    Allocates size bytes of managed memory on the device and returns in *devPtr a pointer to the allocated memory. If the device doesn’t support allocating managed memory, cudaErrorNotSupported is returned. Support for managed memory can be queried using the device attribute cudaDevAttrManagedMemory. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. If size is 0, cudaMallocManaged returns cudaErrorInvalidValue. The pointer is valid on the CPU and on all GPUs in the system that support managed memory. All accesses to this pointer must obey the Unified Memory programming model.

    flags specifies the default stream association for this allocation. flags must be one of cudaMemAttachGlobal or cudaMemAttachHost. The default value for flags is cudaMemAttachGlobal. If cudaMemAttachGlobal is specified, then this memory is accessible from any stream on any device. If cudaMemAttachHost is specified, then the allocation should not be accessed from devices that have a zero value for the device attribute cudaDevAttrConcurrentManagedAccess; an explicit call to cudaStreamAttachMemAsync will be required to enable access on such devices.

    If the association is later changed via cudaStreamAttachMemAsync to a single stream, the default association, as specified during cudaMallocManaged, is restored when that stream is destroyed. For managed variables, the default association is always cudaMemAttachGlobal. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won’t happen until all work in the stream has completed.

    Memory allocated with cudaMallocManaged should be released with cudaFree.

    Device memory oversubscription is possible for GPUs that have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Managed memory on such GPUs may be evicted from device memory to host memory at any time by the Unified Memory driver in order to make room for other allocations.

    In a system where all GPUs have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this API returns and instead may be populated on access. In such systems, managed memory can migrate to any processor’s memory at any time. The Unified Memory driver will employ heuristics to maintain data locality and prevent excessive page faults to the extent possible. The application can also guide the driver about memory usage patterns via cudaMemAdvise. The application can also explicitly migrate memory to a desired processor’s memory via cudaMemPrefetchAsync.

    In a multi-GPU system where all of the GPUs have a zero value for the device attribute cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support with each other, the physical storage for managed memory is created on the GPU which is active at the time cudaMallocManaged is called. All other GPUs will reference the data at reduced bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate memory among such GPUs.

    In a multi-GPU system where not all GPUs have peer-to-peer support with each other and where the value of the device attribute cudaDevAttrConcurrentManagedAccess is zero for at least one of those GPUs, the location chosen for physical storage of managed memory is system-dependent.

    • On Linux, the location chosen will be device memory as long as the current set of active contexts are on devices that either have peer-to-peer support with each other or have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. If there is an active context on a GPU that does not have a non-zero value for that device attribute and it does not have peer-to-peer support with the other devices that have active contexts on them, then the location for physical storage will be ‘zero-copy’ or host memory. Note that this means that managed memory that is located in device memory is migrated to host memory if a new context is created on a GPU that doesn’t have a non-zero value for the device attribute and does not support peer-to-peer with at least one of the other devices that has an active context. This in turn implies that context creation may fail if there is insufficient host memory to migrate all managed allocations.

    • On Windows, the physical storage is always created in ‘zero-copy’ or host memory. All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to restrict CUDA to only use those GPUs that have peer-to-peer support. Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero value to force the driver to always use device memory for physical storage. When this environment variable is set to a non-zero value, all devices used in that process that support managed memory have to be peer-to-peer compatible with each other. The error cudaErrorInvalidDevice will be returned if a device that supports managed memory is used and it is not peer-to-peer compatible with any of the other managed memory supporting devices that were previously used in that process, even if cudaDeviceReset has been called on those devices. These environment variables are described in the CUDA programming guide under the “CUDA environment variables” section.

    Parameters:

    Returns:
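    A minimal sketch; it assumes the cudaMemAttachGlobal constant is exposed at module level by the bindings, as other runtime macros are:

        from cuda import cudart

        # 1 MiB of managed memory, visible to host and device
        err, ptr = cudart.cudaMallocManaged(1 << 20, cudart.cudaMemAttachGlobal)
        assert err == cudart.cudaError_t.cudaSuccess
        err, = cudart.cudaFree(ptr)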
    cuda.cudart.cudaMalloc(size_t size)

    Allocate memory on the device.

    Allocates size bytes of linear memory on the device and returns in *devPtr a pointer to the allocated memory. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. cudaMalloc() returns cudaErrorMemoryAllocation in case of failure.

    The device version of cudaFree cannot be used with a *devPtr allocated using the host API, and vice versa.

    Parameters:

    size (size_t) – Requested allocation size in bytes

    Returns:
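    A self-contained sketch of the allocate/check/free pattern with the bindings, which return a (cudaError_t, result) tuple:

        from cuda import cudart

        def check(err):
            # cudaGetErrorString also returns a (status, message) tuple
            if err != cudart.cudaError_t.cudaSuccess:
                raise RuntimeError(cudart.cudaGetErrorString(err)[1])

        err, dptr = cudart.cudaMalloc(1 << 20)   # 1 MiB of device memory
        check(err)
        err, = cudart.cudaFree(dptr)
        check(err)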
    cuda.cudart.cudaMallocHost(size_t size)

    Allocates page-locked memory on the host.

    Allocates size bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cudaMemcpy().

    On systems where pageableMemoryAccessUsesHostPageTables is true, cudaMallocHost may not page-lock the allocated memory.

    Page-locking excessive amounts of memory with cudaMallocHost() may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

    Parameters:

    size (size_t) – Requested allocation size in bytes

    Returns:
    cuda.cudart.cudaMallocPitch(size_t width, size_t height)

    Allocates pitched memory on the device.

    Allocates at least width (in bytes) * height bytes of linear memory on the device and returns in *devPtr a pointer to the allocated memory. The function may pad the allocation to ensure that corresponding pointers in any given row will continue to meet the alignment requirements for coalescing as the address is updated from row to row. The pitch returned in *pitch by cudaMallocPitch() is the width in bytes of the allocation. The intended usage of pitch is as a separate parameter of the allocation, used to compute addresses within the 2D array. Given the row and column of an array element of type T, the address is computed as:

    View CUDA Toolkit Documentation for a C++ code example

    For allocations of 2D arrays, it is recommended that programmers consider performing pitch allocations using cudaMallocPitch(). Due to pitch alignment restrictions in the hardware, this is especially true if the application will be performing 2D memory copies between different regions of device memory (whether linear memory or CUDA arrays).

    Parameters:

    • width (size_t) – Requested pitched allocation width (in bytes)

    • height (size_t) – Requested pitched allocation height

    Returns:
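    The row-major addressing rule from the C example amounts to base + row * pitch + column * sizeof(T). A sketch with the bindings, treating the returned device pointer as an integer address and assuming float32 elements:

        width, height = 512, 256
        err, dptr, pitch = cudart.cudaMallocPitch(width * 4, height)

        def elem_addr(row, col):
            # device address of element (row, col) in the pitched float32 allocation
            return int(dptr) + row * pitch + col * 4

        err, = cudart.cudaFree(dptr)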
    cuda.cudart.cudaMallocArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], size_t width, size_t height, unsigned int flags)

    Allocate an array on the device.

    Allocates a CUDA array according to the cudaChannelFormatDesc structure desc and returns a handle to the new CUDA array in *array.

    The cudaChannelFormatDesc is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where cudaChannelFormatKind is one of cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

    The flags parameter enables different options to be specified that affect the allocation, as follows.

    • cudaArrayDefault: This flag’s value is defined to be 0 and provides default array allocation

    • cudaArraySurfaceLoadStore: Allocates an array that can be read from or written to using a surface reference

    • cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the array.

    • cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.

    • cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.

    width and height must meet certain size requirements. See cudaMalloc3DArray() for more details.

    Parameters:

    • desc (cudaChannelFormatDesc) – Requested channel format

    • width (size_t) – Requested array allocation width

    • height (size_t) – Requested array allocation height

    • flags (unsigned int) – Requested properties of allocated array

    Returns:
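    A brief sketch allocating a single-channel float array with default flags (the channel descriptor is built with cudaCreateChannelDesc, which in the bindings also returns a status tuple):

        err, desc = cudart.cudaCreateChannelDesc(
            32, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat)
        err, arr = cudart.cudaMallocArray(desc, 512, 512, 0)   # 512 x 512 array
        err, = cudart.cudaFreeArray(arr)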
    cuda.cudart.cudaFree(devPtr)

    Frees memory on the device.

    Frees the memory space pointed to by devPtr, which must have been returned by a previous call to one of the following memory allocation APIs - cudaMalloc(), cudaMallocPitch(), cudaMallocManaged(), cudaMallocAsync(), cudaMallocFromPoolAsync().

    Note - This API will not perform any implicit synchronization when the pointer was allocated with cudaMallocAsync or cudaMallocFromPoolAsync. Callers must ensure that all accesses to these pointers have completed before invoking cudaFree. For best performance and memory reuse, users should use cudaFreeAsync to free memory allocated via the stream ordered memory allocator. For all other pointers, this API may perform implicit synchronization.

    If cudaFree(devPtr) has already been called before, an error is returned. If devPtr is 0, no operation is performed. cudaFree() returns cudaErrorInvalidValue in case of failure.

    The device version of cudaFree cannot be used with a *devPtr allocated using the host API, and vice versa.

    Parameters:

    devPtr (Any) – Device pointer to memory to free

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t
    cuda.cudart.cudaFreeHost(ptr)

    Frees page-locked memory.

    Frees the memory space pointed to by ptr, which must have been returned by a previous call to cudaMallocHost() or cudaHostAlloc().

    Parameters:

    ptr (Any) – Pointer to memory to free

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t
    cuda.cudart.cudaFreeArray(array)

    Frees an array on the device.

    Frees the CUDA array array, which must have been returned by a previous call to cudaMallocArray(). If array is 0, no operation is performed.

    Parameters:

    array (cudaArray_t) – Pointer to array to free

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t
    cuda.cudart.cudaFreeMipmappedArray(mipmappedArray)

    Frees a mipmapped array on the device.

    Frees the CUDA mipmapped array mipmappedArray, which must have been returned by a previous call to cudaMallocMipmappedArray(). If mipmappedArray is 0, no operation is performed.

    Parameters:

    mipmappedArray (cudaMipmappedArray_t) – Pointer to mipmapped array to free

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t
    cuda.cudart.cudaHostAlloc(size_t size, unsigned int flags)

    Allocates page-locked memory on the host.

    Allocates size bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cudaMemcpy(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory obtained with functions such as malloc(). Allocating excessive amounts of pinned memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

    The flags parameter enables different options to be specified that affect the allocation, as follows.

    • cudaHostAllocDefault: This flag’s value is defined to be 0 and causes cudaHostAlloc() to emulate cudaMallocHost().

    • cudaHostAllocPortable: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.

    • cudaHostAllocMapped: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cudaHostGetDevicePointer().

    • cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). WC memory can be transferred across the PCI Express bus more quickly on some system configurations, but cannot be read efficiently by most CPUs. WC memory is a good option for buffers that will be written by the CPU and read by the device via mapped pinned memory or host->device transfers.

    All of these flags are orthogonal to one another: a developer may allocate memory that is portable, mapped and/or write-combined with no restrictions.

    In order for the cudaHostAllocMapped flag to have any effect, the CUDA context must support the cudaDeviceMapHost flag, which can be checked via cudaGetDeviceFlags(). The cudaDeviceMapHost flag is implicitly set for contexts created via the runtime API.

    The cudaHostAllocMapped flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cudaHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the cudaHostAllocPortable flag.

    Memory allocated by this function must be freed with cudaFreeHost().

    Parameters:

    • size (size_t) – Requested allocation size in bytes

    • flags (unsigned int) – Requested properties of allocated memory

    Returns:

    See also

    cudaSetDeviceFlags, cudaMallocHost (C API), cudaFreeHost, cudaGetDeviceFlags, cuMemHostAlloc
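    A short sketch of a pinned, mapped host buffer and its matching device pointer; it assumes the cudaHostAllocMapped constant is exposed at module level like other runtime macros:

        err, hptr = cudart.cudaHostAlloc(1 << 20, cudart.cudaHostAllocMapped)
        err, dptr = cudart.cudaHostGetDevicePointer(hptr, 0)  # flags must be 0
        # ... kernels may read/write dptr while the host uses hptr ...
        err, = cudart.cudaFreeHost(hptr)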
    cuda.cudart.cudaHostRegister(ptr, size_t size, unsigned int flags)

    Registers an existing host memory range for use by CUDA.

    Page-locks the memory range specified by ptr and size and maps it for the device(s) as specified by flags. This memory range also is added to the same tracking mechanism as cudaHostAlloc() to automatically accelerate calls to functions such as cudaMemcpy(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory that has not been registered. Page-locking excessive amounts of memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to register staging areas for data exchange between host and device.

    On systems where pageableMemoryAccessUsesHostPageTables is true, cudaHostRegister will not page-lock the memory range specified by ptr but only populate unpopulated pages.

    cudaHostRegister is supported only on I/O coherent devices that have a non-zero value for the device attribute cudaDevAttrHostRegisterSupported.

    The flags parameter enables different options to be specified that affect the allocation, as follows.

    • cudaHostRegisterDefault: On a system with unified virtual addressing, the memory will be both mapped and portable. On a system with no unified virtual addressing, the memory will be neither mapped nor portable.

    • cudaHostRegisterPortable: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.

    • cudaHostRegisterMapped: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cudaHostGetDevicePointer().

    • cudaHostRegisterIoMemory: The passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device, and it will be marked as non cache-coherent and contiguous.

    • cudaHostRegisterReadOnly: The passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without cudaDevAttrPageableMemoryAccessUsesHostPageTables, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute cudaDevAttrHostRegisterReadOnlySupported. Using this flag with a current context associated with a device that does not have this attribute set will cause cudaHostRegister to error with cudaErrorNotSupported.

    All of these flags are orthogonal to one another: a developer may page-lock memory that is portable or mapped with no restrictions.

    The CUDA context must have been created with the cudaDeviceMapHost flag in order for the cudaHostRegisterMapped flag to have any effect.

    The cudaHostRegisterMapped flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cudaHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the cudaHostRegisterPortable flag.

    For devices that have a non-zero value for the device attribute cudaDevAttrCanUseHostPointerForRegisteredMem, the memory can also be accessed from the device using the host pointer ptr. The device pointer returned by cudaHostGetDevicePointer() may or may not match the original host pointer ptr and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will match the original pointer ptr. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will not match the original host pointer ptr, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

    The memory page-locked by this function must be unregistered with cudaHostUnregister().

    Parameters:

    • ptr (Any) – Host pointer to memory to page-lock

    • size (size_t) – Size in bytes of the address range to page-lock

    • flags (unsigned int) – Flags for allocation request

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorMemoryAllocation, cudaErrorHostMemoryAlreadyRegistered, cudaErrorNotSupported

    Return type:

    cudaError_t
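    A self-contained sketch pinning an existing NumPy buffer by its raw address, then unregistering it:

        import numpy as np
        from cuda import cudart

        a = np.zeros(1 << 20, dtype=np.uint8)
        err, = cudart.cudaHostRegister(a.ctypes.data, a.nbytes,
                                       cudart.cudaHostRegisterDefault)
        # ... accelerated cudaMemcpy calls against a.ctypes.data ...
        err, = cudart.cudaHostUnregister(a.ctypes.data)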
    cuda.cudart.cudaHostUnregister(ptr)

    Unregisters a memory range that was registered with cudaHostRegister.

    Unmaps the memory range whose base address is specified by ptr, and makes it pageable again.

    The base address must be the same one specified to cudaHostRegister().

    Parameters:

    ptr (Any) – Host pointer to memory to unregister

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorHostMemoryNotRegistered

    Return type:

    cudaError_t
    cuda.cudart.cudaHostGetDevicePointer(pHost, unsigned int flags)

    Passes back the device pointer of mapped host memory allocated by cudaHostAlloc or registered by cudaHostRegister.

    Passes back the device pointer corresponding to the mapped, pinned host buffer allocated by cudaHostAlloc() or registered by cudaHostRegister().

    cudaHostGetDevicePointer() will fail if the cudaDeviceMapHost flag was not specified before deferred context creation occurred, or if called on a device that does not support mapped, pinned memory.

    For devices that have a non-zero value for the device attribute cudaDevAttrCanUseHostPointerForRegisteredMem, the memory can also be accessed from the device using the host pointer pHost. The device pointer returned by cudaHostGetDevicePointer() may or may not match the original host pointer pHost and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will match the original pointer pHost. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will not match the original host pointer pHost, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

    flags provides for future releases. For now, it must be set to 0.

    Parameters:

    • pHost (Any) – Requested host pointer mapping

    • flags (unsigned int) – Flags for extensions (must be 0 for now)

    Returns:
    -
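    A short sketch of obtaining a device-side alias for a registered host
    buffer. The cudaHostRegisterMapped value (0x02) is taken from the C
    headers (driver_types.h); whether that constant is also exposed at
    module level is not shown here, so the literal is used.

        import numpy as np
        from cuda import cudart

        cudaHostRegisterMapped = 0x02   # value from driver_types.h

        host = np.arange(256, dtype=np.float32)
        err, = cudart.cudaHostRegister(host.ctypes.data, host.nbytes,
                                       cudaHostRegisterMapped)
        assert err == cudart.cudaError_t.cudaSuccess
        err, dev_alias = cudart.cudaHostGetDevicePointer(host.ctypes.data, 0)
        assert err == cudart.cudaError_t.cudaSuccess
        # dev_alias can now be used as a device pointer; on UVA systems it
        # may simply equal host.ctypes.data.
        err, = cudart.cudaHostUnregister(host.ctypes.data)
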
    cuda.cudart.cudaHostGetFlags(pHost)

    Passes back the flags used to allocate pinned host memory allocated by
    cudaHostAlloc.

    cudaHostGetFlags() will fail if the input pointer does not reside in
    an address range allocated by cudaHostAlloc().

    Parameters:

    pHost (Any) – Host pointer

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • pFlags (unsigned int) – Returned flags word

    cuda.cudart.cudaMalloc3D(cudaExtent extent: cudaExtent)

    Allocates logical 1D, 2D, or 3D memory objects on the device.

    Allocates at least width * height * depth bytes of linear memory on the
    device and returns a cudaPitchedPtr in which ptr is a pointer to the
    allocated memory. The function may pad the allocation to ensure
    hardware alignment requirements are met. The pitch returned in the
    pitch field of pitchedDevPtr is the width in bytes of the allocation.

    The returned cudaPitchedPtr contains additional fields xsize and
    ysize, the logical width and height of the allocation, which are
    equivalent to the width and height extent parameters provided by the
    programmer during allocation.

    For allocations of 2D and 3D objects, it is highly recommended that
    programmers perform allocations using cudaMalloc3D() or
    cudaMallocPitch(). Due to alignment restrictions in the hardware, this
    is especially true if the application will be performing memory copies
    involving 2D or 3D objects (whether linear memory or CUDA arrays).

    Parameters:

    extent (cudaExtent) – Requested allocation size (width field in bytes)

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorMemoryAllocation
    • pitchedDevPtr (cudaPitchedPtr) – Pointer to allocated pitched device memory

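    A minimal sketch of a pitched allocation, assuming the bindings expose
    cudaExtent and cudaPitchedPtr as mutable struct wrappers with the
    field names described above:

        from cuda import cudart

        width_bytes, height, depth = 64 * 4, 32, 8   # 64 float32s per row
        extent = cudart.cudaExtent()
        extent.width, extent.height, extent.depth = width_bytes, height, depth

        err, pitched = cudart.cudaMalloc3D(extent)
        assert err == cudart.cudaError_t.cudaSuccess
        # pitched.pitch >= width_bytes because of alignment padding;
        # pitched.xsize / pitched.ysize echo the logical width and height.
        print(pitched.pitch, pitched.xsize, pitched.ysize)
        err, = cudart.cudaFree(pitched.ptr)
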
    cuda.cudart.cudaMalloc3DArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], cudaExtent extent: cudaExtent, unsigned int flags)

    Allocate an array on the device.

    Allocates a CUDA array according to the cudaChannelFormatDesc
    structure desc and returns a handle to the new CUDA array in *array.

    The cudaChannelFormatDesc is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where cudaChannelFormatKind is one of cudaChannelFormatKindSigned,
    cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

    cudaMalloc3DArray() can allocate the following:

    • A 1D array is allocated if the height and depth extents are both zero.
    • A 2D array is allocated if only the depth extent is zero.
    • A 3D array is allocated if all three extents are non-zero.
    • A 1D layered CUDA array is allocated if only the height extent is zero and the cudaArrayLayered flag is set. Each layer is a 1D array. The number of layers is determined by the depth extent.
    • A 2D layered CUDA array is allocated if all three extents are non-zero and the cudaArrayLayered flag is set. Each layer is a 2D array. The number of layers is determined by the depth extent.
    • A cubemap CUDA array is allocated if all three extents are non-zero and the cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. A cubemap is a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube. The order of the six layers in memory is the same as that listed in cudaGraphicsCubeFace.
    • A cubemap layered CUDA array is allocated if all three extents are non-zero and both the cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists of a collection of cubemaps. The first six layers represent the first cubemap, the next six layers form the second cubemap, and so on.

    The flags parameter enables different options to be specified that
    affect the allocation, as follows.

    • cudaArrayDefault: This flag’s value is defined to be 0 and provides default array allocation
    • cudaArrayLayered: Allocates a layered CUDA array, with the depth extent indicating the number of layers
    • cudaArrayCubemap: Allocates a cubemap CUDA array. Width must be equal to height, and depth must be six. If the cudaArrayLayered flag is also set, depth must be a multiple of six.
    • cudaArraySurfaceLoadStore: Allocates a CUDA array that could be read from or written to using a surface reference.
    • cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA array. Texture gather can only be performed on 2D CUDA arrays.
    • cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. This flag can only be used for creating 2D, 3D or 2D layered sparse CUDA arrays. The physical backing memory must be allocated via cuMemCreate.
    • cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.

    The width, height and depth extents must meet certain size requirements
    as listed in the following table. All values are specified in elements.

    Note that 2D CUDA arrays have different size requirements if the
    cudaArrayTextureGather flag is set. In that case, the valid range for
    (width, height, depth) is ((1,maxTexture2DGather[0]),
    (1,maxTexture2DGather[1]), 0).

    View CUDA Toolkit Documentation for a table example

    Parameters:

    • desc (cudaChannelFormatDesc) – Requested channel format
    • extent (cudaExtent) – Requested allocation size (width field in elements)
    • flags (unsigned int) – Flags for extensions

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorMemoryAllocation
    • array (cudaArray_t) – Pointer to allocated array in device memory

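    A sketch of allocating a 2D layered float32 array (four 64x64 layers),
    assuming mutable struct wrappers and taking the cudaArrayLayered value
    (0x01) from the C headers:

        from cuda import cudart

        cudaArrayLayered = 0x01   # value from driver_types.h

        desc = cudart.cudaChannelFormatDesc()
        desc.x, desc.y, desc.z, desc.w = 32, 0, 0, 0   # one 32-bit channel
        desc.f = cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat

        extent = cudart.cudaExtent()
        extent.width, extent.height, extent.depth = 64, 64, 4  # depth = layer count

        err, array = cudart.cudaMalloc3DArray(desc, extent, cudaArrayLayered)
        assert err == cudart.cudaError_t.cudaSuccess
        err, = cudart.cudaFreeArray(array)
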
    cuda.cudart.cudaMallocMipmappedArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], cudaExtent extent: cudaExtent, unsigned int numLevels, unsigned int flags)

    Allocate a mipmapped array on the device.

    Allocates a CUDA mipmapped array according to the
    cudaChannelFormatDesc structure desc and returns a handle to the new
    CUDA mipmapped array in *mipmappedArray. numLevels specifies the
    number of mipmap levels to be allocated. This value is clamped to the
    range [1, 1 + floor(log2(max(width, height, depth)))].

    The cudaChannelFormatDesc is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where cudaChannelFormatKind is one of cudaChannelFormatKindSigned,
    cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

    cudaMallocMipmappedArray() can allocate the following:

    • A 1D mipmapped array is allocated if the height and depth extents are both zero.
    • A 2D mipmapped array is allocated if only the depth extent is zero.
    • A 3D mipmapped array is allocated if all three extents are non-zero.
    • A 1D layered CUDA mipmapped array is allocated if only the height extent is zero and the cudaArrayLayered flag is set. Each layer is a 1D mipmapped array. The number of layers is determined by the depth extent.
    • A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and the cudaArrayLayered flag is set. Each layer is a 2D mipmapped array. The number of layers is determined by the depth extent.
    • A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. The order of the six layers in memory is the same as that listed in cudaGraphicsCubeFace.
    • A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero and both the cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be a multiple of six. A cubemap layered CUDA mipmapped array is a special type of 2D layered CUDA mipmapped array that consists of a collection of cubemap mipmapped arrays. The first six layers represent the first cubemap mipmapped array, the next six layers form the second cubemap mipmapped array, and so on.

    The flags parameter enables different options to be specified that
    affect the allocation, as follows.

    • cudaArrayDefault: This flag’s value is defined to be 0 and provides default mipmapped array allocation
    • cudaArrayLayered: Allocates a layered CUDA mipmapped array, with the depth extent indicating the number of layers
    • cudaArrayCubemap: Allocates a cubemap CUDA mipmapped array. Width must be equal to height, and depth must be six. If the cudaArrayLayered flag is also set, depth must be a multiple of six.
    • cudaArraySurfaceLoadStore: This flag indicates that individual mipmap levels of the CUDA mipmapped array will be read from or written to using a surface reference.
    • cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA array. Texture gather can only be performed on 2D CUDA mipmapped arrays, and the gather operations are performed only on the most detailed mipmap level.
    • cudaArraySparse: Allocates a CUDA mipmapped array without physical backing memory. The subregions within this sparse array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. This flag can only be used for creating 2D, 3D or 2D layered sparse CUDA mipmapped arrays. The physical backing memory must be allocated via cuMemCreate.
    • cudaArrayDeferredMapping: Allocates a CUDA mipmapped array without physical backing memory. The entire array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.

    The width, height and depth extents must meet certain size requirements
    as listed in the following table. All values are specified in elements.

    View CUDA Toolkit Documentation for a table example

    Parameters:

    • desc (cudaChannelFormatDesc) – Requested channel format
    • extent (cudaExtent) – Requested allocation size (width field in elements)
    • numLevels (unsigned int) – Number of mipmap levels to allocate
    • flags (unsigned int) – Flags for extensions

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorMemoryAllocation
    • mipmappedArray (cudaMipmappedArray_t) – Pointer to allocated mipmapped array in device memory

    cuda.cudart.cudaGetMipmappedArrayLevel(mipmappedArray, unsigned int level)

    Gets a mipmap level of a CUDA mipmapped array.

    Returns in *levelArray a CUDA array that represents a single mipmap
    level of the CUDA mipmapped array mipmappedArray.

    If level is greater than the maximum number of levels in this
    mipmapped array, cudaErrorInvalidValue is returned.

    If mipmappedArray is NULL, cudaErrorInvalidResourceHandle is returned.

    Parameters:

    • mipmappedArray (cudaMipmappedArray_const_t) – CUDA mipmapped array
    • level (unsigned int) – Mipmap level

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle
    • levelArray (cudaArray_t) – Returned mipmap level CUDA array

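    A sketch combining cudaMallocMipmappedArray and
    cudaGetMipmappedArrayLevel: allocate a full mip chain for a 256x256 2D
    array, then fetch its base level. The level-count formula follows the
    clamping rule stated above; struct field assignment is an assumption
    about the wrappers.

        import math
        from cuda import cudart

        desc = cudart.cudaChannelFormatDesc()
        desc.x, desc.y, desc.z, desc.w = 32, 0, 0, 0
        desc.f = cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat

        extent = cudart.cudaExtent()
        extent.width, extent.height, extent.depth = 256, 256, 0  # 2D: depth == 0

        # Full chain: 1 + floor(log2(max(width, height, depth)))
        num_levels = 1 + int(math.floor(math.log2(max(extent.width, extent.height))))
        err, mm = cudart.cudaMallocMipmappedArray(desc, extent, num_levels, 0)
        assert err == cudart.cudaError_t.cudaSuccess

        err, level0 = cudart.cudaGetMipmappedArrayLevel(mm, 0)  # base level
        assert err == cudart.cudaError_t.cudaSuccess
        err, = cudart.cudaFreeMipmappedArray(mm)
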
    cuda.cudart.cudaMemcpy3D(cudaMemcpy3DParms p: Optional[cudaMemcpy3DParms])

    Copies data between 3D objects.

    View CUDA Toolkit Documentation for a C++ code example

    cudaMemcpy3D() copies data between two 3D objects. The source and
    destination objects may be in either host memory, device memory, or a
    CUDA array. The source, destination, extent, and kind of copy
    performed are specified by the cudaMemcpy3DParms struct, which should
    be initialized to zero before use:

    View CUDA Toolkit Documentation for a C++ code example

    The struct passed to cudaMemcpy3D() must specify one of srcArray or
    srcPtr and one of dstArray or dstPtr. Passing more than one non-zero
    source or destination will cause cudaMemcpy3D() to return an error.

    The srcPos and dstPos fields are optional offsets into the source and
    destination objects and are defined in units of each object’s
    elements. The element for a host or device pointer is assumed to be
    unsigned char.

    The extent field defines the dimensions of the transferred area in
    elements. If a CUDA array is participating in the copy, the extent is
    defined in terms of that array’s elements. If no CUDA array is
    participating in the copy then the extents are defined in elements of
    unsigned char.

    The kind field defines the direction of the copy. It must be one of
    cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing. If
    cudaMemcpyHostToHost, cudaMemcpyHostToDevice, or
    cudaMemcpyDeviceToHost is passed as kind and a cudaArray is passed as
    source or destination, and the kind implies that the cudaArray is
    present on the host, cudaMemcpy3D() will disregard that implication
    and silently correct the kind, since a cudaArray can only be present
    on the device.

    If the source and destination are both arrays, cudaMemcpy3D() will
    return an error if they do not have the same element size.

    The source and destination object may not overlap. If overlapping
    source and destination objects are specified, undefined behavior will
    result.

    The source object must entirely contain the region defined by srcPos
    and extent. The destination object must entirely contain the region
    defined by dstPos and extent.

    cudaMemcpy3D() returns an error if the pitch of srcPtr or dstPtr
    exceeds the maximum allowed. The pitch of a cudaPitchedPtr allocated
    with cudaMalloc3D() will always be valid.

    Parameters:

    p (cudaMemcpy3DParms) – 3D memory copy parameters

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

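    A sketch of a host-to-device 3D copy into memory from cudaMalloc3D().
    Whether new cudaMemcpy3DParms instances start zeroed and whether
    sub-structs can be assigned wholesale are assumptions about the struct
    wrappers.

        import numpy as np
        from cuda import cudart

        w, h, d = 32, 16, 4                        # elements of unsigned char
        host = np.arange(w * h * d, dtype=np.uint8).reshape(d, h, w)

        extent = cudart.cudaExtent()
        extent.width, extent.height, extent.depth = w, h, d  # bytes, rows, slices

        err, dst = cudart.cudaMalloc3D(extent)
        assert err == cudart.cudaError_t.cudaSuccess

        src = cudart.cudaPitchedPtr()              # describe the packed host volume
        src.ptr = host.ctypes.data
        src.pitch = w                              # tightly packed host rows
        src.xsize, src.ysize = w, h

        p = cudart.cudaMemcpy3DParms()             # assumed zero-initialized
        p.srcPtr = src
        p.dstPtr = dst
        p.extent = extent
        p.kind = cudart.cudaMemcpyKind.cudaMemcpyHostToDevice

        err, = cudart.cudaMemcpy3D(p)
        assert err == cudart.cudaError_t.cudaSuccess
        err, = cudart.cudaFree(dst.ptr)
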
    cuda.cudart.cudaMemcpy3DPeer(cudaMemcpy3DPeerParms p: Optional[cudaMemcpy3DPeerParms])

    Copies memory between devices.

    Perform a 3D memory copy according to the parameters specified in p.
    See the definition of the cudaMemcpy3DPeerParms structure for
    documentation of its parameters.

    Note that this function is synchronous with respect to the host only
    if the source or destination of the transfer is host memory. Note also
    that this copy is serialized with respect to all pending and future
    asynchronous work on the current device, the copy’s source device, and
    the copy’s destination device (use cudaMemcpy3DPeerAsync to avoid this
    synchronization).

    Parameters:

    p (cudaMemcpy3DPeerParms) – Parameters for the memory copy

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice,
    cudaErrorInvalidPitchValue

    Return type:

    cudaError_t

    cuda.cudart.cudaMemcpy3DAsync(cudaMemcpy3DParms p: Optional[cudaMemcpy3DParms], stream)

    Copies data between 3D objects.

    View CUDA Toolkit Documentation for a C++ code example

    cudaMemcpy3DAsync() copies data between two 3D objects. The source and
    destination objects may be in either host memory, device memory, or a
    CUDA array. The source, destination, extent, and kind of copy
    performed are specified by the cudaMemcpy3DParms struct, which should
    be initialized to zero before use:

    View CUDA Toolkit Documentation for a C++ code example

    The struct passed to cudaMemcpy3DAsync() must specify one of srcArray
    or srcPtr and one of dstArray or dstPtr. Passing more than one
    non-zero source or destination will cause cudaMemcpy3DAsync() to
    return an error.

    The srcPos and dstPos fields are optional offsets into the source and
    destination objects and are defined in units of each object’s
    elements. The element for a host or device pointer is assumed to be
    unsigned char. For CUDA arrays, positions must be in the range
    [0, 2048) for any dimension.

    The extent field defines the dimensions of the transferred area in
    elements. If a CUDA array is participating in the copy, the extent is
    defined in terms of that array’s elements. If no CUDA array is
    participating in the copy then the extents are defined in elements of
    unsigned char.

    The kind field defines the direction of the copy. It must be one of
    cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing. If
    cudaMemcpyHostToHost, cudaMemcpyHostToDevice, or
    cudaMemcpyDeviceToHost is passed as kind and a cudaArray is passed as
    source or destination, and the kind implies that the cudaArray is
    present on the host, cudaMemcpy3DAsync() will disregard that
    implication and silently correct the kind, since a cudaArray can only
    be present on the device.

    If the source and destination are both arrays, cudaMemcpy3DAsync()
    will return an error if they do not have the same element size.

    The source and destination object may not overlap. If overlapping
    source and destination objects are specified, undefined behavior will
    result.

    The source object must lie entirely within the region defined by
    srcPos and extent. The destination object must lie entirely within the
    region defined by dstPos and extent.

    cudaMemcpy3DAsync() returns an error if the pitch of srcPtr or dstPtr
    exceeds the maximum allowed. The pitch of a cudaPitchedPtr allocated
    with cudaMalloc3D() will always be valid.

    cudaMemcpy3DAsync() is asynchronous with respect to the host, so the
    call may return before the copy is complete. The copy can optionally
    be associated to a stream by passing a non-zero stream argument. If
    kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and stream is
    non-zero, the copy may overlap with operations in other streams.

    The device version of this function only handles device to device
    copies and cannot be given local or shared pointers.

    Parameters:

    • p (cudaMemcpy3DParms) – 3D memory copy parameters
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    cuda.cudart.cudaMemcpy3DPeerAsync(cudaMemcpy3DPeerParms p: Optional[cudaMemcpy3DPeerParms], stream)

    Copies memory between devices asynchronously.

    Perform a 3D memory copy according to the parameters specified in p.
    See the definition of the cudaMemcpy3DPeerParms structure for
    documentation of its parameters.

    Parameters:

    • p (cudaMemcpy3DPeerParms) – Parameters for the memory copy
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice,
    cudaErrorInvalidPitchValue

    Return type:

    cudaError_t

    cuda.cudart.cudaMemGetInfo()

    Gets free and total device memory.

    Returns in *total the total amount of memory available to the current
    context. Returns in *free the amount of memory on the device that is
    free according to the OS. CUDA is not guaranteed to be able to
    allocate all of the memory that the OS reports as free. In a
    multi-tenant situation, the free estimate is prone to a race
    condition: an allocation or free performed by a different process, or
    by a different thread in the same process, between the time free
    memory is estimated and the time it is reported will cause the
    reported value to deviate from the actual free memory.

    The integrated GPU on Tegra shares memory with the CPU and other
    components of the SoC. The free and total values returned by the API
    exclude the SWAP memory space maintained by the OS on some platforms.
    The OS may move some of the memory pages into the swap area as the GPU
    or CPU allocates or accesses memory. See the Tegra app note on how to
    calculate total and free memory on Tegra.

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • free (size_t) – Returned free memory in bytes
    • total (size_t) – Returned total memory in bytes

    See also

    cuMemGetInfo

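    Querying the two values from Python is a one-line sketch; keep in mind
    the caveat above that free is only an estimate.

        from cuda import cudart

        err, free, total = cudart.cudaMemGetInfo()
        if err == cudart.cudaError_t.cudaSuccess:
            print(f"{free / 2**20:.0f} MiB free of {total / 2**20:.0f} MiB")
        # Treat `free` as an estimate: another process or thread may
        # allocate or release memory before a subsequent cudaMalloc.
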
    cuda.cudart.cudaArrayGetInfo(array)

    Gets info about the specified cudaArray.

    Returns in *desc, *extent and *flags respectively, the type, shape and
    flags of array.

    Any of *desc, *extent and *flags may be specified as NULL.

    Parameters:

    array (cudaArray_t) – The cudaArray to get info for

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • desc (cudaChannelFormatDesc) – Returned array type
    • extent (cudaExtent) – Returned array shape
    • flags (unsigned int) – Returned array flags

    cuda.cudart.cudaArrayGetPlane(hArray, unsigned int planeIdx)

    Gets a CUDA array plane from a CUDA array.

    Returns in pPlaneArray a CUDA array that represents a single format
    plane of the CUDA array hArray.

    If planeIdx is greater than the maximum number of planes in this
    array, or if the array does not have a multi-planar format (e.g.
    cudaChannelFormatKindNV12), then cudaErrorInvalidValue is returned.

    Note that if hArray has format cudaChannelFormatKindNV12, then passing
    in 0 for planeIdx returns a CUDA array of the same size as hArray but
    with one 8-bit channel and cudaChannelFormatKindUnsigned as its format
    kind. If 1 is passed for planeIdx, then the returned CUDA array has
    half the height and width of hArray with two 8-bit channels and
    cudaChannelFormatKindUnsigned as its format kind.

    Parameters:

    • hArray (cudaArray_t) – CUDA array
    • planeIdx (unsigned int) – Plane index

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • pPlaneArray (cudaArray_t) – Returned CUDA array referenced by planeIdx

    See also

    cuArrayGetPlane

    cuda.cudart.cudaArrayGetMemoryRequirements(array, int device)

    Returns the memory requirements of a CUDA array.

    Returns the memory requirements of a CUDA array in memoryRequirements.
    If the CUDA array is not allocated with the flag
    cudaArrayDeferredMapping, cudaErrorInvalidValue will be returned.

    The returned value in size represents the total size of the CUDA
    array. The returned value in alignment represents the alignment
    necessary for mapping the CUDA array.

    Parameters:

    • array (cudaArray_t) – CUDA array to get the memory requirements of
    • device (int) – Device to get the memory requirements for

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • memoryRequirements (cudaArrayMemoryRequirements) – Returned memory requirements

    cuda.cudart.cudaMipmappedArrayGetMemoryRequirements(mipmap, int device)

    Returns the memory requirements of a CUDA mipmapped array.

    Returns the memory requirements of a CUDA mipmapped array in
    memoryRequirements. If the CUDA mipmapped array is not allocated with
    the flag cudaArrayDeferredMapping, cudaErrorInvalidValue will be
    returned.

    The returned value in size represents the total size of the CUDA
    mipmapped array. The returned value in alignment represents the
    alignment necessary for mapping the CUDA mipmapped array.

    Parameters:

    • mipmap (cudaMipmappedArray_t) – CUDA mipmapped array to get the memory requirements of
    • device (int) – Device to get the memory requirements for

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • memoryRequirements (cudaArrayMemoryRequirements) – Returned memory requirements

    cuda.cudart.cudaArrayGetSparseProperties(array)

    Returns the layout properties of a sparse CUDA array.

    Returns the layout properties of a sparse CUDA array in
    sparseProperties. If the CUDA array is not allocated with the flag
    cudaArraySparse, cudaErrorInvalidValue will be returned.

    If the returned value in flags contains
    cudaArraySparsePropertiesSingleMipTail, then miptailSize represents
    the total size of the array. Otherwise, it will be zero. Also, the
    returned value in miptailFirstLevel is always zero. Note that the
    array must have been allocated using cudaMallocArray or
    cudaMalloc3DArray. For CUDA arrays obtained using
    cudaMipmappedArrayGetLevel, cudaErrorInvalidValue will be returned.
    Instead, cudaMipmappedArrayGetSparseProperties must be used to obtain
    the sparse properties of the entire CUDA mipmapped array to which
    array belongs.

    Parameters:

    array (cudaArray_t) – The CUDA array to get the sparse properties of

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • sparseProperties (cudaArraySparseProperties) – Returned sparse array layout properties

    cuda.cudart.cudaMipmappedArrayGetSparseProperties(mipmap)

    Returns the layout properties of a sparse CUDA mipmapped array.

    Returns the sparse array layout properties in sparseProperties. If the
    CUDA mipmapped array is not allocated with the flag cudaArraySparse,
    cudaErrorInvalidValue will be returned.

    For non-layered CUDA mipmapped arrays, miptailSize returns the size of
    the mip tail region. The mip tail region includes all mip levels whose
    width, height or depth is less than that of the tile. For layered CUDA
    mipmapped arrays, if flags contains
    cudaArraySparsePropertiesSingleMipTail, then miptailSize specifies the
    size of the mip tail of all layers combined. Otherwise, miptailSize
    specifies the mip tail size per layer. The returned value of
    miptailFirstLevel is valid only if miptailSize is non-zero.

    Parameters:

    mipmap (cudaMipmappedArray_t) – The CUDA mipmapped array to get the sparse properties of

    Returns:

    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • sparseProperties (cudaArraySparseProperties) – Returned sparse array layout properties

    cuda.cudart.cudaMemcpy(dst, src, size_t count, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies count bytes from the memory area pointed to by src to the
    memory area pointed to by dst, where kind specifies the direction of
    the copy, and must be one of cudaMemcpyHostToHost,
    cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    Calling cudaMemcpy() with dst and src pointers that do not match the
    direction of the copy results in undefined behavior.

    Parameters:

    • dst (Any) – Destination memory address
    • src (Any) – Source memory address
    • count (size_t) – Size in bytes to copy
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray,
    cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol,
    cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync,
    cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync,
    cudaMemcpyFromSymbolAsync, cuMemcpyDtoH, cuMemcpyHtoD, cuMemcpyDtoD,
    cuMemcpy

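    A minimal round-trip sketch with NumPy host buffers, passing host
    pointers as integers via ndarray.ctypes.data:

        import numpy as np
        from cuda import cudart

        h_src = np.arange(1024, dtype=np.float32)
        h_dst = np.empty_like(h_src)

        err, d_buf = cudart.cudaMalloc(h_src.nbytes)
        assert err == cudart.cudaError_t.cudaSuccess

        kind = cudart.cudaMemcpyKind
        err, = cudart.cudaMemcpy(d_buf, h_src.ctypes.data, h_src.nbytes,
                                 kind.cudaMemcpyHostToDevice)
        err, = cudart.cudaMemcpy(h_dst.ctypes.data, d_buf, h_dst.nbytes,
                                 kind.cudaMemcpyDeviceToHost)
        assert np.array_equal(h_src, h_dst)
        err, = cudart.cudaFree(d_buf)
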
    cuda.cudart.cudaMemcpyPeer(dst, int dstDevice, src, int srcDevice, size_t count)

    Copies memory between two devices.

    Copies memory from one device to memory on another device. dst is the
    base device pointer of the destination memory and dstDevice is the
    destination device. src is the base device pointer of the source
    memory and srcDevice is the source device. count specifies the number
    of bytes to copy.

    Note that this function is asynchronous with respect to the host, but
    serialized with respect to all pending and future asynchronous work on
    the current device, srcDevice, and dstDevice (use cudaMemcpyPeerAsync
    to avoid this synchronization).

    Parameters:

    • dst (Any) – Destination device pointer
    • dstDevice (int) – Destination device
    • src (Any) – Source device pointer
    • srcDevice (int) – Source device
    • count (size_t) – Size of memory copy in bytes

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

    Return type:

    cudaError_t

    cuda.cudart.cudaMemcpy2D(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area
    pointed to by src to the memory area pointed to by dst, where kind
    specifies the direction of the copy, and must be one of
    cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    dpitch and spitch are the widths in memory in bytes of the 2D arrays
    pointed to by dst and src, including any padding added to the end of
    each row. The memory areas may not overlap. width must not exceed
    either dpitch or spitch. Calling cudaMemcpy2D() with dst and src
    pointers that do not match the direction of the copy results in
    undefined behavior. cudaMemcpy2D() returns an error if dpitch or
    spitch exceeds the maximum allowed.

    Parameters:

    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2DToArray, cudaMemcpy2DFromArray,
    cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol,
    cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync,
    cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync,
    cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

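    A sketch pairing this call with cudaMallocPitch(), which returns the
    device pitch that cudaMemcpy2D() then consumes; the host matrix is
    tightly packed, so its pitch is just the row width in bytes:

        import numpy as np
        from cuda import cudart

        rows, cols = 128, 100
        h = np.random.rand(rows, cols).astype(np.float32)
        width_bytes = cols * h.itemsize          # row width in bytes
        spitch = width_bytes                     # packed host rows

        err, d_ptr, dpitch = cudart.cudaMallocPitch(width_bytes, rows)
        assert err == cudart.cudaError_t.cudaSuccess

        err, = cudart.cudaMemcpy2D(d_ptr, dpitch, h.ctypes.data, spitch,
                                   width_bytes, rows,
                                   cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
        assert err == cudart.cudaError_t.cudaSuccess
        err, = cudart.cudaFree(d_ptr)
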
    cuda.cudart.cudaMemcpy2DToArray(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area
    pointed to by src to the CUDA array dst starting at hOffset rows and
    wOffset bytes from the upper left corner, where kind specifies the
    direction of the copy, and must be one of cudaMemcpyHostToHost,
    cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    spitch is the width in memory in bytes of the 2D array pointed to by
    src, including any padding added to the end of each row. wOffset +
    width must not exceed the width of the CUDA array dst. width must not
    exceed spitch. cudaMemcpy2DToArray() returns an error if spitch
    exceeds the maximum allowed.

    Parameters:

    • dst (cudaArray_t) – Destination memory address
    • wOffset (size_t) – Destination starting X offset (columns in bytes)
    • hOffset (size_t) – Destination starting Y offset (rows)
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DFromArray,
    cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol,
    cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync,
    cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync,
    cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

    cuda.cudart.cudaMemcpy2DFromArray(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the CUDA array
    src starting at hOffset rows and wOffset bytes from the upper left
    corner to the memory area pointed to by dst, where kind specifies the
    direction of the copy, and must be one of cudaMemcpyHostToHost,
    cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    dpitch is the width in memory in bytes of the 2D array pointed to by
    dst, including any padding added to the end of each row. wOffset +
    width must not exceed the width of the CUDA array src. width must not
    exceed dpitch. cudaMemcpy2DFromArray() returns an error if dpitch
    exceeds the maximum allowed.

    Parameters:

    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (cudaArray_const_t) – Source memory address
    • wOffset (size_t) – Source starting X offset (columns in bytes)
    • hOffset (size_t) – Source starting Y offset (rows)
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray,
    cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol,
    cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync,
    cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync,
    cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

    cuda.cudart.cudaMemcpy2DArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the CUDA array
    src starting at hOffsetSrc rows and wOffsetSrc bytes from the upper
    left corner to the CUDA array dst starting at hOffsetDst rows and
    wOffsetDst bytes from the upper left corner, where kind specifies the
    direction of the copy, and must be one of cudaMemcpyHostToHost,
    cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    wOffsetDst + width must not exceed the width of the CUDA array dst.
    wOffsetSrc + width must not exceed the width of the CUDA array src.

    Parameters:

    • dst (cudaArray_t) – Destination memory address
    • wOffsetDst (size_t) – Destination starting X offset (columns in bytes)
    • hOffsetDst (size_t) – Destination starting Y offset (rows)
    • src (cudaArray_const_t) – Source memory address
    • wOffsetSrc (size_t) – Source starting X offset (columns in bytes)
    • hOffsetSrc (size_t) – Source starting Y offset (rows)
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray,
    cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync,
    cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync,
    cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync,
    cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

    cuda.cudart.cudaMemcpyAsync(dst, src, size_t count, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies count bytes from the memory area pointed to by src to the
    memory area pointed to by dst, where kind specifies the direction of
    the copy, and must be one of cudaMemcpyHostToHost,
    cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.

    The memory areas may not overlap. Calling cudaMemcpyAsync() with dst
    and src pointers that do not match the direction of the copy results
    in undefined behavior.

    cudaMemcpyAsync() is asynchronous with respect to the host, so the
    call may return before the copy is complete. The copy can optionally
    be associated to a stream by passing a non-zero stream argument. If
    kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and the
    stream is non-zero, the copy may overlap with operations in other
    streams.

    The device version of this function only handles device to device
    copies and cannot be given local or shared pointers.

    Parameters:

    • dst (Any) – Destination memory address
    • src (Any) – Source memory address
    • count (size_t) – Size in bytes to copy
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

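    A sketch of an asynchronous copy on a user stream; pinned memory from
    cudaHostAlloc() is used so the copy is genuinely eligible to overlap
    with work in other streams:

        from cuda import cudart

        n = 1 << 20
        err, h_pinned = cudart.cudaHostAlloc(n, 0)   # 0 == cudaHostAllocDefault
        err, d_buf = cudart.cudaMalloc(n)
        err, stream = cudart.cudaStreamCreate()

        err, = cudart.cudaMemcpyAsync(d_buf, h_pinned, n,
                                      cudart.cudaMemcpyKind.cudaMemcpyHostToDevice,
                                      stream)
        assert err == cudart.cudaError_t.cudaSuccess
        err, = cudart.cudaStreamSynchronize(stream)  # wait for the copy to finish

        err, = cudart.cudaStreamDestroy(stream)
        err, = cudart.cudaFree(d_buf)
        err, = cudart.cudaFreeHost(h_pinned)
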
    cuda.cudart.cudaMemcpyPeerAsync(dst, int dstDevice, src, int srcDevice, size_t count, stream)

    Copies memory between two devices asynchronously.

    Copies memory from one device to memory on another device. dst is the
    base device pointer of the destination memory and dstDevice is the
    destination device. src is the base device pointer of the source
    memory and srcDevice is the source device. count specifies the number
    of bytes to copy.

    Note that this function is asynchronous with respect to the host and
    all work on other devices.

    Parameters:

    • dst (Any) – Destination device pointer
    • dstDevice (int) – Destination device
    • src (Any) – Source device pointer
    • srcDevice (int) – Source device
    • count (size_t) – Size of memory copy in bytes
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

    Return type:

    cudaError_t

    cuda.cudart.cudaMemcpy2DAsync(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area
    pointed to by src to the memory area pointed to by dst, where kind
    specifies the direction of the copy, and must be one of
    cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    dpitch and spitch are the widths in memory in bytes of the 2D arrays
    pointed to by dst and src, including any padding added to the end of
    each row. The memory areas may not overlap. width must not exceed
    either dpitch or spitch.

    Calling cudaMemcpy2DAsync() with dst and src pointers that do not
    match the direction of the copy results in undefined behavior.
    cudaMemcpy2DAsync() returns an error if dpitch or spitch is greater
    than the maximum allowed.

    cudaMemcpy2DAsync() is asynchronous with respect to the host, so the
    call may return before the copy is complete. The copy can optionally
    be associated to a stream by passing a non-zero stream argument. If
    kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and stream is
    non-zero, the copy may overlap with operations in other streams.

    The device version of this function only handles device to device
    copies and cannot be given local or shared pointers.

    Parameters:

    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray,
    cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol,
    cudaMemcpyAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync,
    cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2DAsync

    cuda.cudart.cudaMemcpy2DToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area
    pointed to by src to the CUDA array dst starting at hOffset rows and
    wOffset bytes from the upper left corner, where kind specifies the
    direction of the copy, and must be one of cudaMemcpyHostToHost,
    cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    spitch is the width in memory in bytes of the 2D array pointed to by
    src, including any padding added to the end of each row. wOffset +
    width must not exceed the width of the CUDA array dst. width must not
    exceed spitch. cudaMemcpy2DToArrayAsync() returns an error if spitch
    exceeds the maximum allowed.

    cudaMemcpy2DToArrayAsync() is asynchronous with respect to the host,
    so the call may return before the copy is complete. The copy can
    optionally be associated to a stream by passing a non-zero stream
    argument. If kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost
    and stream is non-zero, the copy may overlap with operations in other
    streams.

    Parameters:

    • dst (cudaArray_t) – Destination memory address
    • wOffset (size_t) – Destination starting X offset (columns in bytes)
    • hOffset (size_t) – Destination starting Y offset (rows)
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync,
    cudaMemcpyFromSymbolAsync, cuMemcpy2DAsync

    cuda.cudart.cudaMemcpy2DFromArrayAsync(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the CUDA array
    src starting at hOffset rows and wOffset bytes from the upper left
    corner to the memory area pointed to by dst, where kind specifies the
    direction of the copy, and must be one of cudaMemcpyHostToHost,
    cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost,
    cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing
    cudaMemcpyDefault is recommended, in which case the type of transfer
    is inferred from the pointer values. However, cudaMemcpyDefault is
    only allowed on systems that support unified virtual addressing.
    dpitch is the width in memory in bytes of the 2D array pointed to by
    dst, including any padding added to the end of each row. wOffset +
    width must not exceed the width of the CUDA array src. width must not
    exceed dpitch. cudaMemcpy2DFromArrayAsync() returns an error if
    dpitch exceeds the maximum allowed.

    cudaMemcpy2DFromArrayAsync() is asynchronous with respect to the host,
    so the call may return before the copy is complete. The copy can
    optionally be associated to a stream by passing a non-zero stream
    argument. If kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost
    and stream is non-zero, the copy may overlap with operations in other
    streams.

    Parameters:

    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (cudaArray_const_t) – Source memory address
    • wOffset (size_t) – Source starting X offset (columns in bytes)
    • hOffset (size_t) – Source starting Y offset (rows)
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue,
    cudaErrorInvalidMemcpyDirection

    Return type:

    cudaError_t

    See also

    cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2DAsync

    cuda.cudart.cudaMemset(devPtr, int value, size_t count)

    Initializes or sets device memory to a value.

    Fills the first count bytes of the memory area pointed to by devPtr
    with the constant byte value value.

    Note that this function is asynchronous with respect to the host
    unless devPtr refers to pinned host memory.

    Parameters:

    • devPtr (Any) – Pointer to device memory
    • value (int) – Value to set for each byte of specified memory
    • count (size_t) – Size in bytes to set

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

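    A minimal fill-and-sync sketch; the synchronize accounts for the
    asynchrony noted above:

        from cuda import cudart

        err, d_buf = cudart.cudaMalloc(4096)
        err, = cudart.cudaMemset(d_buf, 0, 4096)      # asynchronous w.r.t. the host
        err, = cudart.cudaMemset(d_buf, 0xFF, 4096)   # set every byte to 0xFF
        err, = cudart.cudaDeviceSynchronize()         # ensure the fills completed
        err, = cudart.cudaFree(d_buf)
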
    cuda.cudart.cudaMemset2D(devPtr, size_t pitch, int value, size_t width, size_t height)

    Initializes or sets device memory to a value.

    Sets a matrix (height rows of width bytes each) pointed to by devPtr
    to the specified value value. pitch is the width in bytes of the 2D
    array pointed to by devPtr, including any padding added to the end of
    each row. This function performs fastest when the pitch is one that
    has been passed back by cudaMallocPitch().

    Note that this function is asynchronous with respect to the host
    unless devPtr refers to pinned host memory.

    Parameters:

    • devPtr (Any) – Pointer to 2D device memory
    • pitch (size_t) – Pitch in bytes of 2D device memory (unused if height is 1)
    • value (int) – Value to set for each byte of specified memory
    • width (size_t) – Width of matrix set (columns in bytes)
    • height (size_t) – Height of matrix set (rows)

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaMemset3D(cudaPitchedPtr pitchedDevPtr: cudaPitchedPtr, int value, cudaExtent extent: cudaExtent)

    Initializes or sets device memory to a value.

    Initializes each element of a 3D array to the specified value value.
    The object to initialize is defined by pitchedDevPtr. The pitch field
    of pitchedDevPtr is the width in memory in bytes of the 3D array
    pointed to by pitchedDevPtr, including any padding added to the end of
    each row. The xsize field specifies the logical width of each row in
    bytes, while the ysize field specifies the height of each 2D slice in
    rows. The pitch field of pitchedDevPtr is ignored when height and
    depth are both equal to 1.

    The extents of the initialized region are specified as a width in
    bytes, a height in rows, and a depth in slices.

    Extents with width greater than or equal to the xsize of pitchedDevPtr
    may perform significantly faster than extents narrower than the xsize.
    Secondarily, extents with height equal to the ysize of pitchedDevPtr
    will perform faster than when the height is shorter than the ysize.

    This function performs fastest when the pitchedDevPtr has been
    allocated by cudaMalloc3D().

    Note that this function is asynchronous with respect to the host
    unless pitchedDevPtr refers to pinned host memory.

    Parameters:

    • pitchedDevPtr (cudaPitchedPtr) – Pointer to pitched device memory
    • value (int) – Value to set for each byte of specified memory
    • extent (cudaExtent) – Size parameters for where to set device memory (width field in bytes)

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaMemsetAsync(devPtr, int value, size_t count, stream)

    Initializes or sets device memory to a value.

    Fills the first count bytes of the memory area pointed to by devPtr
    with the constant byte value value.

    cudaMemsetAsync() is asynchronous with respect to the host, so the
    call may return before the memset is complete. The operation can
    optionally be associated to a stream by passing a non-zero stream
    argument. If stream is non-zero, the operation may overlap with
    operations in other streams.

    The device version of this function only handles device to device
    copies and cannot be given local or shared pointers.

    Parameters:

    • devPtr (Any) – Pointer to device memory
    • value (int) – Value to set for each byte of specified memory
    • count (size_t) – Size in bytes to set
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

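    The stream-ordered variant, as a sketch:

        from cuda import cudart

        err, d_buf = cudart.cudaMalloc(1 << 16)
        err, stream = cudart.cudaStreamCreate()
        # Enqueue the fill; the call returns before the fill completes.
        err, = cudart.cudaMemsetAsync(d_buf, 0, 1 << 16, stream)
        err, = cudart.cudaStreamSynchronize(stream)
        err, = cudart.cudaStreamDestroy(stream)
        err, = cudart.cudaFree(d_buf)
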
    cuda.cudart.cudaMemset2DAsync(devPtr, size_t pitch, int value, size_t width, size_t height, stream)

    Initializes or sets device memory to a value.

    Sets a matrix (height rows of width bytes each) pointed to by devPtr
    to the specified value value. pitch is the width in bytes of the 2D
    array pointed to by devPtr, including any padding added to the end of
    each row. This function performs fastest when the pitch is one that
    has been passed back by cudaMallocPitch().

    cudaMemset2DAsync() is asynchronous with respect to the host, so the
    call may return before the memset is complete. The operation can
    optionally be associated to a stream by passing a non-zero stream
    argument. If stream is non-zero, the operation may overlap with
    operations in other streams.

    The device version of this function only handles device to device
    copies and cannot be given local or shared pointers.

    Parameters:

    • devPtr (Any) – Pointer to 2D device memory
    • pitch (size_t) – Pitch in bytes of 2D device memory (unused if height is 1)
    • value (int) – Value to set for each byte of specified memory
    • width (size_t) – Width of matrix set (columns in bytes)
    • height (size_t) – Height of matrix set (rows)
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    -cuda.cudart.cudaMemset3DAsync(cudaPitchedPtr pitchedDevPtr: cudaPitchedPtr, int value, cudaExtent extent: cudaExtent, stream)#
    -

    Initializes or sets device memory to a value.

    -

    Initializes each element of a 3D array to the specified value value. -The object to initialize is defined by pitchedDevPtr. The pitch -field of pitchedDevPtr is the width in memory in bytes of the 3D -array pointed to by pitchedDevPtr, including any padding added to the -end of each row. The xsize field specifies the logical width of each -row in bytes, while the ysize field specifies the height of each 2D -slice in rows. The pitch field of pitchedDevPtr is ignored when -height and depth are both equal to 1.

    -

    The extents of the initialized region are specified as a width in -bytes, a height in rows, and a depth in slices.

    -

    Extents with width greater than or equal to the xsize of -pitchedDevPtr may perform significantly faster than extents narrower -than the xsize. Secondarily, extents with height equal to the -ysize of pitchedDevPtr will perform faster than when the height -is shorter than the ysize.

    -

    This function performs fastest when the pitchedDevPtr has been -allocated by cudaMalloc3D().

    -

    cudaMemset3DAsync() is asynchronous with respect to the -host, so the call may return before the memset is complete. The -operation can optionally be associated to a stream by passing a non- -zero stream argument. If stream is non-zero, the operation may -overlap with operations in other streams.

    -

    The device version of this function only handles device to device -copies and cannot be given local or shared pointers.

Parameters:
• pitchedDevPtr (cudaPitchedPtr) – Pointer to pitched device memory
• value (int) – Value to set for each byte of specified memory
• extent (cudaExtent) – Size parameters for where to set device memory (width field in bytes)
• stream (CUstream or cudaStream_t) – Stream identifier

Returns:
cudaSuccess, cudaErrorInvalidValue

Return type:
cudaError_t
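
An illustrative sketch combining make_cudaExtent and cudaMalloc3D (same tuple-return assumption as above; error checks elided):

    from cuda import cudart

    # Extent: width in bytes, height in rows, depth in slices.
    err, extent = cudart.make_cudaExtent(256, 16, 4)
    err, pitchedDevPtr = cudart.cudaMalloc3D(extent)
    err, stream = cudart.cudaStreamCreate()

    # Zero every byte of the 3D allocation asynchronously.
    err, = cudart.cudaMemset3DAsync(pitchedDevPtr, 0, extent, stream)
    err, = cudart.cudaStreamSynchronize(stream)
    err, = cudart.cudaFree(pitchedDevPtr.ptr)
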
cuda.cudart.cudaMemPrefetchAsync(devPtr, size_t count, int dstDevice, stream)

    Prefetches memory to the specified destination device.

Prefetches memory to the specified destination device. devPtr is the base device pointer of the memory to be prefetched and dstDevice is the destination device. count specifies the number of bytes to copy. stream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables, or it may also refer to system-allocated memory on systems with non-zero cudaDevAttrPageableMemoryAccess.

Passing in cudaCpuDeviceId for dstDevice will prefetch the data to host memory. If dstDevice is a GPU, then the device attribute cudaDevAttrConcurrentManagedAccess must be non-zero. Additionally, stream must be associated with a device that has a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess.

The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cudaMallocManaged allocations to host memory in order to make room. Device memory allocated using cudaMalloc or cudaMallocArray will not be evicted.

By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only set up on dstDevice. The exact behavior however also depends on the settings applied to this memory range via cudaMemAdvise as described below:

If cudaMemAdviseSetReadMostly was set on any subset of this memory range, then that subset will create a read-only copy of the pages on dstDevice.

If cudaMemAdviseSetPreferredLocation was called on any subset of this memory range, then the pages will be migrated to dstDevice even if dstDevice is not the preferred location of any pages in the memory range.

If cudaMemAdviseSetAccessedBy was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

Note that this function is asynchronous with respect to the host and all work on other devices.

Parameters:
• devPtr (Any) – Pointer to be prefetched
• count (size_t) – Size in bytes
• dstDevice (int) – Destination device to prefetch to
• stream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

Returns:
cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

Return type:
cudaError_t
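
An illustrative managed-memory sketch (tuple-return assumption as above; error checks elided):

    from cuda import cudart

    nbytes = 1 << 20
    err, ptr = cudart.cudaMallocManaged(nbytes, cudart.cudaMemAttachGlobal)
    err, stream = cudart.cudaStreamCreate()

    # Prefetch to device 0, then back to the host.
    err, = cudart.cudaMemPrefetchAsync(ptr, nbytes, 0, stream)
    err, = cudart.cudaMemPrefetchAsync(ptr, nbytes, cudart.cudaCpuDeviceId, stream)
    err, = cudart.cudaStreamSynchronize(stream)
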
cuda.cudart.cudaMemPrefetchAsync_v2(devPtr, size_t count, cudaMemLocation location: cudaMemLocation, unsigned int flags, stream)

    Prefetches memory to the specified destination location.

Prefetches memory to the specified destination location. devPtr is the base device pointer of the memory to be prefetched and location specifies the destination location. count specifies the number of bytes to copy. stream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables, or it may also refer to system-allocated memory on systems with non-zero cudaDevAttrPageableMemoryAccess.

Specifying cudaMemLocationTypeDevice for type will prefetch memory to the GPU specified by device ordinal id, which must have a non-zero value for the device attribute concurrentManagedAccess. Additionally, stream must be associated with a device that has a non-zero value for the device attribute concurrentManagedAccess. Specifying cudaMemLocationTypeHost as type will prefetch data to host memory. Applications can request prefetching memory to a specific host NUMA node by specifying cudaMemLocationTypeHostNuma for type and a valid host NUMA node id in id. Users can also request prefetching memory to the host NUMA node closest to the current thread’s CPU by specifying cudaMemLocationTypeHostNumaCurrent for type. Note when type is either cudaMemLocationTypeHost or cudaMemLocationTypeHostNumaCurrent, id will be ignored.

The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cudaMallocManaged allocations to host memory in order to make room. Device memory allocated using cudaMalloc or cudaMallocArray will not be evicted.

By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only set up on the destination location. The exact behavior however also depends on the settings applied to this memory range via cuMemAdvise as described below:

If cudaMemAdviseSetReadMostly was set on any subset of this memory range, then that subset will create a read-only copy of the pages on the destination location. If however the destination location is a host NUMA node, then any pages of that subset that are already in another host NUMA node will be transferred to the destination.

If cudaMemAdviseSetPreferredLocation was called on any subset of this memory range, then the pages will be migrated to location even if location is not the preferred location of any pages in the memory range.

If cudaMemAdviseSetAccessedBy was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

Note that this function is asynchronous with respect to the host and all work on other devices.

Parameters:
• devPtr (Any) – Pointer to be prefetched
• count (size_t) – Size in bytes
• location (cudaMemLocation) – Location to prefetch to
• flags (unsigned int) – Flags for future use; must be zero now
• stream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

Returns:
cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

Return type:
cudaError_t
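
A sketch of the _v2 variant, reusing ptr, nbytes, and stream from the cudaMemPrefetchAsync example above (illustrative; tuple-return assumption as before):

    from cuda import cudart

    loc = cudart.cudaMemLocation()
    loc.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice
    loc.id = 0  # device ordinal

    # flags must currently be zero.
    err, = cudart.cudaMemPrefetchAsync_v2(ptr, nbytes, loc, 0, stream)
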
cuda.cudart.cudaMemAdvise(devPtr, size_t count, advice: cudaMemoryAdvise, int device)

    Advise about the usage of a given memory range.

Advise the Unified Memory subsystem about the usage pattern for the memory range starting at devPtr with a size of count bytes. The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the advice is applied. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables. The memory range could also refer to system-allocated pageable memory provided it represents a valid, host-accessible region of memory and all additional constraints imposed by advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable memory range results in an error being returned.

The advice parameter can take the following values:

• cudaMemAdviseSetReadMostly: This implies that the data is mostly going to be read from and only occasionally written to. Any read accesses from any processor to this region will create a read-only copy of at least the accessed pages in that processor’s memory. Additionally, if cudaMemPrefetchAsync is called on this region, it will create a read-only copy of the data on the destination processor. If any processor writes to this region, all copies of the corresponding page will be invalidated except for the one where the write occurred. The device argument is ignored for this advice. Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU that has a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Also, if a context is created on a device that does not have the device attribute cudaDevAttrConcurrentManagedAccess set, then read-duplication will not occur until all such contexts are destroyed. If the memory region refers to valid system-allocated pageable memory, then the accessing device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess for a read-only copy to be created on that device. Note however that if the accessing device also has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then setting this advice will not create a read-only copy when that device accesses this memory region.

• cudaMemAdviseUnsetReadMostly: Undoes the effect of cudaMemAdviseSetReadMostly and also prevents the Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated copies of the data will be collapsed into a single copy. The location for the collapsed copy will be the preferred location if the page has a preferred location and one of the read-duplicated copies was resident at that location. Otherwise, the location chosen is arbitrary.

• cudaMemAdviseSetPreferredLocation: This advice sets the preferred location for the data to be the memory belonging to device. Passing in cudaCpuDeviceId for device sets the preferred location as host memory. If device is a GPU, then it must have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy when a fault occurs on that memory region. If the data is already in its preferred location and the faulting processor can establish a mapping without requiring the data to be migrated, then data migration will be avoided. On the other hand, if the data is not in its preferred location or if a direct mapping cannot be established, then it will be migrated to the processor accessing it. It is important to note that setting the preferred location does not prevent data prefetching done using cudaMemPrefetchAsync. Having a preferred location can override the page thrash detection and resolution logic in the Unified Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device memory, the page may eventually be pinned to host memory by the Unified Memory driver. But if the preferred location is set as device memory, then the page will continue to thrash indefinitely. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice, unless read accesses from device will not result in a read-only copy being created on that device as outlined in description for the advice cudaMemAdviseSetReadMostly. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess.

• cudaMemAdviseUnsetPreferredLocation: Undoes the effect of cudaMemAdviseSetPreferredLocation and changes the preferred location to none.

• cudaMemAdviseSetAccessedBy: This advice implies that the data will be accessed by device. Passing in cudaCpuDeviceId for device will set the advice for the CPU. If device is a GPU, then the device attribute cudaDevAttrConcurrentManagedAccess must be non-zero. This advice does not cause data migration and has no impact on the location of the data per se. Instead, it causes the data to always be mapped in the specified processor’s page tables, as long as the location of the data permits a mapping to be established. If the data gets migrated for any reason, the mappings are updated accordingly. This advice is recommended in scenarios where data locality is not important, but avoiding faults is. Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data over to the other GPUs is not as important because the accesses are infrequent and the overhead of migration may be too high. But preventing faults can still help improve performance, and so having a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated to host memory because the CPU typically cannot access device memory directly. Any GPU that had the cudaMemAdviseSetAccessedBy flag set for this data will now have its mapping updated to point to the page in host memory. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice. Additionally, if the preferred location of this memory region or any subset of it is also device, then the policies associated with cudaMemAdviseSetPreferredLocation will override the policies of this advice. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if device has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.

• cudaMemAdviseUnsetAccessedBy: Undoes the effect of cudaMemAdviseSetAccessedBy. Any mappings to the data from device may be removed at any time causing accesses to result in non-fatal page faults. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if device has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.

Parameters:
• devPtr (Any) – Pointer to memory to set the advice for
• count (size_t) – Size in bytes of the memory range
• advice (cudaMemoryAdvise) – Advice to be applied for the specified memory range
• device (int) – Device to apply the advice for

Returns:
cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

Return type:
cudaError_t
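
An illustrative sketch, reusing ptr and nbytes from the managed-memory example above (tuple-return assumption as before):

    from cuda import cudart

    advise = cudart.cudaMemoryAdvise
    # Mark the range read-mostly (device argument ignored for this advice),
    # then ask for it to be pre-mapped on device 0.
    err, = cudart.cudaMemAdvise(ptr, nbytes, advise.cudaMemAdviseSetReadMostly, 0)
    err, = cudart.cudaMemAdvise(ptr, nbytes, advise.cudaMemAdviseSetAccessedBy, 0)
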
cuda.cudart.cudaMemAdvise_v2(devPtr, size_t count, advice: cudaMemoryAdvise, cudaMemLocation location: cudaMemLocation)

    Advise about the usage of a given memory range.

Advise the Unified Memory subsystem about the usage pattern for the memory range starting at devPtr with a size of count bytes. The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the advice is applied. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables. The memory range could also refer to system-allocated pageable memory provided it represents a valid, host-accessible region of memory and all additional constraints imposed by advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable memory range results in an error being returned.

The advice parameter can take the following values:

• cudaMemAdviseSetReadMostly: This implies that the data is mostly going to be read from and only occasionally written to. Any read accesses from any processor to this region will create a read-only copy of at least the accessed pages in that processor’s memory. Additionally, if cudaMemPrefetchAsync or cudaMemPrefetchAsync_v2 is called on this region, it will create a read-only copy of the data on the destination processor. If the target location for cudaMemPrefetchAsync_v2 is a host NUMA node and a read-only copy already exists on another host NUMA node, that copy will be migrated to the targeted host NUMA node. If any processor writes to this region, all copies of the corresponding page will be invalidated except for the one where the write occurred. If the writing processor is the CPU and the preferred location of the page is a host NUMA node, then the page will also be migrated to that host NUMA node. The location argument is ignored for this advice. Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU that has a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Also, if a context is created on a device that does not have the device attribute cudaDevAttrConcurrentManagedAccess set, then read-duplication will not occur until all such contexts are destroyed. If the memory region refers to valid system-allocated pageable memory, then the accessing device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess for a read-only copy to be created on that device. Note however that if the accessing device also has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then setting this advice will not create a read-only copy when that device accesses this memory region.

• cudaMemAdviseUnsetReadMostly: Undoes the effect of cudaMemAdviseSetReadMostly and also prevents the Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated copies of the data will be collapsed into a single copy. The location for the collapsed copy will be the preferred location if the page has a preferred location and one of the read-duplicated copies was resident at that location. Otherwise, the location chosen is arbitrary. Note: The location argument is ignored for this advice.

• cudaMemAdviseSetPreferredLocation: This advice sets the preferred location for the data to be the memory belonging to location. When type is cudaMemLocationTypeHost, id is ignored and the preferred location is set to be host memory. To set the preferred location to a specific host NUMA node, applications must set type to cudaMemLocationTypeHostNuma and id must specify the NUMA ID of the host NUMA node. If type is set to cudaMemLocationTypeHostNumaCurrent, id will be ignored and the host NUMA node closest to the calling thread’s CPU will be used as the preferred location. If type is cudaMemLocationTypeDevice, then id must be a valid device ordinal and the device must have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy when a fault occurs on that memory region. If the data is already in its preferred location and the faulting processor can establish a mapping without requiring the data to be migrated, then data migration will be avoided. On the other hand, if the data is not in its preferred location or if a direct mapping cannot be established, then it will be migrated to the processor accessing it. It is important to note that setting the preferred location does not prevent data prefetching done using cudaMemPrefetchAsync. Having a preferred location can override the page thrash detection and resolution logic in the Unified Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device memory, the page may eventually be pinned to host memory by the Unified Memory driver. But if the preferred location is set as device memory, then the page will continue to thrash indefinitely. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice, unless read accesses from location will not result in a read-only copy being created on that processor as outlined in description for the advice cudaMemAdviseSetReadMostly. If the memory region refers to valid system-allocated pageable memory, and type is cudaMemLocationTypeDevice, then id must be a valid device that has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess.

• cudaMemAdviseUnsetPreferredLocation: Undoes the effect of cudaMemAdviseSetPreferredLocation and changes the preferred location to none. The location argument is ignored for this advice.

• cudaMemAdviseSetAccessedBy: This advice implies that the data will be accessed by processor location. The type must be either cudaMemLocationTypeDevice with id representing a valid device ordinal or cudaMemLocationTypeHost and id will be ignored. All other location types are invalid. If id is a GPU, then the device attribute cudaDevAttrConcurrentManagedAccess must be non-zero. This advice does not cause data migration and has no impact on the location of the data per se. Instead, it causes the data to always be mapped in the specified processor’s page tables, as long as the location of the data permits a mapping to be established. If the data gets migrated for any reason, the mappings are updated accordingly. This advice is recommended in scenarios where data locality is not important, but avoiding faults is. Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data over to the other GPUs is not as important because the accesses are infrequent and the overhead of migration may be too high. But preventing faults can still help improve performance, and so having a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated to host memory because the CPU typically cannot access device memory directly. Any GPU that had the cudaMemAdviseSetAccessedBy flag set for this data will now have its mapping updated to point to the page in host memory. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice. Additionally, if the preferred location of this memory region or any subset of it is also location, then the policies associated with cudaMemAdviseSetPreferredLocation will override the policies of this advice. If the memory region refers to valid system-allocated pageable memory, and type is cudaMemLocationTypeDevice, then the device in id must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if id has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.

• cudaMemAdviseUnsetAccessedBy: Undoes the effect of cudaMemAdviseSetAccessedBy. Any mappings to the data from location may be removed at any time causing accesses to result in non-fatal page faults. If the memory region refers to valid system-allocated pageable memory, and type is cudaMemLocationTypeDevice, then the device in id must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if id has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.

Parameters:
• devPtr (Any) – Pointer to memory to set the advice for
• count (size_t) – Size in bytes of the memory range
• advice (cudaMemoryAdvise) – Advice to be applied for the specified memory range
• location (cudaMemLocation) – Location to apply the advice for

Returns:
cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

Return type:
cudaError_t

cuda.cudart.cudaMemRangeGetAttribute(size_t dataSize, attribute: cudaMemRangeAttribute, devPtr, size_t count)

    Query an attribute of a given memory range.

Query an attribute about the memory range starting at devPtr with a size of count bytes. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables.

The attribute parameter can take the following values:

• cudaMemRangeAttributeReadMostly: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be 1 if all pages in the given memory range have read-duplication enabled, or 0 otherwise.

• cudaMemRangeAttributePreferredLocation: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be a GPU device id if all pages in the memory range have that GPU as their preferred location, or it will be cudaCpuDeviceId if all pages in the memory range have the CPU as their preferred location, or it will be cudaInvalidDeviceId if either all the pages don’t have the same preferred location or some of the pages don’t have a preferred location at all. Note that the actual location of the pages in the memory range at the time of the query may be different from the preferred location.

• cudaMemRangeAttributeAccessedBy: If this attribute is specified, data will be interpreted as an array of 32-bit integers, and dataSize must be a non-zero multiple of 4. The result returned will be a list of device ids that had cudaMemAdviseSetAccessedBy set for that entire memory range. If any device does not have that advice set for the entire memory range, that device will not be included. If data is larger than the number of devices that have that advice set for that memory range, cudaInvalidDeviceId will be returned in all the extra space provided. For example, if dataSize is 12 (i.e. data has 3 elements) and only device 0 has the advice set, then the result returned will be { 0, cudaInvalidDeviceId, cudaInvalidDeviceId }. If data is smaller than the number of devices that have that advice set, then only as many devices will be returned as can fit in the array. There is no guarantee on which specific devices will be returned, however.

• cudaMemRangeAttributeLastPrefetchLocation: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be the last location to which all pages in the memory range were prefetched explicitly via cudaMemPrefetchAsync. This will either be a GPU id or cudaCpuDeviceId depending on whether the last location for prefetch was a GPU or the CPU respectively. If any page in the memory range was never explicitly prefetched or if all pages were not prefetched to the same location, cudaInvalidDeviceId will be returned. Note that this simply returns the last location that the application requested to prefetch the memory range to. It gives no indication as to whether the prefetch operation to that location has completed or even begun.

• cudaMemRangeAttributePreferredLocationId: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. If the cudaMemRangeAttributePreferredLocationType query for the same address range returns cudaMemLocationTypeDevice, it will be a valid device ordinal; if it returns cudaMemLocationTypeHostNuma, it will be a valid host NUMA node ID; if it returns any other location type, the id should be ignored.

• cudaMemRangeAttributeLastPrefetchLocationId: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. If the cudaMemRangeAttributeLastPrefetchLocationType query for the same address range returns cudaMemLocationTypeDevice, it will be a valid device ordinal; if it returns cudaMemLocationTypeHostNuma, it will be a valid host NUMA node ID; if it returns any other location type, the id should be ignored.

Parameters:
• dataSize (size_t) – Array containing the size of data
• attribute (cudaMemRangeAttribute) – The attribute to query
• devPtr (Any) – Start of the range to query
• count (size_t) – Size of the range to query

Returns:
• cudaError_t – cudaSuccess, cudaErrorInvalidValue
• data (Any) – A pointer to a memory location where the result of each attribute query will be written to
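
An illustrative query, reusing ptr and nbytes from the managed-memory example above (the exact Python type of the returned data depends on the bindings; tuple-return assumption as before):

    from cuda import cudart

    attr = cudart.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly
    # dataSize must be 4 for this attribute; the 32-bit result is 1 if the
    # whole range is read-duplicated, 0 otherwise.
    err, data = cudart.cudaMemRangeGetAttribute(4, attr, ptr, nbytes)
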
cuda.cudart.cudaMemRangeGetAttributes(dataSizes: Tuple[int] | List[int], attributes: Optional[Tuple[cudaMemRangeAttribute] | List[cudaMemRangeAttribute]], size_t numAttributes, devPtr, size_t count)

    Query attributes of a given memory range.

Query attributes of the memory range starting at devPtr with a size of count bytes. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables. The attributes array will be interpreted to have numAttributes entries. The dataSizes array will also be interpreted to have numAttributes entries. The results of the query will be stored in data.

The list of supported attributes is given below. Please refer to cudaMemRangeGetAttribute for attribute descriptions and restrictions.
Parameters:
• dataSizes (List[int]) – Array containing the sizes of each result
• attributes (List[cudaMemRangeAttribute]) – An array of attributes to query (numAttributes and the number of attributes in this array should match)
• numAttributes (size_t) – Number of attributes to query
• devPtr (Any) – Start of the range to query
• count (size_t) – Size of the range to query

Returns:
• cudaError_t – cudaSuccess, cudaErrorInvalidValue
• data (List[Any]) – A two-dimensional array containing pointers to memory locations where the result of each attribute query will be written to

cuda.cudart.make_cudaPitchedPtr(d, size_t p, size_t xsz, size_t ysz)

    Returns a cudaPitchedPtr based on input parameters.

Returns a cudaPitchedPtr based on the specified input parameters d, p, xsz, and ysz.

Parameters:
• d (Any) – Pointer to allocated memory
• p (size_t) – Pitch of allocated memory in bytes
• xsz (size_t) – Logical width of allocation in elements
• ysz (size_t) – Logical height of allocation in elements

Returns:
• cudaError_t.cudaSuccess – cudaError_t.cudaSuccess
• cudaPitchedPtr – cudaPitchedPtr specified by d, p, xsz, and ysz

cuda.cudart.make_cudaPos(size_t x, size_t y, size_t z)

    Returns a cudaPos based on input parameters.

Returns a cudaPos based on the specified input parameters x, y, and z.

Parameters:
• x (size_t) – X position
• y (size_t) – Y position
• z (size_t) – Z position

Returns:
• cudaError_t.cudaSuccess – cudaError_t.cudaSuccess
• cudaPos – cudaPos specified by x, y, and z

cuda.cudart.make_cudaExtent(size_t w, size_t h, size_t d)

    Returns a cudaExtent based on input parameters.

Returns a cudaExtent based on the specified input parameters w, h, and d.

Parameters:
• w (size_t) – Width in elements when referring to array memory, in bytes when referring to linear memory
• h (size_t) – Height in elements
• d (size_t) – Depth in elements

Returns:
• cudaError_t.cudaSuccess – cudaError_t.cudaSuccess
• cudaExtent – cudaExtent specified by w, h, and d

Stream Ordered Memory Allocator

Overview

The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use before allocation / use after free error will cause undefined behavior.

The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee.

Supported Platforms

Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cudaDeviceGetAttribute() with the device attribute cudaDevAttrMemoryPoolsSupported.

cuda.cudart.cudaMallocAsync(size_t size, hStream)

    Allocates memory with stream ordered semantics.

Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the memory pool associated with the stream’s device.

Parameters:
• size (size_t) – Number of bytes to allocate
• hStream (CUstream or cudaStream_t) – The stream establishing the stream ordering contract and the memory pool to allocate from

Returns:
-

    Notes

The default memory pool of a device contains device memory from that device.

Basic stream ordering allows future work submitted into the same stream to use the allocation. Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation operation completes before work submitted in a separate stream runs.

During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool’s properties are used to set the node’s creation parameters.

cuda.cudart.cudaFreeAsync(devPtr, hStream)

    Frees memory with stream ordered semantics.

Inserts a free operation into hStream. The allocation must not be accessed after stream execution reaches the free. After this API returns, accessing the memory from any subsequent work launched on the GPU or querying its pointer attributes results in undefined behavior.

Parameters:
• devPtr (Any) – Memory to free
• hStream (CUstream or cudaStream_t) – The stream establishing the stream ordering promise

Returns:
cudaSuccess, cudaErrorInvalidValue, cudaErrorNotSupported

Return type:
cudaError_t

    Notes

During stream capture, this function results in the creation of a free node and must therefore be passed the address of a graph allocation.
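
An illustrative stream-ordered allocate/free pair (tuple-return assumption as above; error checks elided):

    from cuda import cudart

    err, stream = cudart.cudaStreamCreate()

    # Allocation and free are both enqueued on `stream`.
    err, ptr = cudart.cudaMallocAsync(1 << 20, stream)
    # ... launch kernels on `stream` that use ptr ...
    err, = cudart.cudaFreeAsync(ptr, stream)

    # Synchronizing guarantees the free has completed on the host timeline.
    err, = cudart.cudaStreamSynchronize(stream)
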
cuda.cudart.cudaMemPoolTrimTo(memPool, size_t minBytesToKeep)

    Tries to release memory back to the OS.

Releases memory back to the OS until the pool contains fewer than minBytesToKeep reserved bytes, or there is no more memory that the allocator can safely release. The allocator cannot release OS allocations that back outstanding asynchronous allocations. The OS allocations may happen at different granularity from the user allocations.

Parameters:
• memPool (CUmemoryPool or cudaMemPool_t) – The memory pool to trim
• minBytesToKeep (size_t) – If the pool has less than minBytesToKeep reserved, the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have at least minBytesToKeep bytes reserved after the operation.

Returns:
cudaSuccess, cudaErrorInvalidValue

Return type:
cudaError_t

    Notes

Allocations that have not been freed count as outstanding.

Allocations that have been asynchronously freed but whose completion has not been observed on the host (e.g. by a synchronize) can count as outstanding.

cuda.cudart.cudaMemPoolSetAttribute(memPool, attr: cudaMemPoolAttr, value)

    Sets attributes of a memory pool.

Supported attributes are:

• cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

• cudaMemPoolReuseFollowEventDependencies: (value type = int) Allow cudaMallocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

• cudaMemPoolReuseAllowOpportunistic: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

• cudaMemPoolReuseAllowInternalDependencies: (value type = int) Allow cudaMallocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cudaFreeAsync. (default enabled)

• cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) Reset the high watermark that tracks the amount of backing memory that was allocated for the memory pool. It is illegal to set this attribute to a non-zero value.

• cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) Reset the high watermark that tracks the amount of used memory that was allocated for the memory pool. It is illegal to set this attribute to a non-zero value.

Parameters:
-

Returns:
cudaSuccess, cudaErrorInvalidValue

Return type:
cudaError_t
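
An illustrative sketch of raising the release threshold on a device's default pool (tuple-return assumption as above; it also assumes the cuuint64_t wrapper is exposed by the driver module of these bindings):

    from cuda import cuda, cudart

    err, pool = cudart.cudaDeviceGetDefaultMemPool(0)

    # Keep up to 128 MiB cached in the pool instead of returning it to the OS.
    threshold = cuda.cuuint64_t(128 * 1024 * 1024)
    err, = cudart.cudaMemPoolSetAttribute(
        pool, cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold, threshold)
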
cuda.cudart.cudaMemPoolGetAttribute(memPool, attr: cudaMemPoolAttr)

    Gets attributes of a memory pool.

Supported attributes are:

• cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

• cudaMemPoolReuseFollowEventDependencies: (value type = int) Allow cudaMallocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

• cudaMemPoolReuseAllowOpportunistic: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

• cudaMemPoolReuseAllowInternalDependencies: (value type = int) Allow cudaMallocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cudaFreeAsync. (default enabled)

• cudaMemPoolAttrReservedMemCurrent: (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool.

• cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset.

• cudaMemPoolAttrUsedMemCurrent: (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.

• cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset.

Parameters:
-

Returns:
-

cuda.cudart.cudaMemPoolSetAccess(memPool, descList: Optional[Tuple[cudaMemAccessDesc] | List[cudaMemAccessDesc]], size_t count)

    Controls visibility of pools between devices.

Parameters:
• memPool (CUmemoryPool or cudaMemPool_t) – The pool being modified
• descList (List[cudaMemAccessDesc]) – Array of access descriptors. Each descriptor instructs the access to enable for a single gpu.
• count (size_t) – Number of descriptors in the descList array

Returns:
cudaSuccess, cudaErrorInvalidValue

Return type:
cudaError_t
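
An illustrative sketch granting another device access to the pool obtained above (tuple-return assumption as before):

    from cuda import cudart

    desc = cudart.cudaMemAccessDesc()
    desc.location.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice
    desc.location.id = 1  # grant device 1 access to the pool
    desc.flags = cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite

    err, = cudart.cudaMemPoolSetAccess(pool, [desc], 1)
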
cuda.cudart.cudaMemPoolGetAccess(memPool, cudaMemLocation location: Optional[cudaMemLocation])

    Returns the accessibility of a pool from a device.

Returns the accessibility of the pool’s memory from the specified location.

Parameters:
-

Returns:
• cudaError_t
• flags (cudaMemAccessFlags) – the accessibility of the pool from the specified location

cuda.cudart.cudaMemPoolCreate(cudaMemPoolProps poolProps: Optional[cudaMemPoolProps])

    Creates a memory pool.

Creates a CUDA memory pool and returns the handle in pool. The poolProps determines the properties of the pool such as the backing device and IPC capabilities.

To create a memory pool targeting a specific host NUMA node, applications must set cudaMemPoolProps::cudaMemLocation::type to cudaMemLocationTypeHostNuma and cudaMemPoolProps::cudaMemLocation::id must specify the NUMA ID of the host memory node. Specifying cudaMemLocationTypeHostNumaCurrent or cudaMemLocationTypeHost as the cudaMemPoolProps::cudaMemLocation::type will result in cudaErrorInvalidValue. By default, the pool’s memory will be accessible from the device it is allocated on. In the case of pools created with cudaMemLocationTypeHostNuma, their default accessibility will be from the host CPU. Applications can control the maximum size of the pool by specifying a non-zero value for maxSize. If set to 0, the maximum size of the pool will default to a system dependent value.

Applications can set handleTypes to cudaMemHandleTypeFabric in order to create cudaMemPool_t suitable for sharing within an IMEX domain. An IMEX domain is either an OS instance or a group of securely connected OS instances using the NVIDIA IMEX daemon. An IMEX channel is a global resource within the IMEX domain that represents a logical entity that aims to provide fine grained accessibility control for the participating processes. When exporter and importer CUDA processes have been granted access to the same IMEX channel, they can securely share memory. If the allocating process does not have access setup for an IMEX channel, attempting to export a CUmemoryPool with cudaMemHandleTypeFabric will result in cudaErrorNotPermitted. The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels.

Parameters:
poolProps (cudaMemPoolProps) – None

Returns:
-

    Notes

Specifying cudaMemHandleTypeNone creates a memory pool that will not support IPC.

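
An illustrative sketch of creating a device-backed, non-IPC pool (tuple-return assumption as above):

    from cuda import cudart

    props = cudart.cudaMemPoolProps()
    props.allocType = cudart.cudaMemAllocationType.cudaMemAllocationTypePinned
    props.handleTypes = cudart.cudaMemAllocationHandleType.cudaMemHandleTypeNone
    props.location.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice
    props.location.id = 0  # backing device ordinal

    err, pool = cudart.cudaMemPoolCreate(props)
    # ... allocate from it with cudaMallocFromPoolAsync ...
    err, = cudart.cudaMemPoolDestroy(pool)
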
cuda.cudart.cudaMemPoolDestroy(memPool)

    Destroys the specified memory pool.

If any pointers obtained from this pool haven’t been freed or the pool has free operations that haven’t completed when cudaMemPoolDestroy is invoked, the function will return immediately and the resources associated with the pool will be released automatically once there are no more outstanding allocations.

Destroying the current mempool of a device sets the default mempool of that device as the current mempool for that device.

Parameters:
memPool (CUmemoryPool or cudaMemPool_t) – None

Returns:
cudaSuccess, cudaErrorInvalidValue

Return type:
cudaError_t

    Notes

A device’s default memory pool cannot be destroyed.

cuda.cudart.cudaMallocFromPoolAsync(size_t size, memPool, stream)

    Allocates memory from a specified pool with stream ordered semantics.

Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the specified memory pool.

Parameters:
-

Returns:
-

    Notes

During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool’s properties are used to set the node’s creation parameters.

cuda.cudart.cudaMemPoolExportToShareableHandle(memPool, handleType: cudaMemAllocationHandleType, unsigned int flags)

    Exports a memory pool to the requested handle type.

Given an IPC capable mempool, create an OS handle to share the pool with another process. A recipient process can convert the shareable handle into a mempool with cudaMemPoolImportFromShareableHandle. Individual pointers can then be shared with the cudaMemPoolExportPointer and cudaMemPoolImportPointer APIs. The implementation of what the shareable handle is and how it can be transferred is defined by the requested handle type.

Parameters:
-

Returns:
• cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorOutOfMemory
• handle_out (Any) – pointer to the location in which to store the requested handle

    Notes

To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than cudaMemHandleTypeNone.

cuda.cudart.cudaMemPoolImportFromShareableHandle(shareableHandle, handleType: cudaMemAllocationHandleType, unsigned int flags)

Imports a memory pool from a shared handle.

Specific allocations can be imported from the imported pool with cudaMemPoolImportPointer.

Parameters:
• shareableHandle (Any) – OS handle of the pool to open
• handleType (cudaMemAllocationHandleType) – The type of handle being imported
• flags (unsigned int) – must be 0

Returns:
-

    Notes

Imported memory pools do not support creating new allocations. As such imported memory pools may not be used in cudaDeviceSetMemPool or cudaMallocFromPoolAsync calls.

cuda.cudart.cudaMemPoolExportPointer(ptr)

    Export data to share a memory pool allocation between processes.

Constructs shareData_out for sharing a specific allocation from an already shared memory pool. The recipient process can import the allocation with the cudaMemPoolImportPointer API. The data is not a handle and may be shared through any IPC mechanism.

Parameters:
ptr (Any) – pointer to memory being exported

Returns:
-

cuda.cudart.cudaMemPoolImportPointer(memPool, cudaMemPoolPtrExportData exportData: Optional[cudaMemPoolPtrExportData])

    Import a memory pool allocation from another process.

Returns in ptr_out a pointer to the imported memory. The imported memory must not be accessed before the allocation operation completes in the exporting process. The imported memory must be freed from all importing processes before being freed in the exporting process. The pointer may be freed with cudaFree or cudaFreeAsync. If cudaFreeAsync is used, the free must be completed on the importing process before the free operation on the exporting process.

Parameters:
-

Returns:
-

    Notes

The cudaFreeAsync API may be used in the exporting process before the cudaFreeAsync operation completes in its stream as long as the cudaFreeAsync in the exporting process specifies a stream with a stream dependency on the importing process’s cudaFreeAsync.

Unified Addressing

This section describes the unified addressing functions of the CUDA runtime application programming interface.

Overview

CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer – the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below).

    Supported Platforms

Whether or not a device supports unified addressing may be queried by calling cudaGetDeviceProperties() with the device property cudaDeviceProp::unifiedAddressing.

Unified addressing is automatically enabled in 64-bit processes.

Looking Up Information from Pointer Values

It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cudaPointerGetAttributes().

Since pointers are unique, it is not necessary to specify information about the pointers specified to cudaMemcpy() and other copy functions. The copy direction cudaMemcpyDefault may be used to specify that the CUDA runtime should infer the location of the pointer from its value.

    Automatic Mapping of Host Allocated Host Memory

All host memory allocated through all devices using cudaMallocHost() and cudaHostAlloc() is always directly accessible from all devices that support unified addressing. This is the case regardless of whether or not the flags cudaHostAllocPortable and cudaHostAllocMapped are specified.

The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host. It is not necessary to call cudaHostGetDevicePointer() to get the device pointer for these allocations.

Note that this is not the case for memory allocated using the flag cudaHostAllocWriteCombined, as discussed below.

    Direct Access of Peer Memory

Upon enabling direct access from a device that supports unified addressing to another peer device that supports unified addressing using cudaDeviceEnablePeerAccess(), all memory allocated in the peer device using cudaMalloc() and cudaMallocPitch() will immediately be accessible by the current device. The device pointer value through which any peer’s memory may be accessed in the current device is the same pointer value through which that memory may be accessed from the peer device.

    Exceptions, Disjoint Addressing

Not all memory may be accessed on devices through the same pointer value through which they are accessed on the host. These exceptions are host memory registered using cudaHostRegister() and host memory allocated using the flag cudaHostAllocWriteCombined. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all devices that support unified addressing.

This device address may be queried using cudaHostGetDevicePointer() when a device using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory in cudaMemcpy() and similar functions using the cudaMemcpyDefault memory direction.

cuda.cudart.cudaPointerGetAttributes(ptr)

    Returns attributes about a specified pointer.

Returns in *attributes the attributes of the pointer ptr. If pointer was not allocated in, mapped by or registered with a context supporting unified addressing, cudaErrorInvalidValue is returned.

The cudaPointerAttributes structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

In this structure, the individual fields mean

• type identifies the type of memory. It can be cudaMemoryTypeUnregistered for unregistered host memory, cudaMemoryTypeHost for registered host memory, cudaMemoryTypeDevice for device memory or cudaMemoryTypeManaged for managed memory.

• device is the device against which ptr was allocated. If ptr has memory type cudaMemoryTypeDevice then this identifies the device on which the memory referred to by ptr physically resides. If ptr has memory type cudaMemoryTypeHost then this identifies the device which was current when the allocation was made (and if that device is deinitialized then this allocation will vanish with that device’s state).

• devicePointer is the device pointer alias through which the memory referred to by ptr may be accessed on the current device. If the memory referred to by ptr cannot be accessed directly by the current device then this is NULL.

• hostPointer is the host pointer alias through which the memory referred to by ptr may be accessed on the host. If the memory referred to by ptr cannot be accessed directly by the host then this is NULL.

    -
    Parameters:
    -

    ptr (Any) – Pointer to get attributes for

    -
    -
    Returns:
    -

    -

    -
    -
    - -

    Notes

    -

    In CUDA 11.0 forward passing host pointer will return cudaMemoryTypeUnregistered in type and call will return cudaSuccess.

    -
    - -
    -
    -
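A minimal sketch of querying these fields from Python; the 1 MiB device allocation below is an illustrative assumption:

    from cuda import cudart

    err, dptr = cudart.cudaMalloc(1 << 20)          # device allocation to inspect
    assert err == cudart.cudaError_t.cudaSuccess
    err, attrs = cudart.cudaPointerGetAttributes(dptr)
    if err == cudart.cudaError_t.cudaSuccess:
        # type/device/devicePointer/hostPointer mirror the fields described above
        print(attrs.type, attrs.device, attrs.devicePointer, attrs.hostPointer)
    err, = cudart.cudaFree(dptr)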

Peer Device Memory Access

    This section describes the peer device memory access functions of the CUDA runtime application programming interface.

cuda.cudart.cudaDeviceCanAccessPeer(int device, int peerDevice)

    Queries if a device may directly access a peer device's memory.

    Returns in *canAccessPeer a value of 1 if device device is capable of directly accessing memory from peerDevice and 0 otherwise. If direct access of peerDevice from device is possible, then access may be enabled by calling cudaDeviceEnablePeerAccess().

    Parameters:
        • device (int) – Device from which allocations on peerDevice are to be directly accessed.
        • peerDevice (int) – Device on which the allocations to be directly accessed by device reside.

    Returns:
        cudaError_t and canAccessPeer (int)
cuda.cudart.cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags)

    Enables direct access to memory allocations on a peer device.

    On success, all allocations from peerDevice will immediately be accessible by the current device. They will remain accessible until access is explicitly disabled using cudaDeviceDisablePeerAccess() or either device is reset using cudaDeviceReset().

    Note that access granted by this call is unidirectional, and that in order to access memory on the current device from peerDevice, a separate symmetric call to cudaDeviceEnablePeerAccess() is required.

    Note that there are both device-wide and system-wide limitations per system configuration, as noted in the CUDA Programming Guide under the section "Peer-to-Peer Memory Access".

    Returns cudaErrorInvalidDevice if cudaDeviceCanAccessPeer() indicates that the current device cannot directly access memory from peerDevice.

    Returns cudaErrorPeerAccessAlreadyEnabled if direct access of peerDevice from the current device has already been enabled.

    Returns cudaErrorInvalidValue if flags is not 0.

    Parameters:
        • peerDevice (int) – Peer device to enable direct access to from the current device
        • flags (unsigned int) – Reserved for future use and must be set to 0

    Returns:
        cudaSuccess, cudaErrorInvalidDevice, cudaErrorPeerAccessAlreadyEnabled, cudaErrorInvalidValue

    Return type:
        cudaError_t
cuda.cudart.cudaDeviceDisablePeerAccess(int peerDevice)

    Disables direct access to memory allocations on a peer device.

    Returns cudaErrorPeerAccessNotEnabled if direct access to memory on peerDevice has not yet been enabled from the current device.

    Parameters:
        peerDevice (int) – Peer device to disable direct access to

    Returns:
        cudaSuccess, cudaErrorPeerAccessNotEnabled, cudaErrorInvalidDevice

    Return type:
        cudaError_t
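A hedged sketch of the symmetric enable/disable pattern described above; the device ordinals 0 and 1 are illustrative assumptions for a system with two peer-capable GPUs:

    from cuda import cudart

    err, can = cudart.cudaDeviceCanAccessPeer(0, 1)
    if err == cudart.cudaError_t.cudaSuccess and can:
        err, = cudart.cudaSetDevice(0)
        err, = cudart.cudaDeviceEnablePeerAccess(1, 0)   # access is unidirectional...
        err, = cudart.cudaSetDevice(1)
        err, = cudart.cudaDeviceEnablePeerAccess(0, 0)   # ...so enable the symmetric direction too
        # later, tear down from each side:
        err, = cudart.cudaDeviceDisablePeerAccess(0)
        err, = cudart.cudaSetDevice(0)
        err, = cudart.cudaDeviceDisablePeerAccess(1)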
OpenGL Interoperability

    This section describes the OpenGL interoperability functions of the CUDA runtime application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability.

class cuda.cudart.cudaGLDeviceList(value)

    CUDA devices corresponding to the current OpenGL context

    cudaGLDeviceListAll = 1

        The CUDA devices for all GPUs used by the current OpenGL context

    cudaGLDeviceListCurrentFrame = 2

        The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame

    cudaGLDeviceListNextFrame = 3

        The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame

cuda.cudart.cudaGLGetDevices(unsigned int cudaDeviceCount, deviceList: cudaGLDeviceList)

    Gets the CUDA devices associated with the current OpenGL context.

    Returns in *pCudaDeviceCount the number of CUDA-compatible devices corresponding to the current OpenGL context. Also returns in *pCudaDevices at most cudaDeviceCount of the CUDA-compatible devices corresponding to the current OpenGL context. If any of the GPUs being used by the current OpenGL context are not CUDA capable, the call will return cudaErrorNoDevice.

    Parameters:
        • cudaDeviceCount (unsigned int) – The size of the output device array pCudaDevices
        • deviceList (cudaGLDeviceList) – The set of devices to return. This set may be cudaGLDeviceListAll for all devices, cudaGLDeviceListCurrentFrame for the devices used to render the current frame (in SLI), or cudaGLDeviceListNextFrame for the devices used to render the next frame (in SLI).

    Returns:
        • cudaError_t – cudaSuccess, cudaErrorNoDevice, cudaErrorInvalidGraphicsContext, cudaErrorUnknown
        • pCudaDeviceCount (unsigned int) – Returned number of CUDA devices corresponding to the current OpenGL context
        • pCudaDevices (List[int]) – Returned CUDA devices corresponding to the current OpenGL context

    Notes

    This function is not supported on Mac OS X.
cuda.cudart.cudaGraphicsGLRegisterImage(image, target, unsigned int flags)

    Register an OpenGL texture or renderbuffer object.

    Registers the texture or renderbuffer object specified by image for access by CUDA. A handle to the registered object is returned as resource.

    target must match the type of the object, and must be one of GL_TEXTURE_2D, GL_TEXTURE_RECTANGLE, GL_TEXTURE_CUBE_MAP, GL_TEXTURE_3D, GL_TEXTURE_2D_ARRAY, or GL_RENDERBUFFER.

    The register flags flags specify the intended usage, as follows:

    • cudaGraphicsRegisterFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.

    • cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA will not write to this resource.

    • cudaGraphicsRegisterFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    • cudaGraphicsRegisterFlagsSurfaceLoadStore: Specifies that CUDA will bind this resource to a surface reference.

    • cudaGraphicsRegisterFlagsTextureGather: Specifies that CUDA will perform texture gather operations on this resource.

    The following image formats are supported. For brevity's sake, the list is abbreviated. For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats {GL_R8, GL_R16, GL_RG8, GL_RG16}:

    • GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY

    • {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}

    • {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}

    The following image classes are currently disallowed:

    • Textures with borders

    • Multisampled renderbuffers

    Parameters:
        • image (GLuint) – name of texture or renderbuffer object to be registered
        • target (GLenum) – Identifies the type of object specified by image
        • flags (unsigned int) – Register flags

    Returns:
        cudaError_t and the registered resource (cudaGraphicsResource)
cuda.cudart.cudaGraphicsGLRegisterBuffer(buffer, unsigned int flags)

    Registers an OpenGL buffer object.

    Registers the buffer object specified by buffer for access by CUDA. A handle to the registered object is returned as resource. The register flags flags specify the intended usage, as follows:

    • cudaGraphicsRegisterFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.

    • cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA will not write to this resource.

    • cudaGraphicsRegisterFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    Parameters:
        • buffer (GLuint) – name of buffer object to be registered
        • flags (unsigned int) – Register flags

    Returns:
        cudaError_t and the registered resource (cudaGraphicsResource)
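A hedged sketch of registering a buffer; gl_buffer is an assumed OpenGL buffer object id created with a current GL context (e.g. via PyOpenGL's glGenBuffers/glBufferData):

    from cuda import cudart

    # WriteDiscard tells CUDA it may overwrite the whole buffer, as described above.
    flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard
    err, resource = cudart.cudaGraphicsGLRegisterBuffer(gl_buffer, flags)
    assert err == cudart.cudaError_t.cudaSuccess
    # ... map, use and unmap `resource` (see Graphics Interoperability below) ...
    err, = cudart.cudaGraphicsUnregisterResource(resource)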
Direct3D 9 Interoperability

Direct3D 10 Interoperability

Direct3D 11 Interoperability

VDPAU Interoperability

    This section describes the VDPAU interoperability functions of the CUDA runtime application programming interface.
cuda.cudart.cudaVDPAUGetDevice(vdpDevice, vdpGetProcAddress)

    Gets the CUDA device associated with a VdpDevice.

    Returns the CUDA device associated with a VdpDevice, if applicable.

    Parameters:
        • vdpDevice (VdpDevice) – A VdpDevice handle
        • vdpGetProcAddress (VdpGetProcAddress) – VDPAU's VdpGetProcAddress function pointer

    Returns:
        • cudaError_t – cudaSuccess
        • device (int) – Returns the device associated with vdpDevice, or -1 if the device associated with vdpDevice is not a compute device.
cuda.cudart.cudaVDPAUSetVDPAUDevice(int device, vdpDevice, vdpGetProcAddress)

    Sets a CUDA device to use VDPAU interoperability.

    Records vdpDevice as the VdpDevice for VDPAU interoperability with the CUDA device device and sets device as the current device for the calling host thread.

    This function will immediately initialize the primary context on device if needed.

    If device has already been initialized then this call will fail with the error cudaErrorSetOnActiveProcess. In this case it is necessary to reset device using cudaDeviceReset() before VDPAU interoperability on device may be enabled.

    Parameters:
        • device (int) – Device to use for VDPAU interoperability
        • vdpDevice (VdpDevice) – The VdpDevice to interoperate with
        • vdpGetProcAddress (VdpGetProcAddress) – VDPAU's VdpGetProcAddress function pointer

    Returns:
        cudaSuccess, cudaErrorInvalidDevice, cudaErrorSetOnActiveProcess

    Return type:
        cudaError_t
cuda.cudart.cudaGraphicsVDPAURegisterVideoSurface(vdpSurface, unsigned int flags)

    Register a VdpVideoSurface object.

    Registers the VdpVideoSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as resource. The surface's intended usage is specified using flags, as follows:

    • cudaGraphicsMapFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.

    • cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to this resource.

    • cudaGraphicsMapFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    Parameters:
        • vdpSurface (VdpVideoSurface) – VDPAU object to be registered
        • flags (unsigned int) – Map flags

    Returns:
        cudaError_t and the registered resource (cudaGraphicsResource)

cuda.cudart.cudaGraphicsVDPAURegisterOutputSurface(vdpSurface, unsigned int flags)

    Register a VdpOutputSurface object.

    Registers the VdpOutputSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as resource. The surface's intended usage is specified using flags, as follows:

    • cudaGraphicsMapFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.

    • cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to this resource.

    • cudaGraphicsMapFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    Parameters:
        • vdpSurface (VdpOutputSurface) – VDPAU object to be registered
        • flags (unsigned int) – Map flags

    Returns:
        cudaError_t and the registered resource (cudaGraphicsResource)
EGL Interoperability

    This section describes the EGL interoperability functions of the CUDA runtime application programming interface.

cuda.cudart.cudaGraphicsEGLRegisterImage(image, unsigned int flags)

    Registers an EGL image.

    Registers the EGLImageKHR specified by image for access by CUDA. A handle to the registered object is returned as pCudaResource. Additional mapping/unmapping is not required for the registered resource, and cudaGraphicsResourceGetMappedEglFrame can be called directly on the pCudaResource.

    The application is responsible for synchronizing access to shared objects. The application must ensure that any pending operations which access the objects have completed before passing control to CUDA. This may be accomplished by issuing and waiting for a glFinish command on all GL contexts (and likewise for other APIs). The application is also responsible for ensuring that any pending operations on the registered CUDA resource have completed prior to executing subsequent commands in other APIs accessing the same memory objects. This can be accomplished by calling cuCtxSynchronize or, preferably, cuEventSynchronize.

    The surface's intended usage is specified using flags, as follows:

    • cudaGraphicsRegisterFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.

    • cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA will not write to this resource.

    • cudaGraphicsRegisterFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    The EGLImageKHR is an object which can be used to create an EGLImage target resource. It is defined as a void pointer: typedef void* EGLImageKHR.

    Parameters:
        • image (EGLImageKHR) – An EGLImageKHR image which can be used to create a target resource.
        • flags (unsigned int) – Map flags

    Returns:
        cudaError_t and the registered resource (pCudaResource)
cuda.cudart.cudaEGLStreamConsumerConnect(eglStream)

    Connect CUDA to EGLStream as a consumer.

    Connects CUDA as a consumer to the EGLStreamKHR specified by eglStream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:
        eglStream (EGLStreamKHR) – EGLStreamKHR handle

    Returns:
        cudaError_t and the consumer connection (cudaEglStreamConnection)

cuda.cudart.cudaEGLStreamConsumerConnectWithFlags(eglStream, unsigned int flags)

    Connect CUDA to EGLStream as a consumer with given flags.

    Connects CUDA as a consumer to the EGLStreamKHR specified by eglStream with the flags specified by cudaEglResourceLocationFlags.

    The flags specify whether the consumer wants to access frames from system memory or video memory. The default is cudaEglResourceLocationVidmem.

    Parameters:
        • eglStream (EGLStreamKHR) – EGLStreamKHR handle
        • flags (unsigned int) – Flags denoting the intended location, system or video.

    Returns:
        cudaError_t and the consumer connection (cudaEglStreamConnection)
cuda.cudart.cudaEGLStreamConsumerDisconnect(conn)

    Disconnect CUDA as a consumer from EGLStream.

    Disconnects CUDA as a consumer from the EGLStreamKHR.

    Parameters:
        conn (cudaEglStreamConnection) – Connection to disconnect.

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
        cudaError_t

cuda.cudart.cudaEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, unsigned int timeout)

    Acquire an image frame from the EGLStream with CUDA as a consumer.

    Acquires an image frame from the EGLStreamKHR. cudaGraphicsResourceGetMappedEglFrame can be called on pCudaResource to get the cudaEglFrame.

    Parameters:
        • conn (cudaEglStreamConnection) – Connection on which to acquire the frame
        • pCudaResource (cudaGraphicsResource_t) – CUDA resource on which the acquired frame is mapped
        • pStream (cudaStream_t) – CUDA stream for synchronization
        • timeout (unsigned int) – Desired timeout for a new frame, in microseconds

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown, cudaErrorLaunchTimeout

    Return type:
        cudaError_t

cuda.cudart.cudaEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream)

    Releases the last frame acquired from the EGLStream.

    Releases the acquired image frame specified by pCudaResource back to the EGLStreamKHR.

    Parameters:
        • conn (cudaEglStreamConnection) – Connection on which to release the frame
        • pCudaResource (cudaGraphicsResource_t) – CUDA resource whose frame is to be released
        • pStream (cudaStream_t) – CUDA stream on which the frame was acquired

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
        cudaError_t
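A heavily hedged sketch of the consumer-side connect/disconnect flow; egl_stream is an assumed EGLStreamKHR handle obtained from EGL with a producer attached, and the acquire/release loop is elided:

    from cuda import cudart

    err, conn = cudart.cudaEGLStreamConsumerConnect(egl_stream)
    assert err == cudart.cudaError_t.cudaSuccess
    # ... loop: cudaEGLStreamConsumerAcquireFrame / process frame /
    #           cudaEGLStreamConsumerReleaseFrame, paired as described above ...
    err, = cudart.cudaEGLStreamConsumerDisconnect(conn)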
cuda.cudart.cudaEGLStreamProducerConnect(eglStream, width, height)

    Connect CUDA to EGLStream as a producer.

    Connects CUDA as a producer to the EGLStreamKHR specified by eglStream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:
        • eglStream (EGLStreamKHR) – EGLStreamKHR handle
        • width (EGLint) – width of the image to be submitted to the stream
        • height (EGLint) – height of the image to be submitted to the stream

    Returns:
        cudaError_t and the producer connection (cudaEglStreamConnection)

cuda.cudart.cudaEGLStreamProducerDisconnect(conn)

    Disconnect CUDA as a producer from EGLStream.

    Disconnects CUDA as a producer from the EGLStreamKHR.

    Parameters:
        conn (cudaEglStreamConnection) – Connection to disconnect.

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
        cudaError_t
cuda.cudart.cudaEGLStreamProducerPresentFrame(conn, cudaEglFrame eglframe: cudaEglFrame, pStream)

    Present a CUDA eglFrame to the EGLStream with CUDA as a producer.

    The cudaEglFrame is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    For a cudaEglFrame of type cudaEglFrameTypePitch, the application may present a sub-region of a memory allocation. In that case, ptr will specify the start address of the sub-region in the allocation and cudaEglPlaneDesc will specify the dimensions of the sub-region.

    Parameters:
        • conn (cudaEglStreamConnection) – Connection on which to present the CUDA array
        • eglframe (cudaEglFrame) – CUDA EGLStream producer frame handle to be sent to the consumer over the EGLStream.
        • pStream (cudaStream_t) – CUDA stream on which to present the frame.

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
        cudaError_t

cuda.cudart.cudaEGLStreamProducerReturnFrame(conn, cudaEglFrame eglframe: Optional[cudaEglFrame], pStream)

    Return the CUDA eglFrame to the EGLStream last released by the consumer.

    This API can potentially return cudaErrorLaunchTimeout if the consumer has not returned a frame to the EGL stream. If a timeout is returned, the application can retry.

    Parameters:
        • conn (cudaEglStreamConnection) – Connection on which to return the CUDA array
        • eglframe (cudaEglFrame) – CUDA EGLStream producer frame handle returned from the consumer over the EGLStream.
        • pStream (cudaStream_t) – CUDA stream on which to return the frame.

    Returns:
        cudaSuccess, cudaErrorLaunchTimeout, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
        cudaError_t
cuda.cudart.cudaGraphicsResourceGetMappedEglFrame(resource, unsigned int index, unsigned int mipLevel)

    Get an eglFrame through which to access a registered EGL graphics resource.

    Returns in *eglFrame an eglFrame pointer through which the registered graphics resource resource may be accessed. This API can only be called for EGL graphics resources.

    The cudaEglFrame is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    Parameters:
        • resource (cudaGraphicsResource_t) – Registered resource to access.
        • index (unsigned int) – Index for cubemap surfaces.
        • mipLevel (unsigned int) – Mipmap level for the subresource to access.

    Returns:
        cudaError_t and the returned eglFrame (cudaEglFrame)

    Notes

    Note that in the case of a multiplanar *eglFrame, only the pitch of the first plane (unsigned int pitch) is to be considered by the application.
cuda.cudart.cudaEventCreateFromEGLSync(eglSync, unsigned int flags)

    Creates an event from an EGLSync object.

    Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified via flags. Valid flags include:

    • cudaEventDefault: Default event creation flag.

    • cudaEventBlockingSync: Specifies that the created event should use blocking synchronization.

    cudaEventRecord and TimingData are not supported for events created from an EGLSync.

    The EGLSyncKHR is an opaque handle to an EGL sync object: typedef void* EGLSyncKHR.

    Parameters:
        • eglSync (EGLSyncKHR) – Opaque handle to EGLSync object
        • flags (unsigned int) – Event creation flags

    Returns:
        cudaError_t and the created event (phEvent, cudaEvent_t)
Graphics Interoperability

    This section describes the graphics interoperability functions of the CUDA runtime application programming interface.

cuda.cudart.cudaGraphicsUnregisterResource(resource)

    Unregisters a graphics resource for access by CUDA.

    Unregisters the graphics resource resource so it is not accessible by CUDA unless registered again.

    If resource is invalid then cudaErrorInvalidResourceHandle is returned.

    Parameters:
        resource (cudaGraphicsResource_t) – Resource to unregister

    Returns:
        cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
        cudaError_t

    See also

    cudaGraphicsD3D9RegisterResource, cudaGraphicsD3D10RegisterResource, cudaGraphicsD3D11RegisterResource, cudaGraphicsGLRegisterBuffer, cudaGraphicsGLRegisterImage, cuGraphicsUnregisterResource
cuda.cudart.cudaGraphicsResourceSetMapFlags(resource, unsigned int flags)

    Set usage flags for mapping a graphics resource.

    Set flags for mapping the graphics resource resource.

    Changes to flags will take effect the next time resource is mapped. The flags argument may be any of the following:

    • cudaGraphicsMapFlagsNone: Specifies no hints about how resource will be used. It is therefore assumed that CUDA may read from or write to resource.

    • cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to resource.

    • cudaGraphicsMapFlagsWriteDiscard: Specifies CUDA will not read from resource and will write over the entire contents of resource, so none of the data previously stored in resource will be preserved.

    If resource is presently mapped for access by CUDA then cudaErrorUnknown is returned. If flags is not one of the above values then cudaErrorInvalidValue is returned.

    Parameters:
        • resource (cudaGraphicsResource_t) – Registered resource to set flags for
        • flags (unsigned int) – Parameters for resource mapping

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
        cudaError_t
cuda.cudart.cudaGraphicsMapResources(int count, resources, stream)

    Map graphics resources for access by CUDA.

    Maps the count graphics resources in resources for access by CUDA.

    The resources in resources may be accessed by CUDA until they are unmapped. The graphics API from which resources were registered should not access any resources while they are mapped by CUDA. If an application does so, the results are undefined.

    This function provides the synchronization guarantee that any graphics calls issued before cudaGraphicsMapResources() will complete before any subsequent CUDA work issued in stream begins.

    If resources contains any duplicate entries then cudaErrorInvalidResourceHandle is returned. If any of resources are presently mapped for access by CUDA then cudaErrorUnknown is returned.

    Parameters:
        • count (int) – Number of resources to map
        • resources (cudaGraphicsResource_t) – Resources to map for CUDA
        • stream (cudaStream_t) – Stream for synchronization

    Returns:
        cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
        cudaError_t

cuda.cudart.cudaGraphicsUnmapResources(int count, resources, stream)

    Unmap graphics resources.

    Unmaps the count graphics resources in resources.

    Once unmapped, the resources in resources may not be accessed by CUDA until they are mapped again.

    This function provides the synchronization guarantee that any CUDA work issued in stream before cudaGraphicsUnmapResources() will complete before any subsequently issued graphics work begins.

    If resources contains any duplicate entries then cudaErrorInvalidResourceHandle is returned. If any of resources are not presently mapped for access by CUDA then cudaErrorUnknown is returned.

    Parameters:
        • count (int) – Number of resources to unmap
        • resources (cudaGraphicsResource_t) – Resources to unmap
        • stream (cudaStream_t) – Stream for synchronization

    Returns:
        cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
        cudaError_t
cuda.cudart.cudaGraphicsResourceGetMappedPointer(resource)

    Get a device pointer through which to access a mapped graphics resource.

    Returns in *devPtr a pointer through which the mapped graphics resource resource may be accessed. Returns in *size the size of the memory in bytes which may be accessed from that pointer. The value set in devPtr may change every time that resource is mapped.

    If resource is not a buffer then it cannot be accessed via a pointer and cudaErrorUnknown is returned. If resource is not mapped then cudaErrorUnknown is returned.

    Parameters:
        resource (cudaGraphicsResource_t) – Mapped resource to access

    Returns:
        • cudaError_t
        • devPtr (Any) – Returned pointer through which resource may be accessed
        • size (int) – Returned size of the buffer accessible starting at devPtr
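A hedged sketch of the map, get-pointer, unmap pattern these functions describe; resource is an assumed buffer-backed cudaGraphicsResource_t (e.g. from cudaGraphicsGLRegisterBuffer above) and stream an existing cudaStream_t:

    from cuda import cudart

    err, = cudart.cudaGraphicsMapResources(1, resource, stream)
    err, dev_ptr, size = cudart.cudaGraphicsResourceGetMappedPointer(resource)
    # ... enqueue CUDA work on `stream` that reads/writes `dev_ptr` ...
    err, = cudart.cudaGraphicsUnmapResources(1, resource, stream)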
cuda.cudart.cudaGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsigned int mipLevel)

    Get an array through which to access a subresource of a mapped graphics resource.

    Returns in *array an array through which the subresource of the mapped graphics resource resource which corresponds to array index arrayIndex and mipmap level mipLevel may be accessed. The value set in array may change every time that resource is mapped.

    If resource is not a texture then it cannot be accessed via an array and cudaErrorUnknown is returned. If arrayIndex is not a valid array index for resource then cudaErrorInvalidValue is returned. If mipLevel is not a valid mipmap level for resource then cudaErrorInvalidValue is returned. If resource is not mapped then cudaErrorUnknown is returned.

    Parameters:
        • resource (cudaGraphicsResource_t) – Mapped resource to access
        • arrayIndex (unsigned int) – Array index for array textures or cubemap face index as defined by cudaGraphicsCubeFace for cubemap textures for the subresource to access
        • mipLevel (unsigned int) – Mipmap level for the subresource to access

    Returns:
        cudaError_t and the returned array (cudaArray_t)

cuda.cudart.cudaGraphicsResourceGetMappedMipmappedArray(resource)

    Get a mipmapped array through which to access a mapped graphics resource.

    Returns in *mipmappedArray a mipmapped array through which the mapped graphics resource resource may be accessed. The value set in mipmappedArray may change every time that resource is mapped.

    If resource is not a texture then it cannot be accessed via an array and cudaErrorUnknown is returned. If resource is not mapped then cudaErrorUnknown is returned.

    Parameters:
        resource (cudaGraphicsResource_t) – Mapped resource to access

    Returns:
        cudaError_t and the returned mipmapped array (cudaMipmappedArray_t)
Texture Object Management

    This section describes the low level texture object management functions of the CUDA runtime application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher.

cuda.cudart.cudaGetChannelDesc(array)

    Get the channel descriptor of an array.

    Returns in *desc the channel descriptor of the CUDA array array.

    Parameters:
        array (cudaArray_const_t) – Memory array on device

    Returns:
        cudaError_t and the channel descriptor (cudaChannelFormatDesc)

    See also

    cudaCreateChannelDesc (C API), cudaCreateTextureObject, cudaCreateSurfaceObject
cuda.cudart.cudaCreateChannelDesc(int x, int y, int z, int w, f: cudaChannelFormatKind)

    Returns a channel descriptor using the specified format.

    Returns a channel descriptor with format f and number of bits of each component x, y, z, and w. The cudaChannelFormatDesc is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where cudaChannelFormatKind is one of cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

    Parameters:
        • x (int) – X component
        • y (int) – Y component
        • z (int) – Z component
        • w (int) – W component
        • f (cudaChannelFormatKind) – Channel format

    Returns:
        • cudaError_t – cudaSuccess
        • cudaChannelFormatDesc – Channel descriptor with format f
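A minimal sketch building a one-channel 32-bit float descriptor and round-tripping it through an array; the 64x64 dimensions are illustrative assumptions:

    from cuda import cudart

    err, desc = cudart.cudaCreateChannelDesc(
        32, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat)
    err, array = cudart.cudaMallocArray(desc, 64, 64, 0)   # width=64, height=64
    err, got = cudart.cudaGetChannelDesc(array)            # recovers the descriptor above
    err, = cudart.cudaFreeArray(array)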
cuda.cudart.cudaCreateTextureObject(cudaResourceDesc pResDesc: Optional[cudaResourceDesc], cudaTextureDesc pTexDesc: Optional[cudaTextureDesc], cudaResourceViewDesc pResViewDesc: Optional[cudaResourceViewDesc])

    Creates a texture object.

    Creates a texture object and returns it in pTexObject. pResDesc describes the data to texture from. pTexDesc describes how the data should be sampled. pResViewDesc is an optional argument that specifies an alternate format for the data described by pResDesc, and also describes the subresource region to restrict access to when texturing. pResViewDesc can only be specified if the type of resource is a CUDA array or a CUDA mipmapped array not in a block compressed format.

    Texture objects are only supported on devices of compute capability 3.0 or higher. Additionally, a texture object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    The cudaResourceDesc structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where resType specifies the type of resource to texture from.

    If resType is set to cudaResourceTypeArray, cudaResourceDesc::res::array::array must be set to a valid CUDA array handle.

    If resType is set to cudaResourceTypeMipmappedArray, cudaResourceDesc::res::mipmap::mipmap must be set to a valid CUDA mipmapped array handle and normalizedCoords must be set to true.

    If resType is set to cudaResourceTypeLinear, cudaResourceDesc::res::linear::devPtr must be set to a valid device pointer that is aligned to textureAlignment. cudaResourceDesc::res::linear::desc describes the format and the number of components per array element. cudaResourceDesc::res::linear::sizeInBytes specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed maxTexture1DLinear. The number of elements is computed as (sizeInBytes / sizeof(desc)).

    If resType is set to cudaResourceTypePitch2D, cudaResourceDesc::res::pitch2D::devPtr must be set to a valid device pointer that is aligned to textureAlignment. cudaResourceDesc::res::pitch2D::desc describes the format and the number of components per array element. cudaResourceDesc::res::pitch2D::width and cudaResourceDesc::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed maxTexture2DLinear[0] and maxTexture2DLinear[1] respectively. cudaResourceDesc::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to texturePitchAlignment. Pitch cannot exceed maxTexture2DLinear[2].

    The cudaTextureDesc struct is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    The cudaResourceViewDesc struct is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:

    • format specifies how the data contained in the CUDA array or CUDA mipmapped array should be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a 32-bit unsigned integer format with 2 or 4 channels, depending on the block compressed format. For example, BC1 and BC4 require the underlying CUDA array to have a 32-bit unsigned int format with 2 channels. The other BC formats require the underlying resource to have the same 32-bit unsigned int format but with 4 channels.

    • width specifies the new width of the texture data. If the resource view format is a block compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, this value has to be equal to that of the original resource.

    • height specifies the new height of the texture data. If the resource view format is a block compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats, this value has to be equal to that of the original resource.

    • depth specifies the new depth of the texture data. This value has to be equal to that of the original resource.

    • firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. For non-mipmapped resources, this value has to be zero. minMipmapLevelClamp and maxMipmapLevelClamp will be relative to this value. For example, if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, then the actual minimum mipmap level clamp will be 3.2.

    • lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value has to be zero.

    • firstLayer specifies the first layer index for layered textures. This will be the new layer zero. For non-layered resources, this value has to be zero.

    • lastLayer specifies the last layer index for layered textures. For non-layered resources, this value has to be zero.

    Parameters:
        • pResDesc (cudaResourceDesc) – Resource descriptor
        • pTexDesc (cudaTextureDesc) – Texture descriptor
        • pResViewDesc (cudaResourceViewDesc) – Resource view descriptor

    Returns:
        cudaError_t and the created texture object (cudaTextureObject_t)
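A hedged sketch of creating a texture object over an array resource; cu_array is an assumed existing cudaArray_t (e.g. from cudaMallocArray above), and the field names follow the structures described in this entry:

    from cuda import cudart

    res_desc = cudart.cudaResourceDesc()
    res_desc.resType = cudart.cudaResourceType.cudaResourceTypeArray
    res_desc.res.array.array = cu_array

    tex_desc = cudart.cudaTextureDesc()
    tex_desc.addressMode[0] = cudart.cudaTextureAddressMode.cudaAddressModeClamp
    tex_desc.addressMode[1] = cudart.cudaTextureAddressMode.cudaAddressModeClamp
    tex_desc.filterMode = cudart.cudaTextureFilterMode.cudaFilterModePoint
    tex_desc.readMode = cudart.cudaTextureReadMode.cudaReadModeElementType
    tex_desc.normalizedCoords = 0

    # pResViewDesc is optional, so None is passed here.
    err, tex = cudart.cudaCreateTextureObject(res_desc, tex_desc, None)
    assert err == cudart.cudaError_t.cudaSuccess
    # ... pass `tex` (a cudaTextureObject_t) to kernels ...
    err, = cudart.cudaDestroyTextureObject(tex)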
cuda.cudart.cudaDestroyTextureObject(texObject)

    Destroys a texture object.

    Destroys the texture object specified by texObject.

    Parameters:
        texObject (cudaTextureObject_t) – Texture object to destroy

    Returns:
        cudaSuccess, cudaErrorInvalidValue

    Return type:
        cudaError_t

cuda.cudart.cudaGetTextureObjectResourceDesc(texObject)

    Returns a texture object's resource descriptor.

    Returns the resource descriptor for the texture object specified by texObject.

    Parameters:
        texObject (cudaTextureObject_t) – Texture object

    Returns:
        cudaError_t and the resource descriptor (cudaResourceDesc)

cuda.cudart.cudaGetTextureObjectTextureDesc(texObject)

    Returns a texture object's texture descriptor.

    Returns the texture descriptor for the texture object specified by texObject.

    Parameters:
        texObject (cudaTextureObject_t) – Texture object

    Returns:
        cudaError_t and the texture descriptor (cudaTextureDesc)

cuda.cudart.cudaGetTextureObjectResourceViewDesc(texObject)

    Returns a texture object's resource view descriptor.

    Returns the resource view descriptor for the texture object specified by texObject. If no resource view was specified, cudaErrorInvalidValue is returned.

    Parameters:
        texObject (cudaTextureObject_t) – Texture object

    Returns:
        cudaError_t and the resource view descriptor (cudaResourceViewDesc)
Surface Object Management

    This section describes the low level surface object management functions of the CUDA runtime application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher.

cuda.cudart.cudaCreateSurfaceObject(cudaResourceDesc pResDesc: Optional[cudaResourceDesc])

    Creates a surface object.

    Creates a surface object and returns it in pSurfObject. pResDesc describes the data to perform surface load/stores on. resType must be cudaResourceTypeArray and cudaResourceDesc::res::array::array must be set to a valid CUDA array handle.

    Surface objects are only supported on devices of compute capability 3.0 or higher. Additionally, a surface object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    Parameters:
        pResDesc (cudaResourceDesc) – Resource descriptor

    Returns:
        cudaError_t and the created surface object (cudaSurfaceObject_t)
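A hedged sketch mirroring the texture-object example above; cu_array is an assumed cudaArray_t allocated with the cudaArraySurfaceLoadStore flag so it can back a surface:

    from cuda import cudart

    res_desc = cudart.cudaResourceDesc()
    res_desc.resType = cudart.cudaResourceType.cudaResourceTypeArray
    res_desc.res.array.array = cu_array
    err, surf = cudart.cudaCreateSurfaceObject(res_desc)
    assert err == cudart.cudaError_t.cudaSuccess
    # ... pass `surf` (a cudaSurfaceObject_t) to kernels for surface load/store ...
    err, = cudart.cudaDestroySurfaceObject(surf)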
cuda.cudart.cudaDestroySurfaceObject(surfObject)

    Destroys a surface object.

    Destroys the surface object specified by surfObject.

    Parameters:
        surfObject (cudaSurfaceObject_t) – Surface object to destroy

    Returns:
        cudaSuccess, cudaErrorInvalidValue

    Return type:
        cudaError_t

cuda.cudart.cudaGetSurfaceObjectResourceDesc(surfObject)

    Returns a surface object's resource descriptor.

    Returns the resource descriptor for the surface object specified by surfObject.

    Parameters:
        surfObject (cudaSurfaceObject_t) – Surface object

    Returns:
        cudaError_t and the resource descriptor (cudaResourceDesc)
Version Management

cuda.cudart.cudaDriverGetVersion()

    Returns the latest version of CUDA supported by the driver.

    Returns in *driverVersion the latest version of CUDA supported by the driver. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020. If no driver is installed, then 0 is returned as the driver version.

    This function automatically returns cudaErrorInvalidValue if driverVersion is NULL.

    Returns:
        cudaError_t and driverVersion (int)

cuda.cudart.cudaRuntimeGetVersion()

    Returns the CUDA Runtime version.

    Returns in *runtimeVersion the version number of the current CUDA Runtime instance. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020.

    As of CUDA 12.0, this function no longer initializes CUDA. The purpose of this API is solely to return a compile-time constant stating the CUDA Toolkit version in the above format.

    This function automatically returns cudaErrorInvalidValue if the runtimeVersion argument is NULL.

    Returns:
        cudaError_t and runtimeVersion (int)

cuda.cudart.getLocalRuntimeVersion()

    Returns the CUDA Runtime version of the local shared library.

    Returns in *runtimeVersion the version number of the current CUDA Runtime instance. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020.

    As of CUDA 12.0, this function no longer initializes CUDA. The purpose of this API is solely to return a compile-time constant stating the CUDA Toolkit version in the above format.

    This function automatically returns cudaErrorInvalidValue if the runtimeVersion argument is NULL.

    Returns:
        cudaError_t and runtimeVersion (int)
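A minimal sketch decoding the (1000 * major + 10 * minor) encoding described above:

    from cuda import cudart

    err, drv = cudart.cudaDriverGetVersion()
    err, rt = cudart.cudaRuntimeGetVersion()
    print(f"driver supports CUDA {drv // 1000}.{(drv % 1000) // 10}")
    print(f"runtime is CUDA {rt // 1000}.{(rt % 1000) // 10}")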
Graph Management

    This section describes the graph management functions of the CUDA runtime application programming interface.

cuda.cudart.cudaGraphCreate(unsigned int flags)

    Creates a graph.

    Creates an empty graph, which is returned via pGraph.

    Parameters:
        flags (unsigned int) – Graph creation flags, must be 0

    Returns:
        cudaError_t and the newly created graph (pGraph, cudaGraph_t)
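A minimal sketch of the create/destroy lifecycle:

    from cuda import cudart

    err, graph = cudart.cudaGraphCreate(0)    # flags must be 0
    assert err == cudart.cudaError_t.cudaSuccess
    # ... add nodes with the functions below ...
    err, = cudart.cudaGraphDestroy(graph)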
cuda.cudart.cudaGraphAddKernelNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])

    Creates a kernel execution node and adds it to a graph.

    Creates a new kernel execution node and adds it to graph with numDependencies dependencies specified via pDependencies and arguments specified in pNodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    The cudaKernelNodeParams structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    When the graph is launched, the node will invoke kernel func on a (gridDim.x x gridDim.y x gridDim.z) grid of blocks. Each block contains (blockDim.x x blockDim.y x blockDim.z) threads.

    sharedMem sets the amount of dynamic shared memory that will be available to each thread block.

    Kernel parameters to func can be specified in one of two ways:

    1) Kernel parameters can be specified via kernelParams. If the kernel has N parameters, then kernelParams needs to be an array of N pointers. Each pointer, from kernelParams[0] to kernelParams[N-1], points to the region of memory from which the actual parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified, as that information is retrieved directly from the kernel's image.

    2) Kernel parameters can also be packaged by the application into a single buffer that is passed in via extra. This places the burden on the application of knowing each kernel parameter's size and alignment/padding within the buffer. The extra parameter exists to allow this function to take additional less commonly used arguments. extra specifies a list of names of extra settings and their corresponding values. Each extra setting name is immediately followed by the corresponding value. The list must be terminated with either NULL or CU_LAUNCH_PARAM_END.

    The error cudaErrorInvalidValue will be returned if kernel parameters are specified with both kernelParams and extra (i.e. both kernelParams and extra are non-NULL).

    The kernelParams or extra array, as well as the argument values it points to, are copied during this call.

    Parameters:
        • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
        • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
        • numDependencies (size_t) – Number of dependencies
        • pNodeParams (cudaKernelNodeParams) – Parameters for the GPU execution node

    Returns:
        cudaError_t and the newly created node (pGraphNode, cudaGraphNode_t)

    Notes

    Kernels launched using graphs must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.
cuda.cudart.cudaGraphKernelNodeGetParams(node)

    Returns a kernel node's parameters.

    Returns the parameters of kernel node node in pNodeParams. The kernelParams or extra array returned in pNodeParams, as well as the argument values it points to, are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cudaGraphKernelNodeSetParams to update the parameters of this node.

    The params will contain either kernelParams or extra, according to which of these was most recently set on the node.

    Parameters:
        node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:
        cudaError_t and the node's parameters (pNodeParams, cudaKernelNodeParams)

    See also

    cudaLaunchKernel, cudaGraphAddKernelNode, cudaGraphKernelNodeSetParams

cuda.cudart.cudaGraphKernelNodeSetParams(node, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])

    Sets a kernel node's parameters.

    Sets the parameters of kernel node node to pNodeParams.

    Parameters:
        • node (CUgraphNode or cudaGraphNode_t) – Node to set the parameters for
        • pNodeParams (cudaKernelNodeParams) – Parameters to copy

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorMemoryAllocation

    Return type:
        cudaError_t

cuda.cudart.cudaGraphKernelNodeCopyAttributes(hSrc, hDst)

    Copies attributes from a source node to a destination node.

    Copies attributes from source node src to destination node dst. Both nodes must have the same context.

    Parameters:
        • hSrc (CUgraphNode or cudaGraphNode_t) – Source node
        • hDst (CUgraphNode or cudaGraphNode_t) – Destination node

    Returns:
        cudaSuccess, cudaErrorInvalidContext

    Return type:
        cudaError_t
cuda.cudart.cudaGraphKernelNodeGetAttribute(hNode, attr: cudaKernelNodeAttrID)

    Queries a node attribute.

    Queries attribute attr from node hNode and stores it in the corresponding member of value_out.

    Parameters:
        • hNode (CUgraphNode or cudaGraphNode_t) – Node to query
        • attr (cudaKernelNodeAttrID) – Attribute to query

    Returns:
        cudaError_t and the attribute value (value_out, cudaKernelNodeAttrValue)

cuda.cudart.cudaGraphKernelNodeSetAttribute(hNode, attr: cudaKernelNodeAttrID, cudaKernelNodeAttrValue value: Optional[cudaKernelNodeAttrValue])

    Sets a node attribute.

    Sets attribute attr on node hNode from the corresponding attribute of value.

    Parameters:
        • hNode (CUgraphNode or cudaGraphNode_t) – Node to modify
        • attr (cudaKernelNodeAttrID) – Attribute to set
        • value (cudaKernelNodeAttrValue) – Attribute value to set

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
cuda.cudart.cudaGraphAddMemcpyNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaMemcpy3DParms pCopyParams: Optional[cudaMemcpy3DParms])

    Creates a memcpy node and adds it to a graph.

    Creates a new memcpy node and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    When the graph is launched, the node will perform the memcpy described by pCopyParams. See cudaMemcpy3D() for a description of the structure and its restrictions.

    Memcpy nodes have some additional restrictions with regards to managed memory, if the system contains at least one device which has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess.

    Parameters:
        • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
        • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
        • numDependencies (size_t) – Number of dependencies
        • pCopyParams (cudaMemcpy3DParms) – Parameters for the memory copy

    Returns:
        cudaError_t and the newly created node (pGraphNode, cudaGraphNode_t)
cuda.cudart.cudaGraphAddMemcpyNode1D(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, dst, src, size_t count, kind: cudaMemcpyKind)

    Creates a 1D memcpy node and adds it to a graph.

    Creates a new 1D memcpy node and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    When the graph is launched, the node will copy count bytes from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. Launching a memcpy node with dst and src pointers that do not match the direction of the copy results in undefined behavior.

    Memcpy nodes have some additional restrictions with regards to managed memory, if the system contains at least one device which has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess.

    Parameters:
        • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
        • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
        • numDependencies (size_t) – Number of dependencies
        • dst (Any) – Destination memory address
        • src (Any) – Source memory address
        • count (size_t) – Size in bytes to copy
        • kind (cudaMemcpyKind) – Type of transfer

    Returns:
        cudaError_t and the newly created node (pGraphNode, cudaGraphNode_t)
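A hedged sketch building a one-node graph around this function, then instantiating and launching it; the buffer size is an illustrative assumption, and the two-argument cudaGraphInstantiate signature assumes a CUDA 12-era binding:

    import numpy as np
    from cuda import cudart

    n = 1024
    host = np.arange(n, dtype=np.float32)
    err, dptr = cudart.cudaMalloc(host.nbytes)
    err, graph = cudart.cudaGraphCreate(0)
    # Root node (no dependencies) copying host -> device when the graph runs.
    err, node = cudart.cudaGraphAddMemcpyNode1D(
        graph, [], 0, dptr, host.ctypes.data, host.nbytes,
        cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    err, exec_graph = cudart.cudaGraphInstantiate(graph, 0)
    err, stream = cudart.cudaStreamCreate()
    err, = cudart.cudaGraphLaunch(exec_graph, stream)
    err, = cudart.cudaStreamSynchronize(stream)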
cuda.cudart.cudaGraphMemcpyNodeGetParams(node)

    Returns a memcpy node's parameters.

    Returns the parameters of memcpy node node in pNodeParams.

    Parameters:
        node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:
        cudaError_t and the node's parameters (pNodeParams, cudaMemcpy3DParms)

cuda.cudart.cudaGraphMemcpyNodeSetParams(node, cudaMemcpy3DParms pNodeParams: Optional[cudaMemcpy3DParms])

    Sets a memcpy node's parameters.

    Sets the parameters of memcpy node node to pNodeParams.

    Parameters:
        • node (CUgraphNode or cudaGraphNode_t) – Node to set the parameters for
        • pNodeParams (cudaMemcpy3DParms) – Parameters to copy

    Returns:
        cudaSuccess, cudaErrorInvalidValue

    Return type:
        cudaError_t

    See also

    cudaGraphNodeSetParams, cudaMemcpy3D, cudaGraphMemcpyNodeSetParamsToSymbol, cudaGraphMemcpyNodeSetParamsFromSymbol, cudaGraphMemcpyNodeSetParams1D, cudaGraphAddMemcpyNode, cudaGraphMemcpyNodeGetParams

cuda.cudart.cudaGraphMemcpyNodeSetParams1D(node, dst, src, size_t count, kind: cudaMemcpyKind)

    Sets a memcpy node's parameters to perform a 1-dimensional copy.

    Sets the parameters of memcpy node node to the copy described by the provided parameters.

    When the graph is launched, the node will copy count bytes from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. Launching a memcpy node with dst and src pointers that do not match the direction of the copy results in undefined behavior.

    Parameters:
        • node (CUgraphNode or cudaGraphNode_t) – Node to set the parameters for
        • dst (Any) – Destination memory address
        • src (Any) – Source memory address
        • count (size_t) – Size in bytes to copy
        • kind (cudaMemcpyKind) – Type of transfer

    Returns:
        cudaSuccess, cudaErrorInvalidValue

    Return type:
        cudaError_t
cuda.cudart.cudaGraphAddMemsetNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaMemsetParams pMemsetParams: Optional[cudaMemsetParams])

    Creates a memset node and adds it to a graph.

    Creates a new memset node and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    The element size must be 1, 2, or 4 bytes. When the graph is launched, the node will perform the memset described by pMemsetParams.

    Parameters:
        • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
        • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
        • numDependencies (size_t) – Number of dependencies
        • pMemsetParams (cudaMemsetParams) – Parameters for the memory set

    Returns:
        cudaError_t and the newly created node (pGraphNode, cudaGraphNode_t)
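A hedged sketch of filling in cudaMemsetParams for this call; dptr and graph are assumed to exist (e.g. from the memcpy-node example above), and the field names follow the cudaMemsetParams structure:

    from cuda import cudart

    params = cudart.cudaMemsetParams()
    params.dst = dptr
    params.value = 0
    params.elementSize = 4          # must be 1, 2, or 4 bytes
    params.width = 256              # elements per row
    params.height = 1               # single row, so pitch is unused
    params.pitch = 0
    err, node = cudart.cudaGraphAddMemsetNode(graph, [], 0, params)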
cuda.cudart.cudaGraphMemsetNodeGetParams(node)

    Returns a memset node's parameters.

    Returns the parameters of memset node node in pNodeParams.

    Parameters:
        node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:
        cudaError_t and the node's parameters (pNodeParams, cudaMemsetParams)

cuda.cudart.cudaGraphMemsetNodeSetParams(node, cudaMemsetParams pNodeParams: Optional[cudaMemsetParams])

    Sets a memset node's parameters.

    Sets the parameters of memset node node to pNodeParams.

    Parameters:
        • node (CUgraphNode or cudaGraphNode_t) – Node to set the parameters for
        • pNodeParams (cudaMemsetParams) – Parameters to copy

    Returns:
        cudaSuccess, cudaErrorInvalidValue

    Return type:
        cudaError_t
    cuda.cudart.cudaGraphAddHostNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])

    Creates a host execution node and adds it to a graph.

    Creates a new CPU execution node and adds it to graph with
    numDependencies dependencies specified via pDependencies and
    arguments specified in pNodeParams. It is possible for
    numDependencies to be 0, in which case the node will be placed at the
    root of the graph. pDependencies may not have any duplicate entries.
    A handle to the new node will be returned in pGraphNode.

    When the graph is launched, the node will invoke the specified CPU
    function. Host nodes are not supported under MPS with pre-Volta GPUs.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • pNodeParams (cudaHostNodeParams) – Parameters for the host node

    Returns:

    cuda.cudart.cudaGraphHostNodeGetParams(node)

    Returns a host node’s parameters.

    Returns the parameters of host node node in pNodeParams.

    Parameters:

    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cudart.cudaGraphHostNodeSetParams(node, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])

    Sets a host node’s parameters.

    Sets the parameters of host node node to pNodeParams.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphAddChildGraphNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, childGraph)

    Creates a child graph node and adds it to a graph.

    Creates a new node which executes an embedded graph, and adds it to
    graph with numDependencies dependencies specified via pDependencies.
    It is possible for numDependencies to be 0, in which case the node
    will be placed at the root of the graph. pDependencies may not have
    any duplicate entries. A handle to the new node will be returned in
    pGraphNode.

    If hGraph contains allocation or free nodes, this call will return an
    error.

    The node executes an embedded child graph. The child graph is cloned
    in this call.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • childGraph (CUgraph or cudaGraph_t) – The graph to clone into this node

    Returns:

    cuda.cudart.cudaGraphChildGraphNodeGetGraph(node)

    Gets a handle to the embedded graph of a child graph node.

    Gets a handle to the embedded graph in a child graph node. This call
    does not clone the graph. Changes to the graph will be reflected in
    the node, and the node retains ownership of the graph.

    Allocation and free nodes cannot be added to the returned graph.
    Attempting to do so will return an error.

    Parameters:

    node (CUgraphNode or cudaGraphNode_t) – Node to get the embedded graph for

    Returns:

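    A short sketch (not part of the reference text) tying the two calls
    above together: the child graph is cloned on insertion, so later
    edits must go through the handle returned by
    cudaGraphChildGraphNodeGetGraph, not the original graph. The
    (err, result) tuple convention is assumed.

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        err, child = cudart.cudaGraphCreate(0)
        check(err)
        err, _ = cudart.cudaGraphAddEmptyNode(child, [], 0)
        check(err)

        err, parent = cudart.cudaGraphCreate(0)
        check(err)
        # The child graph is cloned here; further changes to `child`
        # do not propagate into the node.
        err, childNode = cudart.cudaGraphAddChildGraphNode(parent, [], 0, child)
        check(err)

        # To modify the embedded copy, fetch the node-owned handle.
        err, embedded = cudart.cudaGraphChildGraphNodeGetGraph(childNode)
        check(err)
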
    cuda.cudart.cudaGraphAddEmptyNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies)

    Creates an empty node and adds it to a graph.

    Creates a new node which performs no operation, and adds it to graph
    with numDependencies dependencies specified via pDependencies. It is
    possible for numDependencies to be 0, in which case the node will be
    placed at the root of the graph. pDependencies may not have any
    duplicate entries. A handle to the new node will be returned in
    pGraphNode.

    An empty node performs no operation during execution, but can be used
    for transitive ordering. For example, a phased execution graph with 2
    groups of n nodes with a barrier between them can be represented
    using an empty node and 2*n dependency edges, rather than no empty
    node and n^2 dependency edges.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies

    Returns:

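    A minimal sketch of that 2*n-edge barrier (illustrative only, not
    from the reference; empty nodes stand in for real work, and the
    (err, result) tuple convention is assumed):

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        n = 4
        err, graph = cudart.cudaGraphCreate(0)
        check(err)

        # Phase 1: n independent nodes.
        producers = []
        for _ in range(n):
            err, node = cudart.cudaGraphAddEmptyNode(graph, [], 0)
            check(err)
            producers.append(node)

        # Barrier: one empty node depending on all n producers (n edges in).
        err, barrier = cudart.cudaGraphAddEmptyNode(graph, producers, n)
        check(err)

        # Phase 2: n nodes each depending only on the barrier (n edges
        # out), for 2*n edges total instead of n^2 without the barrier.
        for _ in range(n):
            err, _ = cudart.cudaGraphAddEmptyNode(graph, [barrier], 1)
            check(err)
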
    cuda.cudart.cudaGraphAddEventRecordNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, event)

    Creates an event record node and adds it to a graph.

    Creates a new event record node and adds it to hGraph with
    numDependencies dependencies specified via dependencies and event
    specified in event. It is possible for numDependencies to be 0, in
    which case the node will be placed at the root of the graph.
    dependencies may not have any duplicate entries. A handle to the new
    node will be returned in phGraphNode.

    Each launch of the graph will record event to capture execution of
    the node’s dependencies.

    These nodes may not be used in loops or conditionals.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphEventRecordNodeGetEvent(node)

    Returns the event associated with an event record node.

    Returns the event of event record node hNode in event_out.

    Parameters:

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    Returns:

    cuda.cudart.cudaGraphEventRecordNodeSetEvent(node, event)

    Sets an event record node’s event.

    Sets the event of event record node hNode to event.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphAddEventWaitNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, event)

    Creates an event wait node and adds it to a graph.

    Creates a new event wait node and adds it to hGraph with
    numDependencies dependencies specified via dependencies and event
    specified in event. It is possible for numDependencies to be 0, in
    which case the node will be placed at the root of the graph.
    dependencies may not have any duplicate entries. A handle to the new
    node will be returned in phGraphNode.

    The graph node will wait for all work captured in event. See
    cuEventRecord() for details on what is captured by an event. The
    synchronization will be performed efficiently on the device when
    applicable. event may be from a different context or device than the
    launch stream.

    These nodes may not be used in loops or conditionals.

    Parameters:

    Returns:

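    To illustrate the record/wait pairing above, a hypothetical sketch
    (not from the reference) in which one graph records an event and a
    second graph waits on it, ordering launches of the second behind
    launches of the first. The (err, result) tuple convention is assumed.

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        err, event = cudart.cudaEventCreate()
        check(err)

        # Graph A records the event after its (placeholder) work.
        err, graphA = cudart.cudaGraphCreate(0)
        check(err)
        err, work = cudart.cudaGraphAddEmptyNode(graphA, [], 0)
        check(err)
        err, recNode = cudart.cudaGraphAddEventRecordNode(graphA, [work], 1, event)
        check(err)

        # Graph B begins by waiting on the same event.
        err, graphB = cudart.cudaGraphCreate(0)
        check(err)
        err, waitNode = cudart.cudaGraphAddEventWaitNode(graphB, [], 0, event)
        check(err)

        # The event can be swapped later without rebuilding either graph.
        err, = cudart.cudaGraphEventRecordNodeSetEvent(recNode, event)
        check(err)
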
    cuda.cudart.cudaGraphEventWaitNodeGetEvent(node)

    Returns the event associated with an event wait node.

    Returns the event of event wait node hNode in event_out.

    Parameters:

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    Returns:

    cuda.cudart.cudaGraphEventWaitNodeSetEvent(node, event)

    Sets an event wait node’s event.

    Sets the event of event wait node hNode to event.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphAddExternalSemaphoresSignalNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaExternalSemaphoreSignalNodeParams nodeParams: Optional[cudaExternalSemaphoreSignalNodeParams])

    Creates an external semaphore signal node and adds it to a graph.

    Creates a new external semaphore signal node and adds it to graph
    with numDependencies dependencies specified via dependencies and
    arguments specified in nodeParams. It is possible for
    numDependencies to be 0, in which case the node will be placed at the
    root of the graph. dependencies may not have any duplicate entries. A
    handle to the new node will be returned in pGraphNode.

    Performs a signal operation on a set of externally allocated
    semaphore objects when the node is launched. The operation(s) will
    occur after all of the node’s dependencies have completed.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphExternalSemaphoresSignalNodeGetParams(hNode)

    Returns an external semaphore signal node’s parameters.

    Returns the parameters of an external semaphore signal node hNode in
    params_out. The extSemArray and paramsArray returned in params_out
    are owned by the node. This memory remains valid until the node is
    destroyed or its parameters are modified, and should not be modified
    directly. Use cudaGraphExternalSemaphoresSignalNodeSetParams to
    update the parameters of this node.

    Parameters:

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cudart.cudaGraphExternalSemaphoresSignalNodeSetParams(hNode, cudaExternalSemaphoreSignalNodeParams nodeParams: Optional[cudaExternalSemaphoreSignalNodeParams])

    Sets an external semaphore signal node’s parameters.

    Sets the parameters of an external semaphore signal node hNode to
    nodeParams.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphAddExternalSemaphoresWaitNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaExternalSemaphoreWaitNodeParams nodeParams: Optional[cudaExternalSemaphoreWaitNodeParams])

    Creates an external semaphore wait node and adds it to a graph.

    Creates a new external semaphore wait node and adds it to graph with
    numDependencies dependencies specified via dependencies and
    arguments specified in nodeParams. It is possible for
    numDependencies to be 0, in which case the node will be placed at the
    root of the graph. dependencies may not have any duplicate entries. A
    handle to the new node will be returned in pGraphNode.

    Performs a wait operation on a set of externally allocated semaphore
    objects when the node is launched. The node’s dependencies will not
    be launched until the wait operation has completed.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphExternalSemaphoresWaitNodeGetParams(hNode)

    Returns an external semaphore wait node’s parameters.

    Returns the parameters of an external semaphore wait node hNode in
    params_out. The extSemArray and paramsArray returned in params_out
    are owned by the node. This memory remains valid until the node is
    destroyed or its parameters are modified, and should not be modified
    directly. Use cudaGraphExternalSemaphoresWaitNodeSetParams to update
    the parameters of this node.

    Parameters:

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cudart.cudaGraphExternalSemaphoresWaitNodeSetParams(hNode, cudaExternalSemaphoreWaitNodeParams nodeParams: Optional[cudaExternalSemaphoreWaitNodeParams])

    Sets an external semaphore wait node’s parameters.

    Sets the parameters of an external semaphore wait node hNode to
    nodeParams.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphAddMemAllocNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaMemAllocNodeParams nodeParams: Optional[cudaMemAllocNodeParams])

    Creates an allocation node and adds it to a graph.

    Creates a new allocation node and adds it to graph with
    numDependencies dependencies specified via pDependencies and
    arguments specified in nodeParams. It is possible for
    numDependencies to be 0, in which case the node will be placed at the
    root of the graph. pDependencies may not have any duplicate entries.
    A handle to the new node will be returned in pGraphNode.

    When cudaGraphAddMemAllocNode creates an allocation node, it returns
    the address of the allocation in nodeParams.dptr. The allocation’s
    address remains fixed across instantiations and launches.

    If the allocation is freed in the same graph, by creating a free node
    using cudaGraphAddMemFreeNode, the allocation can be accessed by
    nodes ordered after the allocation node but before the free node.
    These allocations cannot be freed outside the owning graph, and they
    can only be freed once in the owning graph.

    If the allocation is not freed in the same graph, then it can be
    accessed not only by nodes in the graph which are ordered after the
    allocation node, but also by stream operations ordered after the
    graph’s execution but before the allocation is freed.

    Allocations which are not freed in the same graph can be freed by:

    • passing the allocation to cudaFreeAsync or cudaFree;
    • launching a graph with a free node for that allocation; or
    • specifying cudaGraphInstantiateFlagAutoFreeOnLaunch during
    instantiation, which makes each launch behave as though it called
    cudaFreeAsync for every unfreed allocation.

    It is not possible to free an allocation in both the owning graph and
    another graph. If the allocation is freed in the same graph, a free
    node cannot be added to another graph. If the allocation is freed in
    another graph, a free node can no longer be added to the owning
    graph.

    The following restrictions apply to graphs which contain allocation
    and/or memory free nodes:

    • Nodes and edges of the graph cannot be deleted.
    • The graph cannot be used in a child node.
    • Only one instantiation of the graph may exist at any point in time.
    • The graph cannot be cloned.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphMemAllocNodeGetParams(node)

    Returns a memory alloc node’s parameters.

    Returns the parameters of a memory alloc node hNode in params_out.
    The poolProps and accessDescs returned in params_out are owned by
    the node. This memory remains valid until the node is destroyed. The
    returned parameters must not be modified.

    Parameters:

    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cudart.cudaGraphAddMemFreeNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, dptr)

    Creates a memory free node and adds it to a graph.

    Creates a new memory free node and adds it to graph with
    numDependencies dependencies specified via pDependencies and address
    specified in dptr. It is possible for numDependencies to be 0, in
    which case the node will be placed at the root of the graph.
    pDependencies may not have any duplicate entries. A handle to the new
    node will be returned in pGraphNode.

    cudaGraphAddMemFreeNode will return cudaErrorInvalidValue if the user
    attempts to free:

    • an allocation twice in the same graph.
    • an address that was not returned by an allocation node.
    • an invalid address.

    The following restrictions apply to graphs which contain allocation
    and/or memory free nodes:

    • Nodes and edges of the graph cannot be deleted.
    • The graph cannot be used in a child node.
    • Only one instantiation of the graph may exist at any point in time.
    • The graph cannot be cloned.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • dptr (Any) – Address of memory to free

    Returns:

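    As a worked example of the alloc/free pairing, a sketch under the
    (err, result) tuple convention; the field paths on
    cudaMemAllocNodeParams are assumptions worth double-checking against
    the struct reference:

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        err, graph = cudart.cudaGraphCreate(0)
        check(err)

        # Describe a 1 MiB device allocation on device 0.
        params = cudart.cudaMemAllocNodeParams()
        params.bytesize = 1 << 20
        params.poolProps.allocType = \
            cudart.cudaMemAllocationType.cudaMemAllocationTypePinned
        params.poolProps.location.type = \
            cudart.cudaMemLocationType.cudaMemLocationTypeDevice
        params.poolProps.location.id = 0

        err, allocNode = cudart.cudaGraphAddMemAllocNode(graph, [], 0, params)
        check(err)
        # The fixed virtual address lives in params.dptr after the call.
        dptr = params.dptr

        # Nodes ordered between the alloc and free nodes may use dptr;
        # freed in the same graph, it cannot be freed anywhere else.
        err, freeNode = cudart.cudaGraphAddMemFreeNode(graph, [allocNode], 1, dptr)
        check(err)
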
    cuda.cudart.cudaGraphMemFreeNodeGetParams(node)

    Returns a memory free node’s parameters.

    Returns the address of a memory free node hNode in dptr_out.

    Parameters:

    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.cudart.cudaDeviceGraphMemTrim(int device)

    Free unused memory that was cached on the specified device for use with graphs back to the OS.

    Blocks which are not in use by a graph that is either currently
    executing or scheduled to execute are freed back to the operating
    system.

    Parameters:

    device (int) – The device for which cached memory should be freed.

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaDeviceGetGraphMemAttribute(int device, attr: cudaGraphMemAttributeType)

    Query asynchronous allocation attributes related to graphs.

    Valid attributes are:

    Parameters:

    Returns:

    cuda.cudart.cudaDeviceSetGraphMemAttribute(int device, attr: cudaGraphMemAttributeType, value)

    Set asynchronous allocation attributes related to graphs.

    Valid attributes are:

    • cudaGraphMemAttrUsedMemHigh: High watermark of memory, in bytes,
    associated with graphs since the last time it was reset. High
    watermark can only be reset to zero.
    • cudaGraphMemAttrReservedMemHigh: High watermark of memory, in
    bytes, currently allocated for use by the CUDA graphs asynchronous
    allocator.

    Parameters:

    • device (int) – Specifies the scope of the query
    • attr (cudaGraphMemAttributeType) – attribute to get
    • value (Any) – pointer to value to set

    Returns:

    cudaSuccess, cudaErrorInvalidDevice

    Return type:

    cudaError_t

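    A small sketch (not from the reference) of the watermark round trip:
    read cudaGraphMemAttrUsedMemHigh, then reset it to zero via the
    setter. Tuple returns follow the (err, result) convention; the exact
    Python type of the returned value is worth confirming against the
    class reference.

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        attr = cudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh

        # Read the high watermark of graph memory used on device 0.
        err, used_high = cudart.cudaDeviceGetGraphMemAttribute(0, attr)
        check(err)
        print(f"graph mem high watermark: {used_high} bytes")

        # High watermarks may only be reset to zero.
        err, = cudart.cudaDeviceSetGraphMemAttribute(0, attr, 0)
        check(err)
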
    cuda.cudart.cudaGraphClone(originalGraph)

    Clones a graph.

    This function creates a copy of originalGraph and returns it in
    pGraphClone. All parameters are copied into the cloned graph. The
    original graph may be modified after this call without affecting the
    clone.

    Child graph nodes in the original graph are recursively copied into
    the clone.

    Parameters:

    originalGraph (CUgraph or cudaGraph_t) – Graph to clone

    Returns:

    cuda.cudart.cudaGraphNodeFindInClone(originalNode, clonedGraph)

    Finds a cloned version of a node.

    This function returns the node in clonedGraph corresponding to
    originalNode in the original graph.

    clonedGraph must have been cloned from originalGraph via
    cudaGraphClone. originalNode must have been in originalGraph at the
    time of the call to cudaGraphClone, and the corresponding cloned node
    in clonedGraph must not have been removed. The cloned node is then
    returned via pClonedNode.

    Parameters:

    Returns:

    See also

    cudaGraphClone

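    The two calls compose as in this sketch (illustrative; (err, result)
    tuples assumed): clone a graph, then map a node handle from the
    original into the clone.

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        err, graph = cudart.cudaGraphCreate(0)
        check(err)
        err, node = cudart.cudaGraphAddEmptyNode(graph, [], 0)
        check(err)

        err, clone = cudart.cudaGraphClone(graph)
        check(err)

        # Handles are per-graph: `node` belongs to `graph`, so look up
        # its counterpart inside the clone before editing the clone.
        err, clonedNode = cudart.cudaGraphNodeFindInClone(node, clone)
        check(err)
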
    cuda.cudart.cudaGraphNodeGetType(node)

    Returns a node’s type.

    Returns the node type of node in pType.

    Parameters:

    node (CUgraphNode or cudaGraphNode_t) – Node to query

    Returns:

    cuda.cudart.cudaGraphGetNodes(graph, size_t numNodes=0)

    Returns a graph’s nodes.

    Returns a list of graph’s nodes. nodes may be NULL, in which case
    this function will return the number of nodes in numNodes. Otherwise,
    numNodes entries will be filled in. If numNodes is higher than the
    actual number of nodes, the remaining entries in nodes will be set to
    NULL, and the number of nodes actually obtained will be returned in
    numNodes.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphGetRootNodes(graph, size_t pNumRootNodes=0)

    Returns a graph’s root nodes.

    Returns a list of graph’s root nodes. pRootNodes may be NULL, in
    which case this function will return the number of root nodes in
    pNumRootNodes. Otherwise, pNumRootNodes entries will be filled in. If
    pNumRootNodes is higher than the actual number of root nodes, the
    remaining entries in pRootNodes will be set to NULL, and the number
    of nodes actually obtained will be returned in pNumRootNodes.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to query
    • pNumRootNodes (int) – See description

    Returns:

    cuda.cudart.cudaGraphGetEdges(graph, size_t numEdges=0)

    Returns a graph’s dependency edges.

    Returns a list of graph’s dependency edges. Edges are returned via
    corresponding indices in from and to; that is, the node in to[i] has
    a dependency on the node in from[i]. from and to may both be NULL,
    in which case this function only returns the number of edges in
    numEdges. Otherwise, numEdges entries will be filled in. If numEdges
    is higher than the actual number of edges, the remaining entries in
    from and to will be set to NULL, and the number of edges actually
    returned will be written to numEdges.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to get the edges from
    • numEdges (int) – See description

    Returns:

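    The count-then-fetch idiom described above looks like this in a
    sketch (illustrative; in these bindings the count is passed as the
    capacity argument and the filled lists plus actual count come back
    in the tuple, which is worth verifying against the signatures above):

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        err, graph = cudart.cudaGraphCreate(0)
        check(err)
        err, a = cudart.cudaGraphAddEmptyNode(graph, [], 0)
        check(err)
        err, b = cudart.cudaGraphAddEmptyNode(graph, [a], 1)
        check(err)

        # First call with capacity 0 to learn the count ...
        err, _, count = cudart.cudaGraphGetNodes(graph, 0)
        check(err)
        # ... then call again with that capacity to fetch the handles.
        err, nodes, count = cudart.cudaGraphGetNodes(graph, count)
        check(err)

        # Edges: to[i] depends on from[i]; here, b depends on a.
        err, edgesFrom, edgesTo, numEdges = cudart.cudaGraphGetEdges(graph, 1)
        check(err)
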
    cuda.cudart.cudaGraphGetEdges_v2(graph, size_t numEdges=0)

    Returns a graph’s dependency edges (12.3+)

    Returns a list of graph’s dependency edges. Edges are returned via
    corresponding indices in from, to and edgeData; that is, the node in
    to[i] has a dependency on the node in from[i] with data edgeData[i].
    from and to may both be NULL, in which case this function only
    returns the number of edges in numEdges. Otherwise, numEdges entries
    will be filled in. If numEdges is higher than the actual number of
    edges, the remaining entries in from and to will be set to NULL, and
    the number of edges actually returned will be written to numEdges.
    edgeData may alone be NULL, in which case the edges must all have
    default (zeroed) edge data. Attempting a lossy query via NULL
    edgeData will result in cudaErrorLossyQuery. If edgeData is non-NULL
    then from and to must be as well.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to get the edges from
    • numEdges (int) – See description

    Returns:

    cuda.cudart.cudaGraphNodeGetDependencies(node, size_t pNumDependencies=0)

    Returns a node’s dependencies.

    Returns a list of node’s dependencies. pDependencies may be NULL, in
    which case this function will return the number of dependencies in
    pNumDependencies. Otherwise, pNumDependencies entries will be filled
    in. If pNumDependencies is higher than the actual number of
    dependencies, the remaining entries in pDependencies will be set to
    NULL, and the number of nodes actually obtained will be returned in
    pNumDependencies.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphNodeGetDependencies_v2(node, size_t pNumDependencies=0)

    Returns a node’s dependencies (12.3+)

    Returns a list of node’s dependencies. pDependencies may be NULL, in
    which case this function will return the number of dependencies in
    pNumDependencies. Otherwise, pNumDependencies entries will be filled
    in. If pNumDependencies is higher than the actual number of
    dependencies, the remaining entries in pDependencies will be set to
    NULL, and the number of nodes actually obtained will be returned in
    pNumDependencies.

    Note that if an edge has non-zero (non-default) edge data and
    edgeData is NULL, this API will return cudaErrorLossyQuery. If
    edgeData is non-NULL, then pDependencies must be as well.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphNodeGetDependentNodes(node, size_t pNumDependentNodes=0)

    Returns a node’s dependent nodes.

    Returns a list of node’s dependent nodes. pDependentNodes may be
    NULL, in which case this function will return the number of dependent
    nodes in pNumDependentNodes. Otherwise, pNumDependentNodes entries
    will be filled in. If pNumDependentNodes is higher than the actual
    number of dependent nodes, the remaining entries in pDependentNodes
    will be set to NULL, and the number of nodes actually obtained will
    be returned in pNumDependentNodes.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphNodeGetDependentNodes_v2(node, size_t pNumDependentNodes=0)

    Returns a node’s dependent nodes (12.3+)

    Returns a list of node’s dependent nodes. pDependentNodes may be
    NULL, in which case this function will return the number of dependent
    nodes in pNumDependentNodes. Otherwise, pNumDependentNodes entries
    will be filled in. If pNumDependentNodes is higher than the actual
    number of dependent nodes, the remaining entries in pDependentNodes
    will be set to NULL, and the number of nodes actually obtained will
    be returned in pNumDependentNodes.

    Note that if an edge has non-zero (non-default) edge data and
    edgeData is NULL, this API will return cudaErrorLossyQuery. If
    edgeData is non-NULL, then pDependentNodes must be as well.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphAddDependencies(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies)

    Adds dependency edges to a graph.

    The number of dependencies to be added is defined by numDependencies.
    Elements in pFrom and pTo at corresponding indices define a
    dependency. Each node in pFrom and pTo must belong to graph.

    If numDependencies is 0, elements in pFrom and pTo will be ignored.
    Specifying an existing dependency will return an error.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to which dependencies are added
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies
    • to (List[cudaGraphNode_t]) – Array of dependent nodes
    • numDependencies (size_t) – Number of dependencies to be added

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphAddDependencies_v2(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], edgeData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies)

    Adds dependency edges to a graph. (12.3+)

    The number of dependencies to be added is defined by numDependencies.
    Elements in pFrom and pTo at corresponding indices define a
    dependency. Each node in pFrom and pTo must belong to graph.

    If numDependencies is 0, elements in pFrom and pTo will be ignored.
    Specifying an existing dependency will return an error.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph to which dependencies are added
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies
    • to (List[cudaGraphNode_t]) – Array of dependent nodes
    • edgeData (List[cudaGraphEdgeData]) – Optional array of edge data. If NULL, default (zeroed) edge data is assumed.
    • numDependencies (size_t) – Number of dependencies to be added

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphRemoveDependencies(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies)

    Removes dependency edges from a graph.

    The number of pDependencies to be removed is defined by
    numDependencies. Elements in pFrom and pTo at corresponding indices
    define a dependency. Each node in pFrom and pTo must belong to
    graph.

    If numDependencies is 0, elements in pFrom and pTo will be ignored.
    Specifying a non-existing dependency will return an error.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies
    • to (List[cudaGraphNode_t]) – Array of dependent nodes
    • numDependencies (size_t) – Number of dependencies to be removed

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

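    A sketch of the edge round trip (illustrative; (err, ...) tuples
    assumed): add one edge making b depend on a, then remove the same
    edge. Adding an existing edge, or removing a non-existent one,
    returns an error.

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        err, graph = cudart.cudaGraphCreate(0)
        check(err)
        err, a = cudart.cudaGraphAddEmptyNode(graph, [], 0)
        check(err)
        err, b = cudart.cudaGraphAddEmptyNode(graph, [], 0)
        check(err)

        # One edge: b depends on a (to[i] depends on from[i]).
        err, = cudart.cudaGraphAddDependencies(graph, [a], [b], 1)
        check(err)

        # Removing the exact same (from, to) pair undoes it.
        err, = cudart.cudaGraphRemoveDependencies(graph, [a], [b], 1)
        check(err)
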
    cuda.cudart.cudaGraphRemoveDependencies_v2(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], edgeData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies)

    Removes dependency edges from a graph. (12.3+)

    The number of pDependencies to be removed is defined by
    numDependencies. Elements in pFrom and pTo at corresponding indices
    define a dependency. Each node in pFrom and pTo must belong to
    graph.

    If numDependencies is 0, elements in pFrom and pTo will be ignored.
    Specifying an edge that does not exist in the graph, with data
    matching edgeData, results in an error. edgeData is nullable, which
    is equivalent to passing default (zeroed) data for each edge.

    Parameters:

    • graph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies
    • to (List[cudaGraphNode_t]) – Array of dependent nodes
    • edgeData (List[cudaGraphEdgeData]) – Optional array of edge data. If NULL, edge data is assumed to be default (zeroed).
    • numDependencies (size_t) – Number of dependencies to be removed

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphDestroyNode(node)

    Remove a node from the graph.

    Removes node from its graph. This operation also severs any
    dependencies of other nodes on node and vice versa.

    Dependencies cannot be removed from graphs which contain allocation
    or free nodes. Any attempt to do so will return an error.

    Parameters:

    node (CUgraphNode or cudaGraphNode_t) – Node to remove

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphInstantiate(graph, unsigned long long flags)

    Creates an executable graph from a graph.

    Instantiates graph as an executable graph. The graph is validated for
    any structural constraints or intra-node constraints which were not
    previously validated. If instantiation is successful, a handle to the
    instantiated graph is returned in pGraphExec.

    The flags parameter controls the behavior of instantiation and
    subsequent graph launches. Valid flags are:

    • cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a graph
    containing memory allocation nodes to automatically free any unfreed
    memory allocations before the graph is relaunched.
    • cudaGraphInstantiateFlagDeviceLaunch, which configures the graph
    for launch from the device. If this flag is passed, the executable
    graph handle returned can be used to launch the graph from both the
    host and device. This flag cannot be used in conjunction with
    cudaGraphInstantiateFlagAutoFreeOnLaunch.
    • cudaGraphInstantiateFlagUseNodePriority, which causes the graph to
    use the priorities from the per-node attributes rather than the
    priority of the launch stream during execution. Note that priorities
    are only available on kernel nodes, and are copied from stream
    priority during stream capture.

    If graph contains any allocation or free nodes, there can be at most
    one executable graph in existence for that graph at a time. An
    attempt to instantiate a second executable graph before destroying
    the first with cudaGraphExecDestroy will result in an error. The same
    also applies if graph contains any device-updatable kernel nodes.

    Graphs instantiated for launch on the device have additional
    restrictions which do not apply to host graphs:

    • The graph’s nodes must reside on a single device.
    • The graph can only contain kernel nodes, memcpy nodes, memset
    nodes, and child graph nodes.
    • The graph cannot be empty and must contain at least one kernel,
    memcpy, or memset node. Operation-specific restrictions are outlined
    below.
    • Kernel nodes:
      • Use of CUDA Dynamic Parallelism is not permitted.
      • Cooperative launches are permitted as long as MPS is not in use.
    • Memcpy nodes:
      • Only copies involving device memory and/or pinned device-mapped
      host memory are permitted.
      • Copies involving CUDA arrays are not permitted.
      • Both operands must be accessible from the current device, and the
      current device must match the device of other nodes in the graph.

    If graph is not instantiated for launch on the device but contains
    kernels which call device-side cudaGraphLaunch() from multiple
    devices, this will result in an error.

    Parameters:

    Returns:

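    End to end, instantiation composes with the node-building calls
    above roughly as follows (a sketch; cudaGraphLaunch and
    cudaGraphExecDestroy are documented elsewhere in this module, and
    the (err, ...) tuple convention is assumed):

        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        err, graph = cudart.cudaGraphCreate(0)
        check(err)
        err, _ = cudart.cudaGraphAddEmptyNode(graph, [], 0)
        check(err)

        # Validate and bake the graph into an executable form (flags=0).
        err, graphExec = cudart.cudaGraphInstantiate(graph, 0)
        check(err)

        err, stream = cudart.cudaStreamCreate()
        check(err)
        err, = cudart.cudaGraphLaunch(graphExec, stream)
        check(err)
        err, = cudart.cudaStreamSynchronize(stream)
        check(err)

        # The source graph may be destroyed independently of the
        # executable graph.
        err, = cudart.cudaGraphExecDestroy(graphExec)
        check(err)
        err, = cudart.cudaGraphDestroy(graph)
        check(err)
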
    cuda.cudart.cudaGraphInstantiateWithFlags(graph, unsigned long long flags)

    Creates an executable graph from a graph.

    Instantiates graph as an executable graph. The graph is validated for
    any structural constraints or intra-node constraints which were not
    previously validated. If instantiation is successful, a handle to the
    instantiated graph is returned in pGraphExec.

    The flags parameter controls the behavior of instantiation and
    subsequent graph launches. Valid flags are:

    • cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a graph
    containing memory allocation nodes to automatically free any unfreed
    memory allocations before the graph is relaunched.
    • cudaGraphInstantiateFlagDeviceLaunch, which configures the graph
    for launch from the device. If this flag is passed, the executable
    graph handle returned can be used to launch the graph from both the
    host and device. This flag can only be used on platforms which
    support unified addressing. This flag cannot be used in conjunction
    with cudaGraphInstantiateFlagAutoFreeOnLaunch.
    • cudaGraphInstantiateFlagUseNodePriority, which causes the graph to
    use the priorities from the per-node attributes rather than the
    priority of the launch stream during execution. Note that priorities
    are only available on kernel nodes, and are copied from stream
    priority during stream capture.

    If graph contains any allocation or free nodes, there can be at most
    one executable graph in existence for that graph at a time. An
    attempt to instantiate a second executable graph before destroying
    the first with cudaGraphExecDestroy will result in an error. The same
    also applies if graph contains any device-updatable kernel nodes.

    If graph contains kernels which call device-side cudaGraphLaunch()
    from multiple devices, this will result in an error.

    Graphs instantiated for launch on the device have additional
    restrictions which do not apply to host graphs:

    • The graph’s nodes must reside on a single device.
    • The graph can only contain kernel nodes, memcpy nodes, memset
    nodes, and child graph nodes.
    • The graph cannot be empty and must contain at least one kernel,
    memcpy, or memset node. Operation-specific restrictions are outlined
    below.
    • Kernel nodes:
      • Use of CUDA Dynamic Parallelism is not permitted.
      • Cooperative launches are permitted as long as MPS is not in use.
    • Memcpy nodes:
      • Only copies involving device memory and/or pinned device-mapped
      host memory are permitted.
      • Copies involving CUDA arrays are not permitted.
      • Both operands must be accessible from the current device, and the
      current device must match the device of other nodes in the graph.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphInstantiateWithParams(graph, cudaGraphInstantiateParams instantiateParams: Optional[cudaGraphInstantiateParams])

    Creates an executable graph from a graph.

    Instantiates graph as an executable graph according to the
    instantiateParams structure. The graph is validated for any
    structural constraints or intra-node constraints which were not
    previously validated. If instantiation is successful, a handle to the
    instantiated graph is returned in pGraphExec.

    instantiateParams controls the behavior of instantiation and
    subsequent graph launches, as well as returning more detailed
    information in the event of an error. cudaGraphInstantiateParams is
    defined as:

    View CUDA Toolkit Documentation for a C++ code example

    The flags field controls the behavior of instantiation and subsequent
    graph launches. Valid flags are:

    • cudaGraphInstantiateFlagAutoFreeOnLaunch, which configures a graph
    containing memory allocation nodes to automatically free any unfreed
    memory allocations before the graph is relaunched.
    • cudaGraphInstantiateFlagUpload, which will perform an upload of the
    graph into uploadStream once the graph has been instantiated.
    • cudaGraphInstantiateFlagDeviceLaunch, which configures the graph
    for launch from the device. If this flag is passed, the executable
    graph handle returned can be used to launch the graph from both the
    host and device. This flag can only be used on platforms which
    support unified addressing. This flag cannot be used in conjunction
    with cudaGraphInstantiateFlagAutoFreeOnLaunch.
    • cudaGraphInstantiateFlagUseNodePriority, which causes the graph to
    use the priorities from the per-node attributes rather than the
    priority of the launch stream during execution. Note that priorities
    are only available on kernel nodes, and are copied from stream
    priority during stream capture.

    If graph contains any allocation or free nodes, there can be at most
    one executable graph in existence for that graph at a time. An
    attempt to instantiate a second executable graph before destroying
    the first with cudaGraphExecDestroy will result in an error. The same
    also applies if graph contains any device-updatable kernel nodes.

    If graph contains kernels which call device-side cudaGraphLaunch()
    from multiple devices, this will result in an error.

    Graphs instantiated for launch on the device have additional
    restrictions which do not apply to host graphs:

    • The graph’s nodes must reside on a single device.
    • The graph can only contain kernel nodes, memcpy nodes, memset
    nodes, and child graph nodes.
    • The graph cannot be empty and must contain at least one kernel,
    memcpy, or memset node. Operation-specific restrictions are outlined
    below.
    • Kernel nodes:
      • Use of CUDA Dynamic Parallelism is not permitted.
      • Cooperative launches are permitted as long as MPS is not in use.
    • Memcpy nodes:
      • Only copies involving device memory and/or pinned device-mapped
      host memory are permitted.
      • Copies involving CUDA arrays are not permitted.
      • Both operands must be accessible from the current device, and the
      current device must match the device of other nodes in the graph.

    In the event of an error, the result_out and errNode_out fields will
    contain more information about the nature of the error. Possible
    error reporting includes:

    • cudaGraphInstantiateError, if passed an invalid value or if an
    unexpected error occurred which is described by the return value of
    the function. errNode_out will be set to NULL.
    • cudaGraphInstantiateInvalidStructure, if the graph structure is
    invalid. errNode_out will be set to one of the offending nodes.
    • cudaGraphInstantiateNodeOperationNotSupported, if the graph is
    instantiated for device launch but contains a node of an unsupported
    node type, or a node which performs unsupported operations, such as
    use of CUDA dynamic parallelism within a kernel node. errNode_out
    will be set to this node.
    • cudaGraphInstantiateMultipleDevicesNotSupported, if the graph is
    instantiated for device launch but a node’s device differs from that
    of another node. This error can also be returned if a graph is not
    instantiated for device launch and it contains kernels which call
    device-side cudaGraphLaunch() from multiple devices. errNode_out
    will be set to this node.

    If instantiation is successful, result_out will be set to
    cudaGraphInstantiateSuccess, and errNode_out will be set to NULL.

    Parameters:

    Returns:

    cuda.cudart.cudaGraphExecGetFlags(graphExec)

    Query the instantiation flags of an executable graph.

    Returns the flags that were passed to instantiation for the given
    executable graph. cudaGraphInstantiateFlagUpload will not be returned
    by this API as it does not affect the resulting executable graph.

    Parameters:

    graphExec (CUgraphExec or cudaGraphExec_t) – The executable graph to query

    Returns:

    cuda.cudart.cudaGraphExecKernelNodeSetParams(hGraphExec, node, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])

    Sets the parameters for a kernel node in the given graphExec.

    Sets the parameters of a kernel node in an executable graph
    hGraphExec. The node is identified by the corresponding node node in
    the non-executable graph, from which the executable graph was
    instantiated.

    node must not have been removed from the original graph. All
    nodeParams fields may change, but the following restrictions apply to
    func updates:

    • The owning device of the function cannot change.
    • A node whose function originally did not use CUDA dynamic
    parallelism cannot be updated to a function which uses CDP.
    • A node whose function originally did not make device-side update
    calls cannot be updated to a function which makes device-side update
    calls.
    • If hGraphExec was not instantiated for device launch, a node whose
    function originally did not use device-side cudaGraphLaunch() cannot
    be updated to a function which uses device-side cudaGraphLaunch()
    unless the node resides on the same device as nodes which contained
    such calls at instantiate-time. If no such calls were present at
    instantiation, these updates cannot be performed at all.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. node is also not modified by this call.

    If node is a device-updatable kernel node, the next upload/launch of
    hGraphExec will overwrite any previous device-side updates.
    Additionally, applying host updates to a device-updatable kernel node
    while it is being updated from the device will result in undefined
    behavior.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphExecMemcpyNodeSetParams(hGraphExec, node, cudaMemcpy3DParms pNodeParams: Optional[cudaMemcpy3DParms])

    Sets the parameters for a memcpy node in the given graphExec.

    Updates the work represented by node in hGraphExec as though node
    had contained pNodeParams at instantiation. node must remain in the
    graph which was used to instantiate hGraphExec. Changed edges to and
    from node are ignored.

    The source and destination memory in pNodeParams must be allocated
    from the same contexts as the original source and destination memory.
    Both the instantiation-time memory operands and the memory operands
    in pNodeParams must be 1-dimensional. Zero-length operations are not
    supported.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. node is also not modified by this call.

    Returns cudaErrorInvalidValue if the memory operands’ mappings
    changed or either the original or new memory operands are
    multidimensional.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphExecMemcpyNodeSetParams1D(hGraphExec, node, dst, src, size_t count, kind: cudaMemcpyKind)

    Sets the parameters for a memcpy node in the given graphExec to perform a 1-dimensional copy.

    Updates the work represented by node in hGraphExec as though node
    had contained the given params at instantiation. node must remain in
    the graph which was used to instantiate hGraphExec. Changed edges to
    and from node are ignored.

    src and dst must be allocated from the same contexts as the original
    source and destination memory. The instantiation-time memory operands
    must be 1-dimensional. Zero-length operations are not supported.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. node is also not modified by this call.

    Returns cudaErrorInvalidValue if the memory operands’ mappings
    changed or the original memory operands are multidimensional.

    Parameters:

    • hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph in which to set the specified node
    • node (CUgraphNode or cudaGraphNode_t) – Memcpy node from the graph which was used to instantiate graphExec
    • dst (Any) – Destination memory address
    • src (Any) – Source memory address
    • count (size_t) – Size in bytes to copy
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

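    This enables the common double-buffering pattern sketched below
    (illustrative; builds on the cudaGraphAddMemcpyNode1D helper and the
    (err, ...) tuple convention assumed in the earlier sketches):
    instantiate once, then retarget the copy’s source before each launch
    without re-instantiating.

        import numpy as np
        from cuda import cudart

        def check(err):
            assert err == cudart.cudaError_t.cudaSuccess, err

        nbytes = 1 << 16
        bufs = [np.zeros(nbytes, np.uint8), np.ones(nbytes, np.uint8)]
        err, dptr = cudart.cudaMalloc(nbytes)
        check(err)

        err, graph = cudart.cudaGraphCreate(0)
        check(err)
        err, cpyNode = cudart.cudaGraphAddMemcpyNode1D(
            graph, [], 0, dptr, bufs[0].ctypes.data, nbytes,
            cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
        check(err)
        err, graphExec = cudart.cudaGraphInstantiate(graph, 0)
        check(err)

        err, stream = cudart.cudaStreamCreate()
        check(err)
        for i in range(8):
            # Point the instantiated copy at this iteration's host
            # buffer; the node in `graph` itself is not modified.
            err, = cudart.cudaGraphExecMemcpyNodeSetParams1D(
                graphExec, cpyNode, dptr, bufs[i % 2].ctypes.data, nbytes,
                cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
            check(err)
            err, = cudart.cudaGraphLaunch(graphExec, stream)
            check(err)
        err, = cudart.cudaStreamSynchronize(stream)
        check(err)
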
    cuda.cudart.cudaGraphExecMemsetNodeSetParams(hGraphExec, node, cudaMemsetParams pNodeParams: Optional[cudaMemsetParams])

    Sets the parameters for a memset node in the given graphExec.

    Updates the work represented by node in hGraphExec as though node
    had contained pNodeParams at instantiation. node must remain in the
    graph which was used to instantiate hGraphExec. Changed edges to and
    from node are ignored.

    Zero sized operations are not supported.

    The new destination pointer in pNodeParams must be to the same kind
    of allocation as the original destination pointer and have the same
    context association and device mapping as the original destination
    pointer.

    Both the value and pointer address may be updated. Changing other
    aspects of the memset (width, height, element size or pitch) may
    cause the update to be rejected. Specifically, for 2d memsets, all
    dimension changes are rejected. For 1d memsets, changes in height
    are explicitly rejected and other changes are opportunistically
    allowed if the resulting work maps onto the work resources already
    allocated for the node.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. node is also not modified by this call.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphExecHostNodeSetParams(hGraphExec, node, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])

    Sets the parameters for a host node in the given graphExec.

    Updates the work represented by node in hGraphExec as though node
    had contained pNodeParams at instantiation. node must remain in the
    graph which was used to instantiate hGraphExec. Changed edges to and
    from node are ignored.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. node is also not modified by this call.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphExecChildGraphNodeSetParams(hGraphExec, node, childGraph)

    Updates node parameters in the child graph node in the given graphExec.

    Updates the work represented by node in hGraphExec as though the
    nodes contained in node’s graph had the parameters contained in
    childGraph’s nodes at instantiation. node must remain in the graph
    which was used to instantiate hGraphExec. Changed edges to and from
    node are ignored.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. node is also not modified by this call.

    The topology of childGraph, as well as the node insertion order,
    must match that of the graph contained in node. See
    cudaGraphExecUpdate() for a list of restrictions on what can be
    updated in an instantiated graph. The update is recursive, so child
    graph nodes contained within the top level child graph will also be
    updated.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event)

    Sets the event for an event record node in the given graphExec.

    Sets the event of an event record node in an executable graph
    hGraphExec. The node is identified by the corresponding node hNode
    in the non-executable graph, from which the executable graph was
    instantiated.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event)

    Sets the event for an event wait node in the given graphExec.

    Sets the event of an event wait node in an executable graph
    hGraphExec. The node is identified by the corresponding node hNode
    in the non-executable graph, from which the executable graph was
    instantiated.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    cuda.cudart.cudaGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, cudaExternalSemaphoreSignalNodeParams nodeParams: Optional[cudaExternalSemaphoreSignalNodeParams])

    Sets the parameters for an external semaphore signal node in the given graphExec.

    Sets the parameters of an external semaphore signal node in an
    executable graph hGraphExec. The node is identified by the
    corresponding node hNode in the non-executable graph, from which the
    executable graph was instantiated.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Changing nodeParams->numExtSems is not supported.

    Parameters:

    Returns:

    cudaSuccess, cudaErrorInvalidValue

    Return type:

    cudaError_t

    -cuda.cudart.cudaGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, cudaExternalSemaphoreWaitNodeParams nodeParams: Optional[cudaExternalSemaphoreWaitNodeParams])#
    -

    Sets the parameters for an external semaphore wait node in the given graphExec.

    -

    Sets the parameters of an external semaphore wait node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    -

    hNode must not have been removed from the original graph.

    -

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    -

    Changing nodeParams->numExtSems is not supported.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue,

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaGraphNodeSetEnabled(hGraphExec, hNode, unsigned int isEnabled)#
    -

    Enables or disables the specified node in the given graphExec.

    -

    Sets hNode to be either enabled or disabled. Disabled nodes are functionally equivalent to empty nodes until they are reenabled. Existing node parameters are not affected by disabling/enabling the node.

    -

    The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    -

    hNode must not have been removed from the original graph.

    -

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    -
    -
    Parameters:
    -
      -
    • hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph in which to set the specified node

    • -
    • hNode (CUgraphNode or cudaGraphNode_t) – Node from the graph from which graphExec was instantiated

    • -
    • isEnabled (unsigned int) – Node is enabled if != 0, otherwise the node is disabled

    • -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue,

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -

    Notes

    -

    Currently only kernel, memset and memcpy nodes are supported.

    -
    - -
    -
    -cuda.cudart.cudaGraphNodeGetEnabled(hGraphExec, hNode)#
    -

    Query whether a node in the given graphExec is enabled.

    -

    Sets isEnabled to 1 if hNode is enabled, or 0 if hNode is disabled.

    -

    The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    -

    hNode must not have been removed from the original graph.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    -

    -
    -
    - -

    Notes

    -

    Currently only kernel, memset and memcpy nodes are supported.

    -
    - -
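
    A short sketch of toggling a node, assuming graphExec was instantiated from a graph containing the kernel node kNode:

        from cuda import cudart

        # Disable: future launches behave as if kNode were an empty node.
        err, = cudart.cudaGraphNodeSetEnabled(graphExec, kNode, 0)
        err, isEnabled = cudart.cudaGraphNodeGetEnabled(graphExec, kNode)
        assert isEnabled == 0
        # Re-enable: the node's original parameters are untouched.
        err, = cudart.cudaGraphNodeSetEnabled(graphExec, kNode, 1)
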
    -
    -cuda.cudart.cudaGraphExecUpdate(hGraphExec, hGraph)#
    -

    Check whether an executable graph can be updated with a graph and perform the update if possible.

    -

    Updates the node parameters in the instantiated graph specified by hGraphExec with the node parameters in a topologically identical graph specified by hGraph.

    -

    Limitations:

    -
      -
    • Kernel nodes:

      -
        -
      • The owning context of the function cannot change.

      • -
      • A node whose function originally did not use CUDA dynamic parallelism cannot be updated to a function which uses CDP.

      • -
      • A node whose function originally did not make device-side update calls cannot be updated to a function which makes device-side update calls.

      • -
      • A cooperative node cannot be updated to a non-cooperative node, and vice-versa.

      • -
      • If the graph was instantiated with cudaGraphInstantiateFlagUseNodePriority, the priority attribute cannot change. Equality is checked on the originally requested priority values, before they are clamped to the device’s supported range.

      • -
      • If hGraphExec was not instantiated for device launch, a node whose function originally did not use device-side cudaGraphLaunch() cannot be updated to a function which uses device-side cudaGraphLaunch() unless the node resides on the same device as nodes which contained such calls at instantiate-time. If no such calls were present at instantiation, these updates cannot be performed at all.

      • -
      • Neither hGraph nor hGraphExec may contain device-updatable kernel nodes.

      • -
      -
    • -
    • Memset and memcpy nodes:

      -
        -
      • The CUDA device(s) to which the operand(s) was allocated/mapped cannot change.

      • -
      • The source/destination memory must be allocated from the same contexts as the original source/destination memory.

      • -
      • For 2d memsets, only address and assigned value may be updated.

      • -
      • For 1d memsets, updating dimensions is also allowed, but may fail if the resulting operation doesn’t map onto the work resources already allocated for the node.

      • -
      -
    • -
    • Additional memcpy node restrictions:

      -
        -
      • Changing either the source or destination memory type (i.e. CU_MEMORYTYPE_DEVICE, CU_MEMORYTYPE_ARRAY, etc.) is not supported.

      • -
      -
    • -
    • Conditional nodes:

      -
        -
      • Changing node parameters is not supported.

      • -
      • Changing parameters of nodes within the conditional body graph is subject to the rules above.

      • -
      • Conditional handle flags and default values are updated as part of the graph update.

      • -
      -
    • -
    -

    Note: The API may add further restrictions in future releases. The return code should always be checked.

    -

    cudaGraphExecUpdate sets the result member of resultInfo to cudaGraphExecUpdateErrorTopologyChanged under the following conditions:

    -
      -
    • The count of nodes directly in hGraphExec and hGraph differ, in which case resultInfo->errorNode is set to NULL.

    • -
    • hGraph has more exit nodes than hGraphExec, in which case resultInfo->errorNode is set to one of the exit nodes in hGraph.

    • -
    • A node in hGraph has a different number of dependencies than the node from hGraphExec it is paired with, in which case resultInfo->errorNode is set to the node from hGraph.

    • -
    • A node in hGraph has a dependency that does not match with the corresponding dependency of the paired node from hGraphExec. resultInfo->errorNode will be set to the node from hGraph. resultInfo->errorFromNode will be set to the mismatched dependency. The dependencies are paired based on edge order and a dependency does not match when the nodes are already paired based on other edges examined in the graph.

    • -
    -

    cudaGraphExecUpdate sets the result member of resultInfo to:

    -
      -
    • cudaGraphExecUpdateError if passed an invalid value.

    • -
    • cudaGraphExecUpdateErrorTopologyChanged if the graph topology changed

    • -
    • cudaGraphExecUpdateErrorNodeTypeChanged if the type of a node changed, in which case hErrorNode_out is set to the node from hGraph.

    • -
    • cudaGraphExecUpdateErrorFunctionChanged if the function of a kernel node changed (CUDA driver < 11.2)

    • -
    • cudaGraphExecUpdateErrorUnsupportedFunctionChange if the func field of a kernel changed in an unsupported way (see note above), in which case hErrorNode_out is set to the node from hGraph

    • -
    • cudaGraphExecUpdateErrorParametersChanged if any parameters to a node changed in a way that is not supported, in which case hErrorNode_out is set to the node from hGraph

    • -
    • cudaGraphExecUpdateErrorAttributesChanged if any attributes of a node changed in a way that is not supported, in which case hErrorNode_out is set to the node from hGraph

    • -
    • cudaGraphExecUpdateErrorNotSupported if something about a node is unsupported, like the node’s type or configuration, in which case hErrorNode_out is set to the node from hGraph

    • -
    -

    If the update fails for a reason not listed above, the result member of resultInfo will be set to cudaGraphExecUpdateError. If the update succeeds, the result member will be set to cudaGraphExecUpdateSuccess.

    -

    cudaGraphExecUpdate returns cudaSuccess when the update was performed successfully. It returns cudaErrorGraphExecUpdateFailure if the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    -

    -
    -
    -
    -

    See also

    -

    cudaGraphInstantiate

    -
    -
    - -
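
    A minimal sketch of the update-else-reinstantiate pattern; build_graph(v) is a hypothetical helper returning topologically identical graphs that differ only in node parameters:

        from cuda import cudart

        err, graphExec = cudart.cudaGraphInstantiate(build_graph(1), 0)
        err, resultInfo = cudart.cudaGraphExecUpdate(graphExec, build_graph(2))
        ok = cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess
        if resultInfo.result != ok:
            # Topology or an unsupported change: fall back to re-instantiation.
            err, graphExec = cudart.cudaGraphInstantiate(build_graph(2), 0)
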
    -
    -cuda.cudart.cudaGraphUpload(graphExec, stream)#
    -

    Uploads an executable graph in a stream.

    -

    Uploads hGraphExec to the device in hStream without executing it. Uploads of the same hGraphExec will be serialized. Each upload is ordered behind both any previous work in hStream and any previous launches of hGraphExec. Uses memory cached by stream to back the allocations owned by graphExec.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue,

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaGraphLaunch(graphExec, stream)#
    -

    Launches an executable graph in a stream.

    -

    Executes graphExec in stream. Only one instance of graphExec may be executing at a time. Each launch is ordered behind both any previous work in stream and any previous launches of graphExec. To execute a graph concurrently, it must be instantiated multiple times into multiple executable graphs.

    -

    If any allocations created by graphExec remain unfreed (from a previous launch) and graphExec was not instantiated with cudaGraphInstantiateFlagAutoFreeOnLaunch, the launch will fail with cudaErrorInvalidValue.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
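
    A short sketch separating upload cost from launch, assuming graphExec is already instantiated:

        from cuda import cudart

        err, stream = cudart.cudaStreamCreate()
        # Push the executable graph to the device ahead of time...
        err, = cudart.cudaGraphUpload(graphExec, stream)
        # ...so the launch itself performs no upload work.
        err, = cudart.cudaGraphLaunch(graphExec, stream)
        err, = cudart.cudaStreamSynchronize(stream)
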
    -
    -cuda.cudart.cudaGraphExecDestroy(graphExec)#
    -

    Destroys an executable graph.

    -

    Destroys the executable graph specified by graphExec.

    -
    -
    Parameters:
    -

    graphExec (CUgraphExec or cudaGraphExec_t) – Executable graph to destroy

    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaGraphDestroy(graph)#
    -

    Destroys a graph.

    -

    Destroys the graph specified by graph, as well as all of its nodes.

    -
    -
    Parameters:
    -

    graph (CUgraph or cudaGraph_t) – Graph to destroy

    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    -
    -

    See also

    -

    cudaGraphCreate

    -
    -
    - -
    -
    -cuda.cudart.cudaGraphDebugDotPrint(graph, char *path, unsigned int flags)#
    -

    Write a DOT file describing graph structure.

    -

    Using the provided graph, write to path a DOT formatted description of the graph. By default this includes the graph topology, node types, node id, kernel names and memcpy direction. flags can be specified to write more detailed information about each node type such as parameter values, kernel attributes, node and function handles.

    -
    -
    Parameters:
    -
      -
    • graph (CUgraph or cudaGraph_t) – The graph to create a DOT file from

    • -
    • path (bytes) – The path to write the DOT file to

    • -
    • flags (unsigned int) – Flags from cudaGraphDebugDotFlags for specifying which additional node information to write

    • -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue, cudaErrorOperatingSystem

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    -
    - -
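
    A one-call sketch, assuming graph already exists; the path is passed as bytes and the flag comes from cudaGraphDebugDotFlags:

        from cuda import cudart

        err, = cudart.cudaGraphDebugDotPrint(
            graph, b"graph.dot",
            cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose)
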
    -
    -cuda.cudart.cudaUserObjectCreate(ptr, destroy, unsigned int initialRefcount, unsigned int flags)#
    -

    Create a user object.

    -

    Create a user object with the specified destructor callback and initial reference count. The initial references are owned by the caller.

    -

    Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they are executed by a shared internal thread. Another thread may be signaled to perform such actions, if it does not block forward progress of tasks scheduled through CUDA.

    -

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    -
    -
    Parameters:
    -
      -
    • ptr (Any) – The pointer to pass to the destroy function

    • -
    • destroy (cudaHostFn_t) – Callback to free the user object when it is no longer in use

    • -
    • initialRefcount (unsigned int) – The initial refcount to create the object with, typically 1. The initial references are owned by the calling thread.

    • -
    • flags (unsigned int) – Currently it is required to pass cudaUserObjectNoDestructorSync, which is the only defined flag. This indicates that the destroy callback cannot be waited on by any CUDA API. Users requiring synchronization of the callback should signal its completion manually.

    • -
    -
    -
    Returns:
    -

    -

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaUserObjectRetain(object, unsigned int count)#
    -

    Retain a reference to a user object.

    -

    Retains new references to a user object. The new references are owned by the caller.

    -

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    -
    -
    Parameters:
    -
      -
    • object (cudaUserObject_t) – The object to retain

    • -
    • count (unsigned int) – The number of references to retain, typically 1. Must be nonzero and not larger than INT_MAX.

    • -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaUserObjectRelease(object, unsigned int count)#
    -

    Release a reference to a user object.

    -

    Releases user object references owned by the caller. The object’s destructor is invoked if the reference count reaches zero.

    -

    It is undefined behavior to release references not owned by the caller, or to use a user object handle after all references are released.

    -

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    -
    -
    Parameters:
    -
      -
    • object (cudaUserObject_t) – The object to release

    • -
    • count (unsigned int) – The number of references to release, typically 1. Must be nonzero and not larger than INT_MAX.

    • -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaGraphRetainUserObject(graph, object, unsigned int count, unsigned int flags)#
    -

    Retain a reference to a user object from a graph.

    -

    Creates or moves user object references that will be owned by a CUDA graph.

    -

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    -
    -
    Parameters:
    -
      -
    • graph (CUgraph or cudaGraph_t) – The graph to associate the reference with

    • -
    • object (cudaUserObject_t) – The user object to retain a reference for

    • -
    • count (unsigned int) – The number of references to add to the graph, typically 1. Must be nonzero and not larger than INT_MAX.

    • -
    • flags (unsigned int) – The optional flag cudaGraphUserObjectMove transfers references from the calling thread, rather than create new references. Pass 0 to create new references.

    • -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    -
    -

    See also

    -
    -
    cudaUserObjectCreate

    cudaUserObjectRetain, cudaUserObjectRelease, cudaGraphReleaseUserObject, cudaGraphCreate

    -
    -
    -
    -
    - -
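
    A hedged sketch of handing a resource’s lifetime to a graph; resource_ptr and destroy_fn (a cudaHostFn_t-compatible callback) are hypothetical, as is graph:

        from cuda import cudart

        err, obj = cudart.cudaUserObjectCreate(
            resource_ptr, destroy_fn, 1,
            cudart.cudaUserObjectFlags.cudaUserObjectNoDestructorSync)
        # Move the caller's single reference into the graph; the graph now
        # keeps the resource alive for as long as it needs it.
        err, = cudart.cudaGraphRetainUserObject(
            graph, obj, 1,
            cudart.cudaUserObjectRetainFlags.cudaGraphUserObjectMove)
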
    -
    -cuda.cudart.cudaGraphReleaseUserObject(graph, object, unsigned int count)#
    -

    Release a user object reference from a graph.

    -

    Releases user object references owned by a graph.

    -

    See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects.

    -
    -
    Parameters:
    -
      -
    • graph (CUgraph or cudaGraph_t) – The graph that will release the reference

    • -
    • object (cudaUserObject_t) – The user object to release a reference for

    • -
    • count (unsigned int) – The number of references to release, typically 1. Must be nonzero and not larger than INT_MAX.

    • -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    -
    -

    See also

    -
    -
    cudaUserObjectCreate

    cudaUserObjectRetain, cudaUserObjectRelease, cudaGraphRetainUserObject, cudaGraphCreate

    -
    -
    -
    -
    - -
    -
    -cuda.cudart.cudaGraphAddNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    -

    Adds a node of arbitrary type to a graph.

    -

    Creates a new node in graph described by nodeParams with numDependencies dependencies specified via pDependencies. numDependencies may be 0. pDependencies may be null if numDependencies is 0. pDependencies may not have any duplicate entries.

    -

    nodeParams is a tagged union. The node type should be specified in the typename field, and type-specific parameters in the corresponding union member. All unused bytes - that is, reserved0 and all bytes past the utilized union member - must be set to zero. It is recommended to use brace initialization or memset to ensure all bytes are initialized.

    -

    Note that for some node types, nodeParams may contain “out parameters” which are modified during the call, such as nodeParams->alloc.dptr.

    -

    A handle to the new node will be returned in phGraphNode.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    -

    -
    -
    - -
    - -
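
    A hedged sketch of the tagged-union usage for an empty node; the wrapper object is assumed to start zeroed, and the exact attribute name for the node type (type here, typename in the docstring above) should be checked against the installed binding:

        from cuda import cudart

        params = cudart.cudaGraphNodeParams()
        params.type = cudart.cudaGraphNodeType.cudaGraphNodeTypeEmpty  # assumed field name
        err, node = cudart.cudaGraphAddNode(graph, None, 0, params)
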
    -
    -cuda.cudart.cudaGraphAddNode_v2(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], dependencyData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    -

    Adds a node of arbitrary type to a graph (12.3+)

    -

    Creates a new node in graph described by nodeParams with numDependencies dependencies specified via pDependencies. numDependencies may be 0. pDependencies may be null if numDependencies is 0. pDependencies may not have any duplicate entries.

    -

    nodeParams is a tagged union. The node type should be specified in the typename field, and type-specific parameters in the corresponding union member. All unused bytes - that is, reserved0 and all bytes past the utilized union member - must be set to zero. It is recommended to use brace initialization or memset to ensure all bytes are initialized.

    -

    Note that for some node types, nodeParams may contain “out parameters” which are modified during the call, such as nodeParams->alloc.dptr.

    -

    A handle to the new node will be returned in phGraphNode.

    -
    -
    Parameters:
    -
      -
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • -
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node

    • -
    • dependencyData (List[cudaGraphEdgeData]) – Optional edge data for the dependencies. If NULL, the data is assumed to be default (zeroed) for all dependencies.

    • -
    • numDependencies (size_t) – Number of dependencies

    • -
    • nodeParams (cudaGraphNodeParams) – Specification of the node

    • -
    -
    -
    Returns:
    -

    -

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaGraphNodeSetParams(node, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    -

    Updates a graph node’s parameters.

    -

    Sets the parameters of graph node node to nodeParams. The node type specified by nodeParams->type must match the type of node. nodeParams must be fully initialized and all unused bytes (reserved, padding) zeroed.

    -

    Modifying parameters is not supported for node types cudaGraphNodeTypeMemAlloc and cudaGraphNodeTypeMemFree.

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDeviceFunction, cudaErrorNotSupported

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaGraphExecNodeSetParams(graphExec, node, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    -

    Updates a graph node’s parameters in an instantiated graph.

    -

    Sets the parameters of a node in an executable graph graphExec. The node is identified by the corresponding node node in the non-executable graph from which the executable graph was instantiated. node must not have been removed from the original graph.

    -

    The modifications only affect future launches of graphExec. Already enqueued or running launches of graphExec are not affected by this call. node is also not modified by this call.

    -

    Allowed changes to parameters on executable graphs are as follows:

    -

    View CUDA Toolkit Documentation for a table example

    -
    -
    Parameters:
    -
    -
    -
    Returns:
    -

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDeviceFunction, cudaErrorNotSupported

    -
    -
    Return type:
    -

    cudaError_t

    -
    -
    - -
    - -
    -
    -cuda.cudart.cudaGraphConditionalHandleCreate(graph, unsigned int defaultLaunchValue, unsigned int flags)#
    -

    Create a conditional handle.

    -

    Creates a conditional handle associated with hGraph.

    -

    The conditional handle must be associated with a conditional node in this graph or one of its children.

    -

    Handles not associated with a conditional node may cause graph instantiation to fail.

    -
    -
    Parameters:
    -
      -
    • hGraph (CUgraph or cudaGraph_t) – Graph which will contain the conditional node using this handle.

    • -
    • defaultLaunchValue (unsigned int) – Optional initial value for the conditional variable.

    • -
    • flags (unsigned int) – Currently must be cudaGraphCondAssignDefault or 0.

    • -
    -
    -
    Returns:
    -

    -

    -
    -
    -
    -

    See also

    -

    cuGraphAddNode

    -
    -
    - -
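
    A minimal sketch; the literal flag value 1 stands in for cudaGraphCondAssignDefault:

        from cuda import cudart

        err, graph = cudart.cudaGraphCreate(0)
        # defaultLaunchValue=1, so the condition starts true on each launch.
        err, handle = cudart.cudaGraphConditionalHandleCreate(graph, 1, 1)
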
    -
    -

    Driver Entry Point Access#

    -

    This section describes the driver entry point access functions of CUDA runtime application programming interface.

    -
    -
    -cuda.cudart.cudaGetDriverEntryPoint(char *symbol, unsigned long long flags)#
    -

    Returns the requested driver API function pointer.

    -

    Returns in **funcPtr the address of the CUDA driver function for the requested flags.

    -

    For a requested driver symbol, if the CUDA version in which the driver symbol was introduced is less than or equal to the CUDA runtime version, the API will return the function pointer to the corresponding versioned driver function.

    -

    The pointer returned by the API should be cast to a function pointer matching the requested driver function’s definition in the API header file. The function pointer typedef can be picked up from the corresponding typedefs header file. For example, cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h.

    -

    The API will return cudaSuccess and set the returned funcPtr if the requested driver function is valid and supported on the platform.

    -

    The API will return cudaSuccess and set the returned funcPtr to NULL if the requested driver function is not supported on the platform, no ABI compatible driver function exists for the CUDA runtime version or if the driver symbol is invalid.

    -

    It will also set the optional driverStatus to one of the values in cudaDriverEntryPointQueryResult with the following meanings:

    - -

    The requested flags can be:

    -
      -
    • cudaEnableDefault: This is the default mode. This is equivalent to cudaEnablePerThreadDefaultStream if the code is compiled with the --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM is defined; cudaEnableLegacyStream otherwise.

    • -
    • cudaEnableLegacyStream: This will enable the search for all driver symbols that match the requested driver symbol name except the corresponding per-thread versions.

    • -
    • cudaEnablePerThreadDefaultStream: This will enable the search for all driver symbols that match the requested driver symbol name including the per-thread versions. If a per-thread version is not found, the API will return the legacy version of the driver function.

    • -
    -
    -
    Parameters:
    -
      -
    • symbol (bytes) – The base name of the driver API function to look for. As an example, for the driver API cuMemAlloc_v2, symbol would be cuMemAlloc. Note that the API will use the CUDA runtime version to return the address to the most recent ABI compatible driver symbol, cuMemAlloc or cuMemAlloc_v2.

    • -
    • flags (unsigned long long) – Flags to specify search options.

    • -
    -
    -
    Returns:
    -

    -

    -
    -
    -
    -

    See also

    -

    cuGetProcAddress

    -
    -
    - -
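
    A hedged sketch; the binding is assumed to return the pointer and the optional query status alongside the error code:

        from cuda import cudart

        err, funcPtr, status = cudart.cudaGetDriverEntryPoint(
            b"cuMemAlloc", cudart.cudaGetDriverEntryPointFlags.cudaEnableDefault)
        if int(funcPtr) == 0:
            print("driver symbol unavailable:", status)
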
    -
    -cuda.cudart.cudaGetDriverEntryPointByVersion(char *symbol, unsigned int cudaVersion, unsigned long long flags)#
    -

    Returns the requested driver API function pointer by CUDA version.

    -

    Returns in **funcPtr the address of the CUDA driver function for the requested flags and CUDA driver version.

    -

    The CUDA version is specified as (1000 * major + 10 * minor), so CUDA 11.2 should be specified as 11020. For a requested driver symbol, if the specified CUDA version is greater than or equal to the CUDA version in which the driver symbol was introduced, this API will return the function pointer to the corresponding versioned function.

    -

    The pointer returned by the API should be cast to a function pointer matching the requested driver function’s definition in the API header file. The function pointer typedef can be picked up from the corresponding typedefs header file. For example, cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h.

    -

    For the case where the CUDA version requested is greater than the CUDA Toolkit installed, there may not be an appropriate function pointer typedef in the corresponding header file, and a custom typedef may be needed to match the driver function signature returned. This can be done by getting the typedefs from a later toolkit or creating appropriately matching custom function typedefs.

    -

    The API will return cudaSuccess and set the returned funcPtr if the requested driver function is valid and supported on the platform.

    -

    The API will return cudaSuccess and set the returned funcPtr to NULL if the requested driver function is not supported on the platform, no ABI compatible driver function exists for the requested version or if the driver symbol is invalid.

    -

    It will also set the optional driverStatus to one of the values in cudaDriverEntryPointQueryResult with the following meanings:

    - -

    The requested flags can be:

    -
      -
    • cudaEnableDefault: This is the default mode. This is equivalent to cudaEnablePerThreadDefaultStream if the code is compiled with the --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM is defined; cudaEnableLegacyStream otherwise.

    • -
    • cudaEnableLegacyStream: This will enable the search for all driver symbols that match the requested driver symbol name except the corresponding per-thread versions.

    • -
    • cudaEnablePerThreadDefaultStream: This will enable the search for all driver symbols that match the requested driver symbol name including the per-thread versions. If a per-thread version is not found, the API will return the legacy version of the driver function.

    • -
    -
    -
    Parameters:
    -
      -
    • symbol (bytes) – The base name of the driver API function to look for. As an example, for the driver API cuMemAlloc_v2, symbol would be cuMemAlloc.

    • -
    • cudaVersion (unsigned int) – The CUDA version to look for the requested driver symbol

    • -
    • flags (unsigned long long) – Flags to specify search options.

    • -
    -
    -
    Returns:
    -

    -

    -
    -
    -
    -

    See also

    -

    cuGetProcAddress

    -
    -
    - -
    -
    -

    C++ API Routines#

    -

    C++-style interface built on top of the CUDA runtime API.

    -

    This section describes the C++ high level API functions of the CUDA runtime application programming interface. To use these functions, your application needs to be compiled with the nvcc compiler.

    -
    -
    -

    Interactions with the CUDA Driver API#

    -

    This section describes the interactions between the CUDA Driver API and the CUDA Runtime API.

    -

    Primary Contexts

    -

    There exists a one-to-one relationship between CUDA devices in the CUDA Runtime API and ::CUcontext s in the CUDA Driver API within a process. The specific context which the CUDA Runtime API uses for a device is called the device’s primary context. From the perspective of the CUDA Runtime API, a device and its primary context are synonymous.

    -

    Initialization and Tear-Down

    -

    CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current to the calling host thread.

    -

    The function cudaInitDevice() ensures that the primary context is initialized for the requested device but does not make it current to the calling thread.

    -

    The function cudaSetDevice() initializes the primary context for the specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent().
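
    A two-line sketch of the effect, seen from the driver API’s side:

        from cuda import cuda, cudart

        err, = cudart.cudaSetDevice(0)      # initializes and binds the primary context
        err, ctx = cuda.cuCtxGetCurrent()   # the driver API now reports that context
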

    -

    The CUDA Runtime API will automatically initialize the primary context for a device at the first CUDA Runtime API call which requires an active context. If no ::CUcontext is current to the calling thread when a CUDA Runtime API call which requires an active context is made, then the primary context for a device will be selected, made current to the calling thread, and initialized.

    -

    The context which the CUDA Runtime API initializes will be initialized using the parameters specified by the CUDA Runtime API functions cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), ::cudaD3D10SetDirect3DDevice(), ::cudaD3D11SetDirect3DDevice(), cudaGLSetGLDevice(), and cudaVDPAUSetVDPAUDevice(). Note that these functions will fail with cudaErrorSetOnActiveProcess if they are called when the primary context for the specified device has already been initialized. (or if the current device has already been initialized, in the case of cudaSetDeviceFlags()).

    -

    Primary contexts will remain active until they are explicitly deinitialized using cudaDeviceReset(). The function cudaDeviceReset() will deinitialize the primary context for the calling thread’s current device immediately. The context will remain current to all of the threads that it was current to. The next CUDA Runtime API call on any thread which requires an active context will trigger the reinitialization of that device’s primary context.

    -

    Note that primary contexts are shared resources. It is recommended that the primary context not be reset except just before exit or to recover from an unspecified launch failure.

    -

    Context Interoperability

    -

    Note that the use of multiple ::CUcontext s per device within a single process will substantially degrade performance and is strongly discouraged. Instead, it is highly recommended that the implicit one-to-one device-to-context mapping for the process provided by the CUDA Runtime API be used.

    -

    If a non-primary ::CUcontext created by the CUDA Driver API is current to a thread then the CUDA Runtime API calls to that thread will operate on that ::CUcontext, with some exceptions listed below. Interoperability between data types is discussed in the following sections.

    -

    The function cudaPointerGetAttributes() will return the error cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a non-primary context. The function cudaDeviceEnablePeerAccess() and the rest of the peer access API may not be called when a non-primary ::CUcontext is current.

    -
    -

    To use the pointer query and peer access APIs with a context created using the CUDA Driver API, it is necessary that the CUDA Driver API be used to access these features.

    -
    -

    All CUDA Runtime API state (e.g, global variables’ addresses and values) travels with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one thread to another then all CUDA Runtime API state will move to that thread as well.

    -

    Please note that attaching to legacy contexts (those with a version of 3010 as returned by ::cuCtxGetApiVersion()) is not possible. The CUDA Runtime will return cudaErrorIncompatibleDriverContext in such cases.

    -

    Interactions between CUstream and cudaStream_t

    -

    The types ::CUstream and cudaStream_t are identical and may be used interchangeably.
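
    A short sketch passing a runtime-created stream straight to a driver API call:

        from cuda import cuda, cudart

        err, stream = cudart.cudaStreamCreate()
        # cudaStream_t and CUstream refer to the same object underneath.
        err, = cuda.cuStreamSynchronize(stream)
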

    -

    Interactions between CUevent and cudaEvent_t

    -

    The types ::CUevent and cudaEvent_t are identical and may be used interchangeably.

    -

    Interactions between CUarray and cudaArray_t

    -

    The types ::CUarray and struct ::cudaArray * represent the same data type and may be used interchangeably by casting the two types between each other.

    -

    In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *.

    -

    In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray .

    -

    Interactions between CUgraphicsResource and cudaGraphicsResource_t

    -

    The types ::CUgraphicsResource and cudaGraphicsResource_t represent the same data type and may be used interchangeably by casting the two types between each other.

    -

    In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource to a cudaGraphicsResource_t.

    -

    In order to use a cudaGraphicsResource_t in a CUDA Driver API function which takes a ::CUgraphicsResource, it is necessary to explicitly cast the cudaGraphicsResource_t to a ::CUgraphicsResource.

    -

    Interactions between CUtexObject and cudaTextureObject_t

    -

    The types ::CUtexObject and cudaTextureObject_t represent the same data type and may be used interchangeably by casting the two types between each other.

    -

    In order to use a ::CUtexObject in a CUDA Runtime API function which takes a cudaTextureObject_t, it is necessary to explicitly cast the ::CUtexObject to a cudaTextureObject_t.

    -

    In order to use a cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, it is necessary to explicitly cast the cudaTextureObject_t to a ::CUtexObject.

    -

    Interactions between CUsurfObject and cudaSurfaceObject_t

    -

    The types ::CUsurfObject and cudaSurfaceObject_t represent the same data type and may be used interchangeably by casting the two types between each other.

    -

    In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a cudaSurfaceObject_t, it is necessary to explicitly cast the ::CUsurfObject to a cudaSurfaceObject_t.

    -

    In order to use a cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, it is necessary to explicitly cast the cudaSurfaceObject_t to a ::CUsurfObject.

    -

    Interactions between CUfunction and cudaFunction_t

    -

    The types ::CUfunction and cudaFunction_t represent the same data type and may be used interchangeably by casting the two types between each other.

    -

    In order to use a cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, it is necessary to explicitly cast the cudaFunction_t to a ::CUfunction.

    -
    -
    -cuda.cudart.cudaGetKernel(entryFuncAddr)#
    -

    Get pointer to device kernel that matches entry function entryFuncAddr.

    -

    Returns in kernelPtr the device kernel corresponding to the entry function entryFuncAddr.

    -
    -
    Parameters:
    -

    entryFuncAddr (Any) – Address of device entry function to search kernel for

    -
    -
    Returns:
    -

    -

    -
    -
    -
    -

    See also

    -

    cudaGetKernel

    -
    -
    - -
    -
    -

    Data types used by CUDA Runtime#

    -
    -
    -class cuda.cudart.cudaEglPlaneDesc_st(void_ptr _ptr=0)#
    -

    CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame

    -
    -
    -width#
    -

    Width of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -height#
    -

    Height of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -depth#
    -

    Depth of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -pitch#
    -

    Pitch of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -numChannels#
    -

    Number of channels for the plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -channelDesc#
    -

    Channel Format Descriptor

    -
    -
    Type:
    -

    cudaChannelFormatDesc

    -
    -
    -
    - -
    -
    -reserved#
    -

    Reserved for future use

    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaEglFrame_st(void_ptr _ptr=0)#
    -

    CUDA EGLFrame Descriptor - structure defining one frame of EGL. Each frame may contain one or more planes depending on whether the surface is Multiplanar or not. Each plane of an EGLFrame is represented by cudaEglPlaneDesc, which is defined as:

        typedef struct cudaEglPlaneDesc_st {
            unsigned int width;
            unsigned int height;
            unsigned int depth;
            unsigned int pitch;
            unsigned int numChannels;
            struct cudaChannelFormatDesc channelDesc;
            unsigned int reserved[4];
        } cudaEglPlaneDesc;

    -
    -
    -frame#
    -
    -
    Type:
    -

    anon_union10

    -
    -
    -
    - -
    -
    -planeDesc#
    -

    CUDA EGL Plane Descriptor cudaEglPlaneDesc

    -
    -
    Type:
    -

    List[cudaEglPlaneDesc]

    -
    -
    -
    - -
    -
    -planeCount#
    -

    Number of planes

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -frameType#
    -

    Array or Pitch

    -
    -
    Type:
    -

    cudaEglFrameType

    -
    -
    -
    - -
    -
    -eglColorFormat#
    -

    CUDA EGL Color Format

    -
    -
    Type:
    -

    cudaEglColorFormat

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaChannelFormatDesc(void_ptr _ptr=0)#
    -

    CUDA Channel format descriptor

    -
    -
    -x#
    -

    x

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -y#
    -

    y

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -z#
    -

    z

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -w#
    -

    w

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -f#
    -

    Channel format kind

    -
    -
    Type:
    -

    cudaChannelFormatKind

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaArraySparseProperties(void_ptr _ptr=0)#
    -

    Sparse CUDA array and CUDA mipmapped array properties

    -
    -
    -tileExtent#
    -
    -
    Type:
    -

    anon_struct0

    -
    -
    -
    - -
    -
    -miptailFirstLevel#
    -

    First mip level at which the mip tail begins

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -miptailSize#
    -

    Total size of the mip tail.

    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -flags#
    -

    Flags will either be zero or cudaArraySparsePropertiesSingleMipTail

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaArrayMemoryRequirements(void_ptr _ptr=0)#
    -

    CUDA array and CUDA mipmapped array memory requirements

    -
    -
    -size#
    -

    Total size of the array.

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -alignment#
    -

    Alignment necessary for mapping the array.

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -reserved#
    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaPitchedPtr(void_ptr _ptr=0)#
    -

    CUDA Pitched memory pointer ::make_cudaPitchedPtr

    -
    -
    -ptr#
    -

    Pointer to allocated memory

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -pitch#
    -

    Pitch of allocated memory in bytes

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -xsize#
    -

    Logical width of allocation in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -ysize#
    -

    Logical height of allocation in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaExtent(void_ptr _ptr=0)#
    -

    CUDA extent ::make_cudaExtent

    -
    -
    -width#
    -

    Width in elements when referring to array memory, in bytes when referring to linear memory

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -height#
    -

    Height in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -depth#
    -

    Depth in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaPos(void_ptr _ptr=0)#
    -

    CUDA 3D position ::make_cudaPos

    -
    -
    -x#
    -

    x

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -y#
    -

    y

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -z#
    -

    z

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemcpy3DParms(void_ptr _ptr=0)#
    -

    CUDA 3D memory copying parameters

    -
    -
    -srcArray#
    -

    Source memory address

    -
    -
    Type:
    -

    cudaArray_t

    -
    -
    -
    - -
    -
    -srcPos#
    -

    Source position offset

    -
    -
    Type:
    -

    cudaPos

    -
    -
    -
    - -
    -
    -srcPtr#
    -

    Pitched source memory address

    -
    -
    Type:
    -

    cudaPitchedPtr

    -
    -
    -
    - -
    -
    -dstArray#
    -

    Destination memory address

    -
    -
    Type:
    -

    cudaArray_t

    -
    -
    -
    - -
    -
    -dstPos#
    -

    Destination position offset

    -
    -
    Type:
    -

    cudaPos

    -
    -
    -
    - -
    -
    -dstPtr#
    -

    Pitched destination memory address

    -
    -
    Type:
    -

    cudaPitchedPtr

    -
    -
    -
    - -
    -
    -extent#
    -

    Requested memory copy size

    -
    -
    Type:
    -

    cudaExtent

    -
    -
    -
    - -
    -
    -kind#
    -

    Type of transfer

    -
    -
    Type:
    -

    cudaMemcpyKind

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
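
    A hedged sketch of filling the struct for a linear-memory copy; src_dev, dst_dev, pitch, width, width_bytes, height and depth are hypothetical, and the make_* helpers are assumed to return the structs directly:

        from cuda import cudart

        parms = cudart.cudaMemcpy3DParms()
        parms.srcPtr = cudart.make_cudaPitchedPtr(src_dev, pitch, width, height)
        parms.dstPtr = cudart.make_cudaPitchedPtr(dst_dev, pitch, width, height)
        # For linear memory the extent width is given in bytes, not elements.
        parms.extent = cudart.make_cudaExtent(width_bytes, height, depth)
        parms.kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice
        err, = cudart.cudaMemcpy3D(parms)
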
    -
    -class cuda.cudart.cudaMemcpyNodeParams(void_ptr _ptr=0)#
    -

    Memcpy node parameters

    -
    -
    -flags#
    -

    Must be zero

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -reserved#
    -

    Must be zero

    -
    -
    Type:
    -

    List[int]

    -
    -
    -
    - -
    -
    -copyParams#
    -

    Parameters for the memory copy

    -
    -
    Type:
    -

    cudaMemcpy3DParms

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemcpy3DPeerParms(void_ptr _ptr=0)#
    -

    CUDA 3D cross-device memory copying parameters

    -
    -
    -srcArray#
    -

    Source memory address

    -
    -
    Type:
    -

    cudaArray_t

    -
    -
    -
    - -
    -
    -srcPos#
    -

    Source position offset

    -
    -
    Type:
    -

    cudaPos

    -
    -
    -
    - -
    -
    -srcPtr#
    -

    Pitched source memory address

    -
    -
    Type:
    -

    cudaPitchedPtr

    -
    -
    -
    - -
    -
    -srcDevice#
    -

    Source device

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -dstArray#
    -

    Destination memory address

    -
    -
    Type:
    -

    cudaArray_t

    -
    -
    -
    - -
    -
    -dstPos#
    -

    Destination position offset

    -
    -
    Type:
    -

    cudaPos

    -
    -
    -
    - -
    -
    -dstPtr#
    -

    Pitched destination memory address

    -
    -
    Type:
    -

    cudaPitchedPtr

    -
    -
    -
    - -
    -
    -dstDevice#
    -

    Destination device

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -extent#
    -

    Requested memory copy size

    -
    -
    Type:
    -

    cudaExtent

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemsetParams(void_ptr _ptr=0)#
    -

    CUDA Memset node parameters

    -
    -
    -dst#
    -

    Destination device pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -pitch#
    -

    Pitch of destination device pointer. Unused if height is 1

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -value#
    -

    Value to be set

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -elementSize#
    -

    Size of each element in bytes. Must be 1, 2, or 4.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -width#
    -

    Width of the row in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -height#
    -

    Number of rows

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemsetParamsV2(void_ptr _ptr=0)#
    -

    CUDA Memset node parameters

    -
    -
    -dst#
    -

    Destination device pointer

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -pitch#
    -

    Pitch of destination device pointer. Unused if height is 1

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -value#
    -

    Value to be set

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -elementSize#
    -

    Size of each element in bytes. Must be 1, 2, or 4.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -width#
    -

    Width of the row in elements

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -height#
    -

    Number of rows

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaAccessPolicyWindow(void_ptr _ptr=0)#
    -

    Specifies an access policy for a window, a contiguous extent of memory beginning at base_ptr and ending at base_ptr + num_bytes. The window is partitioned into segments assigned such that the sum of “hit segments” divided by the window size is approximately hitRatio, and the sum of “miss segments” divided by the window size is approximately 1 - hitRatio. Segments and ratio specifications are fitted to the capabilities of the architecture. Accesses in a hit segment apply the hitProp access policy; accesses in a miss segment apply the missProp access policy.

    -
    -
    -base_ptr#
    -

    Starting address of the access policy window. CUDA driver may align it.

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -num_bytes#
    -

    Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment.

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -hitRatio#
    -

    hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp.

    -
    -
    Type:
    -

    float

    -
    -
    -
    - -
    -
    -hitProp#
    -

    ::CUaccessProperty set for hit.

    -
    -
    Type:
    -

    cudaAccessProperty

    -
    -
    -
    - -
    -
    -missProp#
    -

    ::CUaccessProperty set for miss. Must be either NORMAL or STREAMING.

    -
    -
    Type:
    -

    cudaAccessProperty

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
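
    A hedged sketch of populating the window; dptr and nbytes are hypothetical, and applying the window (for example through a stream attribute) is outside this struct:

        from cuda import cudart

        win = cudart.cudaAccessPolicyWindow()
        win.base_ptr = dptr       # start of the window (driver may align it)
        win.num_bytes = nbytes    # size of the window
        win.hitRatio = 0.6        # ~60% of lines get hitProp, the rest missProp
        win.hitProp = cudart.cudaAccessProperty.cudaAccessPropertyPersisting
        win.missProp = cudart.cudaAccessProperty.cudaAccessPropertyStreaming
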
    -
    -class cuda.cudart.cudaHostNodeParams(void_ptr _ptr=0)#
    -

    CUDA host node parameters

    -
    -
    -fn#
    -

    The function to call when the node executes

    -
    -
    Type:
    -

    cudaHostFn_t

    -
    -
    -
    - -
    -
    -userData#
    -

    Argument to pass to the function

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaHostNodeParamsV2(void_ptr _ptr=0)#
    -

    CUDA host node parameters

    -
    -
    -fn#
    -

    The function to call when the node executes

    -
    -
    Type:
    -

    cudaHostFn_t

    -
    -
    -
    - -
    -
    -userData#
    -

    Argument to pass to the function

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaResourceDesc(void_ptr _ptr=0)#
    -

    CUDA resource descriptor

    -
    -
    -resType#
    -

    Resource type

    -
    -
    Type:
    -

    cudaResourceType

    -
    -
    -
    - -
    -
    -res#
    -
    -
    Type:
    -

    anon_union0

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaResourceViewDesc(void_ptr _ptr=0)#
    -

    CUDA resource view descriptor

    -
    -
    -format#
    -

    Resource view format

    -
    -
    Type:
    -

    cudaResourceViewFormat

    -
    -
    -
    - -
    -
    -width#
    -

    Width of the resource view

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -height#
    -

    Height of the resource view

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -depth#
    -

    Depth of the resource view

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -firstMipmapLevel#
    -

    First defined mipmap level

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -lastMipmapLevel#
    -

    Last defined mipmap level

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -firstLayer#
    -

    First layer index

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -lastLayer#
    -

    Last layer index

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaPointerAttributes(void_ptr _ptr=0)#
    -

    CUDA pointer attributes

    -
    -
    -type#
    -

    The type of memory - cudaMemoryTypeUnregistered, cudaMemoryTypeHost, cudaMemoryTypeDevice or cudaMemoryTypeManaged.

    -
    -
    Type:
    -

    cudaMemoryType

    -
    -
    -
    - -
    -
    -device#
    -

    The device against which the memory was allocated or registered. If the memory type is cudaMemoryTypeDevice then this identifies the device on which the memory referred physically resides. If the memory type is cudaMemoryTypeHost or ::cudaMemoryTypeManaged then this identifies the device which was current when the memory was allocated or registered (and if that device is deinitialized then this allocation will vanish with that device’s state).

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -devicePointer#
    -

    The address which may be dereferenced on the current device to access the memory or NULL if no such address exists.

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -hostPointer#
    -

    The address which may be dereferenced on the host to access the memory or NULL if no such address exists. CUDA doesn’t check if unregistered memory is allocated, so this field may contain an invalid pointer if an invalid pointer has been passed to CUDA.

    -
    -
    Type:
    -

    Any

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
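
    A short sketch of classifying an arbitrary pointer, assuming ptr is an address previously seen by CUDA:

        from cuda import cudart

        err, attrs = cudart.cudaPointerGetAttributes(ptr)
        if attrs.type == cudart.cudaMemoryType.cudaMemoryTypeDevice:
            print("device memory on device", attrs.device)
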
    -
    -class cuda.cudart.cudaFuncAttributes(void_ptr _ptr=0)#
    -

    CUDA function attributes

    -
    -
    -sharedSizeBytes#
    -

    The size in bytes of statically-allocated shared memory per block required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime.

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -constSizeBytes#
    -

    The size in bytes of user-allocated constant memory required by this function.

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -localSizeBytes#
    -

    The size in bytes of local memory used by each thread of this function.

    -
    -
    Type:
    -

    size_t

    -
    -
    -
    - -
    -
    -maxThreadsPerBlock#
    -

    The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -numRegs#
    -

    The number of registers used by each thread of this function.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -ptxVersion#
    -

    The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -binaryVersion#
    -

    The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -cacheModeCA#
    -

    The attribute to indicate whether the function has been compiled -with user specified option “-Xptxas –dlcm=ca” set.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -maxDynamicSharedSizeBytes#
    -

    The maximum size in bytes of dynamic shared memory per block for -this function. Any launch must have a dynamic shared memory size -smaller than this value.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -preferredShmemCarveout#
    -

    On devices where the L1 cache and shared memory use the same -hardware resources, this sets the shared memory carveout -preference, in percent of the maximum shared memory. Refer to -cudaDevAttrMaxSharedMemoryPerMultiprocessor. This is only a hint, -and the driver can choose a different ratio if required to execute -the function. See cudaFuncSetAttribute

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -clusterDimMustBeSet#
    -

    If this attribute is set, the kernel must launch with a valid -cluster dimension specified.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -requiredClusterWidth#
    -

    The required cluster width/height/depth in blocks. The values must -either all be 0 or all be positive. The validity of the cluster -dimensions is otherwise checked at launch time. If the value is -set during compile time, it cannot be set at runtime. Setting it at -runtime should return cudaErrorNotPermitted. See -cudaFuncSetAttribute

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -requiredClusterHeight#
    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -requiredClusterDepth#
    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -clusterSchedulingPolicyPreference#
    -

    The block scheduling policy of a function. See cudaFuncSetAttribute

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -nonPortableClusterSizeAllowed#
    -

    Whether the function can be launched with non-portable cluster -size. 1 is allowed, 0 is disallowed. A non-portable cluster size -may only function on the specific SKUs the program is tested on. -The launch might fail if the program is run on a different hardware -platform. CUDA API provides cudaOccupancyMaxActiveClusters to -assist with checking whether the desired size can be launched on -the current device. Portable Cluster Size A portable cluster size -is guaranteed to be functional on all compute capabilities higher -than the target compute capability. The portable cluster size for -sm_90 is 8 blocks per cluster. This value may increase for future -compute capabilities. The specific hardware unit may support -higher cluster sizes that’s not guaranteed to be portable. See -cudaFuncSetAttribute

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -reserved#
    -

    Reserved for future use.

    -
    -
    Type:
    -

    List[int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
class cuda.cudart.cudaMemLocation(void_ptr _ptr=0)

    Specifies a memory location. To specify a GPU, set type = cudaMemLocationTypeDevice and set id = the GPU's device ordinal. To specify a CPU NUMA node, set type = cudaMemLocationTypeHostNuma and set id = the host NUMA node id.

    type (cudaMemLocationType): Specifies the location type, which modifies the meaning of id.
    id (int): Identifier for the location; its meaning is given by this location's type (CUmemLocationType).
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaMemAccessDesc(void_ptr _ptr=0)

    Memory access descriptor

    location (cudaMemLocation): Location on which the request is to change its accessibility
    flags (cudaMemAccessFlags): CUmemProt accessibility flags to set on the request
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaMemPoolProps(void_ptr _ptr=0)

    Specifies the properties of allocations made from the pool.

    allocType (cudaMemAllocationType): Allocation type. Currently must be specified as cudaMemAllocationTypePinned
    handleTypes (cudaMemAllocationHandleType): Handle types that will be supported by allocations from the pool.
    location (cudaMemLocation): Location where allocations should reside.
    win32SecurityAttributes (Any): Windows-specific LPSECURITYATTRIBUTES required when cudaMemHandleTypeWin32 is specified. This security attribute defines the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
    maxSize (size_t): Maximum pool size. When set to 0, defaults to a system-dependent value.
    usage (unsigned short): Bitmask indicating intended usage for the pool.
    reserved (bytes): Reserved for future use, must be 0
    getPtr(): Get memory address of class instance
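A minimal sketch of creating a pool from these properties and then granting a second device access through cudaMemAccessDesc; device ordinals 0 and 1 (and the assumption that device 1 exists and is peer-capable) are illustrative, and error checking is elided:

    from cuda import cudart

    # Describe a device-pinned pool on device 0 (assumed to exist).
    props = cudart.cudaMemPoolProps()
    props.allocType = cudart.cudaMemAllocationType.cudaMemAllocationTypePinned
    props.handleTypes = cudart.cudaMemAllocationHandleType.cudaMemHandleTypeNone
    props.location.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice
    props.location.id = 0
    err, pool = cudart.cudaMemPoolCreate(props)

    # Hypothetical peer device 1: allow it to read and write pool allocations.
    desc = cudart.cudaMemAccessDesc()
    desc.location.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice
    desc.location.id = 1
    desc.flags = cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite
    err, = cudart.cudaMemPoolSetAccess(pool, [desc], 1)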
class cuda.cudart.cudaMemPoolPtrExportData(void_ptr _ptr=0)

    Opaque data for exporting a pool allocation

    reserved (bytes)
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaMemAllocNodeParams(void_ptr _ptr=0)

    Memory allocation node parameters

    poolProps (cudaMemPoolProps): in: location where the allocation should reside (specified in location). handleTypes must be cudaMemHandleTypeNone; IPC is not supported.
    accessDescs (cudaMemAccessDesc): in: array of memory access descriptors. Used to describe peer GPU access.
    accessDescCount (size_t): in: number of memory access descriptors. Must not exceed the number of GPUs.
    bytesize (size_t): in: size in bytes of the requested allocation
    dptr (Any): out: address of the allocation returned by CUDA
    getPtr(): Get memory address of class instance
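As a rough sketch (assuming a graph created with cudaGraphCreate, a single device 0, and an arbitrary 1 MiB size; error handling elided), an allocation node can be added with cudaGraphAddMemAllocNode, which fills in dptr with the address the allocation will have when the graph runs:

    from cuda import cudart

    err, graph = cudart.cudaGraphCreate(0)

    params = cudart.cudaMemAllocNodeParams()
    params.poolProps.allocType = cudart.cudaMemAllocationType.cudaMemAllocationTypePinned
    params.poolProps.location.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice
    params.poolProps.location.id = 0
    params.bytesize = 1 << 20  # assumed 1 MiB allocation

    # No dependencies; params.dptr is populated on return.
    err, node = cudart.cudaGraphAddMemAllocNode(graph, None, 0, params)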
class cuda.cudart.cudaMemAllocNodeParamsV2(void_ptr _ptr=0)

    Memory allocation node parameters

    poolProps (cudaMemPoolProps): in: location where the allocation should reside (specified in location). handleTypes must be cudaMemHandleTypeNone; IPC is not supported.
    accessDescs (cudaMemAccessDesc): in: array of memory access descriptors. Used to describe peer GPU access.
    accessDescCount (size_t): in: number of memory access descriptors. Must not exceed the number of GPUs.
    bytesize (size_t): in: size in bytes of the requested allocation
    dptr (Any): out: address of the allocation returned by CUDA
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaMemFreeNodeParams(void_ptr _ptr=0)

    Memory free node parameters

    dptr (Any): in: the pointer to free
    getPtr(): Get memory address of class instance

class cuda.cudart.CUuuid_st(void_ptr _ptr=0)

    bytes (bytes): CUDA definition of UUID
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaDeviceProp(void_ptr _ptr=0)

    CUDA device properties

    name (bytes): ASCII string identifying device
    uuid (cudaUUID_t): 16-byte unique identifier
    luid (bytes): 8-byte locally unique identifier. Value is undefined on TCC and non-Windows platforms
    luidDeviceNodeMask (unsigned int): LUID device node mask. Value is undefined on TCC and non-Windows platforms
    totalGlobalMem (size_t): Global memory available on device in bytes
    sharedMemPerBlock (size_t): Shared memory available per block in bytes
    regsPerBlock (int): 32-bit registers available per block
    warpSize (int): Warp size in threads
    memPitch (size_t): Maximum pitch in bytes allowed by memory copies
    maxThreadsPerBlock (int): Maximum number of threads per block
    maxThreadsDim (List[int]): Maximum size of each dimension of a block
    maxGridSize (List[int]): Maximum size of each dimension of a grid
    clockRate (int): Deprecated, Clock frequency in kilohertz
    totalConstMem (size_t): Constant memory available on device in bytes
    major (int): Major compute capability
    minor (int): Minor compute capability
    textureAlignment (size_t): Alignment requirement for textures
    texturePitchAlignment (size_t): Pitch alignment requirement for texture references bound to pitched memory
    deviceOverlap (int): Device can concurrently copy memory and execute a kernel. Deprecated. Use asyncEngineCount instead.
    multiProcessorCount (int): Number of multiprocessors on device
    kernelExecTimeoutEnabled (int): Deprecated, Specifies whether there is a run time limit on kernels
    integrated (int): Device is integrated as opposed to discrete
    canMapHostMemory (int): Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer
    computeMode (int): Deprecated, Compute mode (See cudaComputeMode)
    maxTexture1D (int): Maximum 1D texture size
    maxTexture1DMipmap (int): Maximum 1D mipmapped texture size
    maxTexture1DLinear (int): Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.
    maxTexture2D (List[int]): Maximum 2D texture dimensions
    maxTexture2DMipmap (List[int]): Maximum 2D mipmapped texture dimensions
    maxTexture2DLinear (List[int]): Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory
    maxTexture2DGather (List[int]): Maximum 2D texture dimensions if texture gather operations have to be performed
    maxTexture3D (List[int]): Maximum 3D texture dimensions
    maxTexture3DAlt (List[int]): Maximum alternate 3D texture dimensions
    maxTextureCubemap (int): Maximum Cubemap texture dimensions
    maxTexture1DLayered (List[int]): Maximum 1D layered texture dimensions
    maxTexture2DLayered (List[int]): Maximum 2D layered texture dimensions
    maxTextureCubemapLayered (List[int]): Maximum Cubemap layered texture dimensions
    maxSurface1D (int): Maximum 1D surface size
    maxSurface2D (List[int]): Maximum 2D surface dimensions
    maxSurface3D (List[int]): Maximum 3D surface dimensions
    maxSurface1DLayered (List[int]): Maximum 1D layered surface dimensions
    maxSurface2DLayered (List[int]): Maximum 2D layered surface dimensions
    maxSurfaceCubemap (int): Maximum Cubemap surface dimensions
    maxSurfaceCubemapLayered (List[int]): Maximum Cubemap layered surface dimensions
    surfaceAlignment (size_t): Alignment requirements for surfaces
    concurrentKernels (int): Device can possibly execute multiple kernels concurrently
    ECCEnabled (int): Device has ECC support enabled
    pciBusID (int): PCI bus ID of the device
    pciDeviceID (int): PCI device ID of the device
    pciDomainID (int): PCI domain ID of the device
    tccDriver (int): 1 if device is a Tesla device using TCC driver, 0 otherwise
    asyncEngineCount (int): Number of asynchronous engines
    unifiedAddressing (int): Device shares a unified address space with the host
    memoryClockRate (int): Deprecated, Peak memory clock frequency in kilohertz
    memoryBusWidth (int): Global memory bus width in bits
    l2CacheSize (int): Size of L2 cache in bytes
    persistingL2CacheMaxSize (int): Device's maximum L2 persisting lines capacity setting in bytes
    maxThreadsPerMultiProcessor (int): Maximum resident threads per multiprocessor
    streamPrioritiesSupported (int): Device supports stream priorities
    globalL1CacheSupported (int): Device supports caching globals in L1
    localL1CacheSupported (int): Device supports caching locals in L1
    sharedMemPerMultiprocessor (size_t): Shared memory available per multiprocessor in bytes
    regsPerMultiprocessor (int): 32-bit registers available per multiprocessor
    managedMemory (int): Device supports allocating managed memory on this system
    isMultiGpuBoard (int): Device is on a multi-GPU board
    multiGpuBoardGroupID (int): Unique identifier for a group of devices on the same multi-GPU board
    hostNativeAtomicSupported (int): Link between the device and the host supports native atomic operations
    singleToDoublePrecisionPerfRatio (int): Deprecated, Ratio of single precision performance (in floating-point operations per second) to double precision performance
    pageableMemoryAccess (int): Device supports coherently accessing pageable memory without calling cudaHostRegister on it
    concurrentManagedAccess (int): Device can coherently access managed memory concurrently with the CPU
    computePreemptionSupported (int): Device supports Compute Preemption
    canUseHostPointerForRegisteredMem (int): Device can access host registered memory at the same virtual address as the CPU
    cooperativeLaunch (int): Device supports launching cooperative kernels via cudaLaunchCooperativeKernel
    cooperativeMultiDeviceLaunch (int): Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated.
    sharedMemPerBlockOptin (size_t): Per device maximum shared memory per block usable by special opt in
    pageableMemoryAccessUsesHostPageTables (int): Device accesses pageable memory via the host's page tables
    directManagedMemAccessFromHost (int): Host can directly access managed memory on the device without migration.
    maxBlocksPerMultiProcessor (int): Maximum number of resident blocks per multiprocessor
    accessPolicyMaxWindowSize (int): The maximum value of cudaAccessPolicyWindow::num_bytes.
    reservedSharedMemPerBlock (size_t): Shared memory reserved by CUDA driver per block in bytes
    hostRegisterSupported (int): Device supports host memory registration via cudaHostRegister.
    sparseCudaArraySupported (int): 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise
    hostRegisterReadOnlySupported (int): Device supports using the cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU
    timelineSemaphoreInteropSupported (int): External timeline semaphore interop is supported on the device
    memoryPoolsSupported (int): 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise
    gpuDirectRDMASupported (int): 1 if the device supports GPUDirect RDMA APIs, 0 otherwise
    gpuDirectRDMAFlushWritesOptions (unsigned int): Bitmask to be interpreted according to the cudaFlushGPUDirectRDMAWritesOptions enum
    gpuDirectRDMAWritesOrdering (int): See the cudaGPUDirectRDMAWritesOrdering enum for numerical values
    memoryPoolSupportedHandleTypes (unsigned int): Bitmask of handle types supported with mempool-based IPC
    deferredMappingCudaArraySupported (int): 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays
    ipcEventSupported (int): Device supports IPC Events.
    clusterLaunch (int): Indicates device supports cluster launch
    unifiedFunctionPointers (int): Indicates device supports unified pointers
    reserved2 (List[int])
    reserved1 (List[int]): Reserved for future use
    reserved (List[int]): Reserved for future use
    getPtr(): Get memory address of class instance
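As a quick usage sketch (device ordinal 0 assumed present; error checking otherwise elided), the struct is returned by cudaGetDeviceProperties:

    from cuda import cudart

    err, prop = cudart.cudaGetDeviceProperties(0)
    assert err == cudart.cudaError_t.cudaSuccess
    # name is a fixed-size char array, so strip the trailing NULs.
    print(prop.name.decode(errors="ignore").rstrip("\x00"))
    print(f"compute capability {prop.major}.{prop.minor}, "
          f"{prop.totalGlobalMem / 2**30:.1f} GiB global memory, "
          f"{prop.multiProcessorCount} SMs")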
class cuda.cudart.cudaIpcEventHandle_st(void_ptr _ptr=0)

    CUDA IPC event handle

    reserved (bytes)
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaIpcMemHandle_st(void_ptr _ptr=0)

    CUDA IPC memory handle

    reserved (bytes)
    getPtr(): Get memory address of class instance
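A rough sketch of the intended flow: the exporting and importing sides are separate processes, and how the opaque handle bytes travel between them (a pipe, a socket) is up to the application; error checking is elided:

    from cuda import cudart

    # Exporting process: allocate device memory and derive an IPC handle.
    err, dptr = cudart.cudaMalloc(1 << 20)
    err, handle = cudart.cudaIpcGetMemHandle(dptr)
    payload = bytes(handle.reserved)  # opaque bytes to send to the peer

    # Importing process: rebuild the handle and map the same allocation.
    peer = cudart.cudaIpcMemHandle_st()
    peer.reserved = payload
    err, peer_dptr = cudart.cudaIpcOpenMemHandle(
        peer, cudart.cudaIpcMemLazyEnablePeerAccess)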
class cuda.cudart.cudaMemFabricHandle_st(void_ptr _ptr=0)

    reserved (bytes)
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaExternalMemoryHandleDesc(void_ptr _ptr=0)

    External memory handle descriptor

    type (cudaExternalMemoryHandleType): Type of the handle
    handle (anon_union1)
    size (unsigned long long): Size of the memory allocation
    flags (unsigned int): Flags must either be zero or cudaExternalMemoryDedicated
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaExternalMemoryBufferDesc(void_ptr _ptr=0)

    External memory buffer descriptor

    offset (unsigned long long): Offset into the memory object where the buffer's base is
    size (unsigned long long): Size of the buffer
    flags (unsigned int): Flags reserved for future use. Must be zero.
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaExternalMemoryMipmappedArrayDesc(void_ptr _ptr=0)

    External memory mipmap descriptor

    offset (unsigned long long): Offset into the memory object where the base level of the mipmap chain is.
    formatDesc (cudaChannelFormatDesc): Format of base level of the mipmap chain
    extent (cudaExtent): Dimensions of base level of the mipmap chain
    flags (unsigned int): Flags associated with CUDA mipmapped arrays. See cudaMallocMipmappedArray
    numLevels (unsigned int): Total number of levels in the mipmap chain
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaExternalSemaphoreHandleDesc(void_ptr _ptr=0)

    External semaphore handle descriptor

    type (cudaExternalSemaphoreHandleType): Type of the handle
    handle (anon_union2)
    flags (unsigned int): Flags reserved for the future. Must be zero.
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaExternalSemaphoreSignalParams(void_ptr _ptr=0)

    External semaphore signal parameters, compatible with driver type

    params (anon_struct15)
    flags (unsigned int): Only when cudaExternalSemaphoreSignalParams is used to signal a cudaExternalSemaphore_t of type cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is cudaExternalSemaphoreSignalSkipNvSciBufMemSync, which indicates that while signaling the cudaExternalSemaphore_t, no memory synchronization operations should be performed for any external memory object imported as cudaExternalMemoryHandleTypeNvSciBuf. For all other types of cudaExternalSemaphore_t, flags must be zero.
    reserved (List[unsigned int])
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaExternalSemaphoreWaitParams(void_ptr _ptr=0)

    External semaphore wait parameters, compatible with driver type

    params (anon_struct18)
    flags (unsigned int): Only when cudaExternalSemaphoreWaitParams is used to wait on a cudaExternalSemaphore_t of type cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is cudaExternalSemaphoreWaitSkipNvSciBufMemSync, which indicates that while waiting for the cudaExternalSemaphore_t, no memory synchronization operations should be performed for any external memory object imported as cudaExternalMemoryHandleTypeNvSciBuf. For all other types of cudaExternalSemaphore_t, flags must be zero.
    reserved (List[unsigned int])
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaKernelNodeParams(void_ptr _ptr=0)

    CUDA GPU kernel node parameters

    func (Any): Kernel to launch
    gridDim (dim3): Grid dimensions
    blockDim (dim3): Block dimensions
    sharedMemBytes (unsigned int): Dynamic shared-memory size per thread block in bytes
    kernelParams (Any): Array of pointers to individual kernel arguments
    extra (Any): Pointer to kernel arguments in the "extra" format
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaKernelNodeParamsV2(void_ptr _ptr=0)

    CUDA GPU kernel node parameters

    func (Any): Kernel to launch
    gridDim (dim3): Grid dimensions
    blockDim (dim3): Block dimensions
    sharedMemBytes (unsigned int): Dynamic shared-memory size per thread block in bytes
    kernelParams (Any): Array of pointers to individual kernel arguments
    extra (Any): Pointer to kernel arguments in the "extra" format
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaExternalSemaphoreSignalNodeParams(void_ptr _ptr=0)

    External semaphore signal node parameters

    extSemArray (cudaExternalSemaphore_t): Array of external semaphore handles.
    paramsArray (cudaExternalSemaphoreSignalParams): Array of external semaphore signal parameters.
    numExtSems (unsigned int): Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaExternalSemaphoreSignalNodeParamsV2(void_ptr _ptr=0)

    External semaphore signal node parameters

    extSemArray (cudaExternalSemaphore_t): Array of external semaphore handles.
    paramsArray (cudaExternalSemaphoreSignalParams): Array of external semaphore signal parameters.
    numExtSems (unsigned int): Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaExternalSemaphoreWaitNodeParams(void_ptr _ptr=0)

    External semaphore wait node parameters

    extSemArray (cudaExternalSemaphore_t): Array of external semaphore handles.
    paramsArray (cudaExternalSemaphoreWaitParams): Array of external semaphore wait parameters.
    numExtSems (unsigned int): Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaExternalSemaphoreWaitNodeParamsV2(void_ptr _ptr=0)

    External semaphore wait node parameters

    extSemArray (cudaExternalSemaphore_t): Array of external semaphore handles.
    paramsArray (cudaExternalSemaphoreWaitParams): Array of external semaphore wait parameters.
    numExtSems (unsigned int): Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaConditionalNodeParams(void_ptr _ptr=0)

    CUDA conditional node parameters

    handle (cudaGraphConditionalHandle): Conditional node handle. Handles must be created in advance of creating the node using cudaGraphConditionalHandleCreate.
    type (cudaGraphConditionalNodeType): Type of conditional node.
    size (unsigned int): Size of graph output array. Must be 1.
    phGraph_out (cudaGraph_t): CUDA-owned array populated with conditional node child graphs during creation of the node. Valid for the lifetime of the conditional node. The contents of the graph(s) are subject to the following constraints: allowed node types are kernel nodes, empty nodes, child graphs, memsets, memcopies, and conditionals (this applies recursively to child graphs and conditional bodies); all kernels, including kernels in nested conditionals or child graphs at any level, must belong to the same CUDA context. These graphs may be populated using graph node creation APIs or cudaStreamBeginCaptureToGraph.
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaChildGraphNodeParams(void_ptr _ptr=0)

    Child graph node parameters

    graph (cudaGraph_t): The child graph to clone into the node for node creation, or a handle to the graph owned by the node for node query
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaEventRecordNodeParams(void_ptr _ptr=0)

    Event record node parameters

    event (cudaEvent_t): The event to record when the node executes
    getPtr(): Get memory address of class instance

class cuda.cudart.cudaEventWaitNodeParams(void_ptr _ptr=0)

    Event wait node parameters

    event (cudaEvent_t): The event to wait on from the node
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaGraphNodeParams(void_ptr _ptr=0)

    Graph node parameters. See cudaGraphAddNode.

    type (cudaGraphNodeType): Type of the node
    reserved0 (List[int]): Reserved. Must be zero.
    reserved1 (List[long long]): Padding. Unused bytes must be zero.
    kernel (cudaKernelNodeParamsV2): Kernel node parameters.
    memcpy (cudaMemcpyNodeParams): Memcpy node parameters.
    memset (cudaMemsetParamsV2): Memset node parameters.
    host (cudaHostNodeParamsV2): Host node parameters.
    graph (cudaChildGraphNodeParams): Child graph node parameters.
    eventWait (cudaEventWaitNodeParams): Event wait node parameters.
    eventRecord (cudaEventRecordNodeParams): Event record node parameters.
    extSemSignal (cudaExternalSemaphoreSignalNodeParamsV2): External semaphore signal node parameters.
    extSemWait (cudaExternalSemaphoreWaitNodeParamsV2): External semaphore wait node parameters.
    alloc (cudaMemAllocNodeParamsV2): Memory allocation node parameters.
    free (cudaMemFreeNodeParams): Memory free node parameters.
    conditional (cudaConditionalNodeParams): Conditional node parameters.
    reserved2 (long long): Reserved bytes. Must be zero.
    getPtr(): Get memory address of class instance
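As a sketch of how this tagged union is used with cudaGraphAddNode (an event-record node is the simplest case; graph and event creation shown, error checking elided):

    from cuda import cudart

    err, graph = cudart.cudaGraphCreate(0)
    err, event = cudart.cudaEventCreate()

    params = cudart.cudaGraphNodeParams()
    params.type = cudart.cudaGraphNodeType.cudaGraphNodeTypeEventRecord
    params.eventRecord.event = event  # only the member matching `type` is read

    err, node = cudart.cudaGraphAddNode(graph, None, 0, params)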
class cuda.cudart.cudaGraphEdgeData_st(void_ptr _ptr=0)

    Optional annotation for edges in a CUDA graph. Note, all edges implicitly have annotations and default to a zero-initialized value if not specified. A zero-initialized struct indicates a standard full serialization of two nodes with memory visibility.

    from_port (bytes): This indicates when the dependency is triggered from the upstream node on the edge. The meaning is specific to the node type. A value of 0 in all cases means full completion of the upstream node, with memory visibility to the downstream node or portion thereof (indicated by to_port). Only kernel nodes define non-zero ports. A kernel node can use the following output port types: cudaGraphKernelNodePortDefault, cudaGraphKernelNodePortProgrammatic, or cudaGraphKernelNodePortLaunchCompletion.
    to_port (bytes): This indicates what portion of the downstream node is dependent on the upstream node or portion thereof (indicated by from_port). The meaning is specific to the node type. A value of 0 in all cases means the entirety of the downstream node is dependent on the upstream work. Currently no node types define non-zero ports. Accordingly, this field must be set to zero.
    type (bytes): This should be populated with a value from cudaGraphDependencyType. (It is typed as char due to compiler-specific layout of bitfields.) See cudaGraphDependencyType.
    reserved (bytes): These bytes are unused and must be zeroed. This ensures compatibility if additional fields are added in the future.
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaGraphInstantiateParams_st(void_ptr _ptr=0)

    Graph instantiation parameters

    flags (unsigned long long): Instantiation flags
    uploadStream (cudaStream_t): Upload stream
    errNode_out (cudaGraphNode_t): The node which caused instantiation to fail, if any
    result_out (cudaGraphInstantiateResult): Whether instantiation was successful. If it failed, the reason why
    getPtr(): Get memory address of class instance
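A brief sketch (continuing from a `graph` built as above; error checking elided) of driving cudaGraphInstantiateWithParams with this struct and inspecting result_out afterwards:

    iparams = cudart.cudaGraphInstantiateParams_st()
    iparams.flags = 0  # no upload at instantiation time

    err, graph_exec = cudart.cudaGraphInstantiateWithParams(graph, iparams)
    ok = cudart.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess
    if iparams.result_out != ok:
        print("instantiation failed at node", iparams.errNode_out)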
class cuda.cudart.cudaGraphExecUpdateResultInfo_st(void_ptr _ptr=0)

    Result information returned by cudaGraphExecUpdate

    result (cudaGraphExecUpdateResult): Gives more specific detail when a CUDA graph update fails.
    errorNode (cudaGraphNode_t): The "to node" of the error edge when the topologies do not match. The error node when the error is associated with a specific node. NULL when the error is generic.
    errorFromNode (cudaGraphNode_t): The "from node" of the error edge when the topologies do not match. Otherwise NULL.
    getPtr(): Get memory address of class instance
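A sketch of the typical update-else-reinstantiate pattern this struct supports (assumes a `graph_exec` from a previous instantiation and a modified `graph`; error checking elided):

    err, info = cudart.cudaGraphExecUpdate(graph_exec, graph)
    ok = cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess
    if info.result != ok:
        # Topology changed too much; fall back to a fresh instantiation.
        print("update failed:", info.result, "at node", info.errorNode)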
class cuda.cudart.cudaGraphKernelNodeUpdate(void_ptr _ptr=0)

    Struct to specify a single node update to pass as part of a larger array to cudaGraphKernelNodeUpdatesApply

    node (cudaGraphDeviceNode_t): Node to update
    field (cudaGraphKernelNodeField): Which type of update to apply. Determines how updateData is interpreted
    updateData (anon_union8): Update data to apply. Which field is used depends on field's value
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaLaunchMemSyncDomainMap_st(void_ptr _ptr=0)

    Memory Synchronization Domain map. See cudaLaunchMemSyncDomain. By default, kernels are launched in domain 0. Kernels launched with cudaLaunchMemSyncDomainRemote will have a different domain ID. The user may also alter the domain ID with cudaLaunchMemSyncDomainMap for a specific stream / graph node / kernel launch. See cudaLaunchAttributeMemSyncDomainMap. The domain ID range is available through cudaDevAttrMemSyncDomainCount.

    default_ (bytes): The default domain ID to use for designated kernels
    remote (bytes): The remote domain ID to use for designated kernels
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaLaunchAttributeValue(void_ptr _ptr=0)

    Launch attributes union; used as the value field of cudaLaunchAttribute

    pad (bytes)
    accessPolicyWindow (cudaAccessPolicyWindow): Value of launch attribute cudaLaunchAttributeAccessPolicyWindow.
    cooperative (int): Value of launch attribute cudaLaunchAttributeCooperative. Nonzero indicates a cooperative kernel (see cudaLaunchCooperativeKernel).
    syncPolicy (cudaSynchronizationPolicy): Value of launch attribute cudaLaunchAttributeSynchronizationPolicy. cudaSynchronizationPolicy for work queued up in this stream.
    clusterDim (anon_struct20): Value of launch attribute cudaLaunchAttributeClusterDimension that represents the desired cluster dimensions for the kernel. Opaque type with the following fields: x - the X dimension of the cluster, in blocks (must be a divisor of the grid X dimension); y - the Y dimension of the cluster, in blocks (must be a divisor of the grid Y dimension); z - the Z dimension of the cluster, in blocks (must be a divisor of the grid Z dimension).
    clusterSchedulingPolicyPreference (cudaClusterSchedulingPolicy): Value of launch attribute cudaLaunchAttributeClusterSchedulingPolicyPreference. Cluster scheduling policy preference for the kernel.
    programmaticStreamSerializationAllowed (int): Value of launch attribute cudaLaunchAttributeProgrammaticStreamSerialization.
    programmaticEvent (anon_struct21): Value of launch attribute cudaLaunchAttributeProgrammaticEvent with the following fields: cudaEvent_t event - event to fire when all blocks trigger it; int flags - event record flags, see cudaEventRecordWithFlags (does not accept cudaEventRecordExternal); int triggerAtBlockStart - if set to non-0, each block launch will automatically trigger the event.
    priority (int): Value of launch attribute cudaLaunchAttributePriority. Execution priority of the kernel.
    memSyncDomainMap (cudaLaunchMemSyncDomainMap): Value of launch attribute cudaLaunchAttributeMemSyncDomainMap. See cudaLaunchMemSyncDomainMap.
    memSyncDomain (cudaLaunchMemSyncDomain): Value of launch attribute cudaLaunchAttributeMemSyncDomain. See cudaLaunchMemSyncDomain.
    launchCompletionEvent (anon_struct22): Value of launch attribute cudaLaunchAttributeLaunchCompletionEvent with the following fields: cudaEvent_t event - event to fire when the last block launches; int flags - event record flags, see cudaEventRecordWithFlags (does not accept cudaEventRecordExternal).
    deviceUpdatableKernelNode (anon_struct23): Value of launch attribute cudaLaunchAttributeDeviceUpdatableKernelNode with the following fields: int deviceUpdatable - whether or not the resulting kernel node should be device-updatable; cudaGraphDeviceNode_t devNode - returns a handle to pass to the various device-side update functions.
    sharedMemCarveout (unsigned int): Value of launch attribute cudaLaunchAttributePreferredSharedMemoryCarveout.
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaLaunchAttribute_st(void_ptr _ptr=0)

    Launch attribute

    id (cudaLaunchAttributeID): Attribute to set
    val (cudaLaunchAttributeValue): Value of the attribute
    getPtr(): Get memory address of class instance
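A small construction sketch (the value is illustrative): the id selects which member of the val union is meaningful, mirroring the attribute/member pairs documented above.

    from cuda import cudart

    attr = cudart.cudaLaunchAttribute_st()
    attr.id = cudart.cudaLaunchAttributeID.cudaLaunchAttributePriority
    attr.val.priority = 1  # assumed to be a valid priority on the target device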
class cuda.cudart.cudaAsyncNotificationInfo(void_ptr _ptr=0)

    Information describing an async notification event

    type (cudaAsyncNotificationType)
    info (anon_union9)
    getPtr(): Get memory address of class instance
class cuda.cudart.cudaTextureDesc(void_ptr _ptr=0)

    CUDA texture descriptor

    addressMode (List[cudaTextureAddressMode]): Texture address mode for up to 3 dimensions
    filterMode (cudaTextureFilterMode): Texture filter mode
    readMode (cudaTextureReadMode): Texture read mode
    sRGB (int): Perform sRGB->linear conversion during texture read
    borderColor (List[float]): Texture Border Color
    normalizedCoords (int): Indicates whether texture reads are normalized or not
    maxAnisotropy (unsigned int): Limit to the anisotropy ratio
    mipmapFilterMode (cudaTextureFilterMode): Mipmap filter mode
    mipmapLevelBias (float): Offset applied to the supplied mipmap level
    minMipmapLevelClamp (float): Lower end of the mipmap level range to clamp access to
    maxMipmapLevelClamp (float): Upper end of the mipmap level range to clamp access to
    disableTrilinearOptimization (int): Disable any trilinear filtering optimizations.
    seamlessCubemap (int): Enable seamless cube map filtering.
    getPtr(): Get memory address of class instance
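For context, a minimal sketch that pairs this descriptor with a resource descriptor to build a texture object over a small float array; the 256x256 size and the clamp/linear settings are arbitrary choices, and error checking is elided:

    from cuda import cudart

    err, ch = cudart.cudaCreateChannelDesc(
        32, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat)
    err, arr = cudart.cudaMallocArray(ch, 256, 256, 0)

    # Resource descriptor: sample from the CUDA array created above.
    res = cudart.cudaResourceDesc()
    res.resType = cudart.cudaResourceType.cudaResourceTypeArray
    res.res.array.array = arr

    tex = cudart.cudaTextureDesc()
    tex.addressMode = [cudart.cudaTextureAddressMode.cudaAddressModeClamp] * 3
    tex.filterMode = cudart.cudaTextureFilterMode.cudaFilterModeLinear
    tex.readMode = cudart.cudaTextureReadMode.cudaReadModeElementType
    tex.normalizedCoords = 1

    err, tex_obj = cudart.cudaCreateTextureObject(res, tex, None)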
class cuda.cudart.cudaEglFrameType(value)

    CUDA EglFrame type - array or pointer

    cudaEglFrameTypeArray = 0: Frame type CUDA array
    cudaEglFrameTypePitch = 1: Frame type CUDA pointer

class cuda.cudart.cudaEglResourceLocationFlags(value)

    Resource location flags - sysmem or vidmem. For a CUDA context on iGPU, video and system memory are equivalent, so these flags have no effect on execution. For a CUDA context on dGPU, applications can use these flags to give a hint about the desired location: cudaEglResourceLocationSysmem makes the frame data resident in system memory for access by CUDA, while cudaEglResourceLocationVidmem makes it resident in dedicated video memory for access by CUDA. There may be additional latency due to new allocation and data migration if the frame is produced in a different memory.

    cudaEglResourceLocationSysmem = 0: Resource location sysmem
    cudaEglResourceLocationVidmem = 1: Resource location vidmem
class cuda.cudart.cudaEglColorFormat(value)

    CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.

    cudaEglColorFormatYUV420Planar = 0: Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    cudaEglColorFormatYUV420SemiPlanar = 1: Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar.
    cudaEglColorFormatYUV422Planar = 2: Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
    cudaEglColorFormatYUV422SemiPlanar = 3: Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar.
    cudaEglColorFormatARGB = 6: R/G/B/A four channels in one surface with BGRA byte ordering.
    cudaEglColorFormatRGBA = 7: R/G/B/A four channels in one surface with ABGR byte ordering.
    cudaEglColorFormatL = 8: single luminance channel in one surface.
    cudaEglColorFormatR = 9: single color channel in one surface.
    cudaEglColorFormatYUV444Planar = 10: Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
    cudaEglColorFormatYUV444SemiPlanar = 11: Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar.
    cudaEglColorFormatYUYV422 = 12: Y, U, V in one surface, interleaved as UYVY in one channel.
    cudaEglColorFormatUYVY422 = 13: Y, U, V in one surface, interleaved as YUYV in one channel.
    cudaEglColorFormatABGR = 14: R/G/B/A four channels in one surface with RGBA byte ordering.
    cudaEglColorFormatBGRA = 15: R/G/B/A four channels in one surface with ARGB byte ordering.
    cudaEglColorFormatA = 16: Alpha color format - one channel in one surface.
    cudaEglColorFormatRG = 17: R/G color format - two channels in one surface with GR byte ordering
    cudaEglColorFormatAYUV = 18: Y, U, V, A four channels in one surface, interleaved as VUYA.
    cudaEglColorFormatYVU444SemiPlanar = 19: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
    cudaEglColorFormatYVU422SemiPlanar = 20: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
    cudaEglColorFormatYVU420SemiPlanar = 21: Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    cudaEglColorFormatY10V10U10_444SemiPlanar = 22: Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
    cudaEglColorFormatY10V10U10_420SemiPlanar = 23: Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    cudaEglColorFormatY12V12U12_444SemiPlanar = 24: Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.
    cudaEglColorFormatY12V12U12_420SemiPlanar = 25: Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    cudaEglColorFormatVYUY_ER = 26: Extended Range Y, U, V in one surface, interleaved as YVYU in one channel.
    cudaEglColorFormatUYVY_ER = 27: Extended Range Y, U, V in one surface, interleaved as YUYV in one channel.
    cudaEglColorFormatYUYV_ER = 28: Extended Range Y, U, V in one surface, interleaved as UYVY in one channel.
    cudaEglColorFormatYVYU_ER = 29: Extended Range Y, U, V in one surface, interleaved as VYUY in one channel.
    cudaEglColorFormatYUVA_ER = 31: Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY.
    cudaEglColorFormatAYUV_ER = 32: Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA.
    cudaEglColorFormatYUV444Planar_ER = 33: Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height.
    cudaEglColorFormatYUV422Planar_ER = 34: Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.
    cudaEglColorFormatYUV420Planar_ER = 35: Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
    cudaEglColorFormatYUV444SemiPlanar_ER = 36: Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height.
    cudaEglColorFormatYUV422SemiPlanar_ER = 37: Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height.
    -
    -cudaEglColorFormatYUV420SemiPlanar_ER = 38#
    -

    Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU444Planar_ER = 39#
    -

    Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU422Planar_ER = 40#
    -

    Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU420Planar_ER = 41#
    -

    Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU444SemiPlanar_ER = 42#
    -

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU422SemiPlanar_ER = 43#
    -

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU420SemiPlanar_ER = 44#
    -

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatBayerRGGB = 45#
    -

    Bayer format - one channel in one surface with interleaved RGGB ordering.

    -
    - -
    -
    -cudaEglColorFormatBayerBGGR = 46#
    -

    Bayer format - one channel in one surface with interleaved BGGR ordering.

    -
    - -
    -
    -cudaEglColorFormatBayerGRBG = 47#
    -

    Bayer format - one channel in one surface with interleaved GRBG ordering.

    -
    - -
    -
    -cudaEglColorFormatBayerGBRG = 48#
    -

    Bayer format - one channel in one surface with interleaved GBRG ordering.

    -
    - -
    -
    -cudaEglColorFormatBayer10RGGB = 49#
    -

    Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer10BGGR = 50#
    -

    Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer10GRBG = 51#
    -

    Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer10GBRG = 52#
    -

    Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12RGGB = 53#
    -

    Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12BGGR = 54#
    -

    Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12GRBG = 55#
    -

    Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12GBRG = 56#
    -

    Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer14RGGB = 57#
    -

    Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer14BGGR = 58#
    -

    Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer14GRBG = 59#
    -

    Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer14GBRG = 60#
    -

    Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer20RGGB = 61#
    -

    Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer20BGGR = 62#
    -

    Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer20GRBG = 63#
    -

    Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer20GBRG = 64#
    -

    Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatYVU444Planar = 65#
    -

    Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU422Planar = 66#
    -

    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU420Planar = 67#
    -

    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatBayerIspRGGB = 68#
    -

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype.

    -
    - -
    -
    -cudaEglColorFormatBayerIspBGGR = 69#
    -

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype.

    -
    - -
    -
    -cudaEglColorFormatBayerIspGRBG = 70#
    -

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype.

    -
    - -
    -
    -cudaEglColorFormatBayerIspGBRG = 71#
    -

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype.

    -
    - -
    -
    -cudaEglColorFormatBayerBCCR = 72#
    -

    Bayer format - one channel in one surface with interleaved BCCR ordering.

    -
    - -
    -
    -cudaEglColorFormatBayerRCCB = 73#
    -

    Bayer format - one channel in one surface with interleaved RCCB ordering.

    -
    - -
    -
    -cudaEglColorFormatBayerCRBC = 74#
    -

    Bayer format - one channel in one surface with interleaved CRBC ordering.

    -
    - -
    -
    -cudaEglColorFormatBayerCBRC = 75#
    -

    Bayer format - one channel in one surface with interleaved CBRC ordering.

    -
    - -
    -
    -cudaEglColorFormatBayer10CCCC = 76#
    -

    Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12BCCR = 77#
    -

    Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12RCCB = 78#
    -

    Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12CRBC = 79#
    -

    Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12CBRC = 80#
    -

    Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatBayer12CCCC = 81#
    -

    Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    -
    - -
    -
    -cudaEglColorFormatY = 82#
    -

    Color format for single Y plane.

    -
    - -
    -
    -cudaEglColorFormatYUV420SemiPlanar_2020 = 83#
    -

    Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU420SemiPlanar_2020 = 84#
    -

    Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYUV420Planar_2020 = 85#
    -

    Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU420Planar_2020 = 86#
    -

    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYUV420SemiPlanar_709 = 87#
    -

    Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU420SemiPlanar_709 = 88#
    -

    Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYUV420Planar_709 = 89#
    -

    Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatYVU420Planar_709 = 90#
    -

    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91#
    -

    Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92#
    -

    Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93#
    -

    Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_422SemiPlanar = 94#
    -

    Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95#
    -

    Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatY_ER = 96#
    -

    Extended Range Color format for single Y plane.

    -
    - -
    -
    -cudaEglColorFormatY_709_ER = 97#
    -

    Extended Range Color format for single Y plane.

    -
    - -
    -
    -cudaEglColorFormatY10_ER = 98#
    -

    Extended Range Color format for single Y10 plane.

    -
    - -
    -
    -cudaEglColorFormatY10_709_ER = 99#
    -

    Extended Range Color format for single Y10 plane.

    -
    - -
    -
    -cudaEglColorFormatY12_ER = 100#
    -

    Extended Range Color format for single Y12 plane.

    -
    - -
    -
    -cudaEglColorFormatY12_709_ER = 101#
    -

    Extended Range Color format for single Y12 plane.

    -
    - -
    -
    -cudaEglColorFormatYUVA = 102#
    -

    Y, U, V, A four channels in one surface, interleaved as AVUY.

    -
    - -
    -
    -cudaEglColorFormatYVYU = 104#
    -

    Y, U, V in one surface, interleaved as YVYU in one channel.

    -
    - -
    -
    -cudaEglColorFormatVYUY = 105#
    -

    Y, U, V in one surface, interleaved as VYUY in one channel.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106#
    -

    Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107#
    -

    Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108#
    -

    Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109#
    -

    Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110#
    -

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111#
    -

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    -
    - -
    -
    -cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112#
    -

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    -
    - -
    -
    -cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113#
    -

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    -
    - -
    - -
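    A minimal sketch of inspecting this enum on a mapped EGL frame; the eglFrame value is assumed to come from an earlier call such as cudaGraphicsResourceGetMappedEglFrame:

        from cuda import cudart

        # eglFrame is assumed to be a cudart.cudaEglFrame obtained elsewhere
        fmt = cudart.cudaEglColorFormat(eglFrame.eglColorFormat)
        if fmt == cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar:
            # three separate Y, U, V surfaces; U/V are half size in each dimension
            print("planes:", eglFrame.planeCount)
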
    class cuda.cudart.cudaError_t(value)

        CUDA error types

        cudaSuccess = 0
            The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see cudaEventQuery() and cudaStreamQuery()).

        cudaErrorInvalidValue = 1
            This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values.

        cudaErrorMemoryAllocation = 2
            The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation.

        cudaErrorInitializationError = 3
            The API call failed because the CUDA driver and runtime could not be initialized.

        cudaErrorCudartUnloading = 4
            This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shutdown, at a point in time after the CUDA driver has been unloaded.

        cudaErrorProfilerDisabled = 5
            This indicates that the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like Visual Profiler.

        cudaErrorProfilerNotInitialized = 6
            [Deprecated]

        cudaErrorProfilerAlreadyStarted = 7
            [Deprecated]

        cudaErrorProfilerAlreadyStopped = 8
            [Deprecated]

        cudaErrorInvalidConfiguration = 9
            This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See cudaDeviceProp for more device limitations.

        cudaErrorInvalidPitchValue = 12
            This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch.

        cudaErrorInvalidSymbol = 13
            This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier.

        cudaErrorInvalidHostPointer = 16
            This indicates that at least one host pointer passed to the API call is not a valid host pointer. [Deprecated]

        cudaErrorInvalidDevicePointer = 17
            This indicates that at least one device pointer passed to the API call is not a valid device pointer. [Deprecated]

        cudaErrorInvalidTexture = 18
            This indicates that the texture passed to the API call is not a valid texture.

        cudaErrorInvalidTextureBinding = 19
            This indicates that the texture binding is not valid. This occurs if you call cudaGetTextureAlignmentOffset() with an unbound texture.

        cudaErrorInvalidChannelDescriptor = 20
            This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by cudaChannelFormatKind, or if one of the dimensions is invalid.

        cudaErrorInvalidMemcpyDirection = 21
            This indicates that the direction of the memcpy passed to the API call is not one of the types specified by cudaMemcpyKind.

        cudaErrorAddressOfConstant = 22
            This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. [Deprecated]

        cudaErrorTextureFetchFailed = 23
            This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. [Deprecated]

        cudaErrorTextureNotBound = 24
            This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. [Deprecated]

        cudaErrorSynchronizationError = 25
            This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. [Deprecated]

        cudaErrorInvalidFilterSetting = 26
            This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA.

        cudaErrorInvalidNormSetting = 27
            This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA.

        cudaErrorMixedDeviceExecution = 28
            Mixing of device and device emulation code was not allowed. [Deprecated]

        cudaErrorNotYetImplemented = 31
            This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. [Deprecated]

        cudaErrorMemoryValueTooLarge = 32
            This indicated that an emulated device pointer exceeded the 32-bit address range. [Deprecated]

        cudaErrorStubLibrary = 34
            This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in CUDA API calls returning this error.

        cudaErrorInsufficientDriver = 35
            This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run.

        cudaErrorCallRequiresNewerDriver = 36
            This indicates that the API call requires a newer CUDA driver than the one currently installed. Users should install an updated NVIDIA CUDA driver to allow the API call to succeed.

        cudaErrorInvalidSurface = 37
            This indicates that the surface passed to the API call is not a valid surface.

        cudaErrorDuplicateVariableName = 43
            This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name.

        cudaErrorDuplicateTextureName = 44
            This indicates that multiple textures (across separate CUDA source files in the application) share the same string name.

        cudaErrorDuplicateSurfaceName = 45
            This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name.

        cudaErrorDevicesUnavailable = 46
            This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of cudaComputeModeProhibited, cudaComputeModeExclusiveProcess, or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed.

        cudaErrorIncompatibleDriverContext = 49
            This indicates that the current context is not compatible with this CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see "Interactions with the CUDA Driver API" for more information.

        cudaErrorMissingConfiguration = 52
            The device function being invoked (usually via cudaLaunchKernel()) was not previously configured via the cudaConfigureCall() function.

        cudaErrorPriorLaunchFailure = 53
            This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. [Deprecated]

        cudaErrorLaunchMaxDepthExceeded = 65
            This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches.

        cudaErrorLaunchFileScopedTex = 66
            This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs.

        cudaErrorLaunchFileScopedSurf = 67
            This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs.

        cudaErrorSyncDepthExceeded = 68
            This error indicates that a call to cudaDeviceSynchronize made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or user specified device limit cudaLimitDevRuntimeSyncDepth. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which cudaDeviceSynchronize will be called must be specified with the cudaLimitDevRuntimeSyncDepth limit to the cudaDeviceSetLimit API before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. Note that cudaDeviceSynchronize made from device runtime is only supported on devices of compute capability < 9.0.

        cudaErrorLaunchPendingCountExceeded = 69
            This error indicates that a device runtime grid launch failed because the launch would exceed the limit cudaLimitDevRuntimePendingLaunchCount. For this launch to proceed successfully, cudaDeviceSetLimit must be called to set the cudaLimitDevRuntimePendingLaunchCount to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations.

        cudaErrorInvalidDeviceFunction = 98
            The requested device function does not exist or is not compiled for the proper device architecture.

        cudaErrorNoDevice = 100
            This indicates that no CUDA-capable devices were detected by the installed CUDA driver.

        cudaErrorInvalidDevice = 101
            This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device.

        cudaErrorDeviceNotLicensed = 102
            This indicates that the device doesn't have a valid Grid License.

        cudaErrorSoftwareValidityNotEstablished = 103
            By default, the CUDA runtime may perform a minimal set of self-tests, as well as CUDA driver tests, to establish the validity of both. Introduced in CUDA 11.2, this error return indicates that at least one of these tests has failed and the validity of either the runtime or the driver could not be established.

        cudaErrorStartupFailure = 127
            This indicates an internal startup failure in the CUDA runtime.

        cudaErrorInvalidKernelImage = 200
            This indicates that the device kernel image is invalid.

        cudaErrorDeviceUninitialized = 201
            This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had cuCtxDestroy() invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See cuCtxGetApiVersion() for more details.

        cudaErrorMapBufferObjectFailed = 205
            This indicates that the buffer object could not be mapped.

        cudaErrorUnmapBufferObjectFailed = 206
            This indicates that the buffer object could not be unmapped.

        cudaErrorArrayIsMapped = 207
            This indicates that the specified array is currently mapped and thus cannot be destroyed.

        cudaErrorAlreadyMapped = 208
            This indicates that the resource is already mapped.

        cudaErrorNoKernelImageForDevice = 209
            This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration.

        cudaErrorAlreadyAcquired = 210
            This indicates that a resource has already been acquired.

        cudaErrorNotMapped = 211
            This indicates that a resource is not mapped.

        cudaErrorNotMappedAsArray = 212
            This indicates that a mapped resource is not available for access as an array.

        cudaErrorNotMappedAsPointer = 213
            This indicates that a mapped resource is not available for access as a pointer.

        cudaErrorECCUncorrectable = 214
            This indicates that an uncorrectable ECC error was detected during execution.

        cudaErrorUnsupportedLimit = 215
            This indicates that the cudaLimit passed to the API call is not supported by the active device.

        cudaErrorDeviceAlreadyInUse = 216
            This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread.

        cudaErrorPeerAccessUnsupported = 217
            This error indicates that P2P access is not supported across the given devices.

        cudaErrorInvalidPtx = 218
            A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.

        cudaErrorInvalidGraphicsContext = 219
            This indicates an error with the OpenGL or DirectX context.

        cudaErrorNvlinkUncorrectable = 220
            This indicates that an uncorrectable NVLink error was detected during the execution.

        cudaErrorJitCompilerNotFound = 221
            This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.

        cudaErrorUnsupportedPtxVersion = 222
            This indicates that the provided PTX was compiled with an unsupported toolchain. The most common reason for this is that the PTX was generated by a compiler newer than what is supported by the CUDA driver and PTX JIT compiler.

        cudaErrorJitCompilationDisabled = 223
            This indicates that the JIT compilation was disabled. The JIT compilation compiles PTX. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.

        cudaErrorUnsupportedExecAffinity = 224
            This indicates that the provided execution affinity is not supported by the device.

        cudaErrorUnsupportedDevSideSync = 225
            This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize.

        cudaErrorInvalidSource = 300
            This indicates that the device kernel source is invalid.

        cudaErrorFileNotFound = 301
            This indicates that the file specified was not found.

        cudaErrorSharedObjectSymbolNotFound = 302
            This indicates that a link to a shared object failed to resolve.

        cudaErrorSharedObjectInitFailed = 303
            This indicates that initialization of a shared object failed.

        cudaErrorOperatingSystem = 304
            This error indicates that an OS call failed.

        cudaErrorInvalidResourceHandle = 400
            This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like cudaStream_t and cudaEvent_t.

        cudaErrorIllegalState = 401
            This indicates that a resource required by the API call is not in a valid state to perform the requested operation.

        cudaErrorLossyQuery = 402
            This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments.

        cudaErrorSymbolNotFound = 500
            This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names.

        cudaErrorNotReady = 600
            This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than cudaSuccess (which indicates completion). Calls that may return this value include cudaEventQuery() and cudaStreamQuery().

        cudaErrorIllegalAddress = 700
            The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorLaunchOutOfResources = 701
            This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to cudaErrorInvalidConfiguration, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count.

        cudaErrorLaunchTimeout = 702
            This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property kernelExecTimeoutEnabled for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorLaunchIncompatibleTexturing = 703
            This error indicates a kernel launch that uses an incompatible texturing mode.

        cudaErrorPeerAccessAlreadyEnabled = 704
            This error indicates that a call to cudaDeviceEnablePeerAccess() is trying to re-enable peer addressing from a context which has already had peer addressing enabled.

        cudaErrorPeerAccessNotEnabled = 705
            This error indicates that cudaDeviceDisablePeerAccess() is trying to disable peer addressing which has not been enabled yet via cudaDeviceEnablePeerAccess().

        cudaErrorSetOnActiveProcess = 708
            This indicates that the user has called cudaSetValidDevices(), cudaSetDeviceFlags(), cudaD3D9SetDirect3DDevice(), cudaD3D10SetDirect3DDevice, cudaD3D11SetDirect3DDevice(), or cudaVDPAUSetVDPAUDevice() after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing CUcontext active on the host thread.

        cudaErrorContextIsDestroyed = 709
            This error indicates that the context current to the calling thread has been destroyed using cuCtxDestroy, or is a primary context which has not yet been initialized.

        cudaErrorAssert = 710
            An assert triggered in device code during kernel execution. The device cannot be used again. All existing allocations are invalid. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorTooManyPeers = 711
            This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to cudaEnablePeerAccess().

        cudaErrorHostMemoryAlreadyRegistered = 712
            This error indicates that the memory range passed to cudaHostRegister() has already been registered.

        cudaErrorHostMemoryNotRegistered = 713
            This error indicates that the pointer passed to cudaHostUnregister() does not correspond to any currently registered memory region.

        cudaErrorHardwareStackError = 714
            Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorIllegalInstruction = 715
            The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorMisalignedAddress = 716
            The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorInvalidAddressSpace = 717
            While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorInvalidPc = 718
            The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorLaunchFailure = 719
            An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorCooperativeLaunchTooLarge = 720
            This error indicates that the number of blocks launched per grid for a kernel that was launched via either cudaLaunchCooperativeKernel or cudaLaunchCooperativeKernelMultiDevice exceeds the maximum number of blocks as allowed by cudaOccupancyMaxActiveBlocksPerMultiprocessor or cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors as specified by the device attribute cudaDevAttrMultiProcessorCount.

        cudaErrorNotPermitted = 800
            This error indicates the attempted operation is not permitted.

        cudaErrorNotSupported = 801
            This error indicates the attempted operation is not supported on the current system or device.

        cudaErrorSystemNotReady = 802
            This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide.

        cudaErrorSystemDriverMismatch = 803
            This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions.

        cudaErrorCompatNotSupportedOnDevice = 804
            This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable.

        cudaErrorMpsConnectionFailed = 805
            This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server.

        cudaErrorMpsRpcFailure = 806
            This error indicates that the remote procedural call between the MPS server and the MPS client failed.

        cudaErrorMpsServerNotReady = 807
            This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure.

        cudaErrorMpsMaxClientsReached = 808
            This error indicates that the hardware resources required to create MPS client have been exhausted.

        cudaErrorMpsMaxConnectionsReached = 809
            This error indicates that the hardware resources required to support device connections have been exhausted.

        cudaErrorMpsClientTerminated = 810
            This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorCdpNotSupported = 811
            This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it.

        cudaErrorCdpVersionMismatch = 812
            This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism.

        cudaErrorStreamCaptureUnsupported = 900
            The operation is not permitted when the stream is capturing.

        cudaErrorStreamCaptureInvalidated = 901
            The current capture sequence on the stream has been invalidated due to a previous error.

        cudaErrorStreamCaptureMerge = 902
            The operation would have resulted in a merge of two independent capture sequences.

        cudaErrorStreamCaptureUnmatched = 903
            The capture was not initiated in this stream.

        cudaErrorStreamCaptureUnjoined = 904
            The capture sequence contains a fork that was not joined to the primary stream.

        cudaErrorStreamCaptureIsolation = 905
            A dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary.

        cudaErrorStreamCaptureImplicit = 906
            The operation would have resulted in a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy.

        cudaErrorCapturedEvent = 907
            The operation is not permitted on an event which was last recorded in a capturing stream.

        cudaErrorStreamCaptureWrongThread = 908
            A stream capture sequence not initiated with the cudaStreamCaptureModeRelaxed argument to cudaStreamBeginCapture was passed to cudaStreamEndCapture in a different thread.

        cudaErrorTimeout = 909
            This indicates that the wait operation has timed out.

        cudaErrorGraphExecUpdateFailure = 910
            This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.

        cudaErrorExternalDevice = 911
            This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

        cudaErrorInvalidClusterSize = 912
            This indicates that a kernel launch error has occurred due to cluster misconfiguration.

        cudaErrorFunctionNotLoaded = 913
            Indicates that a function handle is not loaded when calling an API that requires a loaded function.

        cudaErrorInvalidResourceType = 914
            This error indicates one or more resources passed in are not valid resource types for the operation.

        cudaErrorInvalidResourceConfiguration = 915
            This error indicates one or more resources are insufficient or non-applicable for the operation.

        cudaErrorUnknown = 999
            This indicates that an unknown internal error has occurred.

        cudaErrorApiFailureBase = 10000

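    Because every cuda.cudart call returns its cudaError_t as the first element of a tuple, a small checking helper (illustrative, in the style of the bundled examples) keeps call sites readable:

        from cuda import cudart

        def checkCudaErrors(result):
            # element 0 of the returned tuple is the cudaError_t status
            err = result[0]
            if err != cudart.cudaError_t.cudaSuccess:
                _, name = cudart.cudaGetErrorName(err)
                raise RuntimeError(f"CUDA error: {name}")
            return result[1] if len(result) == 2 else result[1:]

        count = checkCudaErrors(cudart.cudaGetDeviceCount())
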
    class cuda.cudart.cudaChannelFormatKind(value)

        Channel format kind

        cudaChannelFormatKindSigned = 0
            Signed channel format

        cudaChannelFormatKindUnsigned = 1
            Unsigned channel format

        cudaChannelFormatKindFloat = 2
            Float channel format

        cudaChannelFormatKindNone = 3
            No channel format

        cudaChannelFormatKindNV12 = 4
            Unsigned 8-bit integers, planar 4:2:0 YUV format

        cudaChannelFormatKindUnsignedNormalized8X1 = 5
            1 channel unsigned 8-bit normalized integer

        cudaChannelFormatKindUnsignedNormalized8X2 = 6
            2 channel unsigned 8-bit normalized integer

        cudaChannelFormatKindUnsignedNormalized8X4 = 7
            4 channel unsigned 8-bit normalized integer

        cudaChannelFormatKindUnsignedNormalized16X1 = 8
            1 channel unsigned 16-bit normalized integer

        cudaChannelFormatKindUnsignedNormalized16X2 = 9
            2 channel unsigned 16-bit normalized integer

        cudaChannelFormatKindUnsignedNormalized16X4 = 10
            4 channel unsigned 16-bit normalized integer

        cudaChannelFormatKindSignedNormalized8X1 = 11
            1 channel signed 8-bit normalized integer

        cudaChannelFormatKindSignedNormalized8X2 = 12
            2 channel signed 8-bit normalized integer

        cudaChannelFormatKindSignedNormalized8X4 = 13
            4 channel signed 8-bit normalized integer

        cudaChannelFormatKindSignedNormalized16X1 = 14
            1 channel signed 16-bit normalized integer

        cudaChannelFormatKindSignedNormalized16X2 = 15
            2 channel signed 16-bit normalized integer

        cudaChannelFormatKindSignedNormalized16X4 = 16
            4 channel signed 16-bit normalized integer

        cudaChannelFormatKindUnsignedBlockCompressed1 = 17
            4 channel unsigned normalized block-compressed (BC1 compression) format

        cudaChannelFormatKindUnsignedBlockCompressed1SRGB = 18
            4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding

        cudaChannelFormatKindUnsignedBlockCompressed2 = 19
            4 channel unsigned normalized block-compressed (BC2 compression) format

        cudaChannelFormatKindUnsignedBlockCompressed2SRGB = 20
            4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding

        cudaChannelFormatKindUnsignedBlockCompressed3 = 21
            4 channel unsigned normalized block-compressed (BC3 compression) format

        cudaChannelFormatKindUnsignedBlockCompressed3SRGB = 22
            4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding

        cudaChannelFormatKindUnsignedBlockCompressed4 = 23
            1 channel unsigned normalized block-compressed (BC4 compression) format

        cudaChannelFormatKindSignedBlockCompressed4 = 24
            1 channel signed normalized block-compressed (BC4 compression) format

        cudaChannelFormatKindUnsignedBlockCompressed5 = 25
            2 channel unsigned normalized block-compressed (BC5 compression) format

        cudaChannelFormatKindSignedBlockCompressed5 = 26
            2 channel signed normalized block-compressed (BC5 compression) format

        cudaChannelFormatKindUnsignedBlockCompressed6H = 27
            3 channel unsigned half-float block-compressed (BC6H compression) format

        cudaChannelFormatKindSignedBlockCompressed6H = 28
            3 channel signed half-float block-compressed (BC6H compression) format

        cudaChannelFormatKindUnsignedBlockCompressed7 = 29
            4 channel unsigned normalized block-compressed (BC7 compression) format

        cudaChannelFormatKindUnsignedBlockCompressed7SRGB = 30
            4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding

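    A minimal sketch of using this enum to describe a single-channel 32-bit float format via cudaCreateChannelDesc:

        from cuda import cudart

        err, desc = cudart.cudaCreateChannelDesc(
            32, 0, 0, 0, cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat)
        assert err == cudart.cudaError_t.cudaSuccess
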
    class cuda.cudart.cudaMemoryType(value)

        CUDA memory types

        cudaMemoryTypeUnregistered = 0
            Unregistered memory

        cudaMemoryTypeHost = 1
            Host memory

        cudaMemoryTypeDevice = 2
            Device memory

        cudaMemoryTypeManaged = 3
            Managed memory

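    The memory type of a pointer can be queried through cudaPointerGetAttributes; a minimal sketch:

        from cuda import cudart

        err, devPtr = cudart.cudaMalloc(1024)
        err, attrs = cudart.cudaPointerGetAttributes(devPtr)
        # attrs.type is a cudaMemoryType value
        assert attrs.type == cudart.cudaMemoryType.cudaMemoryTypeDevice
        (err,) = cudart.cudaFree(devPtr)
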
    class cuda.cudart.cudaMemcpyKind(value)

        CUDA memory copy types

        cudaMemcpyHostToHost = 0
            Host -> Host

        cudaMemcpyHostToDevice = 1
            Host -> Device

        cudaMemcpyDeviceToHost = 2
            Device -> Host

        cudaMemcpyDeviceToDevice = 3
            Device -> Device

        cudaMemcpyDefault = 4
            Direction of the transfer is inferred from the pointer values. Requires unified virtual addressing

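    A minimal host-to-device copy using cudaMemcpyHostToDevice (numpy is used here only to provide a host buffer):

        import numpy as np
        from cuda import cudart

        host = np.arange(16, dtype=np.float32)
        err, dev = cudart.cudaMalloc(host.nbytes)
        (err,) = cudart.cudaMemcpy(dev, host.ctypes.data, host.nbytes,
                                   cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
        (err,) = cudart.cudaFree(dev)
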
    class cuda.cudart.cudaAccessProperty(value)

        Specifies performance hint with cudaAccessPolicyWindow for hitProp and missProp members.

        cudaAccessPropertyNormal = 0
            Normal cache persistence.

        cudaAccessPropertyStreaming = 1
            Streaming access is less likely to persist in cache.

        cudaAccessPropertyPersisting = 2
            Persisting access is more likely to persist in cache.

    class cuda.cudart.cudaStreamCaptureStatus(value)

        Possible stream capture statuses returned by cudaStreamIsCapturing

        cudaStreamCaptureStatusNone = 0
            Stream is not capturing

        cudaStreamCaptureStatusActive = 1
            Stream is actively capturing

        cudaStreamCaptureStatusInvalidated = 2
            Stream is part of a capture sequence that has been invalidated, but not terminated

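    A minimal sketch of querying the capture status of a stream:

        from cuda import cudart

        err, stream = cudart.cudaStreamCreate()
        err, status = cudart.cudaStreamIsCapturing(stream)
        if status == cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone:
            print("stream is not capturing")
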
    class cuda.cudart.cudaStreamCaptureMode(value)

        Possible modes for stream capture thread interactions. For more details see cudaStreamBeginCapture and cudaThreadExchangeStreamCaptureMode

        cudaStreamCaptureModeGlobal = 0

        cudaStreamCaptureModeThreadLocal = 1

        cudaStreamCaptureModeRelaxed = 2

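    A minimal capture sequence using the global mode (the work enqueued between begin and end is elided):

        from cuda import cudart

        err, stream = cudart.cudaStreamCreate()
        (err,) = cudart.cudaStreamBeginCapture(
            stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
        # ... enqueue work on stream ...
        err, graph = cudart.cudaStreamEndCapture(stream)
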
    class cuda.cudart.cudaSynchronizationPolicy(value)

        cudaSyncPolicyAuto = 1

        cudaSyncPolicySpin = 2

        cudaSyncPolicyYield = 3

        cudaSyncPolicyBlockingSync = 4

    class cuda.cudart.cudaClusterSchedulingPolicy(value)

        Cluster scheduling policies. These may be passed to cudaFuncSetAttribute

        cudaClusterSchedulingPolicyDefault = 0
            the default policy

        cudaClusterSchedulingPolicySpread = 1
            spread the blocks within a cluster to the SMs

        cudaClusterSchedulingPolicyLoadBalancing = 2
            allow the hardware to load-balance the blocks in a cluster to the SMs

    class cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags(value)

        Flags for cudaStreamUpdateCaptureDependencies

        cudaStreamAddCaptureDependencies = 0
            Add new nodes to the dependency set

        cudaStreamSetCaptureDependencies = 1
            Replace the dependency set with the new nodes

    class cuda.cudart.cudaUserObjectFlags(value)

        Flags for user objects for graphs

        cudaUserObjectNoDestructorSync = 1
            Indicates the destructor execution is not synchronized by any CUDA handle.

    class cuda.cudart.cudaUserObjectRetainFlags(value)

        Flags for retaining user object references for graphs

        cudaGraphUserObjectMove = 1
            Transfer references from the caller rather than creating new references.

    class cuda.cudart.cudaGraphicsRegisterFlags(value)

        CUDA graphics interop register flags

        cudaGraphicsRegisterFlagsNone = 0
            Default

        cudaGraphicsRegisterFlagsReadOnly = 1
            CUDA will not write to this resource

        cudaGraphicsRegisterFlagsWriteDiscard = 2
            CUDA will only write to and will not read from this resource

        cudaGraphicsRegisterFlagsSurfaceLoadStore = 4
            CUDA will bind this resource to a surface reference

        cudaGraphicsRegisterFlagsTextureGather = 8
            CUDA will perform texture gather operations on this resource

    -
    -class cuda.cudart.cudaGraphicsMapFlags(value)#
    -

    CUDA graphics interop map flags

    -
    -
    -cudaGraphicsMapFlagsNone = 0#
    -

    Default; Assume resource can be read/written

    -
    - -
    -
    -cudaGraphicsMapFlagsReadOnly = 1#
    -

    CUDA will not write to this resource

    -
    - -
    -
    -cudaGraphicsMapFlagsWriteDiscard = 2#
    -

    CUDA will only write to and will not read from this resource

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphicsCubeFace(value)#
    -

    CUDA graphics interop array indices for cube maps

    -
    -
    -cudaGraphicsCubeFacePositiveX = 0#
    -

    Positive X face of cubemap

    -
    - -
    -
    -cudaGraphicsCubeFaceNegativeX = 1#
    -

    Negative X face of cubemap

    -
    - -
    -
    -cudaGraphicsCubeFacePositiveY = 2#
    -

    Positive Y face of cubemap

    -
    - -
    -
    -cudaGraphicsCubeFaceNegativeY = 3#
    -

    Negative Y face of cubemap

    -
    - -
    -
    -cudaGraphicsCubeFacePositiveZ = 4#
    -

    Positive Z face of cubemap

    -
    - -
    -
    -cudaGraphicsCubeFaceNegativeZ = 5#
    -

    Negative Z face of cubemap

    -
    - -
    - -
    -
    -class cuda.cudart.cudaResourceType(value)#
    -

    CUDA resource types

    -
    -
    -cudaResourceTypeArray = 0#
    -

    Array resource

    -
    - -
    -
    -cudaResourceTypeMipmappedArray = 1#
    -

    Mipmapped array resource

    -
    - -
    -
    -cudaResourceTypeLinear = 2#
    -

    Linear resource

    -
    - -
    -
    -cudaResourceTypePitch2D = 3#
    -

    Pitch 2D resource

    -
    - -
    - -
    -
    -class cuda.cudart.cudaResourceViewFormat(value)#
    -

    CUDA texture resource view formats

    -
    -
    -cudaResViewFormatNone = 0#
    -

    No resource view format (use underlying resource format)

    -
    - -
    -
    -cudaResViewFormatUnsignedChar1 = 1#
    -

    1 channel unsigned 8-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedChar2 = 2#
    -

    2 channel unsigned 8-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedChar4 = 3#
    -

    4 channel unsigned 8-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedChar1 = 4#
    -

    1 channel signed 8-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedChar2 = 5#
    -

    2 channel signed 8-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedChar4 = 6#
    -

    4 channel signed 8-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedShort1 = 7#
    -

    1 channel unsigned 16-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedShort2 = 8#
    -

    2 channel unsigned 16-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedShort4 = 9#
    -

    4 channel unsigned 16-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedShort1 = 10#
    -

    1 channel signed 16-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedShort2 = 11#
    -

    2 channel signed 16-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedShort4 = 12#
    -

    4 channel signed 16-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedInt1 = 13#
    -

    1 channel unsigned 32-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedInt2 = 14#
    -

    2 channel unsigned 32-bit integers

    -
    - -
    -
    -cudaResViewFormatUnsignedInt4 = 15#
    -

    4 channel unsigned 32-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedInt1 = 16#
    -

    1 channel signed 32-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedInt2 = 17#
    -

    2 channel signed 32-bit integers

    -
    - -
    -
    -cudaResViewFormatSignedInt4 = 18#
    -

    4 channel signed 32-bit integers

    -
    - -
    -
    -cudaResViewFormatHalf1 = 19#
    -

    1 channel 16-bit floating point

    -
    - -
    -
    -cudaResViewFormatHalf2 = 20#
    -

    2 channel 16-bit floating point

    -
    - -
    -
    -cudaResViewFormatHalf4 = 21#
    -

    4 channel 16-bit floating point

    -
    - -
    -
    -cudaResViewFormatFloat1 = 22#
    -

    1 channel 32-bit floating point

    -
    - -
    -
    -cudaResViewFormatFloat2 = 23#
    -

    2 channel 32-bit floating point

    -
    - -
    -
    -cudaResViewFormatFloat4 = 24#
    -

    4 channel 32-bit floating point

    -
    - -
    -
    -cudaResViewFormatUnsignedBlockCompressed1 = 25#
    -

    Block compressed 1

    -
    - -
    -
    -cudaResViewFormatUnsignedBlockCompressed2 = 26#
    -

    Block compressed 2

    -
    - -
    -
    -cudaResViewFormatUnsignedBlockCompressed3 = 27#
    -

    Block compressed 3

    -
    - -
    -
    -cudaResViewFormatUnsignedBlockCompressed4 = 28#
    -

    Block compressed 4 unsigned

    -
    - -
    -
    -cudaResViewFormatSignedBlockCompressed4 = 29#
    -

    Block compressed 4 signed

    -
    - -
    -
    -cudaResViewFormatUnsignedBlockCompressed5 = 30#
    -

    Block compressed 5 unsigned

    -
    - -
    -
    -cudaResViewFormatSignedBlockCompressed5 = 31#
    -

    Block compressed 5 signed

    -
    - -
    -
    -cudaResViewFormatUnsignedBlockCompressed6H = 32#
    -

    Block compressed 6 unsigned half-float

    -
    - -
    -
    -cudaResViewFormatSignedBlockCompressed6H = 33#
    -

    Block compressed 6 signed half-float

    -
    - -
    -
    -cudaResViewFormatUnsignedBlockCompressed7 = 34#
    -

    Block compressed 7

    -
    - -
    - -
    -
    -class cuda.cudart.cudaFuncAttribute(value)#
    -

    CUDA function attributes that can be set using cudaFuncSetAttribute

    -
    -
    -cudaFuncAttributeMaxDynamicSharedMemorySize = 8#
    -

    Maximum dynamic shared memory size

    -
    - -
    -
    -cudaFuncAttributePreferredSharedMemoryCarveout = 9#
    -

    Preferred shared memory-L1 cache split

    -
    - -
    -
    -cudaFuncAttributeClusterDimMustBeSet = 10#
    -

    Indicator to enforce valid cluster dimension specification on kernel launch

    -
    - -
    -
    -cudaFuncAttributeRequiredClusterWidth = 11#
    -

    Required cluster width

    -
    - -
    -
    -cudaFuncAttributeRequiredClusterHeight = 12#
    -

    Required cluster height

    -
    - -
    -
    -cudaFuncAttributeRequiredClusterDepth = 13#
    -

    Required cluster depth

    -
    - -
    -
    -cudaFuncAttributeNonPortableClusterSizeAllowed = 14#
    -

    Whether non-portable cluster scheduling policy is supported

    -
    - -
    -
    -cudaFuncAttributeClusterSchedulingPolicyPreference = 15#
    -

    Required cluster scheduling policy preference

    -
    - -
    -
    -cudaFuncAttributeMax = 16#
    -
    - -
    - -
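    As a sketch only (the `kernel` handle below is hypothetical and would come from a compiled module), an attribute is set with cudaFuncSetAttribute::

        from cuda import cudart

        # `kernel` is a hypothetical device-function handle obtained elsewhere.
        err, = cudart.cudaFuncSetAttribute(
            kernel,
            cudart.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize,
            64 * 1024)  # opt in to 64 KiB of dynamic shared memory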
    -
    -class cuda.cudart.cudaFuncCache(value)#
    -

    CUDA function cache configurations

    -
    -
    -cudaFuncCachePreferNone = 0#
    -

    Default function cache configuration, no preference

    -
    - -
    -
    -cudaFuncCachePreferShared = 1#
    -

    Prefer larger shared memory and smaller L1 cache

    -
    - -
    -
    -cudaFuncCachePreferL1 = 2#
    -

    Prefer larger L1 cache and smaller shared memory

    -
    - -
    -
    -cudaFuncCachePreferEqual = 3#
    -

    Prefer equal size L1 cache and shared memory

    -
    - -
    - -
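    A minimal sketch: the device-wide cache preference can be selected with cudaDeviceSetCacheConfig::

        from cuda import cudart

        err, = cudart.cudaDeviceSetCacheConfig(
            cudart.cudaFuncCache.cudaFuncCachePreferShared)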
    -
    -class cuda.cudart.cudaSharedMemConfig(value)#
    -

    CUDA shared memory configuration [Deprecated]

    -
    -
    -cudaSharedMemBankSizeDefault = 0#
    -
    - -
    -
    -cudaSharedMemBankSizeFourByte = 1#
    -
    - -
    -
    -cudaSharedMemBankSizeEightByte = 2#
    -
    - -
    - -
    -
    -class cuda.cudart.cudaSharedCarveout(value)#
    -

    Shared memory carveout configurations. These may be passed to cudaFuncSetAttribute

    -
    -
    -cudaSharedmemCarveoutDefault = -1#
    -

    No preference for shared memory or L1 (default)

    -
    - -
    -
    -cudaSharedmemCarveoutMaxShared = 100#
    -

    Prefer maximum available shared memory, minimum L1 cache

    -
    - -
    -
    -cudaSharedmemCarveoutMaxL1 = 0#
    -

    Prefer maximum available L1 cache, minimum shared memory

    -
    - -
    - -
    -
    -class cuda.cudart.cudaComputeMode(value)#
    -

    CUDA device compute modes

    -
    -
    -cudaComputeModeDefault = 0#
    -

    Default compute mode (Multiple threads can use cudaSetDevice() with this device)

    -
    - -
    -
    -cudaComputeModeExclusive = 1#
    -

    Compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice() with this device)

    -
    - -
    -
    -cudaComputeModeProhibited = 2#
    -

    Compute-prohibited mode (No threads can use cudaSetDevice() with this device)

    -
    - -
    -
    -cudaComputeModeExclusiveProcess = 3#
    -

    Compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice() with this device)

    -
    - -
    - -
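    A hedged sketch that reads the compute mode of device 0 through cudaDeviceGetAttribute::

        from cuda import cudart

        err, mode = cudart.cudaDeviceGetAttribute(
            cudart.cudaDeviceAttr.cudaDevAttrComputeMode, 0)
        if mode == cudart.cudaComputeMode.cudaComputeModeProhibited:
            print("device 0 cannot be used from this process")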
    -
    -class cuda.cudart.cudaLimit(value)#
    -

    CUDA Limits

    -
    -
    -cudaLimitStackSize = 0#
    -

    GPU thread stack size

    -
    - -
    -
    -cudaLimitPrintfFifoSize = 1#
    -

    GPU printf FIFO size

    -
    - -
    -
    -cudaLimitMallocHeapSize = 2#
    -

    GPU malloc heap size

    -
    - -
    -
    -cudaLimitDevRuntimeSyncDepth = 3#
    -

    GPU device runtime synchronize depth

    -
    - -
    -
    -cudaLimitDevRuntimePendingLaunchCount = 4#
    -

    GPU device runtime pending launch count

    -
    - -
    -
    -cudaLimitMaxL2FetchGranularity = 5#
    -

    A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint

    -
    - -
    -
    -cudaLimitPersistingL2CacheSize = 6#
    -

    A size in bytes for L2 persisting lines cache size

    -
    - -
    - -
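    A minimal sketch of reading and adjusting limits with cudaDeviceGetLimit / cudaDeviceSetLimit::

        from cuda import cudart

        err, stack_size = cudart.cudaDeviceGetLimit(
            cudart.cudaLimit.cudaLimitStackSize)
        err, = cudart.cudaDeviceSetLimit(
            cudart.cudaLimit.cudaLimitMallocHeapSize, 16 * 1024 * 1024)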
    -
    -class cuda.cudart.cudaMemoryAdvise(value)#
    -

    CUDA Memory Advise values

    -
    -
    -cudaMemAdviseSetReadMostly = 1#
    -

    Data will mostly be read and only occasionally be written to

    -
    - -
    -
    -cudaMemAdviseUnsetReadMostly = 2#
    -

    Undo the effect of cudaMemAdviseSetReadMostly

    -
    - -
    -
    -cudaMemAdviseSetPreferredLocation = 3#
    -

    Set the preferred location for the data as the specified device

    -
    - -
    -
    -cudaMemAdviseUnsetPreferredLocation = 4#
    -

    Clear the preferred location for the data

    -
    - -
    -
    -cudaMemAdviseSetAccessedBy = 5#
    -

    Data will be accessed by the specified device, so prevent page faults as much as possible

    -
    - -
    -
    -cudaMemAdviseUnsetAccessedBy = 6#
    -

    Let the Unified Memory subsystem decide on the page faulting policy for the specified device

    -
    - -
    - -
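    A hedged sketch applying advice to a managed allocation with cudaMemAdvise (device ordinal 0 is an assumption)::

        from cuda import cudart

        nbytes = 1 << 20
        err, ptr = cudart.cudaMallocManaged(nbytes, cudart.cudaMemAttachGlobal)
        err, = cudart.cudaMemAdvise(
            ptr, nbytes, cudart.cudaMemoryAdvise.cudaMemAdviseSetReadMostly, 0)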
    -
    -class cuda.cudart.cudaMemRangeAttribute(value)#
    -

    CUDA range attributes

    -
    -
    -cudaMemRangeAttributeReadMostly = 1#
    -

    Whether the range will mostly be read and only occasionally be written to

    -
    - -
    -
    -cudaMemRangeAttributePreferredLocation = 2#
    -

    The preferred location of the range

    -
    - -
    -
    -cudaMemRangeAttributeAccessedBy = 3#
    -

    Memory range has cudaMemAdviseSetAccessedBy set for specified device

    -
    - -
    -
    -cudaMemRangeAttributeLastPrefetchLocation = 4#
    -

    The last location to which the range was prefetched

    -
    - -
    -
    -cudaMemRangeAttributePreferredLocationType = 5#
    -

    The preferred location type of the range

    -
    - -
    -
    -cudaMemRangeAttributePreferredLocationId = 6#
    -

    The preferred location id of the range

    -
    - -
    -
    -cudaMemRangeAttributeLastPrefetchLocationType = 7#
    -

    The last location type to which the range was prefetched

    -
    - -
    -
    -cudaMemRangeAttributeLastPrefetchLocationId = 8#
    -

    The last location id to which the range was prefetched

    -
    - -
    - -
    -
    -class cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions(value)#
    -

    CUDA GPUDirect RDMA flush writes APIs supported on the device

    -
    -
    -cudaFlushGPUDirectRDMAWritesOptionHost = 1#
    -

    cudaDeviceFlushGPUDirectRDMAWrites() and its CUDA Driver API counterpart are supported on the device.

    -
    - -
    -
    -cudaFlushGPUDirectRDMAWritesOptionMemOps = 2#
    -

    The CU_STREAM_WAIT_VALUE_FLUSH flag and the CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the CUDA device.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGPUDirectRDMAWritesOrdering(value)#
    -

    CUDA GPUDirect RDMA flush writes ordering features of the device

    -
    -
    -cudaGPUDirectRDMAWritesOrderingNone = 0#
    -

    The device does not natively support ordering of GPUDirect RDMA writes. cudaFlushGPUDirectRDMAWrites() can be leveraged if supported.

    -
    - -
    -
    -cudaGPUDirectRDMAWritesOrderingOwner = 100#
    -

    Natively, the device can consistently consume GPUDirect RDMA writes, although other CUDA devices may not.

    -
    - -
    -
    -cudaGPUDirectRDMAWritesOrderingAllDevices = 200#
    -

    Any CUDA device in the system can consistently consume GPUDirect RDMA writes to this device.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaFlushGPUDirectRDMAWritesScope(value)#
    -

    CUDA GPUDirect RDMA flush writes scopes

    -
    -
    -cudaFlushGPUDirectRDMAWritesToOwner = 100#
    -

    Blocks until remote writes are visible to the CUDA device context owning the data.

    -
    - -
    -
    -cudaFlushGPUDirectRDMAWritesToAllDevices = 200#
    -

    Blocks until remote writes are visible to all CUDA device contexts.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaFlushGPUDirectRDMAWritesTarget(value)#
    -

    CUDA GPUDirect RDMA flush writes targets

    -
    -
    -cudaFlushGPUDirectRDMAWritesTargetCurrentDevice = 0#
    -

    Sets the target for cudaDeviceFlushGPUDirectRDMAWrites() to the currently active CUDA device context.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaDeviceAttr(value)#
    -

    CUDA device attributes

    -
    -
    -cudaDevAttrMaxThreadsPerBlock = 1#
    -

    Maximum number of threads per block

    -
    - -
    -
    -cudaDevAttrMaxBlockDimX = 2#
    -

    Maximum block dimension X

    -
    - -
    -
    -cudaDevAttrMaxBlockDimY = 3#
    -

    Maximum block dimension Y

    -
    - -
    -
    -cudaDevAttrMaxBlockDimZ = 4#
    -

    Maximum block dimension Z

    -
    - -
    -
    -cudaDevAttrMaxGridDimX = 5#
    -

    Maximum grid dimension X

    -
    - -
    -
    -cudaDevAttrMaxGridDimY = 6#
    -

    Maximum grid dimension Y

    -
    - -
    -
    -cudaDevAttrMaxGridDimZ = 7#
    -

    Maximum grid dimension Z

    -
    - -
    -
    -cudaDevAttrMaxSharedMemoryPerBlock = 8#
    -

    Maximum shared memory available per block in bytes

    -
    - -
    -
    -cudaDevAttrTotalConstantMemory = 9#
    -

    Memory available on device for constant variables in a CUDA C kernel in bytes

    -
    - -
    -
    -cudaDevAttrWarpSize = 10#
    -

    Warp size in threads

    -
    - -
    -
    -cudaDevAttrMaxPitch = 11#
    -

    Maximum pitch in bytes allowed by memory copies

    -
    - -
    -
    -cudaDevAttrMaxRegistersPerBlock = 12#
    -

    Maximum number of 32-bit registers available per block

    -
    - -
    -
    -cudaDevAttrClockRate = 13#
    -

    Peak clock frequency in kilohertz

    -
    - -
    -
    -cudaDevAttrTextureAlignment = 14#
    -

    Alignment requirement for textures

    -
    - -
    -
    -cudaDevAttrGpuOverlap = 15#
    -

    Device can possibly copy memory and execute a kernel concurrently

    -
    - -
    -
    -cudaDevAttrMultiProcessorCount = 16#
    -

    Number of multiprocessors on device

    -
    - -
    -
    -cudaDevAttrKernelExecTimeout = 17#
    -

    Specifies whether there is a run time limit on kernels

    -
    - -
    -
    -cudaDevAttrIntegrated = 18#
    -

    Device is integrated with host memory

    -
    - -
    -
    -cudaDevAttrCanMapHostMemory = 19#
    -

    Device can map host memory into CUDA address space

    -
    - -
    -
    -cudaDevAttrComputeMode = 20#
    -

    Compute mode (See cudaComputeMode for details)

    -
    - -
    -
    -cudaDevAttrMaxTexture1DWidth = 21#
    -

    Maximum 1D texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture2DWidth = 22#
    -

    Maximum 2D texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture2DHeight = 23#
    -

    Maximum 2D texture height

    -
    - -
    -
    -cudaDevAttrMaxTexture3DWidth = 24#
    -

    Maximum 3D texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture3DHeight = 25#
    -

    Maximum 3D texture height

    -
    - -
    -
    -cudaDevAttrMaxTexture3DDepth = 26#
    -

    Maximum 3D texture depth

    -
    - -
    -
    -cudaDevAttrMaxTexture2DLayeredWidth = 27#
    -

    Maximum 2D layered texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture2DLayeredHeight = 28#
    -

    Maximum 2D layered texture height

    -
    - -
    -
    -cudaDevAttrMaxTexture2DLayeredLayers = 29#
    -

    Maximum layers in a 2D layered texture

    -
    - -
    -
    -cudaDevAttrSurfaceAlignment = 30#
    -

    Alignment requirement for surfaces

    -
    - -
    -
    -cudaDevAttrConcurrentKernels = 31#
    -

    Device can possibly execute multiple kernels concurrently

    -
    - -
    -
    -cudaDevAttrEccEnabled = 32#
    -

    Device has ECC support enabled

    -
    - -
    -
    -cudaDevAttrPciBusId = 33#
    -

    PCI bus ID of the device

    -
    - -
    -
    -cudaDevAttrPciDeviceId = 34#
    -

    PCI device ID of the device

    -
    - -
    -
    -cudaDevAttrTccDriver = 35#
    -

    Device is using TCC driver model

    -
    - -
    -
    -cudaDevAttrMemoryClockRate = 36#
    -

    Peak memory clock frequency in kilohertz

    -
    - -
    -
    -cudaDevAttrGlobalMemoryBusWidth = 37#
    -

    Global memory bus width in bits

    -
    - -
    -
    -cudaDevAttrL2CacheSize = 38#
    -

    Size of L2 cache in bytes

    -
    - -
    -
    -cudaDevAttrMaxThreadsPerMultiProcessor = 39#
    -

    Maximum resident threads per multiprocessor

    -
    - -
    -
    -cudaDevAttrAsyncEngineCount = 40#
    -

    Number of asynchronous engines

    -
    - -
    -
    -cudaDevAttrUnifiedAddressing = 41#
    -

    Device shares a unified address space with the host

    -
    - -
    -
    -cudaDevAttrMaxTexture1DLayeredWidth = 42#
    -

    Maximum 1D layered texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture1DLayeredLayers = 43#
    -

    Maximum layers in a 1D layered texture

    -
    - -
    -
    -cudaDevAttrMaxTexture2DGatherWidth = 45#
    -

    Maximum 2D texture width if cudaArrayTextureGather is set

    -
    - -
    -
    -cudaDevAttrMaxTexture2DGatherHeight = 46#
    -

    Maximum 2D texture height if cudaArrayTextureGather is set

    -
    - -
    -
    -cudaDevAttrMaxTexture3DWidthAlt = 47#
    -

    Alternate maximum 3D texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture3DHeightAlt = 48#
    -

    Alternate maximum 3D texture height

    -
    - -
    -
    -cudaDevAttrMaxTexture3DDepthAlt = 49#
    -

    Alternate maximum 3D texture depth

    -
    - -
    -
    -cudaDevAttrPciDomainId = 50#
    -

    PCI domain ID of the device

    -
    - -
    -
    -cudaDevAttrTexturePitchAlignment = 51#
    -

    Pitch alignment requirement for textures

    -
    - -
    -
    -cudaDevAttrMaxTextureCubemapWidth = 52#
    -

    Maximum cubemap texture width/height

    -
    - -
    -
    -cudaDevAttrMaxTextureCubemapLayeredWidth = 53#
    -

    Maximum cubemap layered texture width/height

    -
    - -
    -
    -cudaDevAttrMaxTextureCubemapLayeredLayers = 54#
    -

    Maximum layers in a cubemap layered texture

    -
    - -
    -
    -cudaDevAttrMaxSurface1DWidth = 55#
    -

    Maximum 1D surface width

    -
    - -
    -
    -cudaDevAttrMaxSurface2DWidth = 56#
    -

    Maximum 2D surface width

    -
    - -
    -
    -cudaDevAttrMaxSurface2DHeight = 57#
    -

    Maximum 2D surface height

    -
    - -
    -
    -cudaDevAttrMaxSurface3DWidth = 58#
    -

    Maximum 3D surface width

    -
    - -
    -
    -cudaDevAttrMaxSurface3DHeight = 59#
    -

    Maximum 3D surface height

    -
    - -
    -
    -cudaDevAttrMaxSurface3DDepth = 60#
    -

    Maximum 3D surface depth

    -
    - -
    -
    -cudaDevAttrMaxSurface1DLayeredWidth = 61#
    -

    Maximum 1D layered surface width

    -
    - -
    -
    -cudaDevAttrMaxSurface1DLayeredLayers = 62#
    -

    Maximum layers in a 1D layered surface

    -
    - -
    -
    -cudaDevAttrMaxSurface2DLayeredWidth = 63#
    -

    Maximum 2D layered surface width

    -
    - -
    -
    -cudaDevAttrMaxSurface2DLayeredHeight = 64#
    -

    Maximum 2D layered surface height

    -
    - -
    -
    -cudaDevAttrMaxSurface2DLayeredLayers = 65#
    -

    Maximum layers in a 2D layered surface

    -
    - -
    -
    -cudaDevAttrMaxSurfaceCubemapWidth = 66#
    -

    Maximum cubemap surface width

    -
    - -
    -
    -cudaDevAttrMaxSurfaceCubemapLayeredWidth = 67#
    -

    Maximum cubemap layered surface width

    -
    - -
    -
    -cudaDevAttrMaxSurfaceCubemapLayeredLayers = 68#
    -

    Maximum layers in a cubemap layered surface

    -
    - -
    -
    -cudaDevAttrMaxTexture1DLinearWidth = 69#
    -

    Maximum 1D linear texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture2DLinearWidth = 70#
    -

    Maximum 2D linear texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture2DLinearHeight = 71#
    -

    Maximum 2D linear texture height

    -
    - -
    -
    -cudaDevAttrMaxTexture2DLinearPitch = 72#
    -

    Maximum 2D linear texture pitch in bytes

    -
    - -
    -
    -cudaDevAttrMaxTexture2DMipmappedWidth = 73#
    -

    Maximum mipmapped 2D texture width

    -
    - -
    -
    -cudaDevAttrMaxTexture2DMipmappedHeight = 74#
    -

    Maximum mipmapped 2D texture height

    -
    - -
    -
    -cudaDevAttrComputeCapabilityMajor = 75#
    -

    Major compute capability version number

    -
    - -
    -
    -cudaDevAttrComputeCapabilityMinor = 76#
    -

    Minor compute capability version number

    -
    - -
    -
    -cudaDevAttrMaxTexture1DMipmappedWidth = 77#
    -

    Maximum mipmapped 1D texture width

    -
    - -
    -
    -cudaDevAttrStreamPrioritiesSupported = 78#
    -

    Device supports stream priorities

    -
    - -
    -
    -cudaDevAttrGlobalL1CacheSupported = 79#
    -

    Device supports caching globals in L1

    -
    - -
    -
    -cudaDevAttrLocalL1CacheSupported = 80#
    -

    Device supports caching locals in L1

    -
    - -
    -
    -cudaDevAttrMaxSharedMemoryPerMultiprocessor = 81#
    -

    Maximum shared memory available per multiprocessor in bytes

    -
    - -
    -
    -cudaDevAttrMaxRegistersPerMultiprocessor = 82#
    -

    Maximum number of 32-bit registers available per multiprocessor

    -
    - -
    -
    -cudaDevAttrManagedMemory = 83#
    -

    Device can allocate managed memory on this system

    -
    - -
    -
    -cudaDevAttrIsMultiGpuBoard = 84#
    -

    Device is on a multi-GPU board

    -
    - -
    -
    -cudaDevAttrMultiGpuBoardGroupID = 85#
    -

    Unique identifier for a group of devices on the same multi-GPU board

    -
    - -
    -
    -cudaDevAttrHostNativeAtomicSupported = 86#
    -

    Link between the device and the host supports native atomic operations

    -
    - -
    -
    -cudaDevAttrSingleToDoublePrecisionPerfRatio = 87#
    -

    Ratio of single precision performance (in floating-point operations per second) to double precision performance

    -
    - -
    -
    -cudaDevAttrPageableMemoryAccess = 88#
    -

    Device supports coherently accessing pageable memory without calling cudaHostRegister on it

    -
    - -
    -
    -cudaDevAttrConcurrentManagedAccess = 89#
    -

    Device can coherently access managed memory concurrently with the CPU

    -
    - -
    -
    -cudaDevAttrComputePreemptionSupported = 90#
    -

    Device supports Compute Preemption

    -
    - -
    -
    -cudaDevAttrCanUseHostPointerForRegisteredMem = 91#
    -

    Device can access host registered memory at the same virtual address as the CPU

    -
    - -
    -
    -cudaDevAttrReserved92 = 92#
    -
    - -
    -
    -cudaDevAttrReserved93 = 93#
    -
    - -
    -
    -cudaDevAttrReserved94 = 94#
    -
    - -
    -
    -cudaDevAttrCooperativeLaunch = 95#
    -

    Device supports launching cooperative kernels via cudaLaunchCooperativeKernel

    -
    - -
    -
    -cudaDevAttrCooperativeMultiDeviceLaunch = 96#
    -

    Deprecated; cudaLaunchCooperativeKernelMultiDevice is deprecated.

    -
    - -
    -
    -cudaDevAttrMaxSharedMemoryPerBlockOptin = 97#
    -

    The maximum optin shared memory per block. This value may vary by chip. See cudaFuncSetAttribute

    -
    - -
    -
    -cudaDevAttrCanFlushRemoteWrites = 98#
    -

    Device supports flushing of outstanding remote writes.

    -
    - -
    -
    -cudaDevAttrHostRegisterSupported = 99#
    -

    Device supports host memory registration via cudaHostRegister.

    -
    - -
    -
    -cudaDevAttrPageableMemoryAccessUsesHostPageTables = 100#
    -

    Device accesses pageable memory via the host’s page tables.

    -
    - -
    -
    -cudaDevAttrDirectManagedMemAccessFromHost = 101#
    -

    Host can directly access managed memory on the device without migration.

    -
    - -
    -
    -cudaDevAttrMaxBlocksPerMultiprocessor = 106#
    -

    Maximum number of blocks per multiprocessor

    -
    - -
    -
    -cudaDevAttrMaxPersistingL2CacheSize = 108#
    -

    Maximum L2 persisting lines capacity setting in bytes.

    -
    - -
    -
    -cudaDevAttrMaxAccessPolicyWindowSize = 109#
    -

    Maximum value of num_bytes.

    -
    - -
    -
    -cudaDevAttrReservedSharedMemoryPerBlock = 111#
    -

    Shared memory reserved by CUDA driver per block in bytes

    -
    - -
    -
    -cudaDevAttrSparseCudaArraySupported = 112#
    -

    Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays

    -
    - -
    -
    -cudaDevAttrHostRegisterReadOnlySupported = 113#
    -

    Device supports using the cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU

    -
    - -
    -
    -cudaDevAttrTimelineSemaphoreInteropSupported = 114#
    -

    External timeline semaphore interop is supported on the device

    -
    - -
    -
    -cudaDevAttrMaxTimelineSemaphoreInteropSupported = 114#
    -

    Deprecated; external timeline semaphore interop is supported on the device

    -
    - -
    -
    -cudaDevAttrMemoryPoolsSupported = 115#
    -

    Device supports using the cudaMallocAsync and cudaMemPool family of APIs

    -
    - -
    -
    -cudaDevAttrGPUDirectRDMASupported = 116#
    -

    Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)

    -
    - -
    -
    -cudaDevAttrGPUDirectRDMAFlushWritesOptions = 117#
    -

    The returned attribute shall be interpreted as a bitmask, where the individual bits are listed in the cudaFlushGPUDirectRDMAWritesOptions enum

    -
    - -
    -
    -cudaDevAttrGPUDirectRDMAWritesOrdering = 118#
    -

    GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See cudaGPUDirectRDMAWritesOrdering for the numerical values returned here.

    -
    - -
    -
    -cudaDevAttrMemoryPoolSupportedHandleTypes = 119#
    -

    Handle types supported with mempool based IPC

    -
    - -
    -
    -cudaDevAttrClusterLaunch = 120#
    -

    Indicates device supports cluster launch

    -
    - -
    -
    -cudaDevAttrDeferredMappingCudaArraySupported = 121#
    -

    Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays

    -
    - -
    -
    -cudaDevAttrReserved122 = 122#
    -
    - -
    -
    -cudaDevAttrReserved123 = 123#
    -
    - -
    -
    -cudaDevAttrReserved124 = 124#
    -
    - -
    -
    -cudaDevAttrIpcEventSupport = 125#
    -

    Device supports IPC Events.

    -
    - -
    -
    -cudaDevAttrMemSyncDomainCount = 126#
    -

    Number of memory synchronization domains the device supports.

    -
    - -
    -
    -cudaDevAttrReserved127 = 127#
    -
    - -
    -
    -cudaDevAttrReserved128 = 128#
    -
    - -
    -
    -cudaDevAttrReserved129 = 129#
    -
    - -
    -
    -cudaDevAttrNumaConfig = 130#
    -

    NUMA configuration of a device: value is of type cudaDeviceNumaConfig enum

    -
    - -
    -
    -cudaDevAttrNumaId = 131#
    -

    NUMA node ID of the GPU memory

    -
    - -
    -
    -cudaDevAttrReserved132 = 132#
    -
    - -
    -
    -cudaDevAttrMpsEnabled = 133#
    -

    Contexts created on this device will be shared via MPS

    -
    - -
    -
    -cudaDevAttrHostNumaId = 134#
    -

    NUMA ID of the host node closest to the device. Returns -1 when the system does not support NUMA.

    -
    - -
    -
    -cudaDevAttrD3D12CigSupported = 135#
    -

    Device supports CIG with D3D12.

    -
    - -
    -
    -cudaDevAttrMax = 136#
    -
    - -
    - -
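    For example, a short cuda-python sketch querying one of these attributes for device 0::

        from cuda import cudart

        err, max_threads = cudart.cudaDeviceGetAttribute(
            cudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock, 0)
        print(f"max threads per block: {max_threads}")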
    -
    -class cuda.cudart.cudaMemPoolAttr(value)#
    -

    CUDA memory pool attributes

    -
    -
    -cudaMemPoolReuseFollowEventDependencies = 1#
    -

    (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream, as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

    -
    - -
    -
    -cudaMemPoolReuseAllowOpportunistic = 2#
    -

    (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

    -
    - -
    -
    -cudaMemPoolReuseAllowInternalDependencies = 3#
    -

    (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync (default enabled).

    -
    - -
    -
    -cudaMemPoolAttrReleaseThreshold = 4#
    -

    (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

    -
    - -
    -
    -cudaMemPoolAttrReservedMemCurrent = 5#
    -

    (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool.

    -
    - -
    -
    -cudaMemPoolAttrReservedMemHigh = 6#
    -

    (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero.

    -
    - -
    -
    -cudaMemPoolAttrUsedMemCurrent = 7#
    -

    (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.

    -
    - -
    -
    -cudaMemPoolAttrUsedMemHigh = 8#
    -

    (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero.

    -
    - -
    - -
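    A hedged sketch raising the release threshold on device 0's default pool; using the cuuint64_t wrapper from the driver module for the 64-bit attribute value is an assumption here::

        from cuda import cuda, cudart

        err, pool = cudart.cudaDeviceGetDefaultMemPool(0)
        err, = cudart.cudaMemPoolSetAttribute(
            pool,
            cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold,
            cuda.cuuint64_t(2**64 - 1))  # effectively: never release to the OS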
    -
    -class cuda.cudart.cudaMemLocationType(value)#
    -

    Specifies the type of location

    -
    -
    -cudaMemLocationTypeInvalid = 0#
    -
    - -
    -
    -cudaMemLocationTypeDevice = 1#
    -

    Location is a device location, thus id is a device ordinal

    -
    - -
    -
    -cudaMemLocationTypeHost = 2#
    -

    Location is host, id is ignored

    -
    - -
    -
    -cudaMemLocationTypeHostNuma = 3#
    -

    Location is a host NUMA node, thus id is a host NUMA node id

    -
    - -
    -
    -cudaMemLocationTypeHostNumaCurrent = 4#
    -

    Location is the host NUMA node closest to the current thread’s CPU, id is ignored

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemAccessFlags(value)#
    -

    Specifies the memory protection flags for mapping.

    -
    -
    -cudaMemAccessFlagsProtNone = 0#
    -

    Default, make the address range not accessible

    -
    - -
    -
    -cudaMemAccessFlagsProtRead = 1#
    -

    Make the address range read accessible

    -
    - -
    -
    -cudaMemAccessFlagsProtReadWrite = 3#
    -

    Make the address range read-write accessible

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemAllocationType(value)#
    -

    Defines the allocation types available

    -
    -
    -cudaMemAllocationTypeInvalid = 0#
    -
    - -
    -
    -cudaMemAllocationTypePinned = 1#
    -

    This allocation type is ‘pinned’, i.e. cannot migrate from its current location while the application is actively using it

    -
    - -
    -
    -cudaMemAllocationTypeMax = 2147483647#
    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemAllocationHandleType(value)#
    -

    Flags for specifying particular handle types

    -
    -
    -cudaMemHandleTypeNone = 0#
    -

    Does not allow any export mechanism.

    -
    - -
    -
    -cudaMemHandleTypePosixFileDescriptor = 1#
    -

    Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int)

    -
    - -
    -
    -cudaMemHandleTypeWin32 = 2#
    -

    Allows a Win32 NT handle to be used for exporting. (HANDLE)

    -
    - -
    -
    -cudaMemHandleTypeWin32Kmt = 4#
    -

    Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE)

    -
    - -
    -
    -cudaMemHandleTypeFabric = 8#
    -

    Allows a fabric handle to be used for exporting. (cudaMemFabricHandle_t)

    -
    - -
    - -
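    These location, allocation-type and handle-type enums typically come together in cudaMemPoolProps; a minimal sketch creating an explicit pool on device 0::

        from cuda import cudart

        props = cudart.cudaMemPoolProps()
        props.allocType = cudart.cudaMemAllocationType.cudaMemAllocationTypePinned
        props.handleTypes = cudart.cudaMemAllocationHandleType.cudaMemHandleTypeNone
        props.location.type = cudart.cudaMemLocationType.cudaMemLocationTypeDevice
        props.location.id = 0
        err, pool = cudart.cudaMemPoolCreate(props)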
    -
    -class cuda.cudart.cudaGraphMemAttributeType(value)#
    -

    Graph memory attributes

    -
    -
    -cudaGraphMemAttrUsedMemCurrent = 0#
    -

    (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs.

    -
    - -
    -
    -cudaGraphMemAttrUsedMemHigh = 1#
    -

    (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero.

    -
    - -
    -
    -cudaGraphMemAttrReservedMemCurrent = 2#
    -

    (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator.

    -
    - -
    -
    -cudaGraphMemAttrReservedMemHigh = 3#
    -

    (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaDeviceP2PAttr(value)#
    -

    CUDA device P2P attributes

    -
    -
    -cudaDevP2PAttrPerformanceRank = 1#
    -

    A relative value indicating the performance of the link between two devices

    -
    - -
    -
    -cudaDevP2PAttrAccessSupported = 2#
    -

    Peer access is enabled

    -
    - -
    -
    -cudaDevP2PAttrNativeAtomicSupported = 3#
    -

    Native atomic operation over the link supported

    -
    - -
    -
    -cudaDevP2PAttrCudaArrayAccessSupported = 4#
    -

    Accessing CUDA arrays over the link supported

    -
    - -
    - -
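    A hedged sketch checking peer access support between devices 0 and 1 (the device ordinals are assumptions)::

        from cuda import cudart

        err, supported = cudart.cudaDeviceGetP2PAttribute(
            cudart.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported, 0, 1)
        print("peer access supported" if supported else "peer access not supported")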
    -
    -class cuda.cudart.cudaExternalMemoryHandleType(value)#
    -

    External memory handle types

    -
    -
    -cudaExternalMemoryHandleTypeOpaqueFd = 1#
    -

    Handle is an opaque file descriptor

    -
    - -
    -
    -cudaExternalMemoryHandleTypeOpaqueWin32 = 2#
    -

    Handle is an opaque shared NT handle

    -
    - -
    -
    -cudaExternalMemoryHandleTypeOpaqueWin32Kmt = 3#
    -

    Handle is an opaque, globally shared handle

    -
    - -
    -
    -cudaExternalMemoryHandleTypeD3D12Heap = 4#
    -

    Handle is a D3D12 heap object

    -
    - -
    -
    -cudaExternalMemoryHandleTypeD3D12Resource = 5#
    -

    Handle is a D3D12 committed resource

    -
    - -
    -
    -cudaExternalMemoryHandleTypeD3D11Resource = 6#
    -

    Handle is a shared NT handle to a D3D11 resource

    -
    - -
    -
    -cudaExternalMemoryHandleTypeD3D11ResourceKmt = 7#
    -

    Handle is a globally shared handle to a D3D11 resource

    -
    - -
    -
    -cudaExternalMemoryHandleTypeNvSciBuf = 8#
    -

    Handle is an NvSciBuf object

    -
    - -
    - -
    -
    -class cuda.cudart.cudaExternalSemaphoreHandleType(value)#
    -

    External semaphore handle types

    -
    -
    -cudaExternalSemaphoreHandleTypeOpaqueFd = 1#
    -

    Handle is an opaque file descriptor

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeOpaqueWin32 = 2#
    -

    Handle is an opaque shared NT handle

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = 3#
    -

    Handle is an opaque, globally shared handle

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeD3D12Fence = 4#
    -

    Handle is a shared NT handle referencing a D3D12 fence object

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeD3D11Fence = 5#
    -

    Handle is a shared NT handle referencing a D3D11 fence object

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeNvSciSync = 6#
    -

    Opaque handle to NvSciSync Object

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeKeyedMutex = 7#
    -

    Handle is a shared NT handle referencing a D3D11 keyed mutex object

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeKeyedMutexKmt = 8#
    -

    Handle is a shared KMT handle referencing a D3D11 keyed mutex object

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = 9#
    -

    Handle is an opaque handle file descriptor referencing a timeline semaphore

    -
    - -
    -
    -cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = 10#
    -

    Handle is an opaque shared NT handle referencing a timeline semaphore

    -
    - -
    - -
    -
    -class cuda.cudart.cudaCGScope(value)#
    -

    CUDA cooperative group scope

    -
    -
    -cudaCGScopeInvalid = 0#
    -

    Invalid cooperative group scope

    -
    - -
    -
    -cudaCGScopeGrid = 1#
    -

    Scope represented by a grid_group

    -
    - -
    -
    -cudaCGScopeMultiGrid = 2#
    -

    Scope represented by a multi_grid_group

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphConditionalHandleFlags(value)#
    -
    -
    -cudaGraphCondAssignDefault = 1#
    -

    Apply default handle value when graph is launched.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphConditionalNodeType(value)#
    -

    CUDA conditional node types

    -
    -
    -cudaGraphCondTypeIf = 0#
    -

    Conditional ‘if’ Node. Body executed once if condition value is non-zero.

    -
    - -
    -
    -cudaGraphCondTypeWhile = 1#
    -

    Conditional ‘while’ Node. Body executed repeatedly while condition value is non-zero.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphNodeType(value)#
    -

    CUDA Graph node types

    -
    -
    -cudaGraphNodeTypeKernel = 0#
    -

    GPU kernel node

    -
    - -
    -
    -cudaGraphNodeTypeMemcpy = 1#
    -

    Memcpy node

    -
    - -
    -
    -cudaGraphNodeTypeMemset = 2#
    -

    Memset node

    -
    - -
    -
    -cudaGraphNodeTypeHost = 3#
    -

    Host (executable) node

    -
    - -
    -
    -cudaGraphNodeTypeGraph = 4#
    -

    Node which executes an embedded graph

    -
    - -
    -
    -cudaGraphNodeTypeEmpty = 5#
    -

    Empty (no-op) node

    -
    - -
    -
    -cudaGraphNodeTypeWaitEvent = 6#
    -

    External event wait node

    -
    - -
    -
    -cudaGraphNodeTypeEventRecord = 7#
    -

    External event record node

    -
    - -
    -
    -cudaGraphNodeTypeExtSemaphoreSignal = 8#
    -

    External semaphore signal node

    -
    - -
    -
    -cudaGraphNodeTypeExtSemaphoreWait = 9#
    -

    External semaphore wait node

    -
    - -
    -
    -cudaGraphNodeTypeMemAlloc = 10#
    -

    Memory allocation node

    -
    - -
    -
    -cudaGraphNodeTypeMemFree = 11#
    -

    Memory free node

    -
    - -
    -
    -cudaGraphNodeTypeConditional = 13#
    -

    Conditional node. May be used to implement a conditional execution path or loop inside of a graph. The graph(s) contained within the body of the conditional node can be selectively executed or iterated upon based on the value of a conditional variable.

    Handles must be created in advance of creating the node using cudaGraphConditionalHandleCreate.

    The following restrictions apply to graphs which contain conditional nodes:

    The graph cannot be used in a child node.

    Only one instantiation of the graph may exist at any point in time.

    The graph cannot be cloned.

    To set the control value, supply a default value when creating the handle and/or call cudaGraphSetConditional from device code.

    -
    -
    - -
    -
    -cudaGraphNodeTypeCount = 14#
    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphDependencyType(value)#
    -

    Type annotations that can be applied to graph edges as part of cudaGraphEdgeData.

    -
    -
    -cudaGraphDependencyTypeDefault = 0#
    -

    This is an ordinary dependency.

    -
    - -
    -
    -cudaGraphDependencyTypeProgrammatic = 1#
    -

    This dependency type allows the downstream node to use cudaGridDependencySynchronize(). It may only be used between kernel nodes, and must be used with either the cudaGraphKernelNodePortProgrammatic or cudaGraphKernelNodePortLaunchCompletion outgoing port.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphExecUpdateResult(value)#
    -

    CUDA Graph Update error types

    -
    -
    -cudaGraphExecUpdateSuccess = 0#
    -

    The update succeeded

    -
    - -
    -
    -cudaGraphExecUpdateError = 1#
    -

    The update failed for an unexpected reason which is described in the return value of the function

    -
    - -
    -
    -cudaGraphExecUpdateErrorTopologyChanged = 2#
    -

    The update failed because the topology changed

    -
    - -
    -
    -cudaGraphExecUpdateErrorNodeTypeChanged = 3#
    -

    The update failed because a node type changed

    -
    - -
    -
    -cudaGraphExecUpdateErrorFunctionChanged = 4#
    -

    The update failed because the function of a kernel node changed (CUDA driver < 11.2)

    -
    - -
    -
    -cudaGraphExecUpdateErrorParametersChanged = 5#
    -

    The update failed because the parameters changed in a way that is not supported

    -
    - -
    -
    -cudaGraphExecUpdateErrorNotSupported = 6#
    -

    The update failed because something about the node is not supported

    -
    - -
    -
    -cudaGraphExecUpdateErrorUnsupportedFunctionChange = 7#
    -

    The update failed because the function of a kernel node changed in an unsupported way

    -
    - -
    -
    -cudaGraphExecUpdateErrorAttributesChanged = 8#
    -

    The update failed because the node attributes changed in a way that is not supported

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphInstantiateResult(value)#
    -

    Graph instantiation results

    -
    -
    -cudaGraphInstantiateSuccess = 0#
    -

    Instantiation succeeded

    -
    - -
    -
    -cudaGraphInstantiateError = 1#
    -

    Instantiation failed for an unexpected reason which is described in the return value of the function

    -
    - -
    -
    -cudaGraphInstantiateInvalidStructure = 2#
    -

    Instantiation failed due to invalid structure, such as cycles

    -
    - -
    -
    -cudaGraphInstantiateNodeOperationNotSupported = 3#
    -

    Instantiation for device launch failed because the graph contained an unsupported operation

    -
    - -
    -
    -cudaGraphInstantiateMultipleDevicesNotSupported = 4#
    -

    Instantiation for device launch failed due to the nodes belonging to different contexts

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphKernelNodeField(value)#
    -

    Specifies the field to update when performing multiple node updates from the device

    -
    -
    -cudaGraphKernelNodeFieldInvalid = 0#
    -

    Invalid field

    -
    - -
    -
    -cudaGraphKernelNodeFieldGridDim = 1#
    -

    Grid dimension update

    -
    - -
    -
    -cudaGraphKernelNodeFieldParam = 2#
    -

    Kernel parameter update

    -
    - -
    -
    -cudaGraphKernelNodeFieldEnabled = 3#
    -

    Node enable/disable

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGetDriverEntryPointFlags(value)#
    -

    Flags to specify search options to be used with cudaGetDriverEntryPoint. For more details see cuGetProcAddress

    -
    -
    -cudaEnableDefault = 0#
    -

    Default search mode for driver symbols.

    -
    - -
    -
    -cudaEnableLegacyStream = 1#
    -

    Search for legacy versions of driver symbols.

    -
    - -
    -
    -cudaEnablePerThreadDefaultStream = 2#
    -

    Search for per-thread versions of driver symbols.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaDriverEntryPointQueryResult(value)#
    -

    Enum for status from obtaining driver entry points, used with cudaApiGetDriverEntryPoint

    -
    -
    -cudaDriverEntryPointSuccess = 0#
    -

    Search for symbol found a match

    -
    - -
    -
    -cudaDriverEntryPointSymbolNotFound = 1#
    -

    Search for symbol was not found

    -
    - -
    -
    -cudaDriverEntryPointVersionNotSufficent = 2#
    -

    Search for symbol was found, but the version wasn't sufficient

    -
    - -
    - -
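    A heavily hedged sketch of retrieving a driver symbol; the exact return tuple of cudaGetDriverEntryPoint has varied across cuda-python releases, so treat the shape below as an assumption::

        from cuda import cudart

        err, func_ptr, status = cudart.cudaGetDriverEntryPoint(
            b"cuDeviceGetCount",
            cudart.cudaGetDriverEntryPointFlags.cudaEnableDefault)
        if status == cudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess:
            print("found driver symbol at", func_ptr)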
    -
    -class cuda.cudart.cudaGraphDebugDotFlags(value)#
    -

    CUDA Graph debug write options

    -
    -
    -cudaGraphDebugDotFlagsVerbose = 1#
    -

    Output all debug data as if every debug flag is enabled

    -
    - -
    -
    -cudaGraphDebugDotFlagsKernelNodeParams = 4#
    -

    Adds cudaKernelNodeParams to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsMemcpyNodeParams = 8#
    -

    Adds cudaMemcpy3DParms to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsMemsetNodeParams = 16#
    -

    Adds cudaMemsetParams to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsHostNodeParams = 32#
    -

    Adds cudaHostNodeParams to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsEventNodeParams = 64#
    -

    Adds cudaEvent_t handle from record and wait nodes to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsExtSemasSignalNodeParams = 128#
    -

    Adds cudaExternalSemaphoreSignalNodeParams values to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsExtSemasWaitNodeParams = 256#
    -

    Adds cudaExternalSemaphoreWaitNodeParams to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsKernelNodeAttributes = 512#
    -

    Adds cudaKernelNodeAttrID values to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsHandles = 1024#
    -

    Adds node handles and every kernel function handle to output

    -
    - -
    -
    -cudaGraphDebugDotFlagsConditionalNodeParams = 32768#
    -

    Adds cudaConditionalNodeParams to output

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphInstantiateFlags(value)#
    -

    Flags for instantiating a graph

    -
    -
    -cudaGraphInstantiateFlagAutoFreeOnLaunch = 1#
    -

    Automatically free memory allocated in a graph before relaunching.

    -
    - -
    -
    -cudaGraphInstantiateFlagUpload = 2#
    -

    Automatically upload the graph after instantiation. Only supported by cudaGraphInstantiateWithParams. The upload will be performed using the stream provided in instantiateParams.

    -
    -
    - -
    -
    -cudaGraphInstantiateFlagDeviceLaunch = 4#
    -

    Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with cudaGraphInstantiateFlagAutoFreeOnLaunch.

    -
    -
    - -
    -
    -cudaGraphInstantiateFlagUseNodePriority = 8#
    -

    Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into.

    -
    - -
    - -
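    A minimal sketch passing one of these flags to cudaGraphInstantiateWithFlags::

        from cuda import cudart

        err, graph = cudart.cudaGraphCreate(0)
        # ... add nodes to `graph` here ...
        err, graph_exec = cudart.cudaGraphInstantiateWithFlags(
            graph,
            cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority)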
    -
    -class cuda.cudart.cudaLaunchMemSyncDomain(value)#
    -

    Memory Synchronization Domain. A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating latency increase from memory barriers ordering unrelated traffic. By default, kernels are launched in domain 0. Kernels launched with cudaLaunchMemSyncDomainRemote will have a different domain ID. Users may also alter the domain ID with cudaLaunchMemSyncDomainMap for a specific stream / graph node / kernel launch. See cudaLaunchAttributeMemSyncDomain, cudaStreamSetAttribute, cudaLaunchKernelEx, cudaGraphKernelNodeSetAttribute. Memory operations done in kernels launched in different domains are considered system-scope distanced. In other words, a GPU scoped memory synchronization is not sufficient for memory order to be observed by kernels in another memory synchronization domain even if they are on the same GPU.

    -
    -
    -cudaLaunchMemSyncDomainDefault = 0#
    -

    Launch kernels in the default domain

    -
    - -
    -
    -cudaLaunchMemSyncDomainRemote = 1#
    -

    Launch kernels in the remote domain

    -
    - -
    - -
    -
    -class cuda.cudart.cudaLaunchAttributeID(value)#
    -

    Launch attributes enum; used as id field of cudaLaunchAttribute

    -
    -
    -cudaLaunchAttributeIgnore = 0#
    -

    Ignored entry, for convenient composition

    -
    - -
    -
    -cudaLaunchAttributeAccessPolicyWindow = 1#
    -

    Valid for streams, graph nodes, launches. See accessPolicyWindow.

    -
    - -
    -
    -cudaLaunchAttributeCooperative = 2#
    -

    Valid for graph nodes, launches. See cooperative.

    -
    - -
    -
    -cudaLaunchAttributeSynchronizationPolicy = 3#
    -

    Valid for streams. See syncPolicy.

    -
    - -
    -
    -cudaLaunchAttributeClusterDimension = 4#
    -

    Valid for graph nodes, launches. See clusterDim.

    -
    - -
    -
    -cudaLaunchAttributeClusterSchedulingPolicyPreference = 5#
    -

    Valid for graph nodes, launches. See clusterSchedulingPolicyPreference.

    -
    - -
    -
    -cudaLaunchAttributeProgrammaticStreamSerialization = 6#
    -

    Valid for launches. Setting programmaticStreamSerializationAllowed to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid’s execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions).

    -
    - -
    -
    -cudaLaunchAttributeProgrammaticEvent = 7#
    -

    Valid for launches. Set programmaticEvent to record the event. Event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event programmatically in a future CUDA release. A trigger can also be inserted at the beginning of each block’s execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling cudaEventSynchronize()) are not guaranteed to observe the release precisely when it is released. For example, cudaEventSynchronize() may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks.

    -
    -

    The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the cudaEventDisableTiming flag set).

    -
    -
    - -
    -
    -cudaLaunchAttributePriority = 8#
    -

    Valid for streams, graph nodes, launches. See priority.

    -
    - -
    -
    -cudaLaunchAttributeMemSyncDomainMap = 9#
    -

    Valid for streams, graph nodes, launches. See memSyncDomainMap.

    -
    - -
    -
    -cudaLaunchAttributeMemSyncDomain = 10#
    -

    Valid for streams, graph nodes, launches. See memSyncDomain.

    -
    - -
    -
    -cudaLaunchAttributeLaunchCompletionEvent = 12#
    -

    Valid for launches. Set launchCompletionEvent to record the event.

    -
    -

    Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock.

    -

    A launch completion event is nominally similar to a programmatic event with triggerAtBlockStart set except that it is not visible to cudaGridDependencySynchronize() and can be used with compute capability less than 9.0.

    -

    The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the cudaEventDisableTiming flag set).

    -
    -
    - -
    -
    -cudaLaunchAttributeDeviceUpdatableKernelNode = 13#
    -

    Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error.

    -
    -

    cudaLaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via cudaLaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node’s kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see cudaGraphKernelNodeUpdatesApply.

    -

    Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via cudaGraphDestroyNode. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via cudaGraphKernelNodeCopyAttributes. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to cudaGraphExecUpdate.

    -

    If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with cuGraphUpload before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again.

    -
    -
    - -
    -
    -cudaLaunchAttributePreferredSharedMemoryCarveout = 14#
    -

    Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting sharedMemCarveout to a percentage between 0-100 sets the shared memory carveout preference in percent of the total shared memory for that kernel launch. This attribute takes precedence over cudaFuncAttributePreferredSharedMemoryCarveout. This is only a hint, and the driver can choose a different configuration if required for the launch.

    -
    - -
    - -
    -
    -class cuda.cudart.cudaDeviceNumaConfig(value)#
    -

    CUDA device NUMA config

    -
    -
    -cudaDeviceNumaConfigNone = 0#
    -

    The GPU is not a NUMA node

    -
    - -
    -
    -cudaDeviceNumaConfigNumaNode = 1#
    -

    The GPU is a NUMA node, cudaDevAttrNumaId contains its NUMA ID
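    A short usage sketch, assuming the cudaDevAttrNumaConfig / cudaDevAttrNumaId attribute names from the C runtime API:

        from cuda import cudart

        # Query whether device 0 is itself a NUMA node; if so, read its ID.
        err, cfg = cudart.cudaDeviceGetAttribute(
            cudart.cudaDeviceAttr.cudaDevAttrNumaConfig, 0)
        if cfg == cudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode:
            err, numa_id = cudart.cudaDeviceGetAttribute(
                cudart.cudaDeviceAttr.cudaDevAttrNumaId, 0)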

    -
    - -
    - -
    -
    -class cuda.cudart.cudaAsyncNotificationType(value)#
    -

    Types of async notification that can occur

    -
    -
    -cudaAsyncNotificationTypeOverBudget = 1#
    -
    - -
    - -
    -
    -class cuda.cudart.cudaSurfaceBoundaryMode(value)#
    -

    CUDA Surface boundary modes

    -
    -
    -cudaBoundaryModeZero = 0#
    -

    Zero boundary mode

    -
    - -
    -
    -cudaBoundaryModeClamp = 1#
    -

    Clamp boundary mode

    -
    - -
    -
    -cudaBoundaryModeTrap = 2#
    -

    Trap boundary mode

    -
    - -
    - -
    -
    -class cuda.cudart.cudaSurfaceFormatMode(value)#
    -

    CUDA Surface format modes

    -
    -
    -cudaFormatModeForced = 0#
    -

    Forced format mode

    -
    - -
    -
    -cudaFormatModeAuto = 1#
    -

    Auto format mode

    -
    - -
    - -
    -
    -class cuda.cudart.cudaTextureAddressMode(value)#
    -

    CUDA texture address modes

    -
    -
    -cudaAddressModeWrap = 0#
    -

    Wrapping address mode

    -
    - -
    -
    -cudaAddressModeClamp = 1#
    -

    Clamp to edge address mode

    -
    - -
    -
    -cudaAddressModeMirror = 2#
    -

    Mirror address mode

    -
    - -
    -
    -cudaAddressModeBorder = 3#
    -

    Border address mode

    -
    - -
    - -
    -
    -class cuda.cudart.cudaTextureFilterMode(value)#
    -

    CUDA texture filter modes

    -
    -
    -cudaFilterModePoint = 0#
    -

    Point filter mode

    -
    - -
    -
    -cudaFilterModeLinear = 1#
    -

    Linear filter mode

    -
    - -
    - -
    -
    -class cuda.cudart.cudaTextureReadMode(value)#
    -

    CUDA texture read modes

    -
    -
    -cudaReadModeElementType = 0#
    -

    Read texture as specified element type

    -
    - -
    -
    -cudaReadModeNormalizedFloat = 1#
    -

    Read texture as normalized float
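    The three texture enums above are typically combined in a cudaTextureDesc; a minimal sketch following the pattern of the bundled examples:

        from cuda import cudart

        tex_desc = cudart.cudaTextureDesc()
        tex_desc.addressMode[0] = cudart.cudaTextureAddressMode.cudaAddressModeClamp
        tex_desc.addressMode[1] = cudart.cudaTextureAddressMode.cudaAddressModeClamp
        tex_desc.filterMode = cudart.cudaTextureFilterMode.cudaFilterModeLinear
        tex_desc.readMode = cudart.cudaTextureReadMode.cudaReadModeNormalizedFloat
        tex_desc.normalizedCoords = 1  # sample with coordinates in [0, 1)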

    -
    - -
    - -
    -
    -class cuda.cudart.cudaEglPlaneDesc#
    -

    CUDA EGL Plane Descriptor - structure defining each plane of a CUDA -EGLFrame

    -
    -
    -width#
    -

    Width of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -height#
    -

    Height of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -depth#
    -

    Depth of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -pitch#
    -

    Pitch of plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -numChannels#
    -

    Number of channels for the plane

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -channelDesc#
    -

    Channel Format Descriptor

    -
    -
    Type:
    -

    cudaChannelFormatDesc

    -
    -
    -
    - -
    -
    -reserved#
    -

    Reserved for future use

    -
    -
    Type:
    -

    List[unsigned int]

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaEglFrame#
    -

    CUDA EGLFrame Descriptor - structure defining one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar or not. Each plane of an EGLFrame is represented by cudaEglPlaneDesc, which is defined as: typedef struct cudaEglPlaneDesc_st { unsigned int width; unsigned int height; unsigned int depth; unsigned int pitch; unsigned int numChannels; struct cudaChannelFormatDesc channelDesc; unsigned int reserved[4]; } cudaEglPlaneDesc;

    -
    -
    -frame#
    -
    -
    Type:
    -

    anon_union10

    -
    -
    -
    - -
    -
    -planeDesc#
    -

    CUDA EGL Plane Descriptor cudaEglPlaneDesc

    -
    -
    Type:
    -

    List[cudaEglPlaneDesc]

    -
    -
    -
    - -
    -
    -planeCount#
    -

    Number of planes

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -frameType#
    -

    Array or Pitch

    -
    -
    Type:
    -

    cudaEglFrameType

    -
    -
    -
    - -
    -
    -eglColorFormat#
    -

    CUDA EGL Color Format

    -
    -
    Type:
    -

    cudaEglColorFormat

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaEglStreamConnection#
    -

    CUDA EGLStream Connection

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaArray_t(*args, **kwargs)#
    -

    CUDA array

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaArray_const_t(*args, **kwargs)#
    -

    CUDA array (as source copy argument)

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMipmappedArray_t(*args, **kwargs)#
    -

    CUDA mipmapped array

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMipmappedArray_const_t(*args, **kwargs)#
    -

    CUDA mipmapped array (as source argument)

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaHostFn_t(*args, **kwargs)#
    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.CUuuid#
    -
    -
    -bytes#
    -

    CUDA definition of UUID

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaUUID_t#
    -
    -
    -bytes#
    -

    CUDA definition of UUID

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaIpcEventHandle_t#
    -

    CUDA IPC event handle

    -
    -
    -reserved#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaIpcMemHandle_t#
    -

    CUDA IPC memory handle

    -
    -
    -reserved#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemFabricHandle_t#
    -
    -
    -reserved#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaStream_t#
    -

    CUDA stream

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaEvent_t#
    -

    CUDA event types

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphicsResource_t(*args, **kwargs)#
    -

    CUDA graphics resource types

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaExternalMemory_t(*args, **kwargs)#
    -

    CUDA external memory

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaExternalSemaphore_t(*args, **kwargs)#
    -

    CUDA external semaphore

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraph_t#
    -

    CUDA graph

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphNode_t#
    -

    CUDA graph node.

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaUserObject_t#
    -

    CUDA user object for graphs

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphConditionalHandle#
    -

    CUDA handle for conditional graph nodes

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaFunction_t#
    -

    CUDA function

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaKernel_t(*args, **kwargs)#
    -

    CUDA kernel

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaMemPool_t#
    -

    CUDA memory pool

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphEdgeData#
    -

    Optional annotation for edges in a CUDA graph. Note, all edges -implicitly have annotations and default to a zero-initialized value -if not specified. A zero-initialized struct indicates a standard -full serialization of two nodes with memory visibility.

    -
    -
    -from_port#
    -

    This indicates when the dependency is triggered from the upstream -node on the edge. The meaning is specific to the node type. A value -of 0 in all cases means full completion of the upstream node, with -memory visibility to the downstream node or portion thereof -(indicated by to_port). Only kernel nodes define non-zero -ports. A kernel node can use the following output port types: -cudaGraphKernelNodePortDefault, -cudaGraphKernelNodePortProgrammatic, or -cudaGraphKernelNodePortLaunchCompletion.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -to_port#
    -

    This indicates what portion of the downstream node is dependent on -the upstream node or portion thereof (indicated by from_port). -The meaning is specific to the node type. A value of 0 in all cases -means the entirety of the downstream node is dependent on the -upstream work. Currently no node types define non-zero ports. -Accordingly, this field must be set to zero.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -type#
    -

    This should be populated with a value from -::cudaGraphDependencyType. (It is typed as char due to compiler-specific layout of bitfields.) See ::cudaGraphDependencyType.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -reserved#
    -

    These bytes are unused and must be zeroed. This ensures -compatibility if additional fields are added in the future.

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance
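    An illustrative sketch of filling this struct for a programmatic dependency between two kernel nodes (the port and dependency-type constants are documented later in this module; plain-integer assignment to the char-typed fields is assumed):

        from cuda import cudart

        edge = cudart.cudaGraphEdgeData()
        edge.from_port = cudart.cudaGraphKernelNodePortProgrammatic
        edge.to_port = 0  # no node type currently defines non-zero to_port
        edge.type = cudart.cudaGraphDependencyType.cudaGraphDependencyTypeProgrammatic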

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphExec_t#
    -

    CUDA executable (launchable) graph

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphInstantiateParams#
    -

    Graph instantiation parameters

    -
    -
    -flags#
    -

    Instantiation flags

    -
    -
    Type:
    -

    unsigned long long

    -
    -
    -
    - -
    -
    -uploadStream#
    -

    Upload stream

    -
    -
    Type:
    -

    cudaStream_t

    -
    -
    -
    - -
    -
    -errNode_out#
    -

    The node which caused instantiation to fail, if any

    -
    -
    Type:
    -

    cudaGraphNode_t

    -
    -
    -
    - -
    -
    -result_out#
    -

    Whether instantiation was successful. If it failed, the reason why

    -
    -
    Type:
    -

    cudaGraphInstantiateResult

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance
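    A hedged sketch of using these parameters with cudaGraphInstantiateWithParams; `graph` and `stream` are assumed to have been created earlier:

        from cuda import cudart

        params = cudart.cudaGraphInstantiateParams()
        params.flags = cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload
        params.uploadStream = stream
        err, graph_exec = cudart.cudaGraphInstantiateWithParams(graph, params)
        if params.result_out != cudart.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess:
            # errNode_out identifies the node that caused the failure, if any
            print("instantiation failed at", params.errNode_out)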

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphExecUpdateResultInfo#
    -

    Result information returned by cudaGraphExecUpdate

    -
    -
    -result#
    -

    Gives more specific detail when a CUDA graph update fails.

    -
    -
    Type:
    -

    cudaGraphExecUpdateResult

    -
    -
    -
    - -
    -
    -errorNode#
    -

    The “to node” of the error edge when the topologies do not match. -The error node when the error is associated with a specific node. -NULL when the error is generic.

    -
    -
    Type:
    -

    cudaGraphNode_t

    -
    -
    -
    - -
    -
    -errorFromNode#
    -

    The “from node” of the error edge when the topologies do not match. -Otherwise NULL.

    -
    -
    Type:
    -

    cudaGraphNode_t

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaGraphDeviceNode_t(*args, **kwargs)#
    -

    CUDA device node handle for device-side node update

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaLaunchMemSyncDomainMap#
    -

    Memory Synchronization Domain map. See cudaLaunchMemSyncDomain. By -default, kernels are launched in domain 0. Kernels launched with -cudaLaunchMemSyncDomainRemote will have a different domain ID. Users -may also alter the domain ID with ::cudaLaunchMemSyncDomainMap for -a specific stream / graph node / kernel launch. See -cudaLaunchAttributeMemSyncDomainMap. Domain ID range is available -through cudaDevAttrMemSyncDomainCount.

    -
    -
    -default_#
    -

    The default domain ID to use for designated kernels

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -remote#
    -

    The remote domain ID to use for designated kernels

    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaLaunchAttributeValue(void_ptr _ptr=0)#
    -

    Launch attributes union; used as value field of -::cudaLaunchAttribute

    -
    -
    -pad#
    -
    -
    Type:
    -

    bytes

    -
    -
    -
    - -
    -
    -accessPolicyWindow#
    -

    Value of launch attribute cudaLaunchAttributeAccessPolicyWindow.

    -
    -
    Type:
    -

    cudaAccessPolicyWindow

    -
    -
    -
    - -
    -
    -cooperative#
    -

    Value of launch attribute cudaLaunchAttributeCooperative. Nonzero -indicates a cooperative kernel (see cudaLaunchCooperativeKernel).

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -syncPolicy#
    -

    Value of launch attribute cudaLaunchAttributeSynchronizationPolicy. -::cudaSynchronizationPolicy for work queued up in this stream.

    -
    -
    Type:
    -

    cudaSynchronizationPolicy

    -
    -
    -
    - -
    -
    -clusterDim#
    -

    Value of launch attribute cudaLaunchAttributeClusterDimension that -represents the desired cluster dimensions for the kernel. Opaque -type with the following fields: - x - The X dimension of the -cluster, in blocks. Must be a divisor of the grid X dimension. - -y - The Y dimension of the cluster, in blocks. Must be a divisor -of the grid Y dimension. - z - The Z dimension of the cluster, -in blocks. Must be a divisor of the grid Z dimension.

    -
    -
    Type:
    -

    anon_struct20

    -
    -
    -
    - -
    -
    -clusterSchedulingPolicyPreference#
    -

    Value of launch attribute -cudaLaunchAttributeClusterSchedulingPolicyPreference. Cluster -scheduling policy preference for the kernel.

    -
    -
    Type:
    -

    cudaClusterSchedulingPolicy

    -
    -
    -
    - -
    -
    -programmaticStreamSerializationAllowed#
    -

    Value of launch attribute -cudaLaunchAttributeProgrammaticStreamSerialization.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -programmaticEvent#
    -

    Value of launch attribute cudaLaunchAttributeProgrammaticEvent with -the following fields: - cudaEvent_t event - Event to fire when -all blocks trigger it. - int flags; - Event record flags, see -cudaEventRecordWithFlags. Does not accept cudaEventRecordExternal. -- int triggerAtBlockStart - If this is set to non-0, each block -launch will automatically trigger the event.

    -
    -
    Type:
    -

    anon_struct21

    -
    -
    -
    - -
    -
    -priority#
    -

    Value of launch attribute cudaLaunchAttributePriority. Execution -priority of the kernel.

    -
    -
    Type:
    -

    int

    -
    -
    -
    - -
    -
    -memSyncDomainMap#
    -

    Value of launch attribute cudaLaunchAttributeMemSyncDomainMap. See -::cudaLaunchMemSyncDomainMap.

    -
    -
    Type:
    -

    cudaLaunchMemSyncDomainMap

    -
    -
    -
    - -
    -
    -memSyncDomain#
    -

    Value of launch attribute cudaLaunchAttributeMemSyncDomain. See -cudaLaunchMemSyncDomain.

    -
    -
    Type:
    -

    cudaLaunchMemSyncDomain

    -
    -
    -
    - -
    -
    -launchCompletionEvent#
    -

    Value of launch attribute cudaLaunchAttributeLaunchCompletionEvent -with the following fields: - cudaEvent_t event - Event to fire -when the last block launches. - int flags - Event record -flags, see cudaEventRecordWithFlags. Does not accept -cudaEventRecordExternal.

    -
    -
    Type:
    -

    anon_struct22

    -
    -
    -
    - -
    -
    -deviceUpdatableKernelNode#
    -

    Value of launch attribute -cudaLaunchAttributeDeviceUpdatableKernelNode with the following -fields: - int deviceUpdatable - Whether or not the resulting -kernel node should be device-updatable. - -cudaGraphDeviceNode_t devNode - Returns a handle to pass to the -various device-side update functions.

    -
    -
    Type:
    -

    anon_struct23

    -
    -
    -
    - -
    -
    -sharedMemCarveout#
    -

    Value of launch attribute -cudaLaunchAttributePreferredSharedMemoryCarveout.

    -
    -
    Type:
    -

    unsigned int

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaLaunchAttribute#
    -

    Launch attribute

    -
    -
    -id#
    -

    Attribute to set

    -
    -
    Type:
    -

    cudaLaunchAttributeID

    -
    -
    -
    - -
    -
    -val#
    -

    Value of the attribute

    -
    -
    Type:
    -

    cudaLaunchAttributeValue

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaAsyncCallbackHandle_t(*args, **kwargs)#
    -

    CUDA async callback handle

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaAsyncNotificationInfo_t#
    -

    Information describing an async notification event

    -
    -
    -type#
    -
    -
    Type:
    -

    cudaAsyncNotificationType

    -
    -
    -
    - -
    -
    -info#
    -
    -
    Type:
    -

    anon_union9

    -
    -
    -
    - -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaAsyncCallback(*args, **kwargs)#
    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaSurfaceObject_t#
    -

    An opaque value that represents a CUDA Surface object

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -class cuda.cudart.cudaTextureObject_t#
    -

    An opaque value that represents a CUDA texture object

    -
    -
    -getPtr()#
    -

    Get memory address of class instance

    -
    - -
    - -
    -
    -cudart.CUDA_EGL_MAX_PLANES = 3#
    -

    Maximum number of planes per frame

    -
    - -
    -
    -cudart.cudaHostAllocDefault = 0#
    -

    Default page-locked allocation flag

    -
    - -
    -
    -cudart.cudaHostAllocPortable = 1#
    -

    Pinned memory accessible by all CUDA contexts

    -
    - -
    -
    -cudart.cudaHostAllocMapped = 2#
    -

    Map allocation into device space

    -
    - -
    -
    -cudart.cudaHostAllocWriteCombined = 4#
    -

    Write-combined memory
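    The allocation flags above combine bitwise; for example, pinned host memory that is both portable across contexts and mapped into the device address space (a minimal sketch):

        from cuda import cudart

        flags = cudart.cudaHostAllocPortable | cudart.cudaHostAllocMapped
        err, host_ptr = cudart.cudaHostAlloc(1 << 20, flags)  # 1 MiB, pinned
        # ... use the buffer ...
        (err,) = cudart.cudaFreeHost(host_ptr)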

    -
    - -
    -
    -cudart.cudaHostRegisterDefault = 0#
    -

    Default host memory registration flag

    -
    - -
    -
    -cudart.cudaHostRegisterPortable = 1#
    -

    Pinned memory accessible by all CUDA contexts

    -
    - -
    -
    -cudart.cudaHostRegisterMapped = 2#
    -

    Map registered memory into device space

    -
    - -
    -
    -cudart.cudaHostRegisterIoMemory = 4#
    -

    Memory-mapped I/O space

    -
    - -
    -
    -cudart.cudaHostRegisterReadOnly = 8#
    -

    Memory-mapped read-only

    -
    - -
    -
    -cudart.cudaPeerAccessDefault = 0#
    -

    Default peer addressing enable flag

    -
    - -
    -
    -cudart.cudaStreamDefault = 0#
    -

    Default stream flag

    -
    - -
    -
    -cudart.cudaStreamNonBlocking = 1#
    -

    Stream does not synchronize with stream 0 (the NULL stream)
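    For example:

        from cuda import cudart

        # A stream that does not synchronize with the NULL stream.
        err, stream = cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)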

    -
    - -
    -
    -cudart.cudaStreamLegacy = 1#
    -

    Legacy stream handle

    -

    Stream handle that can be passed as a cudaStream_t to use an implicit stream with legacy synchronization behavior.

    -

    See details of the stream synchronization behavior.

    -
    - -
    -
    -cudart.cudaStreamPerThread = 2#
    -

    Per-thread stream handle

    -

    Stream handle that can be passed as a cudaStream_t to use an implicit stream with per-thread synchronization behavior.

    -

    See details of the stream synchronization behavior.

    -
    - -
    -
    -cudart.cudaEventDefault = 0#
    -

    Default event flag

    -
    - -
    -
    -cudart.cudaEventBlockingSync = 1#
    -

    Event uses blocking synchronization

    -
    - -
    -
    -cudart.cudaEventDisableTiming = 2#
    -

    Event will not record timing data

    -
    - -
    -
    -cudart.cudaEventInterprocess = 4#
    -

    Event is suitable for interprocess use. cudaEventDisableTiming must be set
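    For example, an event suitable for sharing across processes (note the mandatory cudaEventDisableTiming), as a minimal sketch:

        from cuda import cudart

        flags = cudart.cudaEventDisableTiming | cudart.cudaEventInterprocess
        err, event = cudart.cudaEventCreateWithFlags(flags)
        err, ipc_handle = cudart.cudaIpcGetEventHandle(event)  # share with a peer process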

    -
    - -
    -
    -cudart.cudaEventRecordDefault = 0#
    -

    Default event record flag

    -
    - -
    -
    -cudart.cudaEventRecordExternal = 1#
    -

    Event is captured in the graph as an external event node when performing stream capture

    -
    - -
    -
    -cudart.cudaEventWaitDefault = 0#
    -

    Default event wait flag

    -
    - -
    -
    -cudart.cudaEventWaitExternal = 1#
    -

    Event is captured in the graph as an external event node when performing stream capture

    -
    - -
    -
    -cudart.cudaDeviceScheduleAuto = 0#
    -

    Device flag - Automatic scheduling

    -
    - -
    -
    -cudart.cudaDeviceScheduleSpin = 1#
    -

    Device flag - Spin default scheduling

    -
    - -
    -
    -cudart.cudaDeviceScheduleYield = 2#
    -

    Device flag - Yield default scheduling

    -
    - -
    -
    -cudart.cudaDeviceScheduleBlockingSync = 4#
    -

    Device flag - Use blocking synchronization

    -
    - -
    -
    -cudart.cudaDeviceBlockingSync = 4#
    -

    Device flag - Use blocking synchronization [Deprecated]

    -
    - -
    -
    -cudart.cudaDeviceScheduleMask = 7#
    -

    Device schedule flags mask

    -
    - -
    -
    -cudart.cudaDeviceMapHost = 8#
    -

    Device flag - Support mapped pinned allocations
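    These device flags are likewise combined bitwise, subject to cudaDeviceScheduleMask / cudaDeviceMask; for example:

        from cuda import cudart

        # Blocking synchronization plus support for mapped pinned allocations.
        (err,) = cudart.cudaSetDeviceFlags(
            cudart.cudaDeviceScheduleBlockingSync | cudart.cudaDeviceMapHost)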

    -
    - -
    -
    -cudart.cudaDeviceLmemResizeToMax = 16#
    -

    Device flag - Keep local memory allocation after launch

    -
    - -
    -
    -cudart.cudaDeviceSyncMemops = 128#
    -

    Device flag - Ensure synchronous memory operations on this context will synchronize

    -
    - -
    -
    -cudart.cudaDeviceMask = 255#
    -

    Device flags mask

    -
    - -
    -
    -cudart.cudaArrayDefault = 0#
    -

    Default CUDA array allocation flag

    -
    - -
    -
    -cudart.cudaArrayLayered = 1#
    -

    Must be set in cudaMalloc3DArray to create a layered CUDA array

    -
    - -
    -
    -cudart.cudaArraySurfaceLoadStore = 2#
    -

    Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array

    -
    - -
    -
    -cudart.cudaArrayCubemap = 4#
    -

    Must be set in cudaMalloc3DArray to create a cubemap CUDA array

    -
    - -
    -
    -cudart.cudaArrayTextureGather = 8#
    -

    Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array

    -
    - -
    -
    -cudart.cudaArrayColorAttachment = 32#
    -

    Must be set in cudaExternalMemoryGetMappedMipmappedArray if the mipmapped array is used as a color target in a graphics API

    -
    - -
    -
    -cudart.cudaArraySparse = 64#
    -

    Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA mipmapped array

    -
    - -
    -
    -cudart.cudaArrayDeferredMapping = 128#
    -

    Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a deferred mapping CUDA array or CUDA mipmapped array

    -
    - -
    -
    -cudart.cudaIpcMemLazyEnablePeerAccess = 1#
    -

    Automatically enable peer access between remote devices as needed

    -
    - -
    -
    -cudart.cudaMemAttachGlobal = 1#
    -

    Memory can be accessed by any stream on any device

    -
    - -
    -
    -cudart.cudaMemAttachHost = 2#
    -

    Memory cannot be accessed by any stream on any device

    -
    - -
    -
    -cudart.cudaMemAttachSingle = 4#
    -

    Memory can only be accessed by a single stream on the associated device
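    A short sketch of the attach flags with managed memory, first attached to the host and later granted to one stream (`stream` assumed created earlier):

        from cuda import cudart

        err, managed = cudart.cudaMallocManaged(1 << 20, cudart.cudaMemAttachHost)
        # Later, grant a single stream access to the whole allocation:
        (err,) = cudart.cudaStreamAttachMemAsync(stream, managed, 0,
                                                 cudart.cudaMemAttachSingle)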

    -
    - -
    -
    -cudart.cudaOccupancyDefault = 0#
    -

    Default behavior

    -
    - -
    -
    -cudart.cudaOccupancyDisableCachingOverride = 1#
    -

    Assume global caching is enabled and cannot be automatically turned off

    -
    - -
    -
    -cudart.cudaCpuDeviceId = -1#
    -

    Device id that represents the CPU

    -
    - -
    -
    -cudart.cudaInvalidDeviceId = -2#
    -

    Device id that represents an invalid device

    -
    - -
    -
    -cudart.cudaInitDeviceFlagsAreValid = 1#
    -

    Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice call

    -
    - -
    -
    -cudart.cudaCooperativeLaunchMultiDeviceNoPreSync = 1#
    -

    If set, each kernel launched as part of cudaLaunchCooperativeKernelMultiDevice only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution.

    -
    - -
    -
    -cudart.cudaCooperativeLaunchMultiDeviceNoPostSync = 2#
    -

    If set, any subsequent work pushed in a stream that participated in a call to cudaLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution.

    -
    - -
    -
    -cudart.cudaArraySparsePropertiesSingleMipTail = 1#
    -

    Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers

    -
    - -
    -
    -cudart.CUDA_IPC_HANDLE_SIZE = 64#
    -

    CUDA IPC Handle Size

    -
    - -
    -
    -cudart.cudaExternalMemoryDedicated = 1#
    -

    Indicates that the external memory object is a dedicated resource

    -
    - -
    -
    -cudart.cudaExternalSemaphoreSignalSkipNvSciBufMemSync = 1#
    -

    When the flags parameter of cudaExternalSemaphoreSignalParams contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as cudaExternalMemoryHandleTypeNvSciBuf, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects.

    -
    - -
    -
    -cudart.cudaExternalSemaphoreWaitSkipNvSciBufMemSync = 2#
    -

    When the flags parameter of cudaExternalSemaphoreWaitParams contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as cudaExternalMemoryHandleTypeNvSciBuf, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects.

    -
    - -
    -
    -cudart.cudaNvSciSyncAttrSignal = 1#
    -

    When the flags parameter of cudaDeviceGetNvSciSyncAttributes is set to this, it indicates that the application needs a signaler-specific NvSciSyncAttr to be filled by cudaDeviceGetNvSciSyncAttributes.

    -
    - -
    -
    -cudart.cudaNvSciSyncAttrWait = 2#
    -

    When the flags parameter of cudaDeviceGetNvSciSyncAttributes is set to this, it indicates that the application needs a waiter-specific NvSciSyncAttr to be filled by cudaDeviceGetNvSciSyncAttributes.

    -
    - -
    -
    -cudart.cudaGraphKernelNodePortDefault = 0#
    -

    This port activates when the kernel has finished executing.

    -
    - -
    -
    -cudart.cudaGraphKernelNodePortProgrammatic = 1#
    -

    This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type cudaGraphDependencyTypeProgrammatic. See also cudaLaunchAttributeProgrammaticEvent.

    -
    - -
    -
    -cudart.cudaGraphKernelNodePortLaunchCompletion = 2#
    -

    This port activates when all blocks of the kernel have begun execution. See also cudaLaunchAttributeLaunchCompletionEvent.

    -
    - -
    -
    -cudart.cudaStreamAttrID = <enum 'cudaStreamAttrID'>#
    -
    - -
    -
    -cudart.cudaStreamAttributeAccessPolicyWindow = 1#
    -
    - -
    -
    -cudart.cudaStreamAttributeSynchronizationPolicy = 3#
    -
    - -
    -
    -cudart.cudaStreamAttributeMemSyncDomainMap = 9#
    -
    - -
    -
    -cudart.cudaStreamAttributeMemSyncDomain = 10#
    -
    - -
    -
    -cudart.cudaStreamAttributePriority = 8#
    -
    - -
    -
    -cudart.cudaStreamAttrValue = <class 'cuda.cudart.cudaStreamAttrValue'>#
    -
    - -
    -
    -cudart.cudaKernelNodeAttrID = <enum 'cudaKernelNodeAttrID'>#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributeAccessPolicyWindow = 1#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributeCooperative = 2#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributePriority = 8#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributeClusterDimension = 4#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributeClusterSchedulingPolicyPreference = 5#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributeMemSyncDomainMap = 9#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributeMemSyncDomain = 10#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributePreferredSharedMemoryCarveout = 14#
    -
    - -
    -
    -cudart.cudaKernelNodeAttributeDeviceUpdatableKernelNode = 13#
    -
    - -
    -
    -cudart.cudaKernelNodeAttrValue = <class 'cuda.cudart.cudaKernelNodeAttrValue'>#
    -
    - -
    -
    -cudart.cudaSurfaceType1D = 1#
    -
    - -
    -
    -cudart.cudaSurfaceType2D = 2#
    -
    - -
    -
    -cudart.cudaSurfaceType3D = 3#
    -
    - -
    -
    -cudart.cudaSurfaceTypeCubemap = 12#
    -
    - -
    -
    -cudart.cudaSurfaceType1DLayered = 241#
    -
    - -
    -
    -cudart.cudaSurfaceType2DLayered = 242#
    -
    - -
    -
    -cudart.cudaSurfaceTypeCubemapLayered = 252#
    -
    - -
    -
    -cudart.cudaTextureType1D = 1#
    -
    - -
    -
    -cudart.cudaTextureType2D = 2#
    -
    - -
    -
    -cudart.cudaTextureType3D = 3#
    -
    - -
    -
    -cudart.cudaTextureTypeCubemap = 12#
    -
    - -
    -
    -cudart.cudaTextureType1DLayered = 241#
    -
    - -
    -
    -cudart.cudaTextureType2DLayered = 242#
    -
    - -
    -
    -cudart.cudaTextureTypeCubemapLayered = 252#
    -
    - -
    -
    - -
    -
    - -
    - -
    -
    \ No newline at end of file
    diff --git a/docs/module/driver.html b/docs/module/driver.html
    new file mode 100644
    index 00000000..3d7a5996
    --- /dev/null
    +++ b/docs/module/driver.html
    @@ -0,0 +1,39422 @@
    + driver - CUDA Python 12.6.1 documentation

    driver#

    +
    +

    Data types used by CUDA driver#

    +
    +
    +class cuda.bindings.driver.CUuuid_st(void_ptr _ptr=0)#
    +
    +
    +bytes#
    +

    CUDA definition of UUID

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemFabricHandle_st(void_ptr _ptr=0)#
    +

    Fabric handle - An opaque handle representing a memory allocation +that can be exported to processes in same or different nodes. For +IPC between processes on different nodes they must be connected via +the NVSwitch fabric.

    +
    +
    +data#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUipcEventHandle_st(void_ptr _ptr=0)#
    +

    CUDA IPC event handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUipcMemHandle_st(void_ptr _ptr=0)#
    +

    CUDA IPC mem handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstreamBatchMemOpParams_union(void_ptr _ptr=0)#
    +

    Per-operation parameters for cuStreamBatchMemOp

    +
    +
    +operation#
    +
    +
    Type:
    +

    CUstreamBatchMemOpType

    +
    +
    +
    + +
    +
    +waitValue#
    +
    +
    Type:
    +

    CUstreamMemOpWaitValueParams_st

    +
    +
    +
    + +
    +
    +writeValue#
    +
    +
    Type:
    +

    CUstreamMemOpWriteValueParams_st

    +
    +
    +
    + +
    +
    +flushRemoteWrites#
    +
    +
    Type:
    +

    CUstreamMemOpFlushRemoteWritesParams_st

    +
    +
    +
    + +
    +
    +memoryBarrier#
    +
    +
    Type:
    +

    CUstreamMemOpMemoryBarrierParams_st

    +
    +
    +
    + +
    +
    +pad#
    +
    +
    Type:
    +

    List[cuuint64_t]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance
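    An illustrative way to fill one of these unions for a 32-bit wait operation; `addr` is assumed to be a CUdeviceptr to the watched word, and plain-integer assignment through the nested wrapper is assumed:

        from cuda.bindings import driver

        op = driver.CUstreamBatchMemOpParams()
        op.waitValue.operation = driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32
        op.waitValue.address = addr
        op.waitValue.value = 1
        # Wait until the word at `addr` becomes >= 1.
        op.waitValue.flags = driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ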

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st(void_ptr _ptr=0)#
    +
    +
    +ctx#
    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +count#
    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +paramArray#
    +
    +
    Type:
    +

    CUstreamBatchMemOpParams

    +
    +
    +
    + +
    +
    +flags#
    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    +

    Batch memory operation node parameters

    +
    +
    +ctx#
    +

    Context to use for the operations.

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +count#
    +

    Number of operations in paramArray.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +paramArray#
    +

    Array of batch memory operations.

    +
    +
    Type:
    +

    CUstreamBatchMemOpParams

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags to control the node.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUasyncNotificationInfo_st(void_ptr _ptr=0)#
    +

    Information passed to the user via the async notification callback

    +
    +
    +type#
    +
    +
    Type:
    +

    CUasyncNotificationType

    +
    +
    +
    + +
    +
    +info#
    +
    +
    Type:
    +

    anon_union2

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdevprop_st(void_ptr _ptr=0)#
    +

    Legacy device properties

    +
    +
    +maxThreadsPerBlock#
    +

    Maximum number of threads per block

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +maxThreadsDim#
    +

    Maximum size of each dimension of a block

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +maxGridSize#
    +

    Maximum size of each dimension of a grid

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +sharedMemPerBlock#
    +

    Shared memory available per block in bytes

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +totalConstantMemory#
    +

    Constant memory available on device in bytes

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +SIMDWidth#
    +

    Warp size in threads

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +memPitch#
    +

    Maximum pitch in bytes allowed by memory copies

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +regsPerBlock#
    +

    32-bit registers available per block

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +clockRate#
    +

    Clock frequency in kilohertz

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +textureAlign#
    +

    Alignment requirement for textures

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUaccessPolicyWindow_st(void_ptr _ptr=0)#
    +

    Specifies an access policy for a window, a contiguous extent of +memory beginning at base_ptr and ending at base_ptr + num_bytes. +num_bytes is limited by +CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. Partition into +many segments and assign segments such that: sum of “hit segments” +/ window == approx. ratio. sum of “miss segments” / window == +approx 1-ratio. Segments and ratio specifications are fitted to the +capabilities of the architecture. Accesses in a hit segment apply +the hitProp access policy. Accesses in a miss segment apply the +missProp access policy.

    +
    +
    +base_ptr#
    +

    Starting address of the access policy window. CUDA driver may align +it.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +num_bytes#
    +

    Size in bytes of the window policy. CUDA driver may restrict the +maximum size and alignment.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +hitRatio#
    +

    hitRatio specifies percentage of lines assigned hitProp, rest are +assigned missProp.

    +
    +
    Type:
    +

    float

    +
    +
    +
    + +
    +
    +hitProp#
    +

    CUaccessProperty set for hit.

    +
    +
    Type:
    +

    CUaccessProperty

    +
    +
    +
    + +
    +
    +missProp#
    +

    CUaccessProperty set for miss. Must be either NORMAL or STREAMING

    +
    +
    Type:
    +

    CUaccessProperty

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance
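    A hedged sketch of populating this window; `dptr` is assumed to be a CUdeviceptr obtained earlier from cuMemAlloc:

        from cuda.bindings import driver

        window = driver.CUaccessPolicyWindow()
        window.base_ptr = dptr
        window.num_bytes = 64 * 1024          # subject to the device maximum
        window.hitRatio = 0.8                 # ~80% of lines receive hitProp
        window.hitProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING
        window.missProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING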

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    GPU kernel node parameters

    +
    +
    +func#
    +

    Kernel to launch

    +
    +
    Type:
    +

    CUfunction

    +
    +
    +
    + +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +kernelParams#
    +

    Array of pointers to kernel parameters

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +extra#
    +

    Extra options

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    +

    GPU kernel node parameters

    +
    +
    +func#
    +

    Kernel to launch

    +
    +
    Type:
    +

    CUfunction

    +
    +
    +
    + +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +kernelParams#
    +

    Array of pointers to kernel parameters

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +extra#
    +

    Extra options

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +kern#
    +

    Kernel to launch, will only be referenced if func is NULL

    +
    +
    Type:
    +

    CUkernel

    +
    +
    +
    + +
    +
    +ctx#
    +

    Context for the kernel task to run in. The value NULL will indicate +the current context should be used by the API. This field is +ignored if func is set.

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3_st(void_ptr _ptr=0)#
    +

    GPU kernel node parameters

    +
    +
    +func#
    +

    Kernel to launch

    +
    +
    Type:
    +

    CUfunction

    +
    +
    +
    + +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +kernelParams#
    +

    Array of pointers to kernel parameters

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +extra#
    +

    Extra options

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +kern#
    +

    Kernel to launch, will only be referenced if func is NULL

    +
    +
    Type:
    +

    CUkernel

    +
    +
    +
    + +
    +
    +ctx#
    +

    Context for the kernel task to run in. The value NULL will indicate +the current context should be used by the API. This field is +ignored if func is set.

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Memset node parameters

    +
    +
    +dst#
    +

    Destination device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +pitch#
    +

    Pitch of destination device pointer. Unused if height is 1

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +value#
    +

    Value to be set

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +elementSize#
    +

    Size of each element in bytes. Must be 1, 2, or 4.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +width#
    +

    Width of the row in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +height#
    +

    Number of rows

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance
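    For illustration, parameters for a 1-D memset node clearing `n` 4-byte elements at `dptr` (both assumed to exist already):

        from cuda.bindings import driver

        p = driver.CUDA_MEMSET_NODE_PARAMS()
        p.dst = dptr
        p.value = 0
        p.elementSize = 4   # must be 1, 2, or 4
        p.width = n
        p.height = 1        # pitch is unused when height is 1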

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    +

    Memset node parameters

    +
    +
    +dst#
    +

    Destination device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +pitch#
    +

    Pitch of destination device pointer. Unused if height is 1

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +value#
    +

    Value to be set

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +elementSize#
    +

    Size of each element in bytes. Must be 1, 2, or 4.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +width#
    +

    Width of the row in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +height#
    +

    Number of rows

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +ctx#
    +

    Context on which to run the node

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Host node parameters

    +
    +
    +fn#
    +

    The function to call when the node executes

    +
    +
    Type:
    +

    CUhostFn

    +
    +
    +
    + +
    +
    +userData#
    +

    Argument to pass to the function

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    +

    Host node parameters

    +
    +
    +fn#
    +

    The function to call when the node executes

    +
    +
    Type:
    +

    CUhostFn

    +
    +
    +
    + +
    +
    +userData#
    +

    Argument to pass to the function

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_CONDITIONAL_NODE_PARAMS(void_ptr _ptr=0)#
    +

    Conditional node parameters

    +
    +
    +handle#
    +

    Conditional node handle. Handles must be created in advance of +creating the node using cuGraphConditionalHandleCreate.

    +
    +
    Type:
    +

    CUgraphConditionalHandle

    +
    +
    +
    + +
    +
    +type#
    +

    Type of conditional node.

    +
    +
    Type:
    +

    CUgraphConditionalNodeType

    +
    +
    +
    + +
    +
    +size#
    +

    Size of graph output array. Must be 1.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +phGraph_out#
    +

    CUDA-owned array populated with conditional node child graphs +during creation of the node. Valid for the lifetime of the +conditional node. The contents of the graph(s) are subject to the +following constraints: - Allowed node types are kernel nodes, +empty nodes, child graphs, memsets, memcopies, and conditionals. +This applies recursively to child graphs and conditional bodies. +- All kernels, including kernels in nested conditionals or child +graphs at any level, must belong to the same CUDA context. +These graphs may be populated using graph node creation APIs or +cuStreamBeginCaptureToGraph.

    +
    +
    Type:
    +

    CUgraph

    +
    +
    +
    + +
    +
    +ctx#
    +

    Context on which to run the node. Must match context used to create +the handle and all body nodes.

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphEdgeData_st(void_ptr _ptr=0)#
    +

    Optional annotation for edges in a CUDA graph. Note, all edges +implicitly have annotations and default to a zero-initialized value +if not specified. A zero-initialized struct indicates a standard +full serialization of two nodes with memory visibility.

    +
    +
    +from_port#
    +

    This indicates when the dependency is triggered from the upstream +node on the edge. The meaning is specific to the node type. A value +of 0 in all cases means full completion of the upstream node, with +memory visibility to the downstream node or portion thereof +(indicated by to_port). Only kernel nodes define non-zero +ports. A kernel node can use the following output port types: +CU_GRAPH_KERNEL_NODE_PORT_DEFAULT, +CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC, or +CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER.

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +to_port#
    +

    This indicates what portion of the downstream node is dependent on +the upstream node or portion thereof (indicated by from_port). +The meaning is specific to the node type. A value of 0 in all cases +means the entirety of the downstream node is dependent on the +upstream work. Currently no node types define non-zero ports. +Accordingly, this field must be set to zero.

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +type#
    +

    This should be populated with a value from CUgraphDependencyType. +(It is typed as char due to compiler-specific layout of bitfields.) +See CUgraphDependencyType.

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +reserved#
    +

    These bytes are unused and must be zeroed. This ensures +compatibility if additional fields are added in the future.

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS_st(void_ptr _ptr=0)#
    +

    Graph instantiation parameters

    +
    +
    +flags#
    +

    Instantiation flags

    +
    +
    Type:
    +

    cuuint64_t

    +
    +
    +
    + +
    +
    +hUploadStream#
    +

    Upload stream

    +
    +
    Type:
    +

    CUstream

    +
    +
    +
    + +
    +
    +hErrNode_out#
    +

    The node which caused instantiation to fail, if any

    +
    +
    Type:
    +

    CUgraphNode

    +
    +
    +
    + +
    +
    +result_out#
    +

    Whether instantiation was successful. If it failed, the reason why

    +
    +
    Type:
    +

    CUgraphInstantiateResult

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlaunchMemSyncDomainMap_st(void_ptr _ptr=0)#
    +

    Memory Synchronization Domain map. See ::cudaLaunchMemSyncDomain. +By default, kernels are launched in domain 0. Kernels launched with +CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a different domain ID. +Users may also alter the domain ID with CUlaunchMemSyncDomainMap for +a specific stream / graph node / kernel launch. See +CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. Domain ID range is +available through CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.

    +
    +
    +default_#
    +

    The default domain ID to use for designated kernels

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +remote#
    +

    The remote domain ID to use for designated kernels

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlaunchAttributeValue_union(void_ptr _ptr=0)#
    +

    Launch attributes union; used as value field of CUlaunchAttribute

    +
    +
    +pad#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +accessPolicyWindow#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.

    +
    +
    Type:
    +

    CUaccessPolicyWindow

    +
    +
    +
    + +
    +
    +cooperative#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero +indicates a cooperative kernel (see cuLaunchCooperativeKernel).

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +syncPolicy#
    +

    Value of launch attribute +CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. +::CUsynchronizationPolicy for work queued up in this stream

    +
    +
    Type:
    +

    CUsynchronizationPolicy

    +
    +
    +
    + +
    +
    +clusterDim#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION +that represents the desired cluster dimensions for the kernel. +Opaque type with the following fields: - x - The X dimension of +the cluster, in blocks. Must be a divisor of the grid X dimension. +- y - The Y dimension of the cluster, in blocks. Must be a +divisor of the grid Y dimension. - z - The Z dimension of the +cluster, in blocks. Must be a divisor of the grid Z dimension.

    +
    +
    Type:
    +

    anon_struct1

    +
    +
    +
    + +
    +
    +clusterSchedulingPolicyPreference#
    +

    Value of launch attribute +CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster +scheduling policy preference for the kernel.

    +
    +
    Type:
    +

    CUclusterSchedulingPolicy

    +
    +
    +
    + +
    +
    +programmaticStreamSerializationAllowed#
    +

    Value of launch attribute +CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +programmaticEvent#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT +with the following fields: - CUevent event - Event to fire when +all blocks trigger it. - Event record flags, see +cuEventRecordWithFlags. Does not accept ::CU_EVENT_RECORD_EXTERNAL. +- triggerAtBlockStart - If this is set to non-0, each block +launch will automatically trigger the event.

    +
    +
    Type:
    +

    anon_struct2

    +
    +
    +
    + +
    +
    +launchCompletionEvent#
    +

    Value of launch attribute +CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT with the following +fields: - CUevent event - Event to fire when the last block +launches - int flags; - Event record flags, see +cuEventRecordWithFlags. Does not accept CU_EVENT_RECORD_EXTERNAL.

    +
    +
    Type:
    +

    anon_struct3

    +
    +
    +
    + +
    +
    +priority#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution +priority of the kernel.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +memSyncDomainMap#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. +See CUlaunchMemSyncDomainMap.

    +
    +
    Type:
    +

    CUlaunchMemSyncDomainMap

    +
    +
    +
    + +
    +
    +memSyncDomain#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. See CUlaunchMemSyncDomain.

    +
    +
    Type:
    +

    CUlaunchMemSyncDomain

    +
    +
    +
    + +
    +
    +deviceUpdatableKernelNode#
    +

    Value of launch attribute CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE with the following fields: - int deviceUpdatable - Whether or not the resulting kernel node should be device-updatable. - CUgraphDeviceNode devNode - Returns a handle to pass to the various device-side update functions.

    +
    +
    Type:
    +

    anon_struct4

    +
    +
    +
    + +
    +
    +sharedMemCarveout#
    +

    Value of launch attribute +CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlaunchAttribute_st(void_ptr _ptr=0)#
    +

    Launch attribute

    +
    +
    +id#
    +

    Attribute to set

    +
    +
    Type:
    +

    CUlaunchAttributeID

    +
    +
    +
    + +
    +
    +value#
    +

    Value of the attribute

    +
    +
    Type:
    +

    CUlaunchAttributeValue

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlaunchConfig_st(void_ptr _ptr=0)#
    +

    CUDA extensible launch configuration

    +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +hStream#
    +

    Stream identifier

    +
    +
    Type:
    +

    CUstream

    +
    +
    +
    + +
    +
    +attrs#
    +

    List of attributes; nullable if CUlaunchConfig::numAttrs == 0

    +
    +
    Type:
    +

    CUlaunchAttribute

    +
    +
    +
    + +
    +
    +numAttrs#
    +

    Number of attributes populated in CUlaunchConfig::attrs

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
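
    To show how these fields fit together, here is a hedged sketch of an extensible launch; `func` (a CUfunction from cuModuleGetFunction) and `stream` (a created CUstream) are assumed to exist, the kernel is assumed to take no parameters, and error checking is elided.

        from cuda.bindings import driver

        config = driver.CUlaunchConfig()
        config.gridDimX, config.gridDimY, config.gridDimZ = 8, 1, 1
        config.blockDimX, config.blockDimY, config.blockDimZ = 128, 1, 1
        config.sharedMemBytes = 0
        config.hStream = stream  # assumed to exist

        # Mark the launch cooperative via the attribute machinery documented above.
        coop = driver.CUlaunchAttribute()
        coop.id = driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_COOPERATIVE
        coop.value.cooperative = 1
        config.numAttrs = 1
        config.attrs = [coop]

        err, = driver.cuLaunchKernelEx(config, func, 0, 0)  # no kernelParams / extra
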
    +
    +class cuda.bindings.driver.CUexecAffinitySmCount_st(void_ptr _ptr=0)#
    +

    Value for CU_EXEC_AFFINITY_TYPE_SM_COUNT

    +
    +
    +val#
    +

    The number of SMs the context is limited to use.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUexecAffinityParam_st(void_ptr _ptr=0)#
    +

    Execution Affinity Parameters

    +
    +
    +type#
    +
    +
    Type:
    +

    CUexecAffinityType

    +
    +
    +
    + +
    +
    +param#
    +
    +
    Type:
    +

    anon_union3

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
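
    A sketch of the CU_EXEC_AFFINITY_TYPE_SM_COUNT path, assuming a device that supports execution affinity; error checking is elided.

        from cuda.bindings import driver

        driver.cuInit(0)
        err, dev = driver.cuDeviceGet(0)

        param = driver.CUexecAffinityParam()
        param.type = driver.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT
        param.param.smCount.val = 4  # limit the context to 4 SMs

        err, ctx = driver.cuCtxCreate_v3([param], 1, 0, dev)
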
    +
    +class cuda.bindings.driver.CUctxCigParam_st(void_ptr _ptr=0)#
    +

    CIG Context Create Params

    +
    +
    +sharedDataType#
    +
    +
    Type:
    +

    CUcigDataType

    +
    +
    +
    + +
    +
    +sharedData#
    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUctxCreateParams_st(void_ptr _ptr=0)#
    +

    Params for creating CUDA context Exactly one of execAffinityParams +and cigParams must be non-NULL.

    +
    +
    +execAffinityParams#
    +
    +
    Type:
    +

    CUexecAffinityParam

    +
    +
    +
    + +
    +
    +numExecAffinityParams#
    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +cigParams#
    +
    +
    Type:
    +

    CUctxCigParam

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable_st(void_ptr _ptr=0)#
    +
    +
    +functionTable#
    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +functionWindowSize#
    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dataTable#
    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +dataWindowSize#
    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEMCPY2D_st(void_ptr _ptr=0)#
    +

    2D memory copy parameters

    +
    +
    +srcXInBytes#
    +

    Source X in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcY#
    +

    Source Y

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcMemoryType#
    +

    Source memory type (host, device, array)

    +
    +
    Type:
    +

    CUmemorytype

    +
    +
    +
    + +
    +
    +srcHost#
    +

    Source host pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +srcDevice#
    +

    Source device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +srcArray#
    +

    Source array reference

    +
    +
    Type:
    +

    CUarray

    +
    +
    +
    + +
    +
    +srcPitch#
    +

    Source pitch (ignored when src is array)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstXInBytes#
    +

    Destination X in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstY#
    +

    Destination Y

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstMemoryType#
    +

    Destination memory type (host, device, array)

    +
    +
    Type:
    +

    CUmemorytype

    +
    +
    +
    + +
    +
    +dstHost#
    +

    Destination host pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +dstDevice#
    +

    Destination device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +dstArray#
    +

    Destination array reference

    +
    +
    Type:
    +

    CUarray

    +
    +
    +
    + +
    +
    +dstPitch#
    +

    Destination pitch (ignored when dst is array)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +WidthInBytes#
    +

    Width of 2D memory copy in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Height#
    +

    Height of 2D memory copy

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
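
    To make the field interplay concrete, a hedged sketch of a host-to-device 2D copy; numpy is used only to provide a packed host buffer, the raw-address assignment to srcHost assumes the binding accepts an integer pointer, and error checking is elided.

        import numpy as np
        from cuda.bindings import driver

        width, height = 64, 32
        host = np.arange(width * height, dtype=np.float32).reshape(height, width)

        # Pitched device allocation; cuMemAllocPitch returns the row pitch it chose.
        err, dptr, dpitch = driver.cuMemAllocPitch(width * 4, height, 4)

        cpy = driver.CUDA_MEMCPY2D()
        cpy.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_HOST
        cpy.srcHost = host.ctypes.data  # raw host address
        cpy.srcPitch = width * 4        # tightly packed rows
        cpy.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cpy.dstDevice = dptr
        cpy.dstPitch = dpitch
        cpy.WidthInBytes = width * 4
        cpy.Height = height

        err, = driver.cuMemcpy2D(cpy)
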
    +
    +class cuda.bindings.driver.CUDA_MEMCPY3D_st(void_ptr _ptr=0)#
    +

    3D memory copy parameters

    +
    +
    +srcXInBytes#
    +

    Source X in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcY#
    +

    Source Y

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcZ#
    +

    Source Z

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcLOD#
    +

    Source LOD

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcMemoryType#
    +

    Source memory type (host, device, array)

    +
    +
    Type:
    +

    CUmemorytype

    +
    +
    +
    + +
    +
    +srcHost#
    +

    Source host pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +srcDevice#
    +

    Source device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +srcArray#
    +

    Source array reference

    +
    +
    Type:
    +

    CUarray

    +
    +
    +
    + +
    +
    +reserved0#
    +

    Must be NULL

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +srcPitch#
    +

    Source pitch (ignored when src is array)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcHeight#
    +

    Source height (ignored when src is array; may be 0 if Depth==1)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstXInBytes#
    +

    Destination X in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstY#
    +

    Destination Y

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstZ#
    +

    Destination Z

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstLOD#
    +

    Destination LOD

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstMemoryType#
    +

    Destination memory type (host, device, array)

    +
    +
    Type:
    +

    CUmemorytype

    +
    +
    +
    + +
    +
    +dstHost#
    +

    Destination host pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +dstDevice#
    +

    Destination device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +dstArray#
    +

    Destination array reference

    +
    +
    Type:
    +

    CUarray

    +
    +
    +
    + +
    +
    +reserved1#
    +

    Must be NULL

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +dstPitch#
    +

    Destination pitch (ignored when dst is array)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstHeight#
    +

    Destination height (ignored when dst is array; may be 0 if +Depth==1)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +WidthInBytes#
    +

    Width of 3D memory copy in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Height#
    +

    Height of 3D memory copy

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Depth#
    +

    Depth of 3D memory copy

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEMCPY3D_PEER_st(void_ptr _ptr=0)#
    +

    3D memory cross-context copy parameters

    +
    +
    +srcXInBytes#
    +

    Source X in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcY#
    +

    Source Y

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcZ#
    +

    Source Z

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcLOD#
    +

    Source LOD

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcMemoryType#
    +

    Source memory type (host, device, array)

    +
    +
    Type:
    +

    CUmemorytype

    +
    +
    +
    + +
    +
    +srcHost#
    +

    Source host pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +srcDevice#
    +

    Source device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +srcArray#
    +

    Source array reference

    +
    +
    Type:
    +

    CUarray

    +
    +
    +
    + +
    +
    +srcContext#
    +

    Source context (ignored when srcMemoryType is CU_MEMORYTYPE_ARRAY)

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +srcPitch#
    +

    Source pitch (ignored when src is array)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +srcHeight#
    +

    Source height (ignored when src is array; may be 0 if Depth==1)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstXInBytes#
    +

    Destination X in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstY#
    +

    Destination Y

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstZ#
    +

    Destination Z

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstLOD#
    +

    Destination LOD

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstMemoryType#
    +

    Destination memory type (host, device, array)

    +
    +
    Type:
    +

    CUmemorytype

    +
    +
    +
    + +
    +
    +dstHost#
    +

    Destination host pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +dstDevice#
    +

    Destination device pointer

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +dstArray#
    +

    Destination array reference

    +
    +
    Type:
    +

    CUarray

    +
    +
    +
    + +
    +
    +dstContext#
    +

    Destination context (ignored when dstMemoryType is CU_MEMORYTYPE_ARRAY)

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +dstPitch#
    +

    Destination pitch (ignored when dst is array)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dstHeight#
    +

    Destination height (ignored when dst is array; may be 0 if +Depth==1)

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +WidthInBytes#
    +

    Width of 3D memory copy in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Height#
    +

    Height of 3D memory copy

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Depth#
    +

    Depth of 3D memory copy

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Memcpy node parameters

    +
    +
    +flags#
    +

    Must be zero

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +reserved#
    +

    Must be zero

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +copyCtx#
    +

    Context on which to run the node

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +copyParams#
    +

    Parameters for the memory copy

    +
    +
    Type:
    +

    CUDA_MEMCPY3D

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_st(void_ptr _ptr=0)#
    +

    Array descriptor

    +
    +
    +Width#
    +

    Width of array

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Height#
    +

    Height of array

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Format#
    +

    Array format

    +
    +
    Type:
    +

    CUarray_format

    +
    +
    +
    + +
    +
    +NumChannels#
    +

    Channels per array element

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_st(void_ptr _ptr=0)#
    +

    3D array descriptor

    +
    +
    +Width#
    +

    Width of 3D array

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Height#
    +

    Height of 3D array

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Depth#
    +

    Depth of 3D array

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +Format#
    +

    Array format

    +
    +
    Type:
    +

    CUarray_format

    +
    +
    +
    + +
    +
    +NumChannels#
    +

    Channels per array element

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +Flags#
    +

    Flags

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
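
    A short sketch of feeding this descriptor to cuArray3DCreate (error checking elided):

        from cuda.bindings import driver

        desc = driver.CUDA_ARRAY3D_DESCRIPTOR()
        desc.Width, desc.Height, desc.Depth = 64, 64, 8
        desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
        desc.NumChannels = 1
        desc.Flags = 0

        err, arr = driver.cuArray3DCreate(desc)
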
    +
    +class cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_st(void_ptr _ptr=0)#
    +

    CUDA array sparse properties

    +
    +
    +tileExtent#
    +
    +
    Type:
    +

    anon_struct5

    +
    +
    +
    + +
    +
    +miptailFirstLevel#
    +

    First mip level at which the mip tail begins.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +miptailSize#
    +

    Total size of the mip tail.

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags will either be zero or +CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_st(void_ptr _ptr=0)#
    +

    CUDA array memory requirements

    +
    +
    +size#
    +

    Total required memory size

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +alignment#
    +

    Alignment requirement

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_RESOURCE_DESC_st(void_ptr _ptr=0)#
    +

    CUDA Resource descriptor

    +
    +
    +resType#
    +

    Resource type

    +
    +
    Type:
    +

    CUresourcetype

    +
    +
    +
    + +
    +
    +res#
    +
    +
    Type:
    +

    anon_union4

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags (must be zero)

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_TEXTURE_DESC_st(void_ptr _ptr=0)#
    +

    Texture descriptor

    +
    +
    +addressMode#
    +

    Address modes

    +
    +
    Type:
    +

    List[CUaddress_mode]

    +
    +
    +
    + +
    +
    +filterMode#
    +

    Filter mode

    +
    +
    Type:
    +

    CUfilter_mode

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +maxAnisotropy#
    +

    Maximum anisotropy ratio

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +mipmapFilterMode#
    +

    Mipmap filter mode

    +
    +
    Type:
    +

    CUfilter_mode

    +
    +
    +
    + +
    +
    +mipmapLevelBias#
    +

    Mipmap level bias

    +
    +
    Type:
    +

    float

    +
    +
    +
    + +
    +
    +minMipmapLevelClamp#
    +

    Mipmap minimum level clamp

    +
    +
    Type:
    +

    float

    +
    +
    +
    + +
    +
    +maxMipmapLevelClamp#
    +

    Mipmap maximum level clamp

    +
    +
    Type:
    +

    float

    +
    +
    +
    + +
    +
    +borderColor#
    +

    Border Color

    +
    +
    Type:
    +

    List[float]

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
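
    As an illustration of how the resource and texture descriptors combine, a sketch that wraps an existing CUDA array (`arr`, e.g. from the cuArray3DCreate sketch above) in a texture object; the list assignment to addressMode is an assumption about the binding's setter, and error checking is elided.

        from cuda.bindings import driver

        res = driver.CUDA_RESOURCE_DESC()
        res.resType = driver.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
        res.res.array.hArray = arr  # assumed to be an existing CUarray

        tex = driver.CUDA_TEXTURE_DESC()
        tex.filterMode = driver.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR
        tex.addressMode = [driver.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP] * 3

        err, texObj = driver.cuTexObjectCreate(res, tex, None)  # no resource view
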
    +
    +class cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_st(void_ptr _ptr=0)#
    +

    Resource view descriptor

    +
    +
    +format#
    +

    Resource view format

    +
    +
    Type:
    +

    CUresourceViewFormat

    +
    +
    +
    + +
    +
    +width#
    +

    Width of the resource view

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +height#
    +

    Height of the resource view

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +depth#
    +

    Depth of the resource view

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +firstMipmapLevel#
    +

    First defined mipmap level

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +lastMipmapLevel#
    +

    Last defined mipmap level

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +firstLayer#
    +

    First layer index

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +lastLayer#
    +

    Last layer index

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtensorMap_st(void_ptr _ptr=0)#
    +

    Tensor map descriptor. Requires compiler support for aligning to 64 +bytes.

    +
    +
    +opaque#
    +
    +
    Type:
    +

    List[cuuint64_t]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st(void_ptr _ptr=0)#
    +

    GPU Direct v3 tokens

    +
    +
    +p2pToken#
    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +vaSpaceToken#
    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_LAUNCH_PARAMS_st(void_ptr _ptr=0)#
    +

    Kernel launch parameters

    +
    +
    +function#
    +

    Kernel to launch

    +
    +
    Type:
    +

    CUfunction

    +
    +
    +
    + +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +hStream#
    +

    Stream identifier

    +
    +
    Type:
    +

    CUstream

    +
    +
    +
    + +
    +
    +kernelParams#
    +

    Array of pointers to kernel parameters

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st(void_ptr _ptr=0)#
    +

    External memory handle descriptor

    +
    +
    +type#
    +

    Type of the handle

    +
    +
    Type:
    +

    CUexternalMemoryHandleType

    +
    +
    +
    + +
    +
    +handle#
    +
    +
    Type:
    +

    anon_union5

    +
    +
    +
    + +
    +
    +size#
    +

    Size of the memory allocation

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags must either be zero or CUDA_EXTERNAL_MEMORY_DEDICATED

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st(void_ptr _ptr=0)#
    +

    External memory buffer descriptor

    +
    +
    +offset#
    +

    Offset into the memory object where the buffer’s base is

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +size#
    +

    Size of the buffer

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags reserved for future use. Must be zero.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
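
    The handle and buffer descriptors are typically used together; a hypothetical sketch assuming `fd` and `nbytes` describe an allocation exported by an external API such as Vulkan (error checking elided).

        from cuda.bindings import driver

        hdesc = driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC()
        hdesc.type = driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD
        hdesc.handle.fd = fd  # assumed to come from the exporting API
        hdesc.size = nbytes

        err, extMem = driver.cuImportExternalMemory(hdesc)

        bdesc = driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC()
        bdesc.offset = 0
        bdesc.size = nbytes
        bdesc.flags = 0

        err, dptr = driver.cuExternalMemoryGetMappedBuffer(extMem, bdesc)
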
    +
    +class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st(void_ptr _ptr=0)#
    +

    External memory mipmap descriptor

    +
    +
    +offset#
    +

    Offset into the memory object where the base level of the mipmap +chain is.

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +arrayDesc#
    +

    Format, dimension and type of base level of the mipmap chain

    +
    +
    Type:
    +

    CUDA_ARRAY3D_DESCRIPTOR

    +
    +
    +
    + +
    +
    +numLevels#
    +

    Total number of levels in the mipmap chain

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st(void_ptr _ptr=0)#
    +

    External semaphore handle descriptor

    +
    +
    +type#
    +

    Type of the handle

    +
    +
    Type:
    +

    CUexternalSemaphoreHandleType

    +
    +
    +
    + +
    +
    +handle#
    +
    +
    Type:
    +

    anon_union6

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags reserved for the future. Must be zero.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st(void_ptr _ptr=0)#
    +

    External semaphore signal parameters

    +
    +
    +params#
    +
    +
    Type:
    +

    anon_struct15

    +
    +
    +
    + +
    +
    +flags#
    +

    Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to signal +a CUexternalSemaphore of type +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is +CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which +indicates that while signaling the CUexternalSemaphore, no memory +synchronization operations should be performed for any external +memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. +For all other types of CUexternalSemaphore, flags must be zero.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st(void_ptr _ptr=0)#
    +

    External semaphore wait parameters

    +
    +
    +params#
    +
    +
    Type:
    +

    anon_struct18

    +
    +
    +
    + +
    +
    +flags#
    +

    Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on +a CUexternalSemaphore of type +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is +CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC which indicates +that while waiting for the CUexternalSemaphore, no memory +synchronization operations should be performed for any external +memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. +For all other types of CUexternalSemaphore, flags must be zero.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Semaphore signal node parameters

    +
    +
    +extSemArray#
    +

    Array of external semaphore handles.

    +
    +
    Type:
    +

    CUexternalSemaphore

    +
    +
    +
    + +
    +
    +paramsArray#
    +

    Array of external semaphore signal parameters.

    +
    +
    Type:
    +

    CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS

    +
    +
    +
    + +
    +
    +numExtSems#
    +

    Number of handles and parameters supplied in extSemArray and +paramsArray.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    +

    Semaphore signal node parameters

    +
    +
    +extSemArray#
    +

    Array of external semaphore handles.

    +
    +
    Type:
    +

    CUexternalSemaphore

    +
    +
    +
    + +
    +
    +paramsArray#
    +

    Array of external semaphore signal parameters.

    +
    +
    Type:
    +

    CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS

    +
    +
    +
    + +
    +
    +numExtSems#
    +

    Number of handles and parameters supplied in extSemArray and +paramsArray.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Semaphore wait node parameters

    +
    +
    +extSemArray#
    +

    Array of external semaphore handles.

    +
    +
    Type:
    +

    CUexternalSemaphore

    +
    +
    +
    + +
    +
    +paramsArray#
    +

    Array of external semaphore wait parameters.

    +
    +
    Type:
    +

    CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS

    +
    +
    +
    + +
    +
    +numExtSems#
    +

    Number of handles and parameters supplied in extSemArray and +paramsArray.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    +

    Semaphore wait node parameters

    +
    +
    +extSemArray#
    +

    Array of external semaphore handles.

    +
    +
    Type:
    +

    CUexternalSemaphore

    +
    +
    +
    + +
    +
    +paramsArray#
    +

    Array of external semaphore wait parameters.

    +
    +
    Type:
    +

    CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS

    +
    +
    +
    + +
    +
    +numExtSems#
    +

    Number of handles and parameters supplied in extSemArray and +paramsArray.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUarrayMapInfo_st(void_ptr _ptr=0)#
    +

    Specifies the CUDA array or CUDA mipmapped array memory mapping +information

    +
    +
    +resourceType#
    +

    Resource type

    +
    +
    Type:
    +

    CUresourcetype

    +
    +
    +
    + +
    +
    +resource#
    +
    +
    Type:
    +

    anon_union9

    +
    +
    +
    + +
    +
    +subresourceType#
    +

    Sparse subresource type

    +
    +
    Type:
    +

    CUarraySparseSubresourceType

    +
    +
    +
    + +
    +
    +subresource#
    +
    +
    Type:
    +

    anon_union10

    +
    +
    +
    + +
    +
    +memOperationType#
    +

    Memory operation type

    +
    +
    Type:
    +

    CUmemOperationType

    +
    +
    +
    + +
    +
    +memHandleType#
    +

    Memory handle type

    +
    +
    Type:
    +

    CUmemHandleType

    +
    +
    +
    + +
    +
    +memHandle#
    +
    +
    Type:
    +

    anon_union11

    +
    +
    +
    + +
    +
    +offset#
    +

    Offset within the mip tail, or offset within the memory, depending on the mapping being performed

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +deviceBitMask#
    +

    Device ordinal bit mask

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags for future use; must be zero now.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +

    Reserved for future use, must be zero now.

    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemLocation_st(void_ptr _ptr=0)#
    +

    Specifies a memory location.

    +
    +
    +type#
    +

    Specifies the location type, which modifies the meaning of id.

    +
    +
    Type:
    +

    CUmemLocationType

    +
    +
    +
    + +
    +
    +id#
    +

    Identifier for this location’s CUmemLocationType; for example, a device ordinal when the type is CU_MEM_LOCATION_TYPE_DEVICE.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAllocationProp_st(void_ptr _ptr=0)#
    +

    Specifies the properties for an allocation.

    +
    +
    +type#
    +

    Allocation type

    +
    +
    Type:
    +

    CUmemAllocationType

    +
    +
    +
    + +
    +
    +requestedHandleTypes#
    +

    requested CUmemAllocationHandleType

    +
    +
    Type:
    +

    CUmemAllocationHandleType

    +
    +
    +
    + +
    +
    +location#
    +

    Location of allocation

    +
    +
    Type:
    +

    CUmemLocation

    +
    +
    +
    + +
    +
    +win32HandleMetaData#
    +

    Windows-specific POBJECT_ATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This object-attributes structure includes security attributes that define the scope within which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +allocFlags#
    +
    +
    Type:
    +

    anon_struct21

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmulticastObjectProp_st(void_ptr _ptr=0)#
    +

    Specifies the properties for a multicast object.

    +
    +
    +numDevices#
    +

    The number of devices in the multicast team that will bind memory +to this object

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +size#
    +

    The maximum amount of memory that can be bound to this multicast +object per device

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +handleTypes#
    +

    Bitmask of exportable handle types (see CUmemAllocationHandleType) +for this object

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags for future use, must be zero now

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAccessDesc_st(void_ptr _ptr=0)#
    +

    Memory access descriptor

    +
    +
    +location#
    +

    Location on which the request is to change its accessibility

    +
    +
    Type:
    +

    CUmemLocation

    +
    +
    +
    + +
    +
    +flags#
    +

    Accessibility flags to set on the request; see CUmemAccess_flags.

    +
    +
    Type:
    +

    CUmemAccess_flags

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
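
    CUmemLocation, CUmemAllocationProp and CUmemAccessDesc feed the virtual memory management flow; a hedged sketch of the usual create / reserve / map / set-access sequence on device 0 (error checking elided).

        from cuda.bindings import driver

        prop = driver.CUmemAllocationProp()
        prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        prop.location.id = 0  # device ordinal

        err, gran = driver.cuMemGetAllocationGranularity(
            prop, driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
        size = gran  # one granule, for illustration

        err, handle = driver.cuMemCreate(size, prop, 0)
        err, va = driver.cuMemAddressReserve(size, 0, 0, 0)
        err, = driver.cuMemMap(va, size, 0, handle, 0)

        acc = driver.CUmemAccessDesc()
        acc.location = prop.location
        acc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
        err, = driver.cuMemSetAccess(va, size, [acc], 1)
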
    +
    +class cuda.bindings.driver.CUgraphExecUpdateResultInfo_st(void_ptr _ptr=0)#
    +

    Result information returned by cuGraphExecUpdate

    +
    +
    +result#
    +

    Gives more specific detail when a cuda graph update fails.

    +
    +
    Type:
    +

    CUgraphExecUpdateResult

    +
    +
    +
    + +
    +
    +errorNode#
    +

    The “to node” of the error edge when the topologies do not match. +The error node when the error is associated with a specific node. +NULL when the error is generic.

    +
    +
    Type:
    +

    CUgraphNode

    +
    +
    +
    + +
    +
    +errorFromNode#
    +

    The “from node” of the error edge when the topologies do not match. Otherwise NULL.

    +
    +
    Type:
    +

    CUgraphNode

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
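
    A sketch of inspecting this struct after an update attempt, assuming `graphExec` was instantiated from an earlier version of `graph`:

        from cuda.bindings import driver

        err, info = driver.cuGraphExecUpdate(graphExec, graph)
        if info.result != driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_SUCCESS:
            # errorNode / errorFromNode identify the offending edge when topologies differ.
            print(info.result, info.errorNode, info.errorFromNode)
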
    +
    +class cuda.bindings.driver.CUmemPoolProps_st(void_ptr _ptr=0)#
    +

    Specifies the properties of allocations made from the pool.

    +
    +
    +allocType#
    +

    Allocation type. Currently must be specified as +CU_MEM_ALLOCATION_TYPE_PINNED

    +
    +
    Type:
    +

    CUmemAllocationType

    +
    +
    +
    + +
    +
    +handleTypes#
    +

    Handle types that will be supported by allocations from the pool.

    +
    +
    Type:
    +

    CUmemAllocationHandleType

    +
    +
    +
    + +
    +
    +location#
    +

    Location where allocations should reside.

    +
    +
    Type:
    +

    CUmemLocation

    +
    +
    +
    + +
    +
    +win32SecurityAttributes#
    +

    Windows-specific LPSECURITYATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines the scope within which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +maxSize#
    +

    Maximum pool size. When set to 0, defaults to a system dependent +value.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +usage#
    +

    Bitmask indicating intended usage for the pool.

    +
    +
    Type:
    +

    unsigned short

    +
    +
    +
    + +
    +
    +reserved#
    +

    Reserved for future use; must be 0.

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
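
    A sketch of creating an explicit pool from these properties and allocating from it; `stream` is assumed to be an existing CUstream, and error checking is elided.

        from cuda.bindings import driver

        props = driver.CUmemPoolProps()
        props.allocType = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        props.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        props.location.id = 0  # device ordinal

        err, pool = driver.cuMemPoolCreate(props)
        err, dptr = driver.cuMemAllocFromPoolAsync(1 << 20, pool, stream)
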
    +
    +class cuda.bindings.driver.CUmemPoolPtrExportData_st(void_ptr _ptr=0)#
    +

    Opaque data for exporting a pool allocation

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st(void_ptr _ptr=0)#
    +

    Memory allocation node parameters

    +
    +
    +poolProps#
    +

    in: location where the allocation should reside (specified in +::location). ::handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is +not supported.

    +
    +
    Type:
    +

    CUmemPoolProps

    +
    +
    +
    + +
    +
    +accessDescs#
    +

    in: array of memory access descriptors. Used to describe peer GPU +access

    +
    +
    Type:
    +

    CUmemAccessDesc

    +
    +
    +
    + +
    +
    +accessDescCount#
    +

    in: number of memory access descriptors. Must not exceed the number +of GPUs.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +bytesize#
    +

    in: size in bytes of the requested allocation

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dptr#
    +

    out: address of the allocation returned by CUDA

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st(void_ptr _ptr=0)#
    +

    Memory allocation node parameters

    +
    +
    +poolProps#
    +

    in: location where the allocation should reside (specified in +::location). ::handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is +not supported.

    +
    +
    Type:
    +

    CUmemPoolProps

    +
    +
    +
    + +
    +
    +accessDescs#
    +

    in: array of memory access descriptors. Used to describe peer GPU +access

    +
    +
    Type:
    +

    CUmemAccessDesc

    +
    +
    +
    + +
    +
    +accessDescCount#
    +

    in: number of memory access descriptors. Must not exceed the number +of GPUs.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +bytesize#
    +

    in: size in bytes of the requested allocation

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dptr#
    +

    out: address of the allocation returned by CUDA

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Memory free node parameters

    +
    +
    +dptr#
    +

    in: the pointer to free

    +
    +
    Type:
    +

    CUdeviceptr

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Child graph node parameters

    +
    +
    +graph#
    +

    The child graph to clone into the node for node creation, or a +handle to the graph owned by the node for node query

    +
    +
    Type:
    +

    CUgraph

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Event record node parameters

    +
    +
    +event#
    +

    The event to record when the node executes

    +
    +
    Type:
    +

    CUevent

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS_st(void_ptr _ptr=0)#
    +

    Event wait node parameters

    +
    +
    +event#
    +

    The event to wait on from the node

    +
    +
    Type:
    +

    CUevent

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphNodeParams_st(void_ptr _ptr=0)#
    +

    Graph node parameters. See cuGraphAddNode.

    +
    +
    +type#
    +

    Type of the node

    +
    +
    Type:
    +

    CUgraphNodeType

    +
    +
    +
    + +
    +
    +reserved0#
    +

    Reserved. Must be zero.

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +reserved1#
    +

    Padding. Unused bytes must be zero.

    +
    +
    Type:
    +

    List[long long]

    +
    +
    +
    + +
    +
    +kernel#
    +

    Kernel node parameters.

    +
    +
    Type:
    +

    CUDA_KERNEL_NODE_PARAMS_v3

    +
    +
    +
    + +
    +
    +memcpy#
    +

    Memcpy node parameters.

    +
    +
    Type:
    +

    CUDA_MEMCPY_NODE_PARAMS

    +
    +
    +
    + +
    +
    +memset#
    +

    Memset node parameters.

    +
    +
    Type:
    +

    CUDA_MEMSET_NODE_PARAMS_v2

    +
    +
    +
    + +
    +
    +host#
    +

    Host node parameters.

    +
    +
    Type:
    +

    CUDA_HOST_NODE_PARAMS_v2

    +
    +
    +
    + +
    +
    +graph#
    +

    Child graph node parameters.

    +
    +
    Type:
    +

    CUDA_CHILD_GRAPH_NODE_PARAMS

    +
    +
    +
    + +
    +
    +eventWait#
    +

    Event wait node parameters.

    +
    +
    Type:
    +

    CUDA_EVENT_WAIT_NODE_PARAMS

    +
    +
    +
    + +
    +
    +eventRecord#
    +

    Event record node parameters.

    +
    +
    Type:
    +

    CUDA_EVENT_RECORD_NODE_PARAMS

    +
    +
    +
    + +
    +
    +extSemSignal#
    +

    External semaphore signal node parameters.

    +
    +
    Type:
    +

    CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2

    +
    +
    +
    + +
    +
    +extSemWait#
    +

    External semaphore wait node parameters.

    +
    +
    Type:
    +

    CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2

    +
    +
    +
    + +
    +
    +alloc#
    +

    Memory allocation node parameters.

    +
    +
    Type:
    +

    CUDA_MEM_ALLOC_NODE_PARAMS_v2

    +
    +
    +
    + +
    +
    +free#
    +

    Memory free node parameters.

    +
    +
    Type:
    +

    CUDA_MEM_FREE_NODE_PARAMS

    +
    +
    +
    + +
    +
    +memOp#
    +

    MemOp node parameters.

    +
    +
    Type:
    +

    CUDA_BATCH_MEM_OP_NODE_PARAMS_v2

    +
    +
    +
    + +
    +
    +conditional#
    +

    Conditional node parameters.

    +
    +
    Type:
    +

    CUDA_CONDITIONAL_NODE_PARAMS

    +
    +
    +
    + +
    +
    +reserved2#
    +

    Reserved bytes. Must be zero.

    +
    +
    Type:
    +

    long long

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
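
    A sketch of routing one of the per-type parameter structs above through the generic cuGraphAddNode entry point; `graph` and `event` are assumed to exist, and error checking is elided.

        from cuda.bindings import driver

        params = driver.CUgraphNodeParams()
        params.type = driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD
        params.eventRecord.event = event  # assumed to be an existing CUevent

        err, node = driver.cuGraphAddNode(graph, [], 0, params)  # no dependencies
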
    +
    +class cuda.bindings.driver.CUeglFrame_st(void_ptr _ptr=0)#
    +

    CUDA EGLFrame structure descriptor - structure defining one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar.

    +
    +
    +frame#
    +
    +
    Type:
    +

    anon_union14

    +
    +
    +
    + +
    +
    +width#
    +

    Width of first plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +height#
    +

    Height of first plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +depth#
    +

    Depth of first plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +pitch#
    +

    Pitch of first plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +planeCount#
    +

    Number of planes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +numChannels#
    +

    Number of channels for the plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +frameType#
    +

    Array or Pitch

    +
    +
    Type:
    +

    CUeglFrameType

    +
    +
    +
    + +
    +
    +eglColorFormat#
    +

    CUDA EGL Color Format

    +
    +
    Type:
    +

    CUeglColorFormat

    +
    +
    +
    + +
    +
    +cuFormat#
    +

    CUDA Array Format

    +
    +
    Type:
    +

    CUarray_format

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUipcMem_flags(value)#
    +

    CUDA Ipc Mem Flags

    +
    +
    +CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = 1#
    +

    Automatically enable peer access between remote devices as needed

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAttach_flags(value)#
    +

    CUDA Mem Attach Flags

    +
    +
    +CU_MEM_ATTACH_GLOBAL = 1#
    +

    Memory can be accessed by any stream on any device

    +
    + +
    +
    +CU_MEM_ATTACH_HOST = 2#
    +

    Memory cannot be accessed by any stream on any device

    +
    + +
    +
    +CU_MEM_ATTACH_SINGLE = 4#
    +

    Memory can only be accessed by a single stream on the associated device

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUctx_flags(value)#
    +

    Context creation flags

    +
    +
    +CU_CTX_SCHED_AUTO = 0#
    +

    Automatic scheduling

    +
    + +
    +
    +CU_CTX_SCHED_SPIN = 1#
    +

    Set spin as default scheduling

    +
    + +
    +
    +CU_CTX_SCHED_YIELD = 2#
    +

    Set yield as default scheduling

    +
    + +
    +
    +CU_CTX_SCHED_BLOCKING_SYNC = 4#
    +

    Set blocking synchronization as default scheduling

    +
    + +
    +
    +CU_CTX_BLOCKING_SYNC = 4#
    +

    Set blocking synchronization as default scheduling [Deprecated]

    +
    + +
    +
    +CU_CTX_SCHED_MASK = 7#
    +
    + +
    +
    +CU_CTX_MAP_HOST = 8#
    +

    [Deprecated]

    +
    + +
    +
    +CU_CTX_LMEM_RESIZE_TO_MAX = 16#
    +

    Keep local memory allocation after launch

    +
    + +
    +
    +CU_CTX_COREDUMP_ENABLE = 32#
    +

    Trigger coredumps from exceptions in this context

    +
    + +
    +
    +CU_CTX_USER_COREDUMP_ENABLE = 64#
    +

    Enable user pipe to trigger coredumps in this context

    +
    + +
    +
    +CU_CTX_SYNC_MEMOPS = 128#
    +

    Ensure synchronous memory operations on this context will synchronize

    +
    + +
    +
    +CU_CTX_FLAGS_MASK = 255#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUevent_sched_flags(value)#
    +

    Event sched flags

    +
    +
    +CU_EVENT_SCHED_AUTO = 0#
    +

    Automatic scheduling

    +
    + +
    +
    +CU_EVENT_SCHED_SPIN = 1#
    +

    Set spin as default scheduling

    +
    + +
    +
    +CU_EVENT_SCHED_YIELD = 2#
    +

    Set yield as default scheduling

    +
    + +
    +
    +CU_EVENT_SCHED_BLOCKING_SYNC = 4#
    +

    Set blocking synchronization as default scheduling

    +
    + +
    + +
    +
    +class cuda.bindings.driver.cl_event_flags(value)#
    +

    NVCL event scheduling flags

    +
    +
    +NVCL_EVENT_SCHED_AUTO = 0#
    +

    Automatic scheduling

    +
    + +
    +
    +NVCL_EVENT_SCHED_SPIN = 1#
    +

    Set spin as default scheduling

    +
    + +
    +
    +NVCL_EVENT_SCHED_YIELD = 2#
    +

    Set yield as default scheduling

    +
    + +
    +
    +NVCL_EVENT_SCHED_BLOCKING_SYNC = 4#
    +

    Set blocking synchronization as default scheduling

    +
    + +
    + +
    +
    +class cuda.bindings.driver.cl_context_flags(value)#
    +

    NVCL context scheduling flags

    +
    +
    +NVCL_CTX_SCHED_AUTO = 0#
    +

    Automatic scheduling

    +
    + +
    +
    +NVCL_CTX_SCHED_SPIN = 1#
    +

    Set spin as default scheduling

    +
    + +
    +
    +NVCL_CTX_SCHED_YIELD = 2#
    +

    Set yield as default scheduling

    +
    + +
    +
    +NVCL_CTX_SCHED_BLOCKING_SYNC = 4#
    +

    Set blocking synchronization as default scheduling

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstream_flags(value)#
    +

    Stream creation flags

    +
    +
    +CU_STREAM_DEFAULT = 0#
    +

    Default stream flag

    +
    + +
    +
    +CU_STREAM_NON_BLOCKING = 1#
    +

    Stream does not synchronize with stream 0 (the NULL stream)

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUevent_flags(value)#
    +

    Event creation flags

    +
    +
    +CU_EVENT_DEFAULT = 0#
    +

    Default event flag

    +
    + +
    +
    +CU_EVENT_BLOCKING_SYNC = 1#
    +

    Event uses blocking synchronization

    +
    + +
    +
    +CU_EVENT_DISABLE_TIMING = 2#
    +

    Event will not record timing data

    +
    + +
    +
    +CU_EVENT_INTERPROCESS = 4#
    +

    Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set

    +
    + +
    + +
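
    A sketch of combining these flags; per the note above, CU_EVENT_INTERPROCESS requires CU_EVENT_DISABLE_TIMING.

        from cuda.bindings import driver

        flags = (driver.CUevent_flags.CU_EVENT_DISABLE_TIMING
                 | driver.CUevent_flags.CU_EVENT_INTERPROCESS)
        err, event = driver.cuEventCreate(flags)
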
    +
    +class cuda.bindings.driver.CUevent_record_flags(value)#
    +

    Event record flags

    +
    +
    +CU_EVENT_RECORD_DEFAULT = 0#
    +

    Default event record flag

    +
    + +
    +
    +CU_EVENT_RECORD_EXTERNAL = 1#
    +

    When using stream capture, create an event record node instead of the default behavior. This flag is invalid when used outside of capture.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUevent_wait_flags(value)#
    +

    Event wait flags

    +
    +
    +CU_EVENT_WAIT_DEFAULT = 0#
    +

    Default event wait flag

    +
    + +
    +
    +CU_EVENT_WAIT_EXTERNAL = 1#
    +

    When using stream capture, create an event wait node instead of the default behavior. This flag is invalid when used outside of capture.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstreamWaitValue_flags(value)#
    +

    Flags for cuStreamWaitValue32 and +cuStreamWaitValue64

    +
    +
    +CU_STREAM_WAIT_VALUE_GEQ = 0#
    +

    Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit values). Note this is a cyclic comparison which ignores wraparound. (Default behavior.)

    +
    + +
    +
    +CU_STREAM_WAIT_VALUE_EQ = 1#
    +

    Wait until *addr == value.

    +
    + +
    +
    +CU_STREAM_WAIT_VALUE_AND = 2#
    +

    Wait until (*addr & value) != 0.

    +
    + +
    +
    +CU_STREAM_WAIT_VALUE_NOR = 3#
    +

    Wait until ~(*addr | value) != 0. Support for this operation can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.

    +
    + +
    +
    +CU_STREAM_WAIT_VALUE_FLUSH = 1073741824#
    +

    Follow the wait operation with a flush of outstanding remote writes. This means that, if a remote write operation is guaranteed to have reached the device before the wait can be satisfied, that write is guaranteed to be visible to downstream device work. The device is permitted to reorder remote writes internally. For example, this flag would be required if two remote writes arrive in a defined order, the wait is satisfied by the second write, and downstream work needs to observe the first write. Support for this operation is restricted to selected platforms and can be queried with CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.

    +
    + +
    + +
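
    A sketch of using one of these flags; `stream` and `addr` (a CUdeviceptr to a device-accessible 32-bit word) are assumed to exist.

        from cuda.bindings import driver

        # Block work queued after this point in `stream` until *addr == 42.
        err, = driver.cuStreamWaitValue32(
            stream, addr, 42, driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_EQ)
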
    +
    +class cuda.bindings.driver.CUstreamWriteValue_flags(value)#
    +

    Flags for cuStreamWriteValue32

    +
    +
    +CU_STREAM_WRITE_VALUE_DEFAULT = 0#
    +

    Default behavior

    +
    + +
    +
    +CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = 1#
    +

    Permits the write to be reordered with writes which were issued before it, as a performance optimization. Normally, cuStreamWriteValue32 will provide a memory fence before the write, which has similar semantics to __threadfence_system() but is scoped to the stream rather than a CUDA thread. This flag is not supported in the v2 API.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstreamBatchMemOpType(value)#
    +

    Operations for cuStreamBatchMemOp

    +
    +
    +CU_STREAM_MEM_OP_WAIT_VALUE_32 = 1#
    +

    Represents a cuStreamWaitValue32 operation

    +
    + +
    +
    +CU_STREAM_MEM_OP_WRITE_VALUE_32 = 2#
    +

    Represents a cuStreamWriteValue32 operation

    +
    + +
    +
    +CU_STREAM_MEM_OP_WAIT_VALUE_64 = 4#
    +

    Represents a cuStreamWaitValue64 operation

    +
    + +
    +
    +CU_STREAM_MEM_OP_WRITE_VALUE_64 = 5#
    +

    Represents a cuStreamWriteValue64 operation

    +
    + +
    +
    +CU_STREAM_MEM_OP_BARRIER = 6#
    +

    Insert a memory barrier of the specified type

    +
    + +
    +
    +CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = 3#
    +

    This has the same effect as CU_STREAM_WAIT_VALUE_FLUSH, but as a standalone operation.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstreamMemoryBarrier_flags(value)#
    +

    Flags for cuStreamMemoryBarrier

    +
    +
    +CU_STREAM_MEMORY_BARRIER_TYPE_SYS = 0#
    +

    System-wide memory barrier.

    +
    + +
    +
    +CU_STREAM_MEMORY_BARRIER_TYPE_GPU = 1#
    +

    Limit memory barrier scope to the GPU.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUoccupancy_flags(value)#
    +

    Occupancy calculator flag

    +
    +
    +CU_OCCUPANCY_DEFAULT = 0#
    +

    Default behavior

    +
    + +
    +
    +CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = 1#
    +

    Assume global caching is enabled and cannot be automatically turned off

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags(value)#
    +

    Flags for cuStreamUpdateCaptureDependencies

    +
    +
    +CU_STREAM_ADD_CAPTURE_DEPENDENCIES = 0#
    +

    Add new nodes to the dependency set

    +
    + +
    +
    +CU_STREAM_SET_CAPTURE_DEPENDENCIES = 1#
    +

    Replace the dependency set with the new nodes

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUasyncNotificationType(value)#
    +

    Types of async notification that can be sent

    +
    +
    +CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET = 1#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUarray_format(value)#
    +

    Array formats

    +
    +
    +CU_AD_FORMAT_UNSIGNED_INT8 = 1#
    +

    Unsigned 8-bit integers

    +
    + +
    +
    +CU_AD_FORMAT_UNSIGNED_INT16 = 2#
    +

    Unsigned 16-bit integers

    +
    + +
    +
    +CU_AD_FORMAT_UNSIGNED_INT32 = 3#
    +

    Unsigned 32-bit integers

    +
    + +
    +
    +CU_AD_FORMAT_SIGNED_INT8 = 8#
    +

    Signed 8-bit integers

    +
    + +
    +
    +CU_AD_FORMAT_SIGNED_INT16 = 9#
    +

    Signed 16-bit integers

    +
    + +
    +
    +CU_AD_FORMAT_SIGNED_INT32 = 10#
    +

    Signed 32-bit integers

    +
    + +
    +
    +CU_AD_FORMAT_HALF = 16#
    +

    16-bit floating point

    +
    + +
    +
    +CU_AD_FORMAT_FLOAT = 32#
    +

    32-bit floating point

    +
    + +
    +
    +CU_AD_FORMAT_NV12 = 176#
    +

    8-bit YUV planar format, with 4:2:0 sampling

    +
    + +
    +
    +CU_AD_FORMAT_UNORM_INT8X1 = 192#
    +

    1 channel unsigned 8-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_UNORM_INT8X2 = 193#
    +

    2 channel unsigned 8-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_UNORM_INT8X4 = 194#
    +

    4 channel unsigned 8-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_UNORM_INT16X1 = 195#
    +

    1 channel unsigned 16-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_UNORM_INT16X2 = 196#
    +

    2 channel unsigned 16-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_UNORM_INT16X4 = 197#
    +

    4 channel unsigned 16-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_SNORM_INT8X1 = 198#
    +

    1 channel signed 8-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_SNORM_INT8X2 = 199#
    +

    2 channel signed 8-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_SNORM_INT8X4 = 200#
    +

    4 channel signed 8-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_SNORM_INT16X1 = 201#
    +

    1 channel signed 16-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_SNORM_INT16X2 = 202#
    +

    2 channel signed 16-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_SNORM_INT16X4 = 203#
    +

    4 channel signed 16-bit normalized integer

    +
    + +
    +
    +CU_AD_FORMAT_BC1_UNORM = 145#
    +

    4 channel unsigned normalized block-compressed (BC1 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC1_UNORM_SRGB = 146#
    +

    4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding

    +
    + +
    +
    +CU_AD_FORMAT_BC2_UNORM = 147#
    +

    4 channel unsigned normalized block-compressed (BC2 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC2_UNORM_SRGB = 148#
    +

    4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding

    +
    + +
    +
    +CU_AD_FORMAT_BC3_UNORM = 149#
    +

    4 channel unsigned normalized block-compressed (BC3 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC3_UNORM_SRGB = 150#
    +

    4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding

    +
    + +
    +
    +CU_AD_FORMAT_BC4_UNORM = 151#
    +

    1 channel unsigned normalized block-compressed (BC4 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC4_SNORM = 152#
    +

    1 channel signed normalized block-compressed (BC4 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC5_UNORM = 153#
    +

    2 channel unsigned normalized block-compressed (BC5 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC5_SNORM = 154#
    +

    2 channel signed normalized block-compressed (BC5 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC6H_UF16 = 155#
    +

    3 channel unsigned half-float block-compressed (BC6H compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC6H_SF16 = 156#
    +

    3 channel signed half-float block-compressed (BC6H compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC7_UNORM = 157#
    +

    4 channel unsigned normalized block-compressed (BC7 compression) format

    +
    + +
    +
    +CU_AD_FORMAT_BC7_UNORM_SRGB = 158#
    +

    4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding

    +
    + +
    +
    +CU_AD_FORMAT_P010 = 159#
    +

    10-bit YUV planar format, with 4:2:0 sampling

    +
    + +
    +
    +CU_AD_FORMAT_P016 = 161#
    +

    16-bit YUV planar format, with 4:2:0 sampling

    +
    + +
    +
    +CU_AD_FORMAT_NV16 = 162#
    +

    8-bit YUV planar format, with 4:2:2 sampling

    +
    + +
    +
    +CU_AD_FORMAT_P210 = 163#
    +

    10-bit YUV planar format, with 4:2:2 sampling

    +
    + +
    +
    +CU_AD_FORMAT_P216 = 164#
    +

    16-bit YUV planar format, with 4:2:2 sampling

    +
    + +
    +
    +CU_AD_FORMAT_YUY2 = 165#
    +

    2 channel, 8-bit YUV packed planar format, with 4:2:2 sampling

    +
    + +
    +
    +CU_AD_FORMAT_Y210 = 166#
    +

    2 channel, 10-bit YUV packed planar format, with 4:2:2 sampling

    +
    + +
    +
    +CU_AD_FORMAT_Y216 = 167#
    +

    2 channel, 16-bit YUV packed planar format, with 4:2:2 sampling

    +
    + +
    +
    +CU_AD_FORMAT_AYUV = 168#
    +

    4 channel, 8-bit YUV packed planar format, with 4:4:4 sampling

    +
    + +
    +
    +CU_AD_FORMAT_Y410 = 169#
    +

    10-bit YUV packed planar format, with 4:4:4 sampling

    +
    + +
    +
    +CU_AD_FORMAT_Y416 = 177#
    +

    4 channel, 12-bit YUV packed planar format, with 4:4:4 sampling

    +
    + +
    +
    +CU_AD_FORMAT_Y444_PLANAR8 = 178#
    +

    3 channel 8-bit YUV planar format, with 4:4:4 sampling

    +
    + +
    +
    +CU_AD_FORMAT_Y444_PLANAR10 = 179#
    +

    3 channel 10-bit YUV planar format, with 4:4:4 sampling

    +
    + +
    +
    +CU_AD_FORMAT_MAX = 2147483647#
    +
    + +
    + +
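    CUarray_format is typically set in a CUDA_ARRAY_DESCRIPTOR before calling cuArrayCreate. A minimal sketch; the 256x256 single-channel float shape is an arbitrary assumption for illustration.

        from cuda.bindings import driver

        def check(result):
            err, *rest = result
            assert err == driver.CUresult.CUDA_SUCCESS, err
            return rest[0] if len(rest) == 1 else rest

        check(driver.cuInit(0))
        dev = check(driver.cuDeviceGet(0))
        ctx = check(driver.cuCtxCreate(0, dev))

        # Describe a 256x256 single-channel 32-bit float CUDA array.
        desc = driver.CUDA_ARRAY_DESCRIPTOR()
        desc.Width = 256
        desc.Height = 256
        desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
        desc.NumChannels = 1

        array = check(driver.cuArrayCreate(desc))
        check(driver.cuArrayDestroy(array))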
    +
    +class cuda.bindings.driver.CUaddress_mode(value)#
    +

    Texture reference addressing modes

    +
    +
    +CU_TR_ADDRESS_MODE_WRAP = 0#
    +

    Wrapping address mode

    +
    + +
    +
    +CU_TR_ADDRESS_MODE_CLAMP = 1#
    +

    Clamp to edge address mode

    +
    + +
    +
    +CU_TR_ADDRESS_MODE_MIRROR = 2#
    +

    Mirror address mode

    +
    + +
    +
    +CU_TR_ADDRESS_MODE_BORDER = 3#
    +

    Border address mode

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUfilter_mode(value)#
    +

    Texture reference filtering modes

    +
    +
    +CU_TR_FILTER_MODE_POINT = 0#
    +

    Point filter mode

    +
    + +
    +
    +CU_TR_FILTER_MODE_LINEAR = 1#
    +

    Linear filter mode

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdevice_attribute(value)#
    +

    Device properties

    +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 1#
    +

    Maximum number of threads per block

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = 2#
    +

    Maximum block dimension X

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = 3#
    +

    Maximum block dimension Y

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = 4#
    +

    Maximum block dimension Z

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = 5#
    +

    Maximum grid dimension X

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = 6#
    +

    Maximum grid dimension Y

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = 7#
    +

    Maximum grid dimension Z

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = 8#
    +

    Maximum shared memory available per block in bytes

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = 8#
    +

    Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = 9#
    +

    Memory available on device for constant variables in a CUDA C kernel in bytes

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_WARP_SIZE = 10#
    +

    Warp size in threads

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_PITCH = 11#
    +

    Maximum pitch in bytes allowed by memory copies

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = 12#
    +

    Maximum number of 32-bit registers available per block

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = 12#
    +

    Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13#
    +

    Typical clock frequency in kilohertz

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = 14#
    +

    Alignment requirement for textures

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = 15#
    +

    Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16#
    +

    Number of multiprocessors on device

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = 17#
    +

    Specifies whether there is a run time limit on kernels

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_INTEGRATED = 18#
    +

    Device is integrated with host memory

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = 19#
    +

    Device can map host memory into CUDA address space

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = 20#
    +

    Compute mode (See CUcomputemode for details)

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = 21#
    +

    Maximum 1D texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = 22#
    +

    Maximum 2D texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = 23#
    +

    Maximum 2D texture height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = 24#
    +

    Maximum 3D texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = 25#
    +

    Maximum 3D texture height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = 26#
    +

    Maximum 3D texture depth

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = 27#
    +

    Maximum 2D layered texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = 28#
    +

    Maximum 2D layered texture height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = 29#
    +

    Maximum layers in a 2D layered texture

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = 27#
    +

    Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = 28#
    +

    Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = 29#
    +

    Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = 30#
    +

    Alignment requirement for surfaces

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = 31#
    +

    Device can possibly execute multiple kernels concurrently

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_ECC_ENABLED = 32#
    +

    Device has ECC support enabled

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33#
    +

    PCI bus ID of the device

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34#
    +

    PCI device ID of the device

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_TCC_DRIVER = 35#
    +

    Device is using TCC driver model

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36#
    +

    Peak memory clock frequency in kilohertz

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = 37#
    +

    Global memory bus width in bits

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = 38#
    +

    Size of L2 cache in bytes

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39#
    +

    Maximum resident threads per multiprocessor

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = 40#
    +

    Number of asynchronous engines

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = 41#
    +

    Device shares a unified address space with the host

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = 42#
    +

    Maximum 1D layered texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = 43#
    +

    Maximum layers in a 1D layered texture

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = 44#
    +

    Deprecated, do not use.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = 45#
    +

    Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = 46#
    +

    Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = 47#
    +

    Alternate maximum 3D texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = 48#
    +

    Alternate maximum 3D texture height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = 49#
    +

    Alternate maximum 3D texture depth

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = 50#
    +

    PCI domain ID of the device

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = 51#
    +

    Pitch alignment requirement for textures

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = 52#
    +

    Maximum cubemap texture width/height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = 53#
    +

    Maximum cubemap layered texture width/height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = 54#
    +

    Maximum layers in a cubemap layered texture

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = 55#
    +

    Maximum 1D surface width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = 56#
    +

    Maximum 2D surface width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = 57#
    +

    Maximum 2D surface height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = 58#
    +

    Maximum 3D surface width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = 59#
    +

    Maximum 3D surface height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = 60#
    +

    Maximum 3D surface depth

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = 61#
    +

    Maximum 1D layered surface width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = 62#
    +

    Maximum layers in a 1D layered surface

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = 63#
    +

    Maximum 2D layered surface width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = 64#
    +

    Maximum 2D layered surface height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = 65#
    +

    Maximum layers in a 2D layered surface

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = 66#
    +

    Maximum cubemap surface width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = 67#
    +

    Maximum cubemap layered surface width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = 68#
    +

    Maximum layers in a cubemap layered surface

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = 69#
    +

    Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = 70#
    +

    Maximum 2D linear texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = 71#
    +

    Maximum 2D linear texture height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = 72#
    +

    Maximum 2D linear texture pitch in bytes

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = 73#
    +

    Maximum mipmapped 2D texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = 74#
    +

    Maximum mipmapped 2D texture height

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75#
    +

    Major compute capability version number

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76#
    +

    Minor compute capability version number

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = 77#
    +

    Maximum mipmapped 1D texture width

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = 78#
    +

    Device supports stream priorities

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = 79#
    +

    Device supports caching globals in L1

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = 80#
    +

    Device supports caching locals in L1

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = 81#
    +

    Maximum shared memory available per multiprocessor in bytes

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = 82#
    +

    Maximum number of 32-bit registers available per multiprocessor

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = 83#
    +

    Device can allocate managed memory on this system

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = 84#
    +

    Device is on a multi-GPU board

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = 85#
    +

    Unique id for a group of devices on the same multi-GPU board

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = 86#
    +

    Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = 87#
    +

    Ratio of single precision performance (in floating-point operations per second) to double precision performance

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = 88#
    +

    Device supports coherently accessing pageable memory without calling cudaHostRegister on it

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = 89#
    +

    Device can coherently access managed memory concurrently with the CPU

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = 90#
    +

    Device supports compute preemption.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = 91#
    +

    Device can access host registered memory at the same virtual address as the CPU

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 = 92#
    +

    Deprecated, along with v1 MemOps API, cuStreamBatchMemOp and related APIs are supported.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 = 93#
    +

    Deprecated, along with v1 MemOps API, 64-bit operations are supported in cuStreamBatchMemOp and related APIs.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 = 94#
    +

    Deprecated, along with v1 MemOps API, CU_STREAM_WAIT_VALUE_NOR is supported.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = 95#
    +

    Device supports launching cooperative kernels via cuLaunchCooperativeKernel

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = 96#
    +

    Deprecated, cuLaunchCooperativeKernelMultiDevice is deprecated.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = 97#
    +

    Maximum optin shared memory per block

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = 98#
    +

    The CU_STREAM_WAIT_VALUE_FLUSH flag and the CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See Stream Memory Operations for additional details.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = 99#
    +

    Device supports host memory registration via cudaHostRegister.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = 100#
    +

    Device accesses pageable memory via the host’s page tables.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = 101#
    +

    The host can directly access managed memory on the device without migration.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = 102#
    +

    Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = 102#
    +

    Device supports virtual memory management APIs like cuMemAddressReserve, cuMemCreate, cuMemMap and related APIs

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = 103#
    +

    Device supports exporting memory to a posix file descriptor with cuMemExportToShareableHandle, if requested via cuMemCreate

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = 104#
    +

    Device supports exporting memory to a Win32 NT handle with cuMemExportToShareableHandle, if requested via cuMemCreate

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = 105#
    +

    Device supports exporting memory to a Win32 KMT handle with cuMemExportToShareableHandle, if requested via cuMemCreate

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = 106#
    +

    Maximum number of blocks per multiprocessor

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = 107#
    +

    Device supports compression of memory

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = 108#
    +

    Maximum L2 persisting lines capacity setting in bytes.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = 109#
    +

    Maximum value of CUaccessPolicyWindow::num_bytes.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = 110#
    +

    Device supports specifying the GPUDirect RDMA flag with cuMemCreate

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = 111#
    +

    Shared memory reserved by CUDA driver per block in bytes

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = 112#
    +

    Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = 113#
    +

    Device supports using the cuMemHostRegister flag CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = 114#
    +

    External timeline semaphore interop is supported on the device

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = 115#
    +

    Device supports using the cuMemAllocAsync and cuMemPool family of APIs

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = 116#
    +

    Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = 117#
    +

    The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the CUflushGPUDirectRDMAWritesOptions enum

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = 118#
    +

    GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See CUGPUDirectRDMAWritesOrdering for the numerical values returned here.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = 119#
    +

    Handle types supported with mempool based IPC

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH = 120#
    +

    Indicates device supports cluster launch

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED = 121#
    +

    Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = 122#
    +

    64-bit operations are supported in cuStreamBatchMemOp and related MemOp APIs.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = 123#
    +

    CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED = 124#
    +

    Device supports buffer sharing with dma_buf mechanism.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED = 125#
    +

    Device supports IPC Events.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT = 126#
    +

    Number of memory domains the device supports.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED = 127#
    +

    Device supports accessing memory using Tensor Map.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED = 128#
    +

    Device supports exporting memory to a fabric handle with cuMemExportToShareableHandle() or requested with cuMemCreate()

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS = 129#
    +

    Device supports unified function pointers.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_NUMA_CONFIG = 130#
    +

    NUMA configuration of a device: value is of type CUdeviceNumaConfig enum

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_NUMA_ID = 131#
    +

    NUMA node ID of the GPU memory

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED = 132#
    +

    Device supports switch multicast and reduction operations.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MPS_ENABLED = 133#
    +

    Indicates if contexts created on this device will be shared via MPS

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID = 134#
    +

    NUMA ID of the host node closest to the device. Returns -1 when the system does not support NUMA.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED = 135#
    +

    Device supports CIG with D3D12.

    +
    + +
    +
    +CU_DEVICE_ATTRIBUTE_MAX = 136#
    +
    + +
    + +
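    A short sketch of querying a few of these attributes with cuDeviceGetAttribute; the attribute selection and device ordinal 0 are arbitrary. No context is required for this query.

        from cuda.bindings import driver

        attr = driver.CUdevice_attribute  # shorthand

        err, = driver.cuInit(0)
        err, dev = driver.cuDeviceGet(0)

        # cuDeviceGetAttribute returns (CUresult, int) in these bindings.
        err, max_threads = driver.cuDeviceGetAttribute(
            attr.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, dev)
        err, cc_major = driver.cuDeviceGetAttribute(
            attr.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, dev)
        err, cc_minor = driver.cuDeviceGetAttribute(
            attr.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, dev)
        print(f"sm_{cc_major}{cc_minor}, {max_threads} threads/block max")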
    +
    +class cuda.bindings.driver.CUpointer_attribute(value)#
    +

    Pointer information

    +
    +
    +CU_POINTER_ATTRIBUTE_CONTEXT = 1#
    +

    The CUcontext on which a pointer was allocated or registered

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_MEMORY_TYPE = 2#
    +

    The CUmemorytype describing the physical location of a pointer

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_DEVICE_POINTER = 3#
    +

    The address at which a pointer’s memory may be accessed on the device

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_HOST_POINTER = 4#
    +

    The address at which a pointer’s memory may be accessed on the host

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_P2P_TOKENS = 5#
    +

    A pair of tokens for use with the nv-p2p.h Linux kernel interface

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = 6#
    +

    Synchronize every synchronous memory operation initiated on this region

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_BUFFER_ID = 7#
    +

    A process-wide unique ID for an allocated memory region

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_IS_MANAGED = 8#
    +

    Indicates if the pointer points to managed memory

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = 9#
    +

    A device ordinal of a device on which a pointer was allocated or registered

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = 10#
    +

    1 if this pointer maps to an allocation that is suitable for cudaIpcGetMemHandle, 0 otherwise

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = 11#
    +

    Starting address for this requested pointer

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_RANGE_SIZE = 12#
    +

    Size of the address range for this requested pointer

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_MAPPED = 13#
    +

    1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = 14#
    +

    Bitmask of allowed CUmemAllocationHandleType for this allocation

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = 15#
    +

    1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = 16#
    +

    Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = 17#
    +

    Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL.

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_MAPPING_SIZE = 18#
    +

    Size of the actual underlying mapping that the pointer belongs to

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR = 19#
    +

    The start address of the mapping that the pointer belongs to

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID = 20#
    +

    A process-wide unique id corresponding to the physical allocation the pointer belongs to

    +
    + +
    + +
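    These values are passed to cuPointerGetAttribute (or in batch form to cuPointerGetAttributes). A minimal sketch, assuming a plain 1 MiB device allocation; the check helper is an illustrative assumption.

        from cuda.bindings import driver

        def check(result):
            err, *rest = result
            assert err == driver.CUresult.CUDA_SUCCESS, err
            return rest[0] if len(rest) == 1 else rest

        check(driver.cuInit(0))
        dev = check(driver.cuDeviceGet(0))
        ctx = check(driver.cuCtxCreate(0, dev))
        dptr = check(driver.cuMemAlloc(1 << 20))

        # Ask the driver what it knows about this pointer.
        memtype = check(driver.cuPointerGetAttribute(
            driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, dptr))
        size = check(driver.cuPointerGetAttribute(
            driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_SIZE, dptr))
        # memtype should report CU_MEMORYTYPE_DEVICE; size, the 1 MiB range.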
    +
    +class cuda.bindings.driver.CUfunction_attribute(value)#
    +

    Function properties

    +
    +
    +CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 0#
    +

    The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = 1#
    +

    The size in bytes of statically-allocated shared memory required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = 2#
    +

    The size in bytes of user-allocated constant memory required by this function.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = 3#
    +

    The size in bytes of local memory used by each thread of this function.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_NUM_REGS = 4#
    +

    The number of registers used by each thread of this function.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_PTX_VERSION = 5#
    +

    The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_BINARY_VERSION = 6#
    +

    The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = 7#
    +

    The attribute to indicate whether the function has been compiled with the user-specified option “-Xptxas --dlcm=ca” set.

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = 8#
    +

    The maximum size in bytes of dynamically-allocated shared memory that can be used by this function. If the user-specified dynamic shared memory size is larger than this value, the launch will fail. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 9#
    +

    On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. Refer to CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR. This is only a hint, and the driver can choose a different ratio if required to execute the function. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET = 10#
    +

    If this attribute is set, the kernel must launch with a valid cluster size specified. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH = 11#
    +

    The required cluster width in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.

    +

    If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT = 12#
    +

    The required cluster height in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.

    +

    If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH = 13#
    +

    The required cluster depth in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time.

    +

    If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED = 14#
    +

    Whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform.

    +

    CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device.

    +

    Portable Cluster Size

    +

    A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities.

    +

    The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 15#
    +

    The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. See cuFuncSetAttribute, cuKernelSetAttribute

    +
    + +
    +
    +CU_FUNC_ATTRIBUTE_MAX = 16#
    +
    + +
    + +
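    These attributes are read back per-kernel with cuFuncGetAttribute. A minimal sketch; the hand-written no-op PTX is an assumption used only so the example is self-contained (real code would usually compile with NVRTC).

        from cuda.bindings import driver

        # Trivial hand-written PTX kernel (assumption: accepted by the driver).
        PTX = b"""
        .version 7.0
        .target sm_50
        .address_size 64
        .visible .entry noop() { ret; }
        """

        def check(result):
            err, *rest = result
            assert err == driver.CUresult.CUDA_SUCCESS, err
            return rest[0] if len(rest) == 1 else rest

        check(driver.cuInit(0))
        dev = check(driver.cuDeviceGet(0))
        ctx = check(driver.cuCtxCreate(0, dev))
        module = check(driver.cuModuleLoadData(PTX))
        kernel = check(driver.cuModuleGetFunction(module, b"noop"))

        regs = check(driver.cuFuncGetAttribute(
            driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS, kernel))
        max_threads = check(driver.cuFuncGetAttribute(
            driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
            kernel))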
    +
    +class cuda.bindings.driver.CUfunc_cache(value)#
    +

    Function cache configurations

    +
    +
    +CU_FUNC_CACHE_PREFER_NONE = 0#
    +

    no preference for shared memory or L1 (default)

    +
    + +
    +
    +CU_FUNC_CACHE_PREFER_SHARED = 1#
    +

    prefer larger shared memory and smaller L1 cache

    +
    + +
    +
    +CU_FUNC_CACHE_PREFER_L1 = 2#
    +

    prefer larger L1 cache and smaller shared memory

    +
    + +
    +
    +CU_FUNC_CACHE_PREFER_EQUAL = 3#
    +

    prefer equal sized L1 cache and shared memory

    +
    + +
    + +
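    A context-wide cache preference is installed with cuCtxSetCacheConfig; it is a hint, and the driver may ignore it on architectures with a fixed L1/shared split. A minimal sketch:

        from cuda.bindings import driver

        err, = driver.cuInit(0)
        err, dev = driver.cuDeviceGet(0)
        err, ctx = driver.cuCtxCreate(0, dev)

        # Hint that kernels in this context prefer a larger L1 cache.
        err, = driver.cuCtxSetCacheConfig(
            driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_L1)
        assert err == driver.CUresult.CUDA_SUCCESS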
    +
    +class cuda.bindings.driver.CUsharedconfig(value)#
    +

    [Deprecated] Shared memory configurations

    +
    +
    +CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = 0#
    +

    set default shared memory bank size

    +
    + +
    +
    +CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = 1#
    +

    set shared memory bank width to four bytes

    +
    + +
    +
    +CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = 2#
    +

    set shared memory bank width to eight bytes

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUshared_carveout(value)#
    +

    Shared memory carveout configurations. These may be passed to cuFuncSetAttribute or cuKernelSetAttribute

    +
    +
    +CU_SHAREDMEM_CARVEOUT_DEFAULT = -1#
    +

    No preference for shared memory or L1 (default)

    +
    + +
    +
    +CU_SHAREDMEM_CARVEOUT_MAX_SHARED = 100#
    +

    Prefer maximum available shared memory, minimum L1 cache

    +
    + +
    +
    +CU_SHAREDMEM_CARVEOUT_MAX_L1 = 0#
    +

    Prefer maximum available L1 cache, minimum shared memory

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemorytype(value)#
    +

    Memory types

    +
    +
    +CU_MEMORYTYPE_HOST = 1#
    +

    Host memory

    +
    + +
    +
    +CU_MEMORYTYPE_DEVICE = 2#
    +

    Device memory

    +
    + +
    +
    +CU_MEMORYTYPE_ARRAY = 3#
    +

    Array memory

    +
    + +
    +
    +CU_MEMORYTYPE_UNIFIED = 4#
    +

    Unified device or host memory

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUcomputemode(value)#
    +

    Compute Modes

    +
    +
    +CU_COMPUTEMODE_DEFAULT = 0#
    +

    Default compute mode (Multiple contexts allowed per device)

    +
    + +
    +
    +CU_COMPUTEMODE_PROHIBITED = 2#
    +

    Compute-prohibited mode (No contexts can be created on this device at this time)

    +
    + +
    +
    +CU_COMPUTEMODE_EXCLUSIVE_PROCESS = 3#
    +

    Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time)

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmem_advise(value)#
    +

    Memory advise values

    +
    +
    +CU_MEM_ADVISE_SET_READ_MOSTLY = 1#
    +

    Data will mostly be read and only occasionally be written to

    +
    + +
    +
    +CU_MEM_ADVISE_UNSET_READ_MOSTLY = 2#
    +

    Undo the effect of CU_MEM_ADVISE_SET_READ_MOSTLY

    +
    + +
    +
    +CU_MEM_ADVISE_SET_PREFERRED_LOCATION = 3#
    +

    Set the preferred location for the data as the specified device

    +
    + +
    +
    +CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = 4#
    +

    Clear the preferred location for the data

    +
    + +
    +
    +CU_MEM_ADVISE_SET_ACCESSED_BY = 5#
    +

    Data will be accessed by the specified device, so prevent page faults as much as possible

    +
    + +
    +
    +CU_MEM_ADVISE_UNSET_ACCESSED_BY = 6#
    +

    Let the Unified Memory subsystem decide on the page faulting policy for the specified device

    +
    + +
    + +
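    Advice values are applied to a managed range with cuMemAdvise. A minimal sketch, assuming a platform with concurrent managed access (see CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS); the 1 MiB size is arbitrary.

        from cuda.bindings import driver

        def check(result):
            err, *rest = result
            assert err == driver.CUresult.CUDA_SUCCESS, err
            return rest[0] if len(rest) == 1 else rest

        check(driver.cuInit(0))
        dev = check(driver.cuDeviceGet(0))
        ctx = check(driver.cuCtxCreate(0, dev))

        nbytes = 1 << 20
        managed = check(driver.cuMemAllocManaged(
            nbytes, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL))

        # Mark the range read-mostly so the driver may keep read-only copies,
        # then set device 0 as its preferred physical location.
        check(driver.cuMemAdvise(
            managed, nbytes,
            driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, dev))
        check(driver.cuMemAdvise(
            managed, nbytes,
            driver.CUmem_advise.CU_MEM_ADVISE_SET_PREFERRED_LOCATION, dev))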
    +
    +class cuda.bindings.driver.CUmem_range_attribute(value)#
    +

    Memory range attributes

    +
    +
    +CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = 1#
    +

    Whether the range will mostly be read and only occasionally be written to

    +
    + +
    +
    +CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = 2#
    +

    The preferred location of the range

    +
    + +
    +
    +CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = 3#
    +

    Memory range has CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device

    +
    + +
    +
    +CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = 4#
    +

    The last location to which the range was prefetched

    +
    + +
    +
    +CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE = 5#
    +

    The preferred location type of the range

    +
    + +
    +
    +CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID = 6#
    +

    The preferred location id of the range

    +
    + +
    +
    +CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE = 7#
    +

    The last location type to which the range was prefetched

    +
    + +
    +
    +CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID = 8#
    +

    The last location id to which the range was prefetched

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUjit_option(value)#
    +

    Online compiler and linker options

    +
    +
    +CU_JIT_MAX_REGISTERS = 0#
    +

    Max number of registers that a thread may use.

    +

    Option type: unsigned int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_THREADS_PER_BLOCK = 1#
    +

    IN: Specifies minimum number of threads per block to target compilation for

    +

    OUT: Returns the number of threads the compiler actually targeted. This restricts the resource utilization of the compiler (e.g. max registers) such that a block with the given number of threads should be able to launch based on register limitations. Note, this option does not currently take into account any other resource limitations, such as shared memory utilization.

    +

    Cannot be combined with CU_JIT_TARGET.

    +

    Option type: unsigned int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_WALL_TIME = 2#
    +

    Overwrites the option value with the total wall clock time, in milliseconds, spent in the compiler and linker

    +

    Option type: float

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_INFO_LOG_BUFFER = 3#
    +

    Pointer to a buffer in which to print any log messages that are informational in nature (the buffer size is specified via option CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)

    +

    Option type: char *

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = 4#
    +

    IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator)

    +

    OUT: Amount of log buffer filled with messages

    +

    Option type: unsigned int

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_ERROR_LOG_BUFFER = 5#
    +

    Pointer to a buffer in which to print any log messages that reflect errors (the buffer size is specified via option CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)

    +

    Option type: char *

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = 6#
    +

    IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator)

    +

    OUT: Amount of log buffer filled with messages

    +

    Option type: unsigned int

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_OPTIMIZATION_LEVEL = 7#
    +

    Level of optimizations to apply to generated code (0 - 4), with 4 being the default and highest level of optimizations.

    +

    Option type: unsigned int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_TARGET_FROM_CUCONTEXT = 8#
    +

    No option value required. Determines the target based on the current attached context (default)

    +

    Option type: No option value needed

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_TARGET = 9#
    +

    Target is chosen based on supplied CUjit_target. Cannot be combined with CU_JIT_THREADS_PER_BLOCK.

    +

    Option type: unsigned int for enumerated type CUjit_target

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_FALLBACK_STRATEGY = 10#
    +

    Specifies choice of fallback strategy if matching cubin is not found. Choice is based on supplied CUjit_fallback. This option cannot be used with cuLink* APIs as the linker requires exact matches.

    +

    Option type: unsigned int for enumerated type CUjit_fallback

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_GENERATE_DEBUG_INFO = 11#
    +

    Specifies whether to create debug information in output (-g) (0: false, default)

    +

    Option type: int

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_LOG_VERBOSE = 12#
    +

    Generate verbose log messages (0: false, default)

    +

    Option type: int

    +

    Applies to: compiler and linker

    +
    + +
    +
    +CU_JIT_GENERATE_LINE_INFO = 13#
    +

    Generate line number information (-lineinfo) (0: false, default)

    +

    Option type: int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_CACHE_MODE = 14#
    +

    Specifies whether to enable caching explicitly (-dlcm)

    +

    Choice is based on supplied CUjit_cacheMode_enum.

    +

    Option type: unsigned int for enumerated type CUjit_cacheMode_enum

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_NEW_SM3X_OPT = 15#
    +

    [Deprecated]

    +
    + +
    +
    +CU_JIT_FAST_COMPILE = 16#
    +

    This JIT option is used for internal purposes only.

    +
    + +
    +
    +CU_JIT_GLOBAL_SYMBOL_NAMES = 17#
    +

    Array of device symbol names that will be relocated to the corresponding host addresses stored in CU_JIT_GLOBAL_SYMBOL_ADDRESSES.

    +

    Must contain CU_JIT_GLOBAL_SYMBOL_COUNT entries.

    +

    When loading a device module, the driver will relocate all encountered unresolved symbols to the host addresses.

    +

    It is only allowed to register symbols that correspond to unresolved global variables.

    +

    It is illegal to register the same device symbol at multiple addresses.

    +

    Option type: const char **

    +

    Applies to: dynamic linker only

    +
    + +
    +
    +CU_JIT_GLOBAL_SYMBOL_ADDRESSES = 18#
    +

    Array of host addresses that will be used to relocate corresponding device symbols stored in CU_JIT_GLOBAL_SYMBOL_NAMES.

    +

    Must contain CU_JIT_GLOBAL_SYMBOL_COUNT entries.

    +

    Option type: void **

    +

    Applies to: dynamic linker only

    +
    + +
    +
    +CU_JIT_GLOBAL_SYMBOL_COUNT = 19#
    +

    Number of entries in CU_JIT_GLOBAL_SYMBOL_NAMES and CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.

    +

    Option type: unsigned int

    +

    Applies to: dynamic linker only

    +
    + +
    +
    +CU_JIT_LTO = 20#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_FTZ = 21#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_PREC_DIV = 22#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_PREC_SQRT = 23#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_FMA = 24#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_REFERENCED_KERNEL_NAMES = 25#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_REFERENCED_KERNEL_COUNT = 26#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_REFERENCED_VARIABLE_NAMES = 27#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_REFERENCED_VARIABLE_COUNT = 28#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES = 29#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_POSITION_INDEPENDENT_CODE = 30#
    +

    Generate position independent code (0: false)

    +

    Option type: int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_MIN_CTA_PER_SM = 31#
    +

    This option hints to the JIT compiler the minimum number of CTAs from the kernel’s grid to be mapped to an SM. This option is ignored when used together with CU_JIT_MAX_REGISTERS or CU_JIT_THREADS_PER_BLOCK. Optimizations based on this option need CU_JIT_MAX_THREADS_PER_BLOCK to be specified as well. For kernels already using PTX directive .minnctapersm, this option will be ignored by default. Use CU_JIT_OVERRIDE_DIRECTIVE_VALUES to let this option take precedence over the PTX directive. Option type: unsigned int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_MAX_THREADS_PER_BLOCK = 32#
    +

    Maximum number of threads in a thread block, computed as the product of the maximum extent specified for each dimension of the block. This limit is guaranteed not to be exceeded in any invocation of the kernel. Exceeding the maximum number of threads results in a runtime error or kernel launch failure. For kernels already using PTX directive .maxntid, this option will be ignored by default. Use CU_JIT_OVERRIDE_DIRECTIVE_VALUES to let this option take precedence over the PTX directive. Option type: int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_OVERRIDE_DIRECTIVE_VALUES = 33#
    +

    This option lets the values specified using CU_JIT_MAX_REGISTERS, CU_JIT_THREADS_PER_BLOCK, CU_JIT_MAX_THREADS_PER_BLOCK and CU_JIT_MIN_CTA_PER_SM take precedence over any PTX directives. (0: Disable, default; 1: Enable) Option type: int

    +

    Applies to: compiler only

    +
    + +
    +
    +CU_JIT_NUM_OPTIONS = 34#
    +
    + +
    + +
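    JIT options accompany module loading, e.g. through cuModuleLoadDataEx. A minimal sketch follows; the hand-written PTX and the convention of passing options and their values as two parallel Python lists are assumptions for illustration, not a documented calling pattern.

        from cuda.bindings import driver

        # Trivial hand-written PTX (assumption); real code would usually
        # obtain PTX from NVRTC.
        PTX = b"""
        .version 7.0
        .target sm_50
        .address_size 64
        .visible .entry noop() { ret; }
        """

        def check(result):
            err, *rest = result
            assert err == driver.CUresult.CUDA_SUCCESS, err
            return rest[0] if len(rest) == 1 else rest

        check(driver.cuInit(0))
        dev = check(driver.cuDeviceGet(0))
        ctx = check(driver.cuCtxCreate(0, dev))

        # Options and their values as parallel lists (assumed convention).
        options = [driver.CUjit_option.CU_JIT_GENERATE_LINE_INFO,
                   driver.CUjit_option.CU_JIT_TARGET_FROM_CUCONTEXT]
        values = [1, 0]  # CU_JIT_TARGET_FROM_CUCONTEXT takes no value
        module = check(driver.cuModuleLoadDataEx(
            PTX, len(options), options, values))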
    +
    +class cuda.bindings.driver.CUjit_target(value)#
    +

    Online compilation targets

    +
    +
    +CU_TARGET_COMPUTE_30 = 30#
    +

    Compute device class 3.0

    +
    + +
    +
    +CU_TARGET_COMPUTE_32 = 32#
    +

    Compute device class 3.2

    +
    + +
    +
    +CU_TARGET_COMPUTE_35 = 35#
    +

    Compute device class 3.5

    +
    + +
    +
    +CU_TARGET_COMPUTE_37 = 37#
    +

    Compute device class 3.7

    +
    + +
    +
    +CU_TARGET_COMPUTE_50 = 50#
    +

    Compute device class 5.0

    +
    + +
    +
    +CU_TARGET_COMPUTE_52 = 52#
    +

    Compute device class 5.2

    +
    + +
    +
    +CU_TARGET_COMPUTE_53 = 53#
    +

    Compute device class 5.3

    +
    + +
    +
    +CU_TARGET_COMPUTE_60 = 60#
    +

    Compute device class 6.0.

    +
    + +
    +
    +CU_TARGET_COMPUTE_61 = 61#
    +

    Compute device class 6.1.

    +
    + +
    +
    +CU_TARGET_COMPUTE_62 = 62#
    +

    Compute device class 6.2.

    +
    + +
    +
    +CU_TARGET_COMPUTE_70 = 70#
    +

    Compute device class 7.0.

    +
    + +
    +
    +CU_TARGET_COMPUTE_72 = 72#
    +

    Compute device class 7.2.

    +
    + +
    +
    +CU_TARGET_COMPUTE_75 = 75#
    +

    Compute device class 7.5.

    +
    + +
    +
    +CU_TARGET_COMPUTE_80 = 80#
    +

    Compute device class 8.0.

    +
    + +
    +
    +CU_TARGET_COMPUTE_86 = 86#
    +

    Compute device class 8.6.

    +
    + +
    +
    +CU_TARGET_COMPUTE_87 = 87#
    +

    Compute device class 8.7.

    +
    + +
    +
    +CU_TARGET_COMPUTE_89 = 89#
    +

    Compute device class 8.9.

    +
    + +
    +
    +CU_TARGET_COMPUTE_90 = 90#
    +

    Compute device class 9.0.

    +
    + +
    +
    +CU_TARGET_COMPUTE_90A = 65626#
    +

    Compute device class 9.0 with accelerated features.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUjit_fallback(value)#
    +

    Cubin matching fallback strategies

    +
    +
    +CU_PREFER_PTX = 0#
    +

    Prefer to compile ptx if exact binary match not found

    +
    + +
    +
    +CU_PREFER_BINARY = 1#
    +

    Prefer to fall back to compatible binary code if exact match not found

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUjit_cacheMode(value)#
    +

    Caching modes for dlcm

    +
    +
    +CU_JIT_CACHE_OPTION_NONE = 0#
    +

    Compile with no -dlcm flag specified

    +
    + +
    +
    +CU_JIT_CACHE_OPTION_CG = 1#
    +

    Compile with L1 cache disabled

    +
    + +
    +
    +CU_JIT_CACHE_OPTION_CA = 2#
    +

    Compile with L1 cache enabled

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUjitInputType(value)#
    +

    Device code formats

    +
    +
    +CU_JIT_INPUT_CUBIN = 0#
    +

    Compiled device-class-specific device code

    +

    Applicable options: none

    +
    + +
    +
    +CU_JIT_INPUT_PTX = 1#
    +

    PTX source code

    +

    Applicable options: PTX compiler options

    +
    + +
    +
    +CU_JIT_INPUT_FATBINARY = 2#
    +

    Bundle of multiple cubins and/or PTX of some device code

    +

    Applicable options: PTX compiler options, CU_JIT_FALLBACK_STRATEGY

    +
    + +
    +
    +CU_JIT_INPUT_OBJECT = 3#
    +

    Host object with embedded device code

    +

    Applicable options: PTX compiler options, CU_JIT_FALLBACK_STRATEGY

    +
    + +
    +
    +CU_JIT_INPUT_LIBRARY = 4#
    +

    Archive of host objects with embedded device code

    +

    Applicable options: PTX compiler options, CU_JIT_FALLBACK_STRATEGY

    +
    + +
    +
    +CU_JIT_INPUT_NVVM = 5#
    +

    [Deprecated]

    +

    Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0

    +
    + +
    +
    +CU_JIT_NUM_INPUT_TYPES = 6#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphicsRegisterFlags(value)#
    +

    Flags to register a graphics resource

    +
    +
    +CU_GRAPHICS_REGISTER_FLAGS_NONE = 0#
    +
    + +
    +
    +CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = 1#
    +
    + +
    +
    +CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = 2#
    +
    + +
    +
    +CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = 4#
    +
    + +
    +
    +CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = 8#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphicsMapResourceFlags(value)#
    +

    Flags for mapping and unmapping interop resources

    +
    +
    +CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0#
    +
    + +
    +
    +CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 1#
    +
    + +
    +
    +CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 2#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUarray_cubemap_face(value)#
    +

    Array indices for cube faces

    +
    +
    +CU_CUBEMAP_FACE_POSITIVE_X = 0#
    +

    Positive X face of cubemap

    +
    + +
    +
    +CU_CUBEMAP_FACE_NEGATIVE_X = 1#
    +

    Negative X face of cubemap

    +
    + +
    +
    +CU_CUBEMAP_FACE_POSITIVE_Y = 2#
    +

    Positive Y face of cubemap

    +
    + +
    +
    +CU_CUBEMAP_FACE_NEGATIVE_Y = 3#
    +

    Negative Y face of cubemap

    +
    + +
    +
    +CU_CUBEMAP_FACE_POSITIVE_Z = 4#
    +

    Positive Z face of cubemap

    +
    + +
    +
    +CU_CUBEMAP_FACE_NEGATIVE_Z = 5#
    +

    Negative Z face of cubemap

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlimit(value)#
    +

    Limits

    +
    +
    +CU_LIMIT_STACK_SIZE = 0#
    +

    GPU thread stack size

    +
    + +
    +
    +CU_LIMIT_PRINTF_FIFO_SIZE = 1#
    +

    GPU printf FIFO size

    +
    + +
    +
    +CU_LIMIT_MALLOC_HEAP_SIZE = 2#
    +

    GPU malloc heap size

    +
    + +
    +
    +CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = 3#
    +

    GPU device runtime launch synchronize depth

    +
    + +
    +
    +CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = 4#
    +

    GPU device runtime pending launch count

    +
    + +
    +
    +CU_LIMIT_MAX_L2_FETCH_GRANULARITY = 5#
    +

    A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint

    +
    + +
    +
    +CU_LIMIT_PERSISTING_L2_CACHE_SIZE = 6#
    +

    A size in bytes for L2 persisting lines cache size

    +
    + +
    +
    +CU_LIMIT_SHMEM_SIZE = 7#
    +

    A maximum size in bytes of shared memory available to CUDA kernels on a CIG context. Can only be queried, cannot be set

    +
    + +
    +
    +CU_LIMIT_CIG_ENABLED = 8#
    +

    A non-zero value indicates this CUDA context is a CIG-enabled context. Can only be queried, cannot be set

    +
    + +
    +
    +CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED = 9#
    +

    When set to a non-zero value, CUDA will fail to launch a kernel on a CIG context, instead of using the fallback path, if the kernel uses more shared memory than available

    +
    + +
    +
    +CU_LIMIT_MAX = 10#
    +
    + +
    + +
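
    Limits are read and written per context with cuCtxGetLimit / cuCtxSetLimit; note that CU_LIMIT_SHMEM_SIZE and CU_LIMIT_CIG_ENABLED above are query-only. A short sketch, assuming cuInit has run and a context is current:

```python
from cuda.bindings import driver

limit = driver.CUlimit.CU_LIMIT_STACK_SIZE
err, size = driver.cuCtxGetLimit(limit)        # current per-thread stack size
err, = driver.cuCtxSetLimit(limit, size * 2)   # request a larger stack
```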
    +
    +class cuda.bindings.driver.CUresourcetype(value)#
    +

    Resource types

    +
    +
    +CU_RESOURCE_TYPE_ARRAY = 0#
    +

    Array resource

    +
    + +
    +
    +CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 1#
    +

    Mipmapped array resource

    +
    + +
    +
    +CU_RESOURCE_TYPE_LINEAR = 2#
    +

    Linear resource

    +
    + +
    +
    +CU_RESOURCE_TYPE_PITCH2D = 3#
    +

    Pitch 2D resource

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUaccessProperty(value)#
    +

    Specifies performance hint with CUaccessPolicyWindow for hitProp and missProp members.

    +
    +
    +CU_ACCESS_PROPERTY_NORMAL = 0#
    +

    Normal cache persistence.

    +
    + +
    +
    +CU_ACCESS_PROPERTY_STREAMING = 1#
    +

    Streaming access is less likely to persist in cache.

    +
    + +
    +
    +CU_ACCESS_PROPERTY_PERSISTING = 2#
    +

    Persisting access is more likely to persist in cache.

    +
    + +
    + +
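
    These properties are used as the hitProp/missProp fields of a CUaccessPolicyWindow, which is then attached to a stream or launch. A sketch of filling the window, assuming the binding mirrors the C struct field names and that CUlaunchAttributeValue is accepted by cuStreamSetAttribute (dev_ptr and stream are hypothetical existing handles):

```python
from cuda.bindings import driver

win = driver.CUaccessPolicyWindow()
win.base_ptr = int(dev_ptr)   # hypothetical device allocation
win.num_bytes = 1 << 20       # 1 MiB window
win.hitRatio = 0.6            # fraction of accesses that receive hitProp
win.hitProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING
win.missProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING

val = driver.CUlaunchAttributeValue()
val.accessPolicyWindow = win
err, = driver.cuStreamSetAttribute(
    stream,
    driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW,
    val)
```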
    +
    +class cuda.bindings.driver.CUgraphConditionalNodeType(value)#
    +

    Conditional node types

    +
    +
    +CU_GRAPH_COND_TYPE_IF = 0#
    +

    Conditional ‘if’ Node. Body executed once if condition value is non-zero.

    +
    + +
    +
    +CU_GRAPH_COND_TYPE_WHILE = 1#
    +

    Conditional ‘while’ Node. Body executed repeatedly while condition value is non-zero.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphNodeType(value)#
    +

    Graph node types

    +
    +
    +CU_GRAPH_NODE_TYPE_KERNEL = 0#
    +

    GPU kernel node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_MEMCPY = 1#
    +

    Memcpy node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_MEMSET = 2#
    +

    Memset node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_HOST = 3#
    +

    Host (executable) node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_GRAPH = 4#
    +

    Node which executes an embedded graph

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_EMPTY = 5#
    +

    Empty (no-op) node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_WAIT_EVENT = 6#
    +

    External event wait node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_EVENT_RECORD = 7#
    +

    External event record node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = 8#
    +

    External semaphore signal node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = 9#
    +

    External semaphore wait node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_MEM_ALLOC = 10#
    +

    Memory Allocation Node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_MEM_FREE = 11#
    +

    Memory Free Node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_BATCH_MEM_OP = 12#
    +

    Batch MemOp Node

    +
    + +
    +
    +CU_GRAPH_NODE_TYPE_CONDITIONAL = 13#
    +

    Conditional Node. May be used to implement a conditional execution path or loop
    inside of a graph. The graph(s) contained within the body of the conditional node
    can be selectively executed or iterated upon based on the value of a conditional
    variable.

    Handles must be created in advance of creating the node using
    cuGraphConditionalHandleCreate.

    The following restrictions apply to graphs which contain conditional nodes:

    - The graph cannot be used in a child node.
    - Only one instantiation of the graph may exist at any point in time.
    - The graph cannot be cloned.

    To set the control value, supply a default value when creating the handle and/or
    call cudaGraphSetConditional from device code.

    +
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphDependencyType(value)#
    +

    Type annotations that can be applied to graph edges as part of CUgraphEdgeData.

    +
    +
    +CU_GRAPH_DEPENDENCY_TYPE_DEFAULT = 0#
    +

    This is an ordinary dependency.

    +
    + +
    +
    +CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC = 1#
    +

    This dependency type allows the downstream node to use cudaGridDependencySynchronize(). It may only be used between kernel nodes, and must be used with either the CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC or CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER outgoing port.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphInstantiateResult(value)#
    +

    Graph instantiation results

    +
    +
    +CUDA_GRAPH_INSTANTIATE_SUCCESS = 0#
    +

    Instantiation succeeded

    +
    + +
    +
    +CUDA_GRAPH_INSTANTIATE_ERROR = 1#
    +

    Instantiation failed for an unexpected reason which is described in the return value of the function

    +
    + +
    +
    +CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE = 2#
    +

    Instantiation failed due to invalid structure, such as cycles

    +
    + +
    +
    +CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED = 3#
    +

    Instantiation for device launch failed because the graph contained an unsupported operation

    +
    + +
    +
    +CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED = 4#
    +

    Instantiation for device launch failed due to the nodes belonging to different contexts

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUsynchronizationPolicy(value)#
    +
    +
    +CU_SYNC_POLICY_AUTO = 1#
    +
    + +
    +
    +CU_SYNC_POLICY_SPIN = 2#
    +
    + +
    +
    +CU_SYNC_POLICY_YIELD = 3#
    +
    + +
    +
    +CU_SYNC_POLICY_BLOCKING_SYNC = 4#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUclusterSchedulingPolicy(value)#
    +

    Cluster scheduling policies. These may be passed to cuFuncSetAttribute or cuKernelSetAttribute

    +
    +
    +CU_CLUSTER_SCHEDULING_POLICY_DEFAULT = 0#
    +

    the default policy

    +
    + +
    +
    +CU_CLUSTER_SCHEDULING_POLICY_SPREAD = 1#
    +

    spread the blocks within a cluster to the SMs

    +
    + +
    +
    +CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING = 2#
    +

    allow the hardware to load-balance the blocks in a cluster to the SMs

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlaunchMemSyncDomain(value)#
    +

    Memory Synchronization Domain. A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating latency increase from memory barriers ordering unrelated traffic. By default, kernels are launched in domain 0. Kernels launched with CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a different domain ID. The user may also alter the domain ID with CUlaunchMemSyncDomainMap for a specific stream / graph node / kernel launch. See CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, cuStreamSetAttribute, cuLaunchKernelEx, cuGraphKernelNodeSetAttribute. Memory operations done in kernels launched in different domains are considered system-scope distanced. In other words, a GPU-scoped memory synchronization is not sufficient for memory order to be observed by kernels in another memory synchronization domain even if they are on the same GPU.

    +
    +
    +CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT = 0#
    +

    Launch kernels in the default domain

    +
    + +
    +
    +CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE = 1#
    +

    Launch kernels in the remote domain

    +
    + +
    + +
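
    A sketch of opting a stream into the remote domain via the launch-attribute mechanism described above (stream is a hypothetical existing CUstream; assumes CUlaunchAttributeValue is accepted where the C API takes CUstreamAttrValue, which aliases it):

```python
from cuda.bindings import driver

val = driver.CUlaunchAttributeValue()
val.memSyncDomain = driver.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE
err, = driver.cuStreamSetAttribute(
    stream,
    driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN,
    val)
```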
    +
    +class cuda.bindings.driver.CUlaunchAttributeID(value)#
    +

    Launch attributes enum; used as id field of CUlaunchAttribute

    +
    +
    +CU_LAUNCH_ATTRIBUTE_IGNORE = 0#
    +

    Ignored entry, for convenient composition

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1#
    +

    Valid for streams, graph nodes, launches. See accessPolicyWindow.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_COOPERATIVE = 2#
    +

    Valid for graph nodes, launches. See cooperative.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3#
    +

    Valid for streams. See syncPolicy.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = 4#
    +

    Valid for graph nodes, launches. See clusterDim.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5#
    +

    Valid for graph nodes, launches. See clusterSchedulingPolicyPreference.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = 6#
    +

    Valid for launches. Setting programmaticStreamSerializationAllowed to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid’s execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions).

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = 7#
    +

    Valid for launches. Set programmaticEvent to record the event. An event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or the CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block’s execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling cuEventSynchronize()) are not guaranteed to observe the release precisely when it is released. For example, cuEventSynchronize() may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also that this type of dependency allows, but does not guarantee, concurrent execution of tasks.

    +
    +

    The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the CU_EVENT_DISABLE_TIMING flag set).

    +
    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_PRIORITY = 8#
    +

    Valid for streams, graph nodes, launches. See priority.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9#
    +

    Valid for streams, graph nodes, launches. See memSyncDomainMap.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = 10#
    +

    Valid for streams, graph nodes, launches. See memSyncDomain.

    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = 12#
    +

    Valid for launches. Set launchCompletionEvent to record the event.

    +
    +

    Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock.

    +

    A launch completion event is nominally similar to a programmatic event with triggerAtBlockStart set except that it is not visible to cudaGridDependencySynchronize() and can be used with compute capability less than 9.0.

    +

    The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the CU_EVENT_DISABLE_TIMING flag set).

    +
    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = 13#
    +

    Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error.

    +
    +

    CUlaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node’s kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see cudaGraphKernelNodeUpdatesApply.

    +

    Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via cuGraphDestroyNode. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via cuGraphKernelNodeCopyAttributes. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to cuGraphExecUpdate.

    +

    If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with cuGraphUpload before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again.

    +
    +
    + +
    +
    +CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 14#
    +

    Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting sharedMemCarveout to a percentage between 0-100 signals the CUDA driver to set the shared memory carveout preference, in percent of the total shared memory for that kernel launch. This attribute takes precedence over CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT. This is only a hint, and the CUDA driver can choose a different configuration if required for the launch.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstreamCaptureStatus(value)#
    +

    Possible stream capture statuses returned by cuStreamIsCapturing

    +
    +
    +CU_STREAM_CAPTURE_STATUS_NONE = 0#
    +

    Stream is not capturing

    +
    + +
    +
    +CU_STREAM_CAPTURE_STATUS_ACTIVE = 1#
    +

    Stream is actively capturing

    +
    + +
    +
    +CU_STREAM_CAPTURE_STATUS_INVALIDATED = 2#
    +

    Stream is part of a capture sequence that has been invalidated, but not terminated

    +
    + +
    + +
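
    In the Python binding, cuStreamIsCapturing returns the status as the second tuple element; for example, around a capture (stream is a hypothetical existing CUstream):

```python
from cuda.bindings import driver

err, = driver.cuStreamBeginCapture(
    stream, driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL)
err, status = driver.cuStreamIsCapturing(stream)
assert status == driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE

err, graph = driver.cuStreamEndCapture(stream)    # capture ends, graph returned
err, status = driver.cuStreamIsCapturing(stream)  # now ..._STATUS_NONE
```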
    +
    +class cuda.bindings.driver.CUstreamCaptureMode(value)#
    +

    Possible modes for stream capture thread interactions. For more details see cuStreamBeginCapture and cuThreadExchangeStreamCaptureMode

    +
    +
    +CU_STREAM_CAPTURE_MODE_GLOBAL = 0#
    +
    + +
    +
    +CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = 1#
    +
    + +
    +
    +CU_STREAM_CAPTURE_MODE_RELAXED = 2#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdriverProcAddress_flags(value)#
    +

    Flags to specify search options. For more details see cuGetProcAddress

    +
    +
    +CU_GET_PROC_ADDRESS_DEFAULT = 0#
    +

    Default search mode for driver symbols.

    +
    + +
    +
    +CU_GET_PROC_ADDRESS_LEGACY_STREAM = 1#
    +

    Search for legacy versions of driver symbols.

    +
    + +
    +
    +CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM = 2#
    +

    Search for per-thread versions of driver symbols.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdriverProcAddressQueryResult(value)#
    +

    Flags to indicate search status. For more details see cuGetProcAddress

    +
    +
    +CU_GET_PROC_ADDRESS_SUCCESS = 0#
    +

    Symbol was successfully found

    +
    + +
    +
    +CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND = 1#
    +

    Symbol was not found in search

    +
    + +
    +
    +CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT = 2#
    +

    Symbol was found but version supplied was not sufficient

    +
    + +
    + +
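
    A sketch of probing for a driver symbol, assuming the binding mirrors cuGetProcAddress_v2 and returns the function pointer together with this query result:

```python
from cuda.bindings import driver

err, fn_ptr, status = driver.cuGetProcAddress(
    b"cuMemAlloc", 12000,  # symbol name, minimum CUDA version (12.0)
    driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT)
if status != driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS:
    print("driver symbol unavailable:", status)
```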
    +
    +class cuda.bindings.driver.CUexecAffinityType(value)#
    +

    Execution Affinity Types

    +
    +
    +CU_EXEC_AFFINITY_TYPE_SM_COUNT = 0#
    +

    Create a context with limited SMs.

    +
    + +
    +
    +CU_EXEC_AFFINITY_TYPE_MAX = 1#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUcigDataType(value)#
    +
    +
    +CIG_DATA_TYPE_D3D12_COMMAND_QUEUE = 1#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlibraryOption(value)#
    +

    Library options to be specified with cuLibraryLoadData() or cuLibraryLoadFromFile()

    +
    +
    +CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE = 0#
    +
    + +
    +
    +CU_LIBRARY_BINARY_IS_PRESERVED = 1#
    +

    Specifies that the argument code passed to cuLibraryLoadData() will be preserved. Specifying this option will let the driver know that code can be accessed at any point until cuLibraryUnload(). The default behavior is for the driver to allocate and maintain its own copy of code. Note that this is only a memory usage optimization hint and the driver can choose to ignore it if required. Specifying this option with cuLibraryLoadFromFile() is invalid and will return CUDA_ERROR_INVALID_VALUE.

    +
    + +
    +
    +CU_LIBRARY_NUM_OPTIONS = 2#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUresult(value)#
    +

    Error codes

    +
    +
    +CUDA_SUCCESS = 0#
    +

    The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see cuEventQuery() and cuStreamQuery()).

    +
    + +
    +
    +CUDA_ERROR_INVALID_VALUE = 1#
    +

    This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values.

    +
    + +
    +
    +CUDA_ERROR_OUT_OF_MEMORY = 2#
    +

    The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation.

    +
    + +
    +
    +CUDA_ERROR_NOT_INITIALIZED = 3#
    +

    This indicates that the CUDA driver has not been initialized with cuInit() or that initialization has failed.

    +
    + +
    +
    +CUDA_ERROR_DEINITIALIZED = 4#
    +

    This indicates that the CUDA driver is in the process of shutting down.

    +
    + +
    +
    +CUDA_ERROR_PROFILER_DISABLED = 5#
    +

    This indicates that the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like Visual Profiler.

    +
    + +
    +
    +CUDA_ERROR_PROFILER_NOT_INITIALIZED = 6#
    +

    [Deprecated]

    +
    + +
    +
    +CUDA_ERROR_PROFILER_ALREADY_STARTED = 7#
    +

    [Deprecated]

    +
    + +
    +
    +CUDA_ERROR_PROFILER_ALREADY_STOPPED = 8#
    +

    [Deprecated]

    +
    + +
    +
    +CUDA_ERROR_STUB_LIBRARY = 34#
    +

    This indicates that the CUDA driver that the application has loaded is a stub library. Applications running with the stub rather than a real driver will receive this error from CUDA API calls.

    +
    + +
    +
    +CUDA_ERROR_DEVICE_UNAVAILABLE = 46#
    +

    This indicates that requested CUDA device is unavailable at the current time. Devices are often unavailable due to use of CU_COMPUTEMODE_EXCLUSIVE_PROCESS or CU_COMPUTEMODE_PROHIBITED.

    +
    + +
    +
    +CUDA_ERROR_NO_DEVICE = 100#
    +

    This indicates that no CUDA-capable devices were detected by the installed CUDA driver.

    +
    + +
    +
    +CUDA_ERROR_INVALID_DEVICE = 101#
    +

    This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device.

    +
    + +
    +
    +CUDA_ERROR_DEVICE_NOT_LICENSED = 102#
    +

    This error indicates that the Grid license is not applied.

    +
    + +
    +
    +CUDA_ERROR_INVALID_IMAGE = 200#
    +

    This indicates that the device kernel image is invalid. This can also indicate an invalid CUDA module.

    +
    + +
    +
    +CUDA_ERROR_INVALID_CONTEXT = 201#
    +

    This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had cuCtxDestroy() invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See cuCtxGetApiVersion() for more details. This can also be returned if the green context passed to an API call was not converted to a CUcontext using cuCtxFromGreenCtx API.

    +
    + +
    +
    +CUDA_ERROR_CONTEXT_ALREADY_CURRENT = 202#
    +

    This indicated that the context being supplied as a parameter to the API call was already the active context. [Deprecated]

    +
    + +
    +
    +CUDA_ERROR_MAP_FAILED = 205#
    +

    This indicates that a map or register operation has failed.

    +
    + +
    +
    +CUDA_ERROR_UNMAP_FAILED = 206#
    +

    This indicates that an unmap or unregister operation has failed.

    +
    + +
    +
    +CUDA_ERROR_ARRAY_IS_MAPPED = 207#
    +

    This indicates that the specified array is currently mapped and thus cannot be destroyed.

    +
    + +
    +
    +CUDA_ERROR_ALREADY_MAPPED = 208#
    +

    This indicates that the resource is already mapped.

    +
    + +
    +
    +CUDA_ERROR_NO_BINARY_FOR_GPU = 209#
    +

    This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration.

    +
    + +
    +
    +CUDA_ERROR_ALREADY_ACQUIRED = 210#
    +

    This indicates that a resource has already been acquired.

    +
    + +
    +
    +CUDA_ERROR_NOT_MAPPED = 211#
    +

    This indicates that a resource is not mapped.

    +
    + +
    +
    +CUDA_ERROR_NOT_MAPPED_AS_ARRAY = 212#
    +

    This indicates that a mapped resource is not available for access as an array.

    +
    + +
    +
    +CUDA_ERROR_NOT_MAPPED_AS_POINTER = 213#
    +

    This indicates that a mapped resource is not available for access as a pointer.

    +
    + +
    +
    +CUDA_ERROR_ECC_UNCORRECTABLE = 214#
    +

    This indicates that an uncorrectable ECC error was detected during execution.

    +
    + +
    +
    +CUDA_ERROR_UNSUPPORTED_LIMIT = 215#
    +

    This indicates that the CUlimit passed to the API call is not supported by the active device.

    +
    + +
    +
    +CUDA_ERROR_CONTEXT_ALREADY_IN_USE = 216#
    +

    This indicates that the CUcontext passed to the API call can only be bound to a single CPU thread at a time but is already bound to a CPU thread.

    +
    + +
    +
    +CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = 217#
    +

    This indicates that peer access is not supported across the given devices.

    +
    + +
    +
    +CUDA_ERROR_INVALID_PTX = 218#
    +

    This indicates that a PTX JIT compilation failed.

    +
    + +
    +
    +CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219#
    +

    This indicates an error with OpenGL or DirectX context.

    +
    + +
    +
    +CUDA_ERROR_NVLINK_UNCORRECTABLE = 220#
    +

    This indicates that an uncorrectable NVLink error was detected during the execution.

    +
    + +
    +
    +CUDA_ERROR_JIT_COMPILER_NOT_FOUND = 221#
    +

    This indicates that the PTX JIT compiler library was not found.

    +
    + +
    +
    +CUDA_ERROR_UNSUPPORTED_PTX_VERSION = 222#
    +

    This indicates that the provided PTX was compiled with an unsupported toolchain.

    +
    + +
    +
    +CUDA_ERROR_JIT_COMPILATION_DISABLED = 223#
    +

    This indicates that the PTX JIT compilation was disabled.

    +
    + +
    +
    +CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = 224#
    +

    This indicates that the CUexecAffinityType passed to the API call is not supported by the active device.

    +
    + +
    +
    +CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC = 225#
    +

    This indicates that the code to be compiled by the PTX JIT contains unsupported call to cudaDeviceSynchronize.

    +
    + +
    +
    +CUDA_ERROR_INVALID_SOURCE = 300#
    +

    This indicates that the device kernel source is invalid. This includes compilation/linker errors encountered in device code or user error.

    +
    + +
    +
    +CUDA_ERROR_FILE_NOT_FOUND = 301#
    +

    This indicates that the file specified was not found.

    +
    + +
    +
    +CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302#
    +

    This indicates that a link to a shared object failed to resolve.

    +
    + +
    +
    +CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = 303#
    +

    This indicates that initialization of a shared object failed.

    +
    + +
    +
    +CUDA_ERROR_OPERATING_SYSTEM = 304#
    +

    This indicates that an OS call failed.

    +
    + +
    +
    +CUDA_ERROR_INVALID_HANDLE = 400#
    +

    This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like CUstream and CUevent.

    +
    + +
    +
    +CUDA_ERROR_ILLEGAL_STATE = 401#
    +

    This indicates that a resource required by the API call is not in a valid state to perform the requested operation.

    +
    + +
    +
    +CUDA_ERROR_LOSSY_QUERY = 402#
    +

    This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments.

    +
    + +
    +
    +CUDA_ERROR_NOT_FOUND = 500#
    +

    This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names.

    +
    + +
    +
    +CUDA_ERROR_NOT_READY = 600#
    +

    This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than CUDA_SUCCESS (which indicates completion). Calls that may return this value include cuEventQuery() and cuStreamQuery().

    +
    + +
    +
    +CUDA_ERROR_ILLEGAL_ADDRESS = 700#
    +

    While executing a kernel, the device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = 701#
    +

    This indicates that a launch did not occur because it did not have appropriate resources. This error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel’s register count. Passing arguments of the wrong size (i.e. a 64-bit pointer when a 32-bit int is expected) is equivalent to passing too many arguments and can also result in this error.

    +
    + +
    +
    +CUDA_ERROR_LAUNCH_TIMEOUT = 702#
    +

    This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device attribute CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = 703#
    +

    This error indicates a kernel launch that uses an incompatible texturing mode.

    +
    + +
    +
    +CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = 704#
    +

    This error indicates that a call to cuCtxEnablePeerAccess() is trying to re-enable peer access to a context which has already had peer access to it enabled.

    +
    + +
    +
    +CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = 705#
    +

    This error indicates that cuCtxDisablePeerAccess() is trying to disable peer access which has not been enabled yet via cuCtxEnablePeerAccess().

    +
    + +
    +
    +CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = 708#
    +

    This error indicates that the primary context for the specified device has already been initialized.

    +
    + +
    +
    +CUDA_ERROR_CONTEXT_IS_DESTROYED = 709#
    +

    This error indicates that the context current to the calling thread has been destroyed using cuCtxDestroy, or is a primary context which has not yet been initialized.

    +
    + +
    +
    +CUDA_ERROR_ASSERT = 710#
    +

    A device-side assert triggered during kernel execution. The context cannot be used anymore, and must be destroyed. All existing device memory allocations from this context are invalid and must be reconstructed if the program is to continue using CUDA.

    +
    + +
    +
    +CUDA_ERROR_TOO_MANY_PEERS = 711#
    +

    This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to cuCtxEnablePeerAccess().

    +
    + +
    +
    +CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = 712#
    +

    This error indicates that the memory range passed to cuMemHostRegister() has already been registered.

    +
    + +
    +
    +CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = 713#
    +

    This error indicates that the pointer passed to cuMemHostUnregister() does not correspond to any currently registered memory region.

    +
    + +
    +
    +CUDA_ERROR_HARDWARE_STACK_ERROR = 714#
    +

    While executing a kernel, the device encountered a stack error. This can be due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_ILLEGAL_INSTRUCTION = 715#
    +

    While executing a kernel, the device encountered an illegal instruction. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_MISALIGNED_ADDRESS = 716#
    +

    While executing a kernel, the device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_INVALID_ADDRESS_SPACE = 717#
    +

    While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_INVALID_PC = 718#
    +

    While executing a kernel, the device program counter wrapped its address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_LAUNCH_FAILED = 719#
    +

    An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = 720#
    +

    This error indicates that the number of blocks launched per grid for a kernel that was launched via either cuLaunchCooperativeKernel or cuLaunchCooperativeKernelMultiDevice exceeds the maximum number of blocks as allowed by cuOccupancyMaxActiveBlocksPerMultiprocessor or cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors as specified by the device attribute CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.

    +
    + +
    +
    +CUDA_ERROR_NOT_PERMITTED = 800#
    +

    This error indicates that the attempted operation is not permitted.

    +
    + +
    +
    +CUDA_ERROR_NOT_SUPPORTED = 801#
    +

    This error indicates that the attempted operation is not supported on the current system or device.

    +
    + +
    +
    +CUDA_ERROR_SYSTEM_NOT_READY = 802#
    +

    This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide.

    +
    + +
    +
    +CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803#
    +

    This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions.

    +
    + +
    +
    +CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = 804#
    +

    This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable.

    +
    + +
    +
    +CUDA_ERROR_MPS_CONNECTION_FAILED = 805#
    +

    This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server.

    +
    + +
    +
    +CUDA_ERROR_MPS_RPC_FAILURE = 806#
    +

    This error indicates that the remote procedure call between the MPS server and the MPS client failed.

    +
    + +
    +
    +CUDA_ERROR_MPS_SERVER_NOT_READY = 807#
    +

    This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure.

    +
    + +
    +
    +CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = 808#
    +

    This error indicates that the hardware resources required to create an MPS client have been exhausted.

    +
    + +
    +
    +CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = 809#
    +

    This error indicates that the hardware resources required to support device connections have been exhausted.

    +
    + +
    +
    +CUDA_ERROR_MPS_CLIENT_TERMINATED = 810#
    +

    This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_CDP_NOT_SUPPORTED = 811#
    +

    This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it.

    +
    + +
    +
    +CUDA_ERROR_CDP_VERSION_MISMATCH = 812#
    +

    This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = 900#
    +

    This error indicates that the operation is not permitted when the stream is capturing.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = 901#
    +

    This error indicates that the current capture sequence on the stream has been invalidated due to a previous error.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_MERGE = 902#
    +

    This error indicates that the operation would have resulted in a merge of two independent capture sequences.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = 903#
    +

    This error indicates that the capture was not initiated in this stream.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_UNJOINED = 904#
    +

    This error indicates that the capture sequence contains a fork that was not joined to the primary stream.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_ISOLATION = 905#
    +

    This error indicates that a dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = 906#
    +

    This error indicates a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy.

    +
    + +
    +
    +CUDA_ERROR_CAPTURED_EVENT = 907#
    +

    This error indicates that the operation is not permitted on an event which was last recorded in a capturing stream.

    +
    + +
    +
    +CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = 908#
    +

    A stream capture sequence not initiated with the CU_STREAM_CAPTURE_MODE_RELAXED argument to cuStreamBeginCapture was passed to cuStreamEndCapture in a different thread.

    +
    + +
    +
    +CUDA_ERROR_TIMEOUT = 909#
    +

    This error indicates that the timeout specified for the wait operation has lapsed.

    +
    + +
    +
    +CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = 910#
    +

    This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.

    +
    + +
    +
    +CUDA_ERROR_EXTERNAL_DEVICE = 911#
    +

    This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device’s signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

    +
    + +
    +
    +CUDA_ERROR_INVALID_CLUSTER_SIZE = 912#
    +

    Indicates a kernel launch error due to cluster misconfiguration.

    +
    + +
    +
    +CUDA_ERROR_FUNCTION_NOT_LOADED = 913#
    +

    Indicates that a function handle is not loaded when calling an API that requires a loaded function.

    +
    + +
    +
    +CUDA_ERROR_INVALID_RESOURCE_TYPE = 914#
    +

    This error indicates one or more resources passed in are not valid resource types for the operation.

    +
    + +
    +
    +CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION = 915#
    +

    This error indicates one or more resources are insufficient or non-applicable for the operation.

    +
    + +
    +
    +CUDA_ERROR_UNKNOWN = 999#
    +

    This indicates that an unknown internal error has occurred.

    +
    + +
    + +
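
    Every function in cuda.bindings.driver returns one of these codes as the first element of its result tuple, so a small checker is the usual pattern:

```python
from cuda.bindings import driver

def check(err):
    # Raise with the driver's own error name for anything but CUDA_SUCCESS.
    if err != driver.CUresult.CUDA_SUCCESS:
        _, name = driver.cuGetErrorName(err)
        raise RuntimeError(f"CUDA driver error: {name.decode()}")

check(driver.cuInit(0)[0])
err, dev = driver.cuDeviceGet(0)
check(err)
```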
    +
    +class cuda.bindings.driver.CUdevice_P2PAttribute(value)#
    +

    P2P Attributes

    +
    +
    +CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = 1#
    +

    A relative value indicating the performance of the link between two devices

    +
    + +
    +
    +CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = 2#
    +

    P2P access is enabled

    +
    + +
    +
    +CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = 3#
    +

    Atomic operation over the link supported

    +
    + +
    +
    +CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = 4#
    +

    [Deprecated]

    +
    + +
    +
    +CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = 4#
    +

    Accessing CUDA arrays over the link supported

    +
    + +
    + +
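
    These are queried per device pair with cuDeviceGetP2PAttribute; for example (dev0 and dev1 are hypothetical handles from cuDeviceGet):

```python
from cuda.bindings import driver

attr = driver.CUdevice_P2PAttribute
err, rank = driver.cuDeviceGetP2PAttribute(
    attr.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK, dev0, dev1)
err, supported = driver.cuDeviceGetP2PAttribute(
    attr.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED, dev0, dev1)
```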
    +
    +class cuda.bindings.driver.CUresourceViewFormat(value)#
    +

    Resource view format

    +
    +
    +CU_RES_VIEW_FORMAT_NONE = 0#
    +

    No resource view format (use underlying resource format)

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_1X8 = 1#
    +

    1 channel unsigned 8-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_2X8 = 2#
    +

    2 channel unsigned 8-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_4X8 = 3#
    +

    4 channel unsigned 8-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_1X8 = 4#
    +

    1 channel signed 8-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_2X8 = 5#
    +

    2 channel signed 8-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_4X8 = 6#
    +

    4 channel signed 8-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_1X16 = 7#
    +

    1 channel unsigned 16-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_2X16 = 8#
    +

    2 channel unsigned 16-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_4X16 = 9#
    +

    4 channel unsigned 16-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_1X16 = 10#
    +

    1 channel signed 16-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_2X16 = 11#
    +

    2 channel signed 16-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_4X16 = 12#
    +

    4 channel signed 16-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_1X32 = 13#
    +

    1 channel unsigned 32-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_2X32 = 14#
    +

    2 channel unsigned 32-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UINT_4X32 = 15#
    +

    4 channel unsigned 32-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_1X32 = 16#
    +

    1 channel signed 32-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_2X32 = 17#
    +

    2 channel signed 32-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SINT_4X32 = 18#
    +

    4 channel signed 32-bit integers

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_FLOAT_1X16 = 19#
    +

    1 channel 16-bit floating point

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_FLOAT_2X16 = 20#
    +

    2 channel 16-bit floating point

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_FLOAT_4X16 = 21#
    +

    4 channel 16-bit floating point

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_FLOAT_1X32 = 22#
    +

    1 channel 32-bit floating point

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_FLOAT_2X32 = 23#
    +

    2 channel 32-bit floating point

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_FLOAT_4X32 = 24#
    +

    4 channel 32-bit floating point

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = 25#
    +

    Block compressed 1

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = 26#
    +

    Block compressed 2

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = 27#
    +

    Block compressed 3

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = 28#
    +

    Block compressed 4 unsigned

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SIGNED_BC4 = 29#
    +

    Block compressed 4 signed

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = 30#
    +

    Block compressed 5 unsigned

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SIGNED_BC5 = 31#
    +

    Block compressed 5 signed

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = 32#
    +

    Block compressed 6 unsigned half-float

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_SIGNED_BC6H = 33#
    +

    Block compressed 6 signed half-float

    +
    + +
    +
    +CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = 34#
    +

    Block compressed 7

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtensorMapDataType(value)#
    +

    Tensor map data type

    +
    +
    +CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_UINT16 = 1#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_UINT32 = 2#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_INT32 = 3#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_UINT64 = 4#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_INT64 = 5#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_FLOAT16 = 6#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_FLOAT32 = 7#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_FLOAT64 = 8#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 = 9#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ = 10#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 = 11#
    +
    + +
    +
    +CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ = 12#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtensorMapInterleave(value)#
    +

    Tensor map interleave layout type

    +
    +
    +CU_TENSOR_MAP_INTERLEAVE_NONE = 0#
    +
    + +
    +
    +CU_TENSOR_MAP_INTERLEAVE_16B = 1#
    +
    + +
    +
    +CU_TENSOR_MAP_INTERLEAVE_32B = 2#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtensorMapSwizzle(value)#
    +

    Tensor map swizzling mode of shared memory banks

    +
    +
    +CU_TENSOR_MAP_SWIZZLE_NONE = 0#
    +
    + +
    +
    +CU_TENSOR_MAP_SWIZZLE_32B = 1#
    +
    + +
    +
    +CU_TENSOR_MAP_SWIZZLE_64B = 2#
    +
    + +
    +
    +CU_TENSOR_MAP_SWIZZLE_128B = 3#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtensorMapL2promotion(value)#
    +

    Tensor map L2 promotion type

    +
    +
    +CU_TENSOR_MAP_L2_PROMOTION_NONE = 0#
    +
    + +
    +
    +CU_TENSOR_MAP_L2_PROMOTION_L2_64B = 1#
    +
    + +
    +
    +CU_TENSOR_MAP_L2_PROMOTION_L2_128B = 2#
    +
    + +
    +
    +CU_TENSOR_MAP_L2_PROMOTION_L2_256B = 3#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtensorMapFloatOOBfill(value)#
    +

    Tensor map out-of-bounds fill type

    +
    +
    +CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0#
    +
    + +
    +
    +CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA = 1#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS(value)#
    +

    Access flags that specify the level of access the current context’s device has on the memory referenced.

    +
    +
    +CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE = 0#
    +

    No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ = 1#
    +

    Read-only access, meaning writes to this memory are considered invalid accesses and thus return an error.

    +
    + +
    +
    +CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE = 3#
    +

    Read-write access, the device has full read-write access to the memory

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUexternalMemoryHandleType(value)#
    +

    External memory handle types

    +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1#
    +

    Handle is an opaque file descriptor

    +
    + +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2#
    +

    Handle is an opaque shared NT handle

    +
    + +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3#
    +

    Handle is an opaque, globally shared handle

    +
    + +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4#
    +

    Handle is a D3D12 heap object

    +
    + +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5#
    +

    Handle is a D3D12 committed resource

    +
    + +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6#
    +

    Handle is a shared NT handle to a D3D11 resource

    +
    + +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7#
    +

    Handle is a globally shared handle to a D3D11 resource

    +
    + +
    +
    +CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8#
    +

    Handle is an NvSciBuf object

    +
    + +
    + +
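
    A sketch of importing an allocation exported as a POSIX file descriptor, assuming the descriptor struct mirrors the C union layout (fd and nbytes are hypothetical values obtained from the exporting API):

```python
from cuda.bindings import driver

desc = driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC()
desc.type = driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD
desc.handle.fd = fd    # hypothetical file descriptor from the exporting API
desc.size = nbytes     # hypothetical size of the exported allocation

err, ext_mem = driver.cuImportExternalMemory(desc)
```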
    +
    +class cuda.bindings.driver.CUexternalSemaphoreHandleType(value)#
    +

    External semaphore handle types

    +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1#
    +

    Handle is an opaque file descriptor

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2#
    +

    Handle is an opaque shared NT handle

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3#
    +

    Handle is an opaque, globally shared handle

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4#
    +

    Handle is a shared NT handle referencing a D3D12 fence object

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5#
    +

    Handle is a shared NT handle referencing a D3D11 fence object

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6#
    +

    Opaque handle to NvSciSync Object

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7#
    +

    Handle is a shared NT handle referencing a D3D11 keyed mutex object

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8#
    +

    Handle is a globally shared handle referencing a D3D11 keyed mutex object

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9#
    +

    Handle is an opaque file descriptor referencing a timeline semaphore

    +
    + +
    +
    +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10#
    +

    Handle is an opaque shared NT handle referencing a timeline semaphore

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAllocationHandleType(value)#
    +

    Flags for specifying particular handle types

    +
    +
    +CU_MEM_HANDLE_TYPE_NONE = 0#
    +

    Does not allow any export mechanism.

    +
    + +
    +
    +CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = 1#
    +

    Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int)

    +
    + +
    +
    +CU_MEM_HANDLE_TYPE_WIN32 = 2#
    +

    Allows a Win32 NT handle to be used for exporting. (HANDLE)

    +
    + +
    +
    +CU_MEM_HANDLE_TYPE_WIN32_KMT = 4#
    +

    Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE)

    +
    + +
    +
    +CU_MEM_HANDLE_TYPE_FABRIC = 8#
    +

    Allows a fabric handle to be used for exporting. (CUmemFabricHandle)

    +
    + +
    +
    +CU_MEM_HANDLE_TYPE_MAX = 2147483647#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAccess_flags(value)#
    +

    Specifies the memory protection flags for mapping.

    +
    +
    +CU_MEM_ACCESS_FLAGS_PROT_NONE = 0#
    +

    Default, make the address range not accessible

    +
    + +
    +
    +CU_MEM_ACCESS_FLAGS_PROT_READ = 1#
    +

    Make the address range read accessible

    +
    + +
    +
    +CU_MEM_ACCESS_FLAGS_PROT_READWRITE = 3#
    +

    Make the address range read-write accessible

    +
    + +
    +
    +CU_MEM_ACCESS_FLAGS_PROT_MAX = 2147483647#
    +
    + +
    + +
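
    These flags fill the flags field of a CUmemAccessDesc handed to cuMemSetAccess. A sketch, assuming the binding takes a list of descriptors (ptr and size describe a hypothetical VA range already reserved with cuMemAddressReserve and backed with cuMemMap):

```python
from cuda.bindings import driver

desc = driver.CUmemAccessDesc()
desc.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
desc.location.id = 0   # device ordinal
desc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE

err, = driver.cuMemSetAccess(ptr, size, [desc], 1)  # hypothetical ptr/size
```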
    +
    +class cuda.bindings.driver.CUmemLocationType(value)#
    +

    Specifies the type of location

    +
    +
    +CU_MEM_LOCATION_TYPE_INVALID = 0#
    +
    + +
    +
    +CU_MEM_LOCATION_TYPE_DEVICE = 1#
    +

    Location is a device location, thus id is a device ordinal

    +
    + +
    +
    +CU_MEM_LOCATION_TYPE_HOST = 2#
    +

    Location is host, id is ignored

    +
    + +
    +
    +CU_MEM_LOCATION_TYPE_HOST_NUMA = 3#
    +

    Location is a host NUMA node, thus id is a host NUMA node id

    +
    + +
    +
    +CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT = 4#
    +

    Location is a host NUMA node of the current thread, id is ignored

    +
    + +
    +
    +CU_MEM_LOCATION_TYPE_MAX = 2147483647#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAllocationType(value)#
    +

    Defines the allocation types available

    +
    +
    +CU_MEM_ALLOCATION_TYPE_INVALID = 0#
    +
    + +
    +
    +CU_MEM_ALLOCATION_TYPE_PINNED = 1#
    +

    This allocation type is ‘pinned’, i.e. cannot migrate from its current location while the application is actively using it

    +
    + +
    +
    +CU_MEM_ALLOCATION_TYPE_MAX = 2147483647#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAllocationGranularity_flags(value)#
    +

    Flag for requesting different optimal and required granularities for an allocation.

    +
    +
    +CU_MEM_ALLOC_GRANULARITY_MINIMUM = 0#
    +

    Minimum required granularity for allocation

    +
    + +
    +
    +CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = 1#
    +

    Recommended granularity for allocation for best performance

    +
    + +
    + +
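
    Both granularities are queried through cuMemGetAllocationGranularity against an allocation description; a sketch, assuming the struct fields mirror the C API:

```python
from cuda.bindings import driver

prop = driver.CUmemAllocationProp()
prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
prop.location.id = 0   # device ordinal

err, gran = driver.cuMemGetAllocationGranularity(
    prop,
    driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)
```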
    +
    +class cuda.bindings.driver.CUmemRangeHandleType(value)#
    +

    Specifies the handle type for address range

    +
    +
    +CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD = 1#
    +
    + +
    +
    +CU_MEM_RANGE_HANDLE_TYPE_MAX = 2147483647#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUarraySparseSubresourceType(value)#
    +

    Sparse subresource types

    +
    +
    +CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0#
    +
    + +
    +
    +CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemOperationType(value)#
    +

    Memory operation types

    +
    +
    +CU_MEM_OPERATION_TYPE_MAP = 1#
    +
    + +
    +
    +CU_MEM_OPERATION_TYPE_UNMAP = 2#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemHandleType(value)#
    +

    Memory handle types

    +
    +
    +CU_MEM_HANDLE_TYPE_GENERIC = 0#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemAllocationCompType(value)#
    +

    Specifies compression attribute for an allocation.

    +
    +
    +CU_MEM_ALLOCATION_COMP_NONE = 0#
    +

    Allocating non-compressible memory

    +
    + +
    +
    +CU_MEM_ALLOCATION_COMP_GENERIC = 1#
    +

    Allocating compressible memory

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmulticastGranularity_flags(value)#
    +

    Flags for querying different granularities for a multicast object

    +
    +
    +CU_MULTICAST_GRANULARITY_MINIMUM = 0#
    +

    Minimum required granularity

    +
    + +
    +
    +CU_MULTICAST_GRANULARITY_RECOMMENDED = 1#
    +

    Recommended granularity for best performance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphExecUpdateResult(value)#
    +

    CUDA Graph Update error types

    +
    +
    +CU_GRAPH_EXEC_UPDATE_SUCCESS = 0#
    +

    The update succeeded

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR = 1#
    +

    The update failed for an unexpected reason which is described in the return value of the function

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = 2#
    +

    The update failed because the topology changed

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = 3#
    +

    The update failed because a node type changed

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = 4#
    +

    The update failed because the function of a kernel node changed (CUDA driver < 11.2)

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = 5#
    +

    The update failed because the parameters changed in a way that is not supported

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = 6#
    +

    The update failed because something about the node is not supported

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = 7#
    +

    The update failed because the function of a kernel node changed in an unsupported way

    +
    + +
    +
    +CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED = 8#
    +

    The update failed because the node attributes changed in a way that is not supported

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemPool_attribute(value)#
    +

    CUDA memory pool attributes

    +
    +
    +CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = 1#
    +

    (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

    +
    + +
    +
    +CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC = 2#
    +

    (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

    +
    + +
    +
    +CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES = 3#
    +

    (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync (default enabled).

    +
    + +
    +
    +CU_MEMPOOL_ATTR_RELEASE_THRESHOLD = 4#
    +

    (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

    +
    + +
    +
    +CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT = 5#
    +

    (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool.

    +
    + +
    +
    +CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH = 6#
    +

    (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero.

    +
    + +
    +
    +CU_MEMPOOL_ATTR_USED_MEM_CURRENT = 7#
    +

    (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.

    +
    + +
    +
    +CU_MEMPOOL_ATTR_USED_MEM_HIGH = 8#
    +

    (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero.

    +
    + +
    + +
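
    For example, raising CU_MEMPOOL_ATTR_RELEASE_THRESHOLD on a device's default pool makes cuMemAllocAsync cache freed memory more aggressively. A short sketch, assuming cuInit has run and a context is current:

        from cuda.bindings import driver

        err, dev = driver.cuDeviceGet(0)
        err, pool = driver.cuDeviceGetDefaultMemPool(dev)
        # Hold up to 8 MiB in the pool before releasing memory back to the OS.
        err, = driver.cuMemPoolSetAttribute(
            pool,
            driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
            driver.cuuint64_t(8 << 20))
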
    +
    +class cuda.bindings.driver.CUgraphMem_attribute(value)#
    +
    +
    +CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT = 0#
    +

    (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs

    +
    + +
    +
    +CU_GRAPH_MEM_ATTR_USED_MEM_HIGH = 1#
    +

    (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero.

    +
    + +
    +
    +CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT = 2#
    +

    (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator.

    +
    + +
    +
    +CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH = 3#
    +

    (value type = cuuint64_t) High watermark of memory, in bytes, allocated for use by the CUDA graphs asynchronous allocator since the last time it was reset.

    +
    + +
    + +
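
    These attributes are read with cuDeviceGetGraphMemAttribute (high watermarks can be reset through cuDeviceSetGraphMemAttribute). A minimal sketch, assuming cuInit has been called:

        from cuda.bindings import driver

        err, dev = driver.cuDeviceGet(0)
        err, used = driver.cuDeviceGetGraphMemAttribute(
            dev, driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT)
        print(f"memory associated with graphs: {int(used)} bytes")
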
    +
    +class cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions(value)#
    +

    Bitmasks for CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS

    +
    +
    +CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST = 1#
    +

    cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device.

    +
    + +
    +
    +CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS = 2#
    +

    The CU_STREAM_WAIT_VALUE_FLUSH flag and the CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering(value)#
    +

    Platform native ordering for GPUDirect RDMA writes

    +
    +
    +CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE = 0#
    +

    The device does not natively support ordering of remote writes. cuFlushGPUDirectRDMAWrites() can be leveraged if supported.

    +
    + +
    +
    +CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER = 100#
    +

    Natively, the device can consistently consume remote writes, although other CUDA devices may not.

    +
    + +
    +
    +CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES = 200#
    +

    Any CUDA device in the system can consistently consume remote writes to this device.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope(value)#
    +

    The scopes for cuFlushGPUDirectRDMAWrites

    +
    +
    +CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = 100#
    +

    Blocks until remote writes are visible to the CUDA device context owning the data.

    +
    + +
    +
    +CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = 200#
    +

    Blocks until remote writes are visible to all CUDA device contexts.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUflushGPUDirectRDMAWritesTarget(value)#
    +

    The targets for cuFlushGPUDirectRDMAWrites

    +
    +
    +CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = 0#
    +

    Sets the target for cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context.

    +
    + +
    + +
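
    Putting the four flush-related enums together: a host-side flush first checks the device's flush-writes options, then calls cuFlushGPUDirectRDMAWrites with a target and scope. A sketch, where dev is an assumed, previously obtained CUdevice with a current context:

        from cuda.bindings import driver

        err, opts = driver.cuDeviceGetAttribute(
            driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS,
            dev)
        if opts & driver.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST:
            # Make remote writes received so far visible to the owning context.
            err, = driver.cuFlushGPUDirectRDMAWrites(
                driver.CUflushGPUDirectRDMAWritesTarget.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX,
                driver.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER)
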
    +
    +class cuda.bindings.driver.CUgraphDebugDot_flags(value)#
    +

    The additional write options for cuGraphDebugDotPrint

    +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE = 1#
    +

    Output all debug data as if every debug flag is enabled

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES = 2#
    +

    Use CUDA Runtime structures for output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS = 4#
    +

    Adds CUDA_KERNEL_NODE_PARAMS values to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS = 8#
    +

    Adds CUDA_MEMCPY3D values to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS = 16#
    +

    Adds CUDA_MEMSET_NODE_PARAMS values to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS = 32#
    +

    Adds CUDA_HOST_NODE_PARAMS values to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS = 64#
    +

    Adds CUevent handle from record and wait nodes to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS = 128#
    +

    Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS = 256#
    +

    Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES = 512#
    +

    Adds CUkernelNodeAttrValue values to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES = 1024#
    +

    Adds node handles and every kernel function handle to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS = 2048#
    +

    Adds memory alloc node parameters to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS = 4096#
    +

    Adds memory free node parameters to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS = 8192#
    +

    Adds batch mem op node parameters to output

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO = 16384#
    +

    Adds edge numbering information

    +
    + +
    +
    +CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS = 32768#
    +

    Adds conditional node parameters to output

    +
    + +
    + +
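
    The flags combine bitwise, and CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE alone enables all debug output. A sketch that dumps an assumed, previously built graph handle to a DOT file:

        from cuda.bindings import driver

        flags = driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE
        err, = driver.cuGraphDebugDotPrint(graph, b"graph_debug.dot", int(flags))
        assert err == driver.CUresult.CUDA_SUCCESS  # then: dot -Tsvg graph_debug.dot
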
    +
    +class cuda.bindings.driver.CUuserObject_flags(value)#
    +

    Flags for user objects for graphs

    +
    +
    +CU_USER_OBJECT_NO_DESTRUCTOR_SYNC = 1#
    +

    Indicates the destructor execution is not synchronized by any CUDA handle.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUuserObjectRetain_flags(value)#
    +

    Flags for retaining user object references for graphs

    +
    +
    +CU_GRAPH_USER_OBJECT_MOVE = 1#
    +

    Transfer references from the caller rather than creating new references.

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphInstantiate_flags(value)#
    +

    Flags for instantiating a graph

    +
    +
    +CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH = 1#
    +

    Automatically free memory allocated in a graph before relaunching.

    +
    + +
    +
    +CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD = 2#
    +

    Automatically upload the graph after instantiation. Only supported by cuGraphInstantiateWithParams. The upload will be performed using the stream provided in instantiateParams.

    +
    + +
    +
    +CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH = 4#
    +

    Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH.

    +
    + +
    +
    +CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY = 8#
    +

    Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into.

    +
    + +
    + +
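
    Flags that are not mutually exclusive can be OR-ed together at instantiation time. A sketch, where graph is an assumed, previously built CUgraph:

        from cuda.bindings import driver

        f = driver.CUgraphInstantiate_flags
        flags = (f.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH
                 | f.CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY)
        err, graph_exec = driver.cuGraphInstantiateWithFlags(graph, int(flags))
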
    +
    +class cuda.bindings.driver.CUdeviceNumaConfig(value)#
    +

    CUDA device NUMA configuration

    +
    +
    +CU_DEVICE_NUMA_CONFIG_NONE = 0#
    +

    The GPU is not a NUMA node

    +
    + +
    +
    +CU_DEVICE_NUMA_CONFIG_NUMA_NODE = 1#
    +

    The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID

    +
    + +
    + +
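
    The NUMA configuration is queried through cuDeviceGetAttribute; a sketch assuming cuInit has been called (the NUMA attributes require CUDA 12.2 or newer):

        from cuda.bindings import driver

        err, dev = driver.cuDeviceGet(0)
        err, cfg = driver.cuDeviceGetAttribute(
            driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG, dev)
        if cfg == driver.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NUMA_NODE:
            err, numa_id = driver.cuDeviceGetAttribute(
                driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_ID, dev)
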
    +
    +class cuda.bindings.driver.CUeglFrameType(value)#
    +

    CUDA EglFrame type - array or pointer

    +
    +
    +CU_EGL_FRAME_TYPE_ARRAY = 0#
    +

    Frame type CUDA array

    +
    + +
    +
    +CU_EGL_FRAME_TYPE_PITCH = 1#
    +

    Frame type pointer

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUeglResourceLocationFlags(value)#
    +

    Resource location flags: sysmem or vidmem. For a CUDA context on iGPU, video and system memory are equivalent, so these flags have no effect on execution. For a CUDA context on dGPU, applications can use CUeglResourceLocationFlags to give a hint about the desired location. CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory to be accessed by CUDA. CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated video memory to be accessed by CUDA. There may be an additional latency due to new allocation and data migration, if the frame is produced on a different memory.

    +
    +
    +CU_EGL_RESOURCE_LOCATION_SYSMEM = 0#
    +

    Resource location sysmem

    +
    + +
    +
    +CU_EGL_RESOURCE_LOCATION_VIDMEM = 1#
    +

    Resource location vidmem

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUeglColorFormat(value)#
    +

    CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. Three channel formats are currently not supported for CU_EGL_FRAME_TYPE_ARRAY.

    +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_PLANAR = 0#
    +

    Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = 1#
    +

    Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV422_PLANAR = 2#
    +

    Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = 3#
    +

    Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_RGB = 4#
    +

    R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BGR = 5#
    +

    R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_ARGB = 6#
    +

    R/G/B/A four channels in one surface with BGRA byte ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_RGBA = 7#
    +

    R/G/B/A four channels in one surface with ABGR byte ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_L = 8#
    +

    single luminance channel in one surface.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_R = 9#
    +

    single color channel in one surface.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV444_PLANAR = 10#
    +

    Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = 11#
    +

    Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUYV_422 = 12#
    +

    Y, U, V in one surface, interleaved as UYVY in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_UYVY_422 = 13#
    +

    Y, U, V in one surface, interleaved as YUYV in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_ABGR = 14#
    +

    R/G/B/A four channels in one surface with RGBA byte ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BGRA = 15#
    +

    R/G/B/A four channels in one surface with ARGB byte ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_A = 16#
    +

    Alpha color format - one channel in one surface.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_RG = 17#
    +

    R/G color format - two channels in one surface with GR byte ordering

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_AYUV = 18#
    +

    Y, U, V, A four channels in one surface, interleaved as VUYA.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = 19#
    +

    Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = 20#
    +

    Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = 21#
    +

    Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = 22#
    +

    Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = 23#
    +

    Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = 24#
    +

    Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = 25#
    +

    Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_VYUY_ER = 26#
    +

    Extended Range Y, U, V in one surface, interleaved as YVYU in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_UYVY_ER = 27#
    +

    Extended Range Y, U, V in one surface, interleaved as YUYV in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUYV_ER = 28#
    +

    Extended Range Y, U, V in one surface, interleaved as UYVY in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVYU_ER = 29#
    +

    Extended Range Y, U, V in one surface, interleaved as VYUY in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV_ER = 30#
    +

    Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUVA_ER = 31#
    +

    Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_AYUV_ER = 32#
    +

    Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = 33#
    +

    Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = 34#
    +

    Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = 35#
    +

    Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = 36#
    +

    Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = 37#
    +

    Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = 38#
    +

    Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = 39#
    +

    Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = 40#
    +

    Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = 41#
    +

    Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = 42#
    +

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = 43#
    +

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = 44#
    +

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_RGGB = 45#
    +

    Bayer format - one channel in one surface with interleaved RGGB ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_BGGR = 46#
    +

    Bayer format - one channel in one surface with interleaved BGGR ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_GRBG = 47#
    +

    Bayer format - one channel in one surface with interleaved GRBG ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_GBRG = 48#
    +

    Bayer format - one channel in one surface with interleaved GBRG ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER10_RGGB = 49#
    +

    Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER10_BGGR = 50#
    +

    Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER10_GRBG = 51#
    +

    Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER10_GBRG = 52#
    +

    Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_RGGB = 53#
    +

    Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_BGGR = 54#
    +

    Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_GRBG = 55#
    +

    Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_GBRG = 56#
    +

    Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER14_RGGB = 57#
    +

    Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER14_BGGR = 58#
    +

    Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER14_GRBG = 59#
    +

    Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER14_GBRG = 60#
    +

    Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER20_RGGB = 61#
    +

    Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER20_BGGR = 62#
    +

    Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER20_GRBG = 63#
    +

    Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER20_GBRG = 64#
    +

    Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU444_PLANAR = 65#
    +

    Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU422_PLANAR = 66#
    +

    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_PLANAR = 67#
    +

    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = 68#
    +

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = 69#
    +

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = 70#
    +

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = 71#
    +

    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_BCCR = 72#
    +

    Bayer format - one channel in one surface with interleaved BCCR ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_RCCB = 73#
    +

    Bayer format - one channel in one surface with interleaved RCCB ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_CRBC = 74#
    +

    Bayer format - one channel in one surface with interleaved CRBC ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER_CBRC = 75#
    +

    Bayer format - one channel in one surface with interleaved CBRC ordering.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER10_CCCC = 76#
    +

    Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_BCCR = 77#
    +

    Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_RCCB = 78#
    +

    Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_CRBC = 79#
    +

    Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_CBRC = 80#
    +

    Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_BAYER12_CCCC = 81#
    +

    Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y = 82#
    +

    Color format for single Y plane.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = 83#
    +

    Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = 84#
    +

    Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = 85#
    +

    Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = 86#
    +

    Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = 87#
    +

    Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = 88#
    +

    Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = 89#
    +

    Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = 90#
    +

    Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = 91#
    +

    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = 92#
    +

    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = 93#
    +

    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = 94#
    +

    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = 95#
    +

    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y_ER = 96#
    +

    Extended Range Color format for single Y plane.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y_709_ER = 97#
    +

    Extended Range Color format for single Y plane.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10_ER = 98#
    +

    Extended Range Color format for single Y10 plane.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10_709_ER = 99#
    +

    Extended Range Color format for single Y10 plane.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12_ER = 100#
    +

    Extended Range Color format for single Y12 plane.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12_709_ER = 101#
    +

    Extended Range Color format for single Y12 plane.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUVA = 102#
    +

    Y, U, V, A four channels in one surface, interleaved as AVUY.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YUV = 103#
    +

    Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_YVYU = 104#
    +

    Y, U, V in one surface, interleaved as YVYU in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_VYUY = 105#
    +

    Y, U, V in one surface, interleaved as VYUY in one channel.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = 106#
    +

    Extended Range Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = 107#
    +

    Extended Range Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = 108#
    +

    Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = 109#
    +

    Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = 110#
    +

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = 111#
    +

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = 112#
    +

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = 113#
    +

    Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +CU_EGL_COLOR_FORMAT_MAX = 114#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdeviceptr_v2#
    +

    CUDA device pointer. CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform.

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdeviceptr#
    +

    CUDA device pointer. CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform.

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
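
    In practice a CUdeviceptr comes back from allocation calls; int() yields the raw device address, while getPtr() returns the address of the binding object itself rather than the allocation. A sketch, assuming a current context:

        from cuda.bindings import driver

        err, dptr = driver.cuMemAlloc(256)
        assert err == driver.CUresult.CUDA_SUCCESS
        print(hex(int(dptr)))   # the device address held by the CUdeviceptr
        print(dptr.getPtr())    # address of the wrapper instance, not the allocation
        err, = driver.cuMemFree(dptr)
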
    +
    +class cuda.bindings.driver.CUdevice_v1#
    +

    CUDA device

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdevice#
    +

    CUDA device

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUcontext(*args, **kwargs)#
    +

    A regular context handle

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmodule(*args, **kwargs)#
    +

    CUDA module

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUfunction(*args, **kwargs)#
    +

    CUDA function

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlibrary(*args, **kwargs)#
    +

    CUDA library

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUkernel(*args, **kwargs)#
    +

    CUDA kernel

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUarray(*args, **kwargs)#
    +

    CUDA array

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmipmappedArray(*args, **kwargs)#
    +

    CUDA mipmapped array

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtexref(*args, **kwargs)#
    +

    CUDA texture reference

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUsurfref(*args, **kwargs)#
    +

    CUDA surface reference

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUevent(*args, **kwargs)#
    +

    CUDA event

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstream(*args, **kwargs)#
    +

    CUDA stream

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphicsResource(*args, **kwargs)#
    +

    CUDA graphics interop resource

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtexObject_v1#
    +

    An opaque value that represents a CUDA texture object

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUtexObject#
    +

    An opaque value that represents a CUDA texture object

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUsurfObject_v1#
    +

    An opaque value that represents a CUDA surface object

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUsurfObject#
    +

    An opaque value that represents a CUDA surface object

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUexternalMemory(*args, **kwargs)#
    +

    CUDA external memory

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUexternalSemaphore(*args, **kwargs)#
    +

    CUDA external semaphore

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraph(*args, **kwargs)#
    +

    CUDA graph

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphNode(*args, **kwargs)#
    +

    CUDA graph node

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphExec(*args, **kwargs)#
    +

    CUDA executable graph

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemoryPool(*args, **kwargs)#
    +

    CUDA memory pool

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUuserObject(*args, **kwargs)#
    +

    CUDA user object for graphs

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphConditionalHandle#
    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgraphDeviceNode(*args, **kwargs)#
    +

    CUDA graph device node handle

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUasyncCallbackHandle(*args, **kwargs)#
    +

    CUDA async notification callback handle

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUgreenCtx(*args, **kwargs)#
    +

    A green context handle. This handle can be used safely from only one CPU thread at a time. Created via cuGreenCtxCreate.

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUuuid#
    +
    +
    +bytes#
    +

    CUDA definition of UUID

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemFabricHandle_v1#
    +

    Fabric handle - An opaque handle representing a memory allocation that can be exported to processes in the same or different nodes. For IPC between processes on different nodes they must be connected via the NVSwitch fabric.

    +
    +
    +data#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUmemFabricHandle#
    +

    Fabric handle - An opaque handle representing a memory allocation that can be exported to processes in the same or different nodes. For IPC between processes on different nodes they must be connected via the NVSwitch fabric.

    +
    +
    +data#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUipcEventHandle_v1#
    +

    CUDA IPC event handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUipcEventHandle#
    +

    CUDA IPC event handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUipcMemHandle_v1#
    +

    CUDA IPC mem handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUipcMemHandle#
    +

    CUDA IPC mem handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
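
    The reserved bytes are opaque and intended to be shipped to another process, which would pass them to cuIpcOpenMemHandle. A sketch of the exporting side, assuming a current context:

        from cuda.bindings import driver

        err, dptr = driver.cuMemAlloc(1 << 20)
        err, handle = driver.cuIpcGetMemHandle(dptr)
        payload = bytes(handle.reserved)  # 64 opaque bytes to send over any IPC channel
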
    +
    +class cuda.bindings.driver.CUstreamBatchMemOpParams_v1#
    +

    Per-operation parameters for cuStreamBatchMemOp

    +
    +
    +operation#
    +
    +
    Type:
    +

    CUstreamBatchMemOpType

    +
    +
    +
    + +
    +
    +waitValue#
    +
    +
    Type:
    +

    CUstreamMemOpWaitValueParams_st

    +
    +
    +
    + +
    +
    +writeValue#
    +
    +
    Type:
    +

    CUstreamMemOpWriteValueParams_st

    +
    +
    +
    + +
    +
    +flushRemoteWrites#
    +
    +
    Type:
    +

    CUstreamMemOpFlushRemoteWritesParams_st

    +
    +
    +
    + +
    +
    +memoryBarrier#
    +
    +
    Type:
    +

    CUstreamMemOpMemoryBarrierParams_st

    +
    +
    +
    + +
    +
    +pad#
    +
    +
    Type:
    +

    List[cuuint64_t]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUstreamBatchMemOpParams#
    +

    Per-operation parameters for cuStreamBatchMemOp

    +
    +
    +operation#
    +
    +
    Type:
    +

    CUstreamBatchMemOpType

    +
    +
    +
    + +
    +
    +waitValue#
    +
    +
    Type:
    +

    CUstreamMemOpWaitValueParams_st

    +
    +
    +
    + +
    +
    +writeValue#
    +
    +
    Type:
    +

    CUstreamMemOpWriteValueParams_st

    +
    +
    +
    + +
    +
    +flushRemoteWrites#
    +
    +
    Type:
    +

    CUstreamMemOpFlushRemoteWritesParams_st

    +
    +
    +
    + +
    +
    +memoryBarrier#
    +
    +
    Type:
    +

    CUstreamMemOpMemoryBarrierParams_st

    +
    +
    +
    + +
    +
    +pad#
    +
    +
    Type:
    +

    List[cuuint64_t]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
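
    A deliberately hedged sketch of filling one of these per-operation unions for a 32-bit write and submitting it with cuStreamBatchMemOp; stream and dptr are assumed handles, and batch memory operations additionally require device/driver support:

        from cuda.bindings import driver

        op = driver.CUstreamBatchMemOpParams()
        op.operation = driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_32
        op.writeValue.address = dptr   # must point at 4 writable, aligned bytes
        op.writeValue.value = 42
        op.writeValue.flags = 0
        err, = driver.cuStreamBatchMemOp(stream, 1, [op], 0)
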
    +
    +class cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1#
    +
    +
    +ctx#
    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +count#
    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +paramArray#
    +
    +
    Type:
    +

    CUstreamBatchMemOpParams

    +
    +
    +
    + +
    +
    +flags#
    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS#
    +
    +
    +ctx#
    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +count#
    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +paramArray#
    +
    +
    Type:
    +

    CUstreamBatchMemOpParams

    +
    +
    +
    + +
    +
    +flags#
    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2#
    +

    Batch memory operation node parameters

    +
    +
    +ctx#
    +

    Context to use for the operations.

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +count#
    +

    Number of operations in paramArray.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +paramArray#
    +

    Array of batch memory operations.

    +
    +
    Type:
    +

    CUstreamBatchMemOpParams

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags to control the node.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUasyncNotificationInfo#
    +

    Information passed to the user via the async notification callback

    +
    +
    +type#
    +
    +
    Type:
    +

    CUasyncNotificationType

    +
    +
    +
    + +
    +
    +info#
    +
    +
    Type:
    +

    anon_union2

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUasyncCallback(*args, **kwargs)#
    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdevprop_v1#
    +

    Legacy device properties

    +
    +
    +maxThreadsPerBlock#
    +

    Maximum number of threads per block

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +maxThreadsDim#
    +

    Maximum size of each dimension of a block

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +maxGridSize#
    +

    Maximum size of each dimension of a grid

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +sharedMemPerBlock#
    +

    Shared memory available per block in bytes

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +totalConstantMemory#
    +

    Constant memory available on device in bytes

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +SIMDWidth#
    +

    Warp size in threads

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +memPitch#
    +

    Maximum pitch in bytes allowed by memory copies

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +regsPerBlock#
    +

    32-bit registers available per block

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +clockRate#
    +

    Clock frequency in kilohertz

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +textureAlign#
    +

    Alignment requirement for textures

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUdevprop#
    +

    Legacy device properties

    +
    +
    +maxThreadsPerBlock#
    +

    Maximum number of threads per block

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +maxThreadsDim#
    +

    Maximum size of each dimension of a block

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +maxGridSize#
    +

    Maximum size of each dimension of a grid

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +sharedMemPerBlock#
    +

    Shared memory available per block in bytes

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +totalConstantMemory#
    +

    Constant memory available on device in bytes

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +SIMDWidth#
    +

    Warp size in threads

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +memPitch#
    +

    Maximum pitch in bytes allowed by memory copies

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +regsPerBlock#
    +

    32-bit registers available per block

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +clockRate#
    +

    Clock frequency in kilohertz

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +textureAlign#
    +

    Alignment requirement for textures

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUlinkState(*args, **kwargs)#
    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUhostFn(*args, **kwargs)#
    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUaccessPolicyWindow_v1#
    +

    Specifies an access policy for a window, a contiguous extent of memory beginning at base_ptr and ending at base_ptr + num_bytes. num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. Partition into many segments and assign segments such that: sum of “hit segments” / window == approx. ratio. sum of “miss segments” / window == approx 1-ratio. Segments and ratio specifications are fitted to the capabilities of the architecture. Accesses in a hit segment apply the hitProp access policy. Accesses in a miss segment apply the missProp access policy.

    +
    +
    +base_ptr#
    +

    Starting address of the access policy window. CUDA driver may align it.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +num_bytes#
    +

    Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +hitRatio#
    +

    hitRatio specifies the percentage of lines assigned hitProp; the rest are assigned missProp.

    +
    +
    Type:
    +

    float

    +
    +
    +
    + +
    +
    +hitProp#
    +

    CUaccessProperty set for hit.

    +
    +
    Type:
    +

    CUaccessProperty

    +
    +
    +
    + +
    +
    +missProp#
    +

    CUaccessProperty set for miss. Must be either NORMAL or STREAMING

    +
    +
    Type:
    +

    CUaccessProperty

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUaccessPolicyWindow#
    +

    Specifies an access policy for a window, a contiguous extent of memory beginning at base_ptr and ending at base_ptr + num_bytes. num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. Partition into many segments and assign segments such that: sum of “hit segments” / window == approx. ratio. sum of “miss segments” / window == approx 1-ratio. Segments and ratio specifications are fitted to the capabilities of the architecture. Accesses in a hit segment apply the hitProp access policy. Accesses in a miss segment apply the missProp access policy.

    +
    +
    +base_ptr#
    +

    Starting address of the access policy window. CUDA driver may align it.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +num_bytes#
    +

    Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +hitRatio#
    +

    hitRatio specifies the percentage of lines assigned hitProp; the rest are assigned missProp.

    +
    +
    Type:
    +

    float

    +
    +
    +
    + +
    +
    +hitProp#
    +

    CUaccessProperty set for hit.

    +
    +
    Type:
    +

    CUaccessProperty

    +
    +
    +
    + +
    +
    +missProp#
    +

    CUaccessProperty set for miss. Must be either NORMAL or STREAMING

    +
    +
    Type:
    +

    CUaccessProperty

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
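
    Such a window is typically attached to a stream (or kernel node) via its attributes. A sketch, where stream, dptr and nbytes are assumed to exist and the device offers L2 set-aside:

        from cuda.bindings import driver

        av = driver.CUstreamAttrValue()
        av.accessPolicyWindow.base_ptr = int(dptr)
        av.accessPolicyWindow.num_bytes = nbytes
        av.accessPolicyWindow.hitRatio = 0.6   # ~60% of lines get hitProp
        av.accessPolicyWindow.hitProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING
        av.accessPolicyWindow.missProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING
        err, = driver.cuStreamSetAttribute(
            stream,
            driver.CUstreamAttrID.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW,
            av)
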
    +
    +class cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v1#
    +

    GPU kernel node parameters

    +
    +
    +func#
    +

    Kernel to launch

    +
    +
    Type:
    +

    CUfunction

    +
    +
    +
    + +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +kernelParams#
    +

    Array of pointers to kernel parameters

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +extra#
    +

    Extra options

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2#
    +

    GPU kernel node parameters

    +
    +
    +func#
    +

    Kernel to launch

    +
    +
    Type:
    +

    CUfunction

    +
    +
    +
    + +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +kernelParams#
    +

    Array of pointers to kernel parameters

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +extra#
    +

    Extra options

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +kern#
    +

    Kernel to launch; will only be referenced if func is NULL.

    +
    +
    Type:
    +

    CUkernel

    +
    +
    +
    + +
    +
    +ctx#
    +

    Context for the kernel task to run in. The value NULL indicates that the current context should be used by the API. This field is ignored if func is set.

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS#
    +

    GPU kernel node parameters

    +
    +
    +func#
    +

    Kernel to launch

    +
    +
    Type:
    +

    CUfunction

    +
    +
    +
    + +
    +
    +gridDimX#
    +

    Width of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimY#
    +

    Height of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +gridDimZ#
    +

    Depth of grid in blocks

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimX#
    +

    X dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimY#
    +

    Y dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +blockDimZ#
    +

    Z dimension of each thread block

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +sharedMemBytes#
    +

    Dynamic shared-memory size per thread block in bytes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +kernelParams#
    +

    Array of pointers to kernel parameters

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +extra#
    +

    Extra options

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +kern#
    +

    Kernel to launch; will only be referenced if func is NULL.

    +
    +
    Type:
    +

    CUkernel

    +
    +
    +
    + +
    +
    +ctx#
    +

    Context for the kernel task to run in. The value NULL indicates that the current context should be used by the API. This field is ignored if func is set.

    +
    +
    Type:
    +

    CUcontext

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
class cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3

    GPU kernel node parameters

    func (CUfunction) -- Kernel to launch
    gridDimX (unsigned int) -- Width of grid in blocks
    gridDimY (unsigned int) -- Height of grid in blocks
    gridDimZ (unsigned int) -- Depth of grid in blocks
    blockDimX (unsigned int) -- X dimension of each thread block
    blockDimY (unsigned int) -- Y dimension of each thread block
    blockDimZ (unsigned int) -- Z dimension of each thread block
    sharedMemBytes (unsigned int) -- Dynamic shared-memory size per thread block in bytes
    kernelParams (Any) -- Array of pointers to kernel parameters
    extra (Any) -- Extra options
    kern (CUkernel) -- Kernel to launch; will only be referenced if func is NULL
    ctx (CUcontext) -- Context for the kernel task to run in. The value NULL
        indicates that the current context should be used by the API. This
        field is ignored if func is set.
    getPtr() -- Get memory address of class instance
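
    These launch-geometry fields mirror cuLaunchKernel, so a node-parameters
    struct can be filled in field by field from Python. Below is a minimal
    sketch of adding a kernel node to a new graph; it assumes cuInit and
    context creation have already run, that `kernel` is a CUfunction obtained
    elsewhere (e.g. via cuModuleGetFunction) and takes no arguments, and that
    passing None/0 for the dependency list and kernel parameters is accepted
    by the bindings.

        from cuda.bindings import driver

        # Assumes a current context and a no-argument CUfunction `kernel`.
        err, graph = driver.cuGraphCreate(0)

        params = driver.CUDA_KERNEL_NODE_PARAMS()
        params.func = kernel              # leave `kern` and `ctx` zeroed
        params.gridDimX = 64
        params.gridDimY = params.gridDimZ = 1
        params.blockDimX = 256
        params.blockDimY = params.blockDimZ = 1
        params.sharedMemBytes = 0
        params.kernelParams = 0           # no kernel arguments in this sketch

        # No dependencies: the new node becomes a root node of the graph.
        err, node = driver.cuGraphAddKernelNode(graph, None, 0, params)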

class cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v1

    Memset node parameters

    dst (CUdeviceptr) -- Destination device pointer
    pitch (size_t) -- Pitch of destination device pointer. Unused if height is 1
    value (unsigned int) -- Value to be set
    elementSize (unsigned int) -- Size of each element in bytes. Must be 1, 2, or 4.
    width (size_t) -- Width of the row in elements
    height (size_t) -- Number of rows
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS

    Memset node parameters

    dst (CUdeviceptr) -- Destination device pointer
    pitch (size_t) -- Pitch of destination device pointer. Unused if height is 1
    value (unsigned int) -- Value to be set
    elementSize (unsigned int) -- Size of each element in bytes. Must be 1, 2, or 4.
    width (size_t) -- Width of the row in elements
    height (size_t) -- Number of rows
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2

    Memset node parameters

    dst (CUdeviceptr) -- Destination device pointer
    pitch (size_t) -- Pitch of destination device pointer. Unused if height is 1
    value (unsigned int) -- Value to be set
    elementSize (unsigned int) -- Size of each element in bytes. Must be 1, 2, or 4.
    width (size_t) -- Width of the row in elements
    height (size_t) -- Number of rows
    ctx (CUcontext) -- Context on which to run the node
    getPtr() -- Get memory address of class instance
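
    A minimal sketch of a 1D zero-fill through a memset graph node. It assumes
    `ctx` is the current CUcontext and `dptr` is a CUdeviceptr returned by
    cuMemAlloc; with height set to 1, the pitch field is unused.

        from cuda.bindings import driver

        N = 1 << 20                     # 4-byte elements to clear

        mp = driver.CUDA_MEMSET_NODE_PARAMS()
        mp.dst = dptr
        mp.value = 0
        mp.elementSize = 4              # must be 1, 2, or 4
        mp.width = N                    # elements per row
        mp.height = 1                   # 1D: pitch is then unused
        mp.pitch = 0

        err, graph = driver.cuGraphCreate(0)
        err, node = driver.cuGraphAddMemsetNode(graph, None, 0, mp, ctx)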

class cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v1

    Host node parameters

    fn (CUhostFn) -- The function to call when the node executes
    userData (Any) -- Argument to pass to the function
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_HOST_NODE_PARAMS

    Host node parameters

    fn (CUhostFn) -- The function to call when the node executes
    userData (Any) -- Argument to pass to the function
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2

    Host node parameters

    fn (CUhostFn) -- The function to call when the node executes
    userData (Any) -- Argument to pass to the function
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_CONDITIONAL_NODE_PARAMS(void_ptr _ptr=0)

    Conditional node parameters

    handle (CUgraphConditionalHandle) -- Conditional node handle. Handles must
        be created in advance of creating the node using
        cuGraphConditionalHandleCreate.
    type (CUgraphConditionalNodeType) -- Type of conditional node.
    size (unsigned int) -- Size of graph output array. Must be 1.
    phGraph_out (CUgraph) -- CUDA-owned array populated with conditional node
        child graphs during creation of the node. Valid for the lifetime of
        the conditional node. The contents of the graph(s) are subject to the
        following constraints:
        - Allowed node types are kernel nodes, empty nodes, child graphs,
          memsets, memcopies, and conditionals. This applies recursively to
          child graphs and conditional bodies.
        - All kernels, including kernels in nested conditionals or child
          graphs at any level, must belong to the same CUDA context.
        These graphs may be populated using graph node creation APIs or
        cuStreamBeginCaptureToGraph.
    ctx (CUcontext) -- Context on which to run the node. Must match the
        context used to create the handle and all body nodes.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUgraphEdgeData

    Optional annotation for edges in a CUDA graph. Note, all edges implicitly
    have annotations and default to a zero-initialized value if not specified.
    A zero-initialized struct indicates a standard full serialization of two
    nodes with memory visibility.

    from_port (bytes) -- This indicates when the dependency is triggered from
        the upstream node on the edge. The meaning is specific to the node
        type. A value of 0 in all cases means full completion of the upstream
        node, with memory visibility to the downstream node or portion
        thereof (indicated by to_port). Only kernel nodes define non-zero
        ports. A kernel node can use the following output port types:
        CU_GRAPH_KERNEL_NODE_PORT_DEFAULT,
        CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC, or
        CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER.
    to_port (bytes) -- This indicates what portion of the downstream node is
        dependent on the upstream node or portion thereof (indicated by
        from_port). The meaning is specific to the node type. A value of 0 in
        all cases means the entirety of the downstream node is dependent on
        the upstream work. Currently no node types define non-zero ports.
        Accordingly, this field must be set to zero.
    type (bytes) -- This should be populated with a value from
        CUgraphDependencyType. (It is typed as char due to compiler-specific
        layout of bitfields.) See CUgraphDependencyType.
    reserved (bytes) -- These bytes are unused and must be zeroed. This
        ensures compatibility if additional fields are added in the future.
    getPtr() -- Get memory address of class instance
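
    A hedged sketch of annotating an edge so the downstream node may start
    once the upstream kernel fires its programmatic-launch port, rather than
    after full completion. It assumes `graph`, `up`, and `down` already
    exist; both the Python spelling of the port/dependency constants and the
    availability of the _v2 (edge-data) variant of cuGraphAddDependencies in
    these bindings are assumptions.

        from cuda.bindings import driver

        edge = driver.CUgraphEdgeData()   # zero-initialized: full serialization
        edge.from_port = driver.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC
        edge.to_port = 0                  # must be zero; no node defines to-ports
        edge.type = driver.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC

        err, = driver.cuGraphAddDependencies_v2(graph, [up], [down], [edge], 1)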

class cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS

    Graph instantiation parameters

    flags (cuuint64_t) -- Instantiation flags
    hUploadStream (CUstream) -- Upload stream
    hErrNode_out (CUgraphNode) -- The node which caused instantiation to fail, if any
    result_out (CUgraphInstantiateResult) -- Whether instantiation was
        successful. If it failed, the reason why
    getPtr() -- Get memory address of class instance
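
    The output fields make this struct useful for diagnosing a failed
    instantiation. A minimal sketch, assuming `graph` is a CUgraph built
    earlier and `stream` is a CUstream (the Python spelling of the flag enum
    follows the C API and is an assumption):

        from cuda.bindings import driver

        ip = driver.CUDA_GRAPH_INSTANTIATE_PARAMS()
        ip.flags = int(driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD)
        ip.hUploadStream = stream     # required by the UPLOAD flag

        err, graph_exec = driver.cuGraphInstantiateWithParams(graph, ip)
        if ip.result_out != driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_SUCCESS:
            # hErrNode_out identifies the offending node, if any
            print("instantiation failed at node:", ip.hErrNode_out)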

class cuda.bindings.driver.CUlaunchMemSyncDomainMap

    Memory synchronization domain map. See ::cudaLaunchMemSyncDomain. By
    default, kernels are launched in domain 0. Kernels launched with
    CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a different domain ID. The
    user may also alter the domain ID with CUlaunchMemSyncDomainMap for a
    specific stream / graph node / kernel launch. See
    CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. The domain ID range is available
    through CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.

    default_ (bytes) -- The default domain ID to use for designated kernels
    remote (bytes) -- The remote domain ID to use for designated kernels
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUlaunchAttributeValue

    Launch attributes union; used as value field of CUlaunchAttribute

    pad (bytes)
    accessPolicyWindow (CUaccessPolicyWindow) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW.
    cooperative (int) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero indicates a cooperative
        kernel (see cuLaunchCooperativeKernel).
    syncPolicy (CUsynchronizationPolicy) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. ::CUsynchronizationPolicy
        for work queued up in this stream
    clusterDim (anon_struct1) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION that represents the desired
        cluster dimensions for the kernel. Opaque type with the following
        fields:
        - x - The X dimension of the cluster, in blocks. Must be a divisor of
          the grid X dimension.
        - y - The Y dimension of the cluster, in blocks. Must be a divisor of
          the grid Y dimension.
        - z - The Z dimension of the cluster, in blocks. Must be a divisor of
          the grid Z dimension.
    clusterSchedulingPolicyPreference (CUclusterSchedulingPolicy) -- Value of
        launch attribute
        CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster
        scheduling policy preference for the kernel.
    programmaticStreamSerializationAllowed (int) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.
    programmaticEvent (anon_struct2) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT with the following fields:
        - CUevent event - Event to fire when all blocks trigger it.
        - int flags - Event record flags, see cuEventRecordWithFlags. Does
          not accept CU_EVENT_RECORD_EXTERNAL.
        - triggerAtBlockStart - If this is set to non-0, each block launch
          will automatically trigger the event.
    launchCompletionEvent (anon_struct3) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT with the following fields:
        - CUevent event - Event to fire when the last block launches.
        - int flags - Event record flags, see cuEventRecordWithFlags. Does
          not accept CU_EVENT_RECORD_EXTERNAL.
    priority (int) -- Value of launch attribute CU_LAUNCH_ATTRIBUTE_PRIORITY.
        Execution priority of the kernel.
    memSyncDomainMap (CUlaunchMemSyncDomainMap) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. See CUlaunchMemSyncDomainMap.
    memSyncDomain (CUlaunchMemSyncDomain) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. See ::CUlaunchMemSyncDomain.
    deviceUpdatableKernelNode (anon_struct4) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE with the following
        fields:
        - int deviceUpdatable - Whether or not the resulting kernel node
          should be device-updatable.
        - CUgraphDeviceNode devNode - Returns a handle to pass to the various
          device-side update functions.
    sharedMemCarveout (unsigned int) -- Value of launch attribute
        CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUlaunchAttribute

    Launch attribute

    id (CUlaunchAttributeID) -- Attribute to set
    value (CUlaunchAttributeValue) -- Value of the attribute
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUlaunchConfig

    CUDA extensible launch configuration

    gridDimX (unsigned int) -- Width of grid in blocks
    gridDimY (unsigned int) -- Height of grid in blocks
    gridDimZ (unsigned int) -- Depth of grid in blocks
    blockDimX (unsigned int) -- X dimension of each thread block
    blockDimY (unsigned int) -- Y dimension of each thread block
    blockDimZ (unsigned int) -- Z dimension of each thread block
    sharedMemBytes (unsigned int) -- Dynamic shared-memory size per thread block in bytes
    hStream (CUstream) -- Stream identifier
    attrs (CUlaunchAttribute) -- List of attributes; nullable if
        CUlaunchConfig::numAttrs == 0
    numAttrs (unsigned int) -- Number of attributes populated in
        CUlaunchConfig::attrs
    getPtr() -- Get memory address of class instance
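
    CUlaunchConfig is the argument to the extensible launch entry point
    cuLaunchKernelEx. A minimal sketch that attaches one launch attribute;
    it assumes `kernel` is a no-argument CUfunction, `stream` is a CUstream,
    and that assigning a Python list to the attrs field is accepted by the
    bindings.

        from cuda.bindings import driver

        attr = driver.CUlaunchAttribute()
        attr.id = driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY
        attr.value.priority = 1           # elevated launch priority

        cfg = driver.CUlaunchConfig()
        cfg.gridDimX = 64
        cfg.gridDimY = cfg.gridDimZ = 1
        cfg.blockDimX = 128
        cfg.blockDimY = cfg.blockDimZ = 1
        cfg.sharedMemBytes = 0
        cfg.hStream = stream
        cfg.attrs = [attr]                # assumed list-to-pointer conversion
        cfg.numAttrs = 1

        err, = driver.cuLaunchKernelEx(cfg, kernel, 0, 0)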

class cuda.bindings.driver.CUkernelNodeAttrID(value)

    Launch attributes enum; used as id field of CUlaunchAttribute

class cuda.bindings.driver.CUkernelNodeAttrValue_v1

    Launch attributes union; used as value field of CUlaunchAttribute. This
    union carries the same members as CUlaunchAttributeValue above: pad,
    accessPolicyWindow, cooperative, syncPolicy, clusterDim,
    clusterSchedulingPolicyPreference, programmaticStreamSerializationAllowed,
    programmaticEvent, launchCompletionEvent, priority, memSyncDomainMap,
    memSyncDomain, deviceUpdatableKernelNode, and sharedMemCarveout; see
    CUlaunchAttributeValue for the per-member descriptions.

    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUkernelNodeAttrValue

    Launch attributes union; used as value field of CUlaunchAttribute. Same
    members as CUlaunchAttributeValue; see above.

    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUstreamAttrID(value)

    Launch attributes enum; used as id field of CUlaunchAttribute

class cuda.bindings.driver.CUstreamAttrValue_v1

    Launch attributes union; used as value field of CUlaunchAttribute. Same
    members as CUlaunchAttributeValue; see above.

    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUstreamAttrValue

    Launch attributes union; used as value field of CUlaunchAttribute. Same
    members as CUlaunchAttributeValue; see above.

    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUexecAffinitySmCount_v1

    Value for CU_EXEC_AFFINITY_TYPE_SM_COUNT

    val (unsigned int) -- The number of SMs the context is limited to use.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUexecAffinitySmCount

    Value for CU_EXEC_AFFINITY_TYPE_SM_COUNT

    val (unsigned int) -- The number of SMs the context is limited to use.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUexecAffinityParam_v1

    Execution Affinity Parameters

    type (CUexecAffinityType)
    param (anon_union3)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUexecAffinityParam

    Execution Affinity Parameters

    type (CUexecAffinityType)
    param (anon_union3)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUctxCigParam

    CIG Context Create Params

    sharedDataType (CUcigDataType)
    sharedData (Any)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUctxCreateParams

    Params for creating a CUDA context. Exactly one of execAffinityParams and
    cigParams must be non-NULL.

    execAffinityParams (CUexecAffinityParam)
    numExecAffinityParams (int)
    cigParams (CUctxCigParam)
    getPtr() -- Get memory address of class instance
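
    A minimal sketch of execution affinity in practice: create a context
    limited to a subset of SMs via cuCtxCreate_v3, which takes a list of
    CUexecAffinityParam and its length ahead of the usual flags and device.
    The nested param.smCount.val access mirrors the C union layout and is an
    assumption about the Python field path.

        from cuda.bindings import driver

        err, = driver.cuInit(0)
        err, dev = driver.cuDeviceGet(0)

        p = driver.CUexecAffinityParam()
        p.type = driver.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT
        p.param.smCount.val = 8       # limit the context to 8 SMs

        err, ctx = driver.cuCtxCreate_v3([p], 1, 0, dev)
        err, = driver.cuCtxDestroy(ctx)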

class cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable

    functionTable (Any)
    functionWindowSize (size_t)
    dataTable (Any)
    dataWindowSize (size_t)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUstreamCallback(*args, **kwargs)

    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUoccupancyB2DSize(*args, **kwargs)

    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEMCPY2D_v2

    2D memory copy parameters

    srcXInBytes (size_t) -- Source X in bytes
    srcY (size_t) -- Source Y
    srcMemoryType (CUmemorytype) -- Source memory type (host, device, array)
    srcHost (Any) -- Source host pointer
    srcDevice (CUdeviceptr) -- Source device pointer
    srcArray (CUarray) -- Source array reference
    srcPitch (size_t) -- Source pitch (ignored when src is array)
    dstXInBytes (size_t) -- Destination X in bytes
    dstY (size_t) -- Destination Y
    dstMemoryType (CUmemorytype) -- Destination memory type (host, device, array)
    dstHost (Any) -- Destination host pointer
    dstDevice (CUdeviceptr) -- Destination device pointer
    dstArray (CUarray) -- Destination array reference
    dstPitch (size_t) -- Destination pitch (ignored when dst is array)
    WidthInBytes (size_t) -- Width of 2D memory copy in bytes
    Height (size_t) -- Height of 2D memory copy
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEMCPY2D

    2D memory copy parameters

    srcXInBytes (size_t) -- Source X in bytes
    srcY (size_t) -- Source Y
    srcMemoryType (CUmemorytype) -- Source memory type (host, device, array)
    srcHost (Any) -- Source host pointer
    srcDevice (CUdeviceptr) -- Source device pointer
    srcArray (CUarray) -- Source array reference
    srcPitch (size_t) -- Source pitch (ignored when src is array)
    dstXInBytes (size_t) -- Destination X in bytes
    dstY (size_t) -- Destination Y
    dstMemoryType (CUmemorytype) -- Destination memory type (host, device, array)
    dstHost (Any) -- Destination host pointer
    dstDevice (CUdeviceptr) -- Destination device pointer
    dstArray (CUarray) -- Destination array reference
    dstPitch (size_t) -- Destination pitch (ignored when dst is array)
    WidthInBytes (size_t) -- Width of 2D memory copy in bytes
    Height (size_t) -- Height of 2D memory copy
    getPtr() -- Get memory address of class instance
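
    Only the src/dst fields matching the chosen memory types need to be set.
    A minimal sketch staging a host buffer into a pitched device allocation
    with cuMemcpy2D; the raw-address assignment to srcHost is an assumption
    about how the bindings accept host pointers.

        import ctypes
        from cuda.bindings import driver

        err, = driver.cuInit(0)
        err, dev = driver.cuDeviceGet(0)
        err, ctx = driver.cuCtxCreate(0, dev)

        width, height = 256, 64                       # float elements
        host = (ctypes.c_float * (width * height))()
        err, dptr, pitch = driver.cuMemAllocPitch(width * 4, height, 4)

        cpy = driver.CUDA_MEMCPY2D()
        cpy.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_HOST
        cpy.srcHost = ctypes.addressof(host)
        cpy.srcPitch = width * 4                      # tightly packed rows
        cpy.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cpy.dstDevice = dptr
        cpy.dstPitch = pitch                          # pitch from cuMemAllocPitch
        cpy.WidthInBytes = width * 4
        cpy.Height = height

        err, = driver.cuMemcpy2D(cpy)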

class cuda.bindings.driver.CUDA_MEMCPY3D_v2

    3D memory copy parameters

    srcXInBytes (size_t) -- Source X in bytes
    srcY (size_t) -- Source Y
    srcZ (size_t) -- Source Z
    srcLOD (size_t) -- Source LOD
    srcMemoryType (CUmemorytype) -- Source memory type (host, device, array)
    srcHost (Any) -- Source host pointer
    srcDevice (CUdeviceptr) -- Source device pointer
    srcArray (CUarray) -- Source array reference
    reserved0 (Any) -- Must be NULL
    srcPitch (size_t) -- Source pitch (ignored when src is array)
    srcHeight (size_t) -- Source height (ignored when src is array; may be 0 if Depth==1)
    dstXInBytes (size_t) -- Destination X in bytes
    dstY (size_t) -- Destination Y
    dstZ (size_t) -- Destination Z
    dstLOD (size_t) -- Destination LOD
    dstMemoryType (CUmemorytype) -- Destination memory type (host, device, array)
    dstHost (Any) -- Destination host pointer
    dstDevice (CUdeviceptr) -- Destination device pointer
    dstArray (CUarray) -- Destination array reference
    reserved1 (Any) -- Must be NULL
    dstPitch (size_t) -- Destination pitch (ignored when dst is array)
    dstHeight (size_t) -- Destination height (ignored when dst is array; may be 0 if Depth==1)
    WidthInBytes (size_t) -- Width of 3D memory copy in bytes
    Height (size_t) -- Height of 3D memory copy
    Depth (size_t) -- Depth of 3D memory copy
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEMCPY3D

    3D memory copy parameters

    srcXInBytes (size_t) -- Source X in bytes
    srcY (size_t) -- Source Y
    srcZ (size_t) -- Source Z
    srcLOD (size_t) -- Source LOD
    srcMemoryType (CUmemorytype) -- Source memory type (host, device, array)
    srcHost (Any) -- Source host pointer
    srcDevice (CUdeviceptr) -- Source device pointer
    srcArray (CUarray) -- Source array reference
    reserved0 (Any) -- Must be NULL
    srcPitch (size_t) -- Source pitch (ignored when src is array)
    srcHeight (size_t) -- Source height (ignored when src is array; may be 0 if Depth==1)
    dstXInBytes (size_t) -- Destination X in bytes
    dstY (size_t) -- Destination Y
    dstZ (size_t) -- Destination Z
    dstLOD (size_t) -- Destination LOD
    dstMemoryType (CUmemorytype) -- Destination memory type (host, device, array)
    dstHost (Any) -- Destination host pointer
    dstDevice (CUdeviceptr) -- Destination device pointer
    dstArray (CUarray) -- Destination array reference
    reserved1 (Any) -- Must be NULL
    dstPitch (size_t) -- Destination pitch (ignored when dst is array)
    dstHeight (size_t) -- Destination height (ignored when dst is array; may be 0 if Depth==1)
    WidthInBytes (size_t) -- Width of 3D memory copy in bytes
    Height (size_t) -- Height of 3D memory copy
    Depth (size_t) -- Depth of 3D memory copy
    getPtr() -- Get memory address of class instance
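
    The 3D variant adds per-slice pitch and height. A minimal sketch of a
    device-to-device copy between two linear allocations laid out as
    width x height x depth with 4-byte elements; it assumes `dsrc` and `ddst`
    are CUdeviceptr allocations of at least w*h*d*4 bytes in a current
    context.

        from cuda.bindings import driver

        w, h, d = 64, 32, 16

        cpy = driver.CUDA_MEMCPY3D()
        cpy.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cpy.srcDevice = dsrc
        cpy.srcPitch = w * 4          # bytes per row
        cpy.srcHeight = h             # rows per slice
        cpy.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cpy.dstDevice = ddst
        cpy.dstPitch = w * 4
        cpy.dstHeight = h
        cpy.WidthInBytes = w * 4
        cpy.Height = h
        cpy.Depth = d

        err, = driver.cuMemcpy3D(cpy)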

class cuda.bindings.driver.CUDA_MEMCPY3D_PEER_v1

    3D memory cross-context copy parameters

    srcXInBytes (size_t) -- Source X in bytes
    srcY (size_t) -- Source Y
    srcZ (size_t) -- Source Z
    srcLOD (size_t) -- Source LOD
    srcMemoryType (CUmemorytype) -- Source memory type (host, device, array)
    srcHost (Any) -- Source host pointer
    srcDevice (CUdeviceptr) -- Source device pointer
    srcArray (CUarray) -- Source array reference
    srcContext (CUcontext) -- Source context (ignored when srcMemoryType is CU_MEMORYTYPE_ARRAY)
    srcPitch (size_t) -- Source pitch (ignored when src is array)
    srcHeight (size_t) -- Source height (ignored when src is array; may be 0 if Depth==1)
    dstXInBytes (size_t) -- Destination X in bytes
    dstY (size_t) -- Destination Y
    dstZ (size_t) -- Destination Z
    dstLOD (size_t) -- Destination LOD
    dstMemoryType (CUmemorytype) -- Destination memory type (host, device, array)
    dstHost (Any) -- Destination host pointer
    dstDevice (CUdeviceptr) -- Destination device pointer
    dstArray (CUarray) -- Destination array reference
    dstContext (CUcontext) -- Destination context (ignored when dstMemoryType is CU_MEMORYTYPE_ARRAY)
    dstPitch (size_t) -- Destination pitch (ignored when dst is array)
    dstHeight (size_t) -- Destination height (ignored when dst is array; may be 0 if Depth==1)
    WidthInBytes (size_t) -- Width of 3D memory copy in bytes
    Height (size_t) -- Height of 3D memory copy
    Depth (size_t) -- Depth of 3D memory copy
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEMCPY3D_PEER

    3D memory cross-context copy parameters

    srcXInBytes (size_t) -- Source X in bytes
    srcY (size_t) -- Source Y
    srcZ (size_t) -- Source Z
    srcLOD (size_t) -- Source LOD
    srcMemoryType (CUmemorytype) -- Source memory type (host, device, array)
    srcHost (Any) -- Source host pointer
    srcDevice (CUdeviceptr) -- Source device pointer
    srcArray (CUarray) -- Source array reference
    srcContext (CUcontext) -- Source context (ignored when srcMemoryType is CU_MEMORYTYPE_ARRAY)
    srcPitch (size_t) -- Source pitch (ignored when src is array)
    srcHeight (size_t) -- Source height (ignored when src is array; may be 0 if Depth==1)
    dstXInBytes (size_t) -- Destination X in bytes
    dstY (size_t) -- Destination Y
    dstZ (size_t) -- Destination Z
    dstLOD (size_t) -- Destination LOD
    dstMemoryType (CUmemorytype) -- Destination memory type (host, device, array)
    dstHost (Any) -- Destination host pointer
    dstDevice (CUdeviceptr) -- Destination device pointer
    dstArray (CUarray) -- Destination array reference
    dstContext (CUcontext) -- Destination context (ignored when dstMemoryType is CU_MEMORYTYPE_ARRAY)
    dstPitch (size_t) -- Destination pitch (ignored when dst is array)
    dstHeight (size_t) -- Destination height (ignored when dst is array; may be 0 if Depth==1)
    WidthInBytes (size_t) -- Width of 3D memory copy in bytes
    Height (size_t) -- Height of 3D memory copy
    Depth (size_t) -- Depth of 3D memory copy
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS

    Memcpy node parameters

    flags (int) -- Must be zero
    reserved (int) -- Must be zero
    copyCtx (CUcontext) -- Context on which to run the node
    copyParams (CUDA_MEMCPY3D) -- Parameters for the memory copy
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_v2

    Array descriptor

    Width (size_t) -- Width of array
    Height (size_t) -- Height of array
    Format (CUarray_format) -- Array format
    NumChannels (unsigned int) -- Channels per array element
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR

    Array descriptor

    Width (size_t) -- Width of array
    Height (size_t) -- Height of array
    Format (CUarray_format) -- Array format
    NumChannels (unsigned int) -- Channels per array element
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_v2

    3D array descriptor

    Width (size_t) -- Width of 3D array
    Height (size_t) -- Height of 3D array
    Depth (size_t) -- Depth of 3D array
    Format (CUarray_format) -- Array format
    NumChannels (unsigned int) -- Channels per array element
    Flags (unsigned int) -- Flags
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR

    3D array descriptor

    Width (size_t) -- Width of 3D array
    Height (size_t) -- Height of 3D array
    Depth (size_t) -- Depth of 3D array
    Format (CUarray_format) -- Array format
    NumChannels (unsigned int) -- Channels per array element
    Flags (unsigned int) -- Flags
    getPtr() -- Get memory address of class instance
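Both descriptor generations are consumed the same way: fill the fields and pass the object to cuArray3DCreate. A minimal sketch, assuming a current context (format and extents are illustrative):

    from cuda.bindings import driver

    driver.cuInit(0)
    err, dev = driver.cuDeviceGet(0)
    err, ctx = driver.cuCtxCreate(0, dev)

    desc = driver.CUDA_ARRAY3D_DESCRIPTOR()
    desc.Width = 128
    desc.Height = 128
    desc.Depth = 16
    desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1
    desc.Flags = 0

    err, arr = driver.cuArray3DCreate(desc)   # arr is a CUarray handle
    err, = driver.cuArrayDestroy(arr)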
class cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_v1

    CUDA array sparse properties

    tileExtent (anon_struct5)
    miptailFirstLevel (unsigned int) -- First mip level at which the mip tail begins.
    miptailSize (unsigned long long) -- Total size of the mip tail.
    flags (unsigned int) -- Flags will either be zero or CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES

    CUDA array sparse properties

    tileExtent (anon_struct5)
    miptailFirstLevel (unsigned int) -- First mip level at which the mip tail begins.
    miptailSize (unsigned long long) -- Total size of the mip tail.
    flags (unsigned int) -- Flags will either be zero or CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1

    CUDA array memory requirements

    size (size_t) -- Total required memory size
    alignment (size_t) -- Alignment requirement
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS

    CUDA array memory requirements

    size (size_t) -- Total required memory size
    alignment (size_t) -- Alignment requirement
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_RESOURCE_DESC_v1

    CUDA Resource descriptor

    resType (CUresourcetype) -- Resource type
    res (anon_union4)
    flags (unsigned int) -- Flags (must be zero)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_RESOURCE_DESC

    CUDA Resource descriptor

    resType (CUresourcetype) -- Resource type
    res (anon_union4)
    flags (unsigned int) -- Flags (must be zero)
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_TEXTURE_DESC_v1

    Texture descriptor

    addressMode (List[CUaddress_mode]) -- Address modes
    filterMode (CUfilter_mode) -- Filter mode
    flags (unsigned int) -- Flags
    maxAnisotropy (unsigned int) -- Maximum anisotropy ratio
    mipmapFilterMode (CUfilter_mode) -- Mipmap filter mode
    mipmapLevelBias (float) -- Mipmap level bias
    minMipmapLevelClamp (float) -- Mipmap minimum level clamp
    maxMipmapLevelClamp (float) -- Mipmap maximum level clamp
    borderColor (List[float]) -- Border Color
    reserved (List[int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_TEXTURE_DESC

    Texture descriptor

    addressMode (List[CUaddress_mode]) -- Address modes
    filterMode (CUfilter_mode) -- Filter mode
    flags (unsigned int) -- Flags
    maxAnisotropy (unsigned int) -- Maximum anisotropy ratio
    mipmapFilterMode (CUfilter_mode) -- Mipmap filter mode
    mipmapLevelBias (float) -- Mipmap level bias
    minMipmapLevelClamp (float) -- Mipmap minimum level clamp
    maxMipmapLevelClamp (float) -- Mipmap maximum level clamp
    borderColor (List[float]) -- Border Color
    reserved (List[int])
    getPtr() -- Get memory address of class instance
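Together with CUDA_RESOURCE_DESC above, the texture descriptor is what cuTexObjectCreate consumes. A sketch building a texture object over a CUDA array `arr` such as the one created in the earlier array sketch (passing None for the optional resource-view descriptor, and assigning the addressMode list wholesale, are assumptions about the binding):

    from cuda.bindings import driver

    # `arr` is assumed to be a CUarray created as in the earlier sketch.
    res = driver.CUDA_RESOURCE_DESC()
    res.resType = driver.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
    res.res.array.hArray = arr

    tex = driver.CUDA_TEXTURE_DESC()
    tex.addressMode = [driver.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP] * 3
    tex.filterMode = driver.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR
    tex.flags = driver.CU_TRSF_NORMALIZED_COORDINATES

    err, tex_obj = driver.cuTexObjectCreate(res, tex, None)
    err, = driver.cuTexObjectDestroy(tex_obj)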
class cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_v1

    Resource view descriptor

    format (CUresourceViewFormat) -- Resource view format
    width (size_t) -- Width of the resource view
    height (size_t) -- Height of the resource view
    depth (size_t) -- Depth of the resource view
    firstMipmapLevel (unsigned int) -- First defined mipmap level
    lastMipmapLevel (unsigned int) -- Last defined mipmap level
    firstLayer (unsigned int) -- First layer index
    lastLayer (unsigned int) -- Last layer index
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC

    Resource view descriptor

    format (CUresourceViewFormat) -- Resource view format
    width (size_t) -- Width of the resource view
    height (size_t) -- Height of the resource view
    depth (size_t) -- Depth of the resource view
    firstMipmapLevel (unsigned int) -- First defined mipmap level
    lastMipmapLevel (unsigned int) -- Last defined mipmap level
    firstLayer (unsigned int) -- First layer index
    lastLayer (unsigned int) -- Last layer index
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUtensorMap

    Tensor map descriptor. Requires compiler support for aligning to 64 bytes.

    opaque (List[cuuint64_t])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1

    GPU Direct v3 tokens

    p2pToken (unsigned long long)
    vaSpaceToken (unsigned int)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS

    GPU Direct v3 tokens

    p2pToken (unsigned long long)
    vaSpaceToken (unsigned int)
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_LAUNCH_PARAMS_v1

    Kernel launch parameters

    function (CUfunction) -- Kernel to launch
    gridDimX (unsigned int) -- Width of grid in blocks
    gridDimY (unsigned int) -- Height of grid in blocks
    gridDimZ (unsigned int) -- Depth of grid in blocks
    blockDimX (unsigned int) -- X dimension of each thread block
    blockDimY (unsigned int) -- Y dimension of each thread block
    blockDimZ (unsigned int) -- Z dimension of each thread block
    sharedMemBytes (unsigned int) -- Dynamic shared-memory size per thread block in bytes
    hStream (CUstream) -- Stream identifier
    kernelParams (Any) -- Array of pointers to kernel parameters
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_LAUNCH_PARAMS

    Kernel launch parameters

    function (CUfunction) -- Kernel to launch
    gridDimX (unsigned int) -- Width of grid in blocks
    gridDimY (unsigned int) -- Height of grid in blocks
    gridDimZ (unsigned int) -- Depth of grid in blocks
    blockDimX (unsigned int) -- X dimension of each thread block
    blockDimY (unsigned int) -- Y dimension of each thread block
    blockDimZ (unsigned int) -- Z dimension of each thread block
    sharedMemBytes (unsigned int) -- Dynamic shared-memory size per thread block in bytes
    hStream (CUstream) -- Stream identifier
    kernelParams (Any) -- Array of pointers to kernel parameters
    getPtr() -- Get memory address of class instance
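The launch-parameter fields line up with the arguments of cuLaunchKernel; an array of these structures is what cuLaunchCooperativeKernelMultiDevice consumes, one entry per device. A small helper sketch (func, grid, block, shared_bytes, and stream are caller-supplied assumptions):

    from cuda.bindings import driver

    def make_launch_params(func, grid, block, shared_bytes, stream):
        # Package one device's launch configuration; kernelParams is left
        # at its default here and would be set as for cuLaunchKernel.
        lp = driver.CUDA_LAUNCH_PARAMS()
        lp.function = func
        lp.gridDimX, lp.gridDimY, lp.gridDimZ = grid
        lp.blockDimX, lp.blockDimY, lp.blockDimZ = block
        lp.sharedMemBytes = shared_bytes
        lp.hStream = stream
        return lp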
class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1

    External memory handle descriptor

    type (CUexternalMemoryHandleType) -- Type of the handle
    handle (anon_union5)
    size (unsigned long long) -- Size of the memory allocation
    flags (unsigned int) -- Flags must either be zero or CUDA_EXTERNAL_MEMORY_DEDICATED
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC

    External memory handle descriptor

    type (CUexternalMemoryHandleType) -- Type of the handle
    handle (anon_union5)
    size (unsigned long long) -- Size of the memory allocation
    flags (unsigned int) -- Flags must either be zero or CUDA_EXTERNAL_MEMORY_DEDICATED
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1

    External memory buffer descriptor

    offset (unsigned long long) -- Offset into the memory object where the buffer's base is
    size (unsigned long long) -- Size of the buffer
    flags (unsigned int) -- Flags reserved for future use. Must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC

    External memory buffer descriptor

    offset (unsigned long long) -- Offset into the memory object where the buffer's base is
    size (unsigned long long) -- Size of the buffer
    flags (unsigned int) -- Flags reserved for future use. Must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1

    External memory mipmap descriptor

    offset (unsigned long long) -- Offset into the memory object where the base level of the mipmap chain is.
    arrayDesc (CUDA_ARRAY3D_DESCRIPTOR) -- Format, dimension and type of base level of the mipmap chain
    numLevels (unsigned int) -- Total number of levels in the mipmap chain
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC

    External memory mipmap descriptor

    offset (unsigned long long) -- Offset into the memory object where the base level of the mipmap chain is.
    arrayDesc (CUDA_ARRAY3D_DESCRIPTOR) -- Format, dimension and type of base level of the mipmap chain
    numLevels (unsigned int) -- Total number of levels in the mipmap chain
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
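These descriptors drive the external-memory import path: cuImportExternalMemory consumes the handle descriptor, after which the buffer descriptor maps a region of the imported object. A sketch for an opaque POSIX file-descriptor handle (fd and nbytes are hypothetical caller-supplied values):

    from cuda.bindings import driver

    def import_external_buffer(fd, nbytes):
        # Import the external allocation behind an opaque POSIX fd...
        ext = driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC()
        ext.type = driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD
        ext.handle.fd = fd
        ext.size = nbytes
        err, ext_mem = driver.cuImportExternalMemory(ext)

        # ...then map the whole range as a device-accessible buffer.
        buf = driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC()
        buf.offset = 0
        buf.size = nbytes
        buf.flags = 0
        err, dptr = driver.cuExternalMemoryGetMappedBuffer(ext_mem, buf)
        return ext_mem, dptr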
class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1

    External semaphore handle descriptor

    type (CUexternalSemaphoreHandleType) -- Type of the handle
    handle (anon_union6)
    flags (unsigned int) -- Flags reserved for the future. Must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC

    External semaphore handle descriptor

    type (CUexternalSemaphoreHandleType) -- Type of the handle
    handle (anon_union6)
    flags (unsigned int) -- Flags reserved for the future. Must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1

    External semaphore signal parameters

    params (anon_struct15)
    flags (unsigned int) -- Only when CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to signal a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which indicates that while signaling the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS

    External semaphore signal parameters

    params (anon_struct15)
    flags (unsigned int) -- Only when CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to signal a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which indicates that while signaling the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1

    External semaphore wait parameters

    params (anon_struct18)
    flags (unsigned int) -- Only when CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which indicates that while waiting for the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS

    External semaphore wait parameters

    params (anon_struct18)
    flags (unsigned int) -- Only when CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on a CUexternalSemaphore of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which indicates that while waiting for the CUexternalSemaphore, no memory synchronization operations should be performed for any external memory object imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. For all other types of CUexternalSemaphore, flags must be zero.
    reserved (List[unsigned int])
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1

    Semaphore signal node parameters

    extSemArray (CUexternalSemaphore) -- Array of external semaphore handles.
    paramsArray (CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) -- Array of external semaphore signal parameters.
    numExtSems (unsigned int) -- Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS

    Semaphore signal node parameters

    extSemArray (CUexternalSemaphore) -- Array of external semaphore handles.
    paramsArray (CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) -- Array of external semaphore signal parameters.
    numExtSems (unsigned int) -- Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2

    Semaphore signal node parameters

    extSemArray (CUexternalSemaphore) -- Array of external semaphore handles.
    paramsArray (CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS) -- Array of external semaphore signal parameters.
    numExtSems (unsigned int) -- Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1

    Semaphore wait node parameters

    extSemArray (CUexternalSemaphore) -- Array of external semaphore handles.
    paramsArray (CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) -- Array of external semaphore wait parameters.
    numExtSems (unsigned int) -- Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS

    Semaphore wait node parameters

    extSemArray (CUexternalSemaphore) -- Array of external semaphore handles.
    paramsArray (CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) -- Array of external semaphore wait parameters.
    numExtSems (unsigned int) -- Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2

    Semaphore wait node parameters

    extSemArray (CUexternalSemaphore) -- Array of external semaphore handles.
    paramsArray (CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS) -- Array of external semaphore wait parameters.
    numExtSems (unsigned int) -- Number of handles and parameters supplied in extSemArray and paramsArray.
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUmemGenericAllocationHandle_v1

    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUmemGenericAllocationHandle

    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUarrayMapInfo_v1

    Specifies the CUDA array or CUDA mipmapped array memory mapping information

    resourceType (CUresourcetype) -- Resource type
    resource (anon_union9)
    subresourceType (CUarraySparseSubresourceType) -- Sparse subresource type
    subresource (anon_union10)
    memOperationType (CUmemOperationType) -- Memory operation type
    memHandleType (CUmemHandleType) -- Memory handle type
    memHandle (anon_union11)
    offset (unsigned long long) -- Offset within the mip tail, or offset within the memory being mapped
    deviceBitMask (unsigned int) -- Device ordinal bit mask
    flags (unsigned int) -- Flags for future use, must be zero now.
    reserved (List[unsigned int]) -- Reserved for future use, must be zero now.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUarrayMapInfo

    Specifies the CUDA array or CUDA mipmapped array memory mapping information

    resourceType (CUresourcetype) -- Resource type
    resource (anon_union9)
    subresourceType (CUarraySparseSubresourceType) -- Sparse subresource type
    subresource (anon_union10)
    memOperationType (CUmemOperationType) -- Memory operation type
    memHandleType (CUmemHandleType) -- Memory handle type
    memHandle (anon_union11)
    offset (unsigned long long) -- Offset within the mip tail, or offset within the memory being mapped
    deviceBitMask (unsigned int) -- Device ordinal bit mask
    flags (unsigned int) -- Flags for future use, must be zero now.
    reserved (List[unsigned int]) -- Reserved for future use, must be zero now.
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUmemLocation_v1

    Specifies a memory location.

    type (CUmemLocationType) -- Specifies the location type, which modifies the meaning of id.
    id (int) -- Identifier for the location; its meaning depends on this location's CUmemLocationType.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUmemLocation

    Specifies a memory location.

    type (CUmemLocationType) -- Specifies the location type, which modifies the meaning of id.
    id (int) -- Identifier for the location; its meaning depends on this location's CUmemLocationType.
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUmemAllocationProp_v1

    Specifies the allocation properties for an allocation.

    type (CUmemAllocationType) -- Allocation type
    requestedHandleTypes (CUmemAllocationHandleType) -- Requested CUmemAllocationHandleType
    location (CUmemLocation) -- Location of allocation
    win32HandleMetaData (Any) -- Windows-specific POBJECT_ATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure includes security attributes that define the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
    allocFlags (anon_struct21)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUmemAllocationProp

    Specifies the allocation properties for an allocation.

    type (CUmemAllocationType) -- Allocation type
    requestedHandleTypes (CUmemAllocationHandleType) -- Requested CUmemAllocationHandleType
    location (CUmemLocation) -- Location of allocation
    win32HandleMetaData (Any) -- Windows-specific POBJECT_ATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure includes security attributes that define the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
    allocFlags (anon_struct21)
    getPtr() -- Get memory address of class instance
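CUmemAllocationProp is the input to the virtual memory management entry points: query the allocation granularity for a given set of properties, then create a physical allocation with cuMemCreate. A sketch for a pinned allocation on device 0, assuming a current context:

    from cuda.bindings import driver

    driver.cuInit(0)
    err, dev = driver.cuDeviceGet(0)
    err, ctx = driver.cuCtxCreate(0, dev)

    prop = driver.CUmemAllocationProp()
    prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0

    err, gran = driver.cuMemGetAllocationGranularity(
        prop, driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
    size = gran                                   # one granule of physical memory
    err, handle = driver.cuMemCreate(size, prop, 0)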
class cuda.bindings.driver.CUmulticastObjectProp_v1

    Specifies the properties for a multicast object.

    numDevices (unsigned int) -- The number of devices in the multicast team that will bind memory to this object
    size (size_t) -- The maximum amount of memory that can be bound to this multicast object per device
    handleTypes (unsigned long long) -- Bitmask of exportable handle types (see CUmemAllocationHandleType) for this object
    flags (unsigned long long) -- Flags for future use, must be zero now
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUmulticastObjectProp

    Specifies the properties for a multicast object.

    numDevices (unsigned int) -- The number of devices in the multicast team that will bind memory to this object
    size (size_t) -- The maximum amount of memory that can be bound to this multicast object per device
    handleTypes (unsigned long long) -- Bitmask of exportable handle types (see CUmemAllocationHandleType) for this object
    flags (unsigned long long) -- Flags for future use, must be zero now
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUmemAccessDesc_v1

    Memory access descriptor

    location (CUmemLocation) -- Location on which the request is to change its accessibility
    flags (CUmemAccess_flags) -- CUmemProt accessibility flags to set on the request
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUmemAccessDesc

    Memory access descriptor

    location (CUmemLocation) -- Location on which the request is to change its accessibility
    flags (CUmemAccess_flags) -- CUmemProt accessibility flags to set on the request
    getPtr() -- Get memory address of class instance
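The access descriptor closes the loop on the previous sketch: after reserving a virtual address range and mapping the physical handle into it, cuMemSetAccess takes an array of these descriptors. Continuing with `size` and `handle` from the cuMemCreate sketch (that the binding accepts a Python list of descriptors is an assumption):

    from cuda.bindings import driver

    # Reserve a VA range and map the physical allocation into it.
    err, va = driver.cuMemAddressReserve(size, 0, 0, 0)  # size, alignment, fixed addr, flags
    err, = driver.cuMemMap(va, size, 0, handle, 0)

    # Grant read/write access to device 0 for the mapped range.
    acc = driver.CUmemAccessDesc()
    acc.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    acc.location.id = 0
    acc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
    err, = driver.cuMemSetAccess(va, size, [acc], 1)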
class cuda.bindings.driver.CUgraphExecUpdateResultInfo_v1

    Result information returned by cuGraphExecUpdate

    result (CUgraphExecUpdateResult) -- Gives more specific detail when a CUDA graph update fails.
    errorNode (CUgraphNode) -- The "to node" of the error edge when the topologies do not match. The error node when the error is associated with a specific node. NULL when the error is generic.
    errorFromNode (CUgraphNode) -- The "from node" of the error edge when the topologies do not match. Otherwise NULL.
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUgraphExecUpdateResultInfo

    Result information returned by cuGraphExecUpdate

    result (CUgraphExecUpdateResult) -- Gives more specific detail when a CUDA graph update fails.
    errorNode (CUgraphNode) -- The "to node" of the error edge when the topologies do not match. The error node when the error is associated with a specific node. NULL when the error is generic.
    errorFromNode (CUgraphNode) -- The "from node" of the error edge when the topologies do not match. Otherwise NULL.
    getPtr() -- Get memory address of class instance
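This structure is produced by cuGraphExecUpdate and explains why an in-place update of an instantiated graph was rejected. A hedged sketch (graph_exec and graph are assumed to exist; that the binding returns the info struct alongside the status is an assumption):

    from cuda.bindings import driver

    def try_update(graph_exec, graph):
        # Attempt to update the instantiated graph in place instead of
        # re-instantiating it; inspect the result info on failure.
        err, info = driver.cuGraphExecUpdate(graph_exec, graph)
        if info.result != driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_SUCCESS:
            print("update failed:", info.result, "node:", info.errorNode)
            return False
        return True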
class cuda.bindings.driver.CUmemPoolProps_v1

    Specifies the properties of allocations made from the pool.

    allocType (CUmemAllocationType) -- Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED
    handleTypes (CUmemAllocationHandleType) -- Handle types that will be supported by allocations from the pool.
    location (CUmemLocation) -- Location where allocations should reside.
    win32SecurityAttributes (Any) -- Windows-specific LPSECURITYATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
    maxSize (size_t) -- Maximum pool size. When set to 0, defaults to a system-dependent value.
    usage (unsigned short) -- Bitmask indicating intended usage for the pool.
    reserved (bytes) -- Reserved for future use, must be 0
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUmemPoolProps

    Specifies the properties of allocations made from the pool.

    allocType (CUmemAllocationType) -- Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED
    handleTypes (CUmemAllocationHandleType) -- Handle types that will be supported by allocations from the pool.
    location (CUmemLocation) -- Location where allocations should reside.
    win32SecurityAttributes (Any) -- Windows-specific LPSECURITYATTRIBUTES required when CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines the scope of which exported allocations may be transferred to other processes. In all other cases, this field is required to be zero.
    maxSize (size_t) -- Maximum pool size. When set to 0, defaults to a system-dependent value.
    usage (unsigned short) -- Bitmask indicating intended usage for the pool.
    reserved (bytes) -- Reserved for future use, must be 0
    getPtr() -- Get memory address of class instance
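The pool properties feed cuMemPoolCreate, after which stream-ordered allocations can be drawn from the pool. A sketch for a device-local pool (the allocation call in the trailing comment assumes an existing stream):

    from cuda.bindings import driver

    def make_device_pool(device_ordinal=0):
        props = driver.CUmemPoolProps()
        props.allocType = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        props.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        props.location.id = device_ordinal
        err, pool = driver.cuMemPoolCreate(props)
        return pool

    # Usage: draw a stream-ordered allocation from the pool.
    # err, dptr = driver.cuMemAllocFromPoolAsync(nbytes, pool, stream)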
class cuda.bindings.driver.CUmemPoolPtrExportData_v1

    Opaque data for exporting a pool allocation

    reserved (bytes)
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUmemPoolPtrExportData

    Opaque data for exporting a pool allocation

    reserved (bytes)
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1

    Memory allocation node parameters

    poolProps (CUmemPoolProps) -- in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
    accessDescs (CUmemAccessDesc) -- in: array of memory access descriptors. Used to describe peer GPU access
    accessDescCount (size_t) -- in: number of memory access descriptors. Must not exceed the number of GPUs.
    bytesize (size_t) -- in: size in bytes of the requested allocation
    dptr (CUdeviceptr) -- out: address of the allocation returned by CUDA
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS

    Memory allocation node parameters

    poolProps (CUmemPoolProps) -- in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
    accessDescs (CUmemAccessDesc) -- in: array of memory access descriptors. Used to describe peer GPU access
    accessDescCount (size_t) -- in: number of memory access descriptors. Must not exceed the number of GPUs.
    bytesize (size_t) -- in: size in bytes of the requested allocation
    dptr (CUdeviceptr) -- out: address of the allocation returned by CUDA
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2

    Memory allocation node parameters

    poolProps (CUmemPoolProps) -- in: location where the allocation should reside (specified in location). handleTypes must be CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.
    accessDescs (CUmemAccessDesc) -- in: array of memory access descriptors. Used to describe peer GPU access
    accessDescCount (size_t) -- in: number of memory access descriptors. Must not exceed the number of GPUs.
    bytesize (size_t) -- in: size in bytes of the requested allocation
    dptr (CUdeviceptr) -- out: address of the allocation returned by CUDA
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS

    Memory free node parameters

    dptr (CUdeviceptr) -- in: the pointer to free
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS

    Child graph node parameters

    graph (CUgraph) -- The child graph to clone into the node for node creation, or a handle to the graph owned by the node for node query
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS

    Event record node parameters

    event (CUevent) -- The event to record when the node executes
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS

    Event wait node parameters

    event (CUevent) -- The event to wait on from the node
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUgraphNodeParams

    Graph node parameters. See cuGraphAddNode.

    type (CUgraphNodeType) -- Type of the node
    reserved0 (List[int]) -- Reserved. Must be zero.
    reserved1 (List[long long]) -- Padding. Unused bytes must be zero.
    kernel (CUDA_KERNEL_NODE_PARAMS_v3) -- Kernel node parameters.
    memcpy (CUDA_MEMCPY_NODE_PARAMS) -- Memcpy node parameters.
    memset (CUDA_MEMSET_NODE_PARAMS_v2) -- Memset node parameters.
    host (CUDA_HOST_NODE_PARAMS_v2) -- Host node parameters.
    graph (CUDA_CHILD_GRAPH_NODE_PARAMS) -- Child graph node parameters.
    eventWait (CUDA_EVENT_WAIT_NODE_PARAMS) -- Event wait node parameters.
    eventRecord (CUDA_EVENT_RECORD_NODE_PARAMS) -- Event record node parameters.
    extSemSignal (CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2) -- External semaphore signal node parameters.
    extSemWait (CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2) -- External semaphore wait node parameters.
    alloc (CUDA_MEM_ALLOC_NODE_PARAMS_v2) -- Memory allocation node parameters.
    free (CUDA_MEM_FREE_NODE_PARAMS) -- Memory free node parameters.
    memOp (CUDA_BATCH_MEM_OP_NODE_PARAMS_v2) -- MemOp node parameters.
    conditional (CUDA_CONDITIONAL_NODE_PARAMS) -- Conditional node parameters.
    reserved2 (long long) -- Reserved bytes. Must be zero.
    getPtr() -- Get memory address of class instance
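CUgraphNodeParams is the tagged-union form consumed by cuGraphAddNode: set type, fill the matching member, and leave the reserved words zero. A sketch adding an event-record node (graph and event are assumed to exist; passing None with a zero dependency count is an assumption about the binding):

    from cuda.bindings import driver

    def add_event_record_node(graph, event):
        params = driver.CUgraphNodeParams()
        params.type = driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD
        params.eventRecord.event = event      # fill only the member matching `type`
        err, node = driver.cuGraphAddNode(graph, None, 0, params)
        return node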
class cuda.bindings.driver.CUeglFrame_v1

    CUDA EGLFrame structure descriptor: defines one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar or not.

    frame (anon_union14)
    width (unsigned int) -- Width of first plane
    height (unsigned int) -- Height of first plane
    depth (unsigned int) -- Depth of first plane
    pitch (unsigned int) -- Pitch of first plane
    planeCount (unsigned int) -- Number of planes
    numChannels (unsigned int) -- Number of channels for the plane
    frameType (CUeglFrameType) -- Array or Pitch
    eglColorFormat (CUeglColorFormat) -- CUDA EGL Color Format
    cuFormat (CUarray_format) -- CUDA Array Format
    getPtr() -- Get memory address of class instance

class cuda.bindings.driver.CUeglFrame

    CUDA EGLFrame structure descriptor: defines one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar or not.

    frame (anon_union14)
    width (unsigned int) -- Width of first plane
    height (unsigned int) -- Height of first plane
    depth (unsigned int) -- Depth of first plane
    pitch (unsigned int) -- Pitch of first plane
    planeCount (unsigned int) -- Number of planes
    numChannels (unsigned int) -- Number of channels for the plane
    frameType (CUeglFrameType) -- Array or Pitch
    eglColorFormat (CUeglColorFormat) -- CUDA EGL Color Format
    cuFormat (CUarray_format) -- CUDA Array Format
    getPtr() -- Get memory address of class instance
class cuda.bindings.driver.CUeglStreamConnection(*args, **kwargs)

    CUDA EGLStream Connection

    getPtr() -- Get memory address of class instance
driver.CUDA_VERSION = 12060#

    CUDA API version number

driver.CU_IPC_HANDLE_SIZE = 64#

    CUDA IPC handle size

driver.CU_STREAM_LEGACY = 1#

    Legacy stream handle

    Stream handle that can be passed as a CUstream to use an implicit stream with legacy synchronization behavior.

    See details of the stream synchronization behavior.

driver.CU_STREAM_PER_THREAD = 2#

    Per-thread stream handle

    Stream handle that can be passed as a CUstream to use an implicit stream with per-thread synchronization behavior.

    See details of the stream synchronization behavior.

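As a usage sketch (our own, not part of the upstream reference), the special handles can be wrapped in a CUstream and passed anywhere a stream is expected. This assumes cuInit() and a current context are already in place, and that the CUstream wrapper accepts an integer init value:

    from cuda.bindings import driver

    # Assumes cuInit(0) and a current context have already been set up.
    legacy = driver.CUstream(driver.CU_STREAM_LEGACY)          # implicit legacy stream
    per_thread = driver.CUstream(driver.CU_STREAM_PER_THREAD)  # implicit per-thread stream
    err, = driver.cuStreamSynchronize(legacy)
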
driver.CU_COMPUTE_ACCELERATED_TARGET_BASE = 65536#

driver.CU_GRAPH_COND_ASSIGN_DEFAULT = 1#

    Conditional node handle flags. Default value is applied when graph is launched.

driver.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT = 0#

    This port activates when the kernel has finished executing.

driver.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC = 1#

    This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC. See also CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT.

driver.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER = 2#

    This port activates when all blocks of the kernel have begun execution. See also CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT.

driver.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1#
driver.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE = 2#
driver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION = 4#
driver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5#
driver.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY = 8#
driver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9#
driver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN = 10#
driver.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = 13#
driver.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 14#

driver.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1#
driver.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3#
driver.CU_STREAM_ATTRIBUTE_PRIORITY = 8#
driver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9#
driver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN = 10#

driver.CU_MEMHOSTALLOC_PORTABLE = 1#

    If set, host memory is portable between CUDA contexts. Flag for cuMemHostAlloc()

driver.CU_MEMHOSTALLOC_DEVICEMAP = 2#

    If set, host memory is mapped into CUDA address space and cuMemHostGetDevicePointer() may be called on the host pointer. Flag for cuMemHostAlloc()

driver.CU_MEMHOSTALLOC_WRITECOMBINED = 4#

    If set, host memory is allocated as write-combined - fast to write, faster to DMA, slow to read except via SSE4 streaming load instruction (MOVNTDQA). Flag for cuMemHostAlloc()

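A minimal sketch of combining these flags (our own example, assuming cuInit() and a current context):

    from cuda.bindings import driver

    flags = driver.CU_MEMHOSTALLOC_PORTABLE | driver.CU_MEMHOSTALLOC_DEVICEMAP
    err, host_ptr = driver.cuMemHostAlloc(1 << 20, flags)         # 1 MiB of pinned memory
    err, dev_ptr = driver.cuMemHostGetDevicePointer(host_ptr, 0)  # GPU view of the same buffer
    err, = driver.cuMemFreeHost(host_ptr)
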
driver.CU_MEMHOSTREGISTER_PORTABLE = 1#

    If set, host memory is portable between CUDA contexts. Flag for cuMemHostRegister()

driver.CU_MEMHOSTREGISTER_DEVICEMAP = 2#

    If set, host memory is mapped into CUDA address space and cuMemHostGetDevicePointer() may be called on the host pointer. Flag for cuMemHostRegister()

driver.CU_MEMHOSTREGISTER_IOMEMORY = 4#

    If set, the passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device. On Windows the flag is a no-op. On Linux that memory is marked as non cache-coherent for the GPU and is expected to be physically contiguous. It may return CUDA_ERROR_NOT_PERMITTED if run as an unprivileged user, CUDA_ERROR_NOT_SUPPORTED on older Linux kernel versions. On all other platforms, it is not supported and CUDA_ERROR_NOT_SUPPORTED is returned. Flag for cuMemHostRegister()

driver.CU_MEMHOSTREGISTER_READ_ONLY = 8#

    If set, the passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. Using this flag with a current context associated with a device that does not have this attribute set will cause cuMemHostRegister to error with CUDA_ERROR_NOT_SUPPORTED.

driver.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL = 1#

    Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers

driver.CU_TENSOR_MAP_NUM_QWORDS = 16#

    Size of tensor map descriptor

driver.CUDA_EXTERNAL_MEMORY_DEDICATED = 1#

    Indicates that the external memory object is a dedicated resource

driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC = 1#

    When the flags parameter of CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects.

driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC = 2#

    When the flags parameter of CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects.

driver.CUDA_NVSCISYNC_ATTR_SIGNAL = 1#

    When flags of cuDeviceGetNvSciSyncAttributes is set to this, it indicates that the application needs signaler-specific NvSciSyncAttr to be filled by cuDeviceGetNvSciSyncAttributes.

driver.CUDA_NVSCISYNC_ATTR_WAIT = 2#

    When flags of cuDeviceGetNvSciSyncAttributes is set to this, it indicates that the application needs waiter-specific NvSciSyncAttr to be filled by cuDeviceGetNvSciSyncAttributes.

driver.CU_MEM_CREATE_USAGE_TILE_POOL = 1#

    If set, indicates that the memory will be used as a tile pool.

driver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = 1#

    If set, each kernel launched as part of cuLaunchCooperativeKernelMultiDevice only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution.

driver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = 2#

    If set, any subsequent work pushed in a stream that participated in a call to cuLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution.

driver.CUDA_ARRAY3D_LAYERED = 1#

    If set, the CUDA array is a collection of layers, where each layer is either a 1D or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number of layers, not the depth of a 3D array.

driver.CUDA_ARRAY3D_2DARRAY = 1#

    Deprecated, use CUDA_ARRAY3D_LAYERED

driver.CUDA_ARRAY3D_SURFACE_LDST = 2#

    This flag must be set in order to bind a surface reference to the CUDA array

driver.CUDA_ARRAY3D_CUBEMAP = 4#

    If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The width of such a CUDA array must be equal to its height, and Depth must be six. If the CUDA_ARRAY3D_LAYERED flag is also set, then the CUDA array is a collection of cubemaps and Depth must be a multiple of six.

driver.CUDA_ARRAY3D_TEXTURE_GATHER = 8#

    This flag must be set in order to perform texture gather operations on a CUDA array.

driver.CUDA_ARRAY3D_DEPTH_TEXTURE = 16#

    If set, indicates that the CUDA array is a DEPTH_TEXTURE.

driver.CUDA_ARRAY3D_COLOR_ATTACHMENT = 32#

    This flag indicates that the CUDA array may be bound as a color target in an external graphics API

driver.CUDA_ARRAY3D_SPARSE = 64#

    If set, indicates that the CUDA array or CUDA mipmapped array is a sparse CUDA array or CUDA mipmapped array respectively

driver.CUDA_ARRAY3D_DEFERRED_MAPPING = 128#

    If set, indicates that the CUDA array or CUDA mipmapped array will allow deferred memory mapping

driver.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE = 256#

    This flag indicates that the CUDA array will be used for hardware accelerated video encode/decode operations.

driver.CU_TRSA_OVERRIDE_FORMAT = 1#

    Override the texref format with a format inferred from the array. Flag for cuTexRefSetArray()

driver.CU_TRSF_READ_AS_INTEGER = 1#

    Read the texture as integers rather than promoting the values to floats in the range [0,1]. Flag for cuTexRefSetFlags() and cuTexObjectCreate()

driver.CU_TRSF_NORMALIZED_COORDINATES = 2#

    Use normalized texture coordinates in the range [0,1) instead of [0,dim). Flag for cuTexRefSetFlags() and cuTexObjectCreate()

driver.CU_TRSF_SRGB = 16#

    Perform sRGB->linear conversion during texture read. Flag for cuTexRefSetFlags() and cuTexObjectCreate()

driver.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION = 32#

    Disable any trilinear filtering optimizations. Flag for cuTexRefSetFlags() and cuTexObjectCreate()

driver.CU_TRSF_SEAMLESS_CUBEMAP = 64#

    Enable seamless cube map filtering. Flag for cuTexObjectCreate()

driver.CU_LAUNCH_PARAM_END_AS_INT = 0#

    C++ compile time constant for CU_LAUNCH_PARAM_END

driver.CU_LAUNCH_PARAM_END = 0#

    End of array terminator for the extra parameter to cuLaunchKernel

driver.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT = 1#

    C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER

driver.CU_LAUNCH_PARAM_BUFFER_POINTER = 1#

    Indicator that the next value in the extra parameter to cuLaunchKernel will be a pointer to a buffer containing all kernel parameters used for launching kernel f. This buffer needs to honor all alignment/padding requirements of the individual parameters. If CU_LAUNCH_PARAM_BUFFER_SIZE is not also specified in the extra array, then CU_LAUNCH_PARAM_BUFFER_POINTER will have no effect.

driver.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT = 2#

    C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE

driver.CU_LAUNCH_PARAM_BUFFER_SIZE = 2#

    Indicator that the next value in the extra parameter to cuLaunchKernel will be a pointer to a size_t which contains the size of the buffer specified with CU_LAUNCH_PARAM_BUFFER_POINTER. It is required that CU_LAUNCH_PARAM_BUFFER_POINTER also be specified in the extra array if the value associated with CU_LAUNCH_PARAM_BUFFER_SIZE is not zero.

driver.CU_PARAM_TR_DEFAULT = -1#

    For texture references loaded into the module, use default texunit from texture reference.

driver.CU_DEVICE_CPU = -1#

    Device that represents the CPU

driver.CU_DEVICE_INVALID = -2#

    Device that represents an invalid device

driver.MAX_PLANES = 3#

    Maximum number of planes per frame

driver.CUDA_EGL_INFINITE_TIMEOUT = -1#

    Indicates that the timeout for cuEGLStreamConsumerAcquireFrame is infinite.

Error Handling#

This section describes the error handling functions of the low-level CUDA driver application programming interface.

cuda.bindings.driver.cuGetErrorString(error: CUresult)#

    Gets the string description of an error code.

    Sets *pStr to the address of a NULL-terminated string description of the error code error. If the error code is not recognized, CUDA_ERROR_INVALID_VALUE will be returned and *pStr will be set to the NULL address.

    Parameters:
        error (CUresult) – Error code to convert to string

    Returns:

cuda.bindings.driver.cuGetErrorName(error: CUresult)#

    Gets the string representation of an error code enum name.

    Sets *pStr to the address of a NULL-terminated string representation of the name of the enum error code error. If the error code is not recognized, CUDA_ERROR_INVALID_VALUE will be returned and *pStr will be set to the NULL address.

    Parameters:
        error (CUresult) – Error code to convert to string

    Returns:

    See also

    CUresult, cudaGetErrorName

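These two calls are commonly wrapped in a small error-checking helper. A minimal sketch (the helper name check is our own, not part of the bindings):

    from cuda.bindings import driver

    def check(err):
        # Raise on any non-success CUresult. Note that for unrecognized
        # codes the lookups themselves report CUDA_ERROR_INVALID_VALUE.
        if err != driver.CUresult.CUDA_SUCCESS:
            _, name = driver.cuGetErrorName(err)
            _, desc = driver.cuGetErrorString(err)
            raise RuntimeError(f"{name.decode()}: {desc.decode()}")
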
Initialization#

This section describes the initialization functions of the low-level CUDA driver application programming interface.

cuda.bindings.driver.cuInit(unsigned int Flags)#

    Initialize the CUDA driver API. Initializes the driver API and must be called before any other function from the driver API in the current process. Currently, the Flags parameter must be 0. If cuInit() has not been called, any function from the driver API will return CUDA_ERROR_NOT_INITIALIZED.

    Parameters:
        Flags (unsigned int) – Initialization flag for CUDA.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_SYSTEM_DRIVER_MISMATCH, CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE

    Return type:
        CUresult

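A minimal sketch; the call returns a one-element tuple holding only the CUresult:

    from cuda.bindings import driver

    err, = driver.cuInit(0)   # Flags must currently be 0
    assert err == driver.CUresult.CUDA_SUCCESS
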
Version Management#

This section describes the version management functions of the low-level CUDA driver application programming interface.

cuda.bindings.driver.cuDriverGetVersion()#

    Returns the latest CUDA version supported by the driver.

    Returns in *driverVersion the version of CUDA supported by the driver. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020.

    This function automatically returns CUDA_ERROR_INVALID_VALUE if driverVersion is NULL.

    Returns:

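A minimal sketch of decoding the packed version number:

    from cuda.bindings import driver

    err, version = driver.cuDriverGetVersion()
    major, minor = version // 1000, (version % 1000) // 10   # e.g. 12060 -> (12, 6)
    print(f"driver supports CUDA {major}.{minor}")
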
Device Management#

This section describes the device management functions of the low-level CUDA driver application programming interface.

cuda.bindings.driver.cuDeviceGet(int ordinal)#

    Returns a handle to a compute device.

    Returns in *device a device handle given an ordinal in the range [0, cuDeviceGetCount()-1].

    Parameters:
        ordinal (int) – Device number to get handle for

    Returns:

cuda.bindings.driver.cuDeviceGetCount()#

    Returns the number of compute-capable devices.

    Returns in *count the number of devices with compute capability greater than or equal to 2.0 that are available for execution. If there is no such device, cuDeviceGetCount() returns 0.

    Returns:

cuda.bindings.driver.cuDeviceGetName(int length, dev)#

    Returns an identifier string for the device.

    Returns an ASCII string identifying the device dev in the NULL-terminated string pointed to by name. length specifies the maximum length of the string that may be returned.

    Parameters:
        • length (int) – Maximum length of string to store in name
        • dev (CUdevice) – Device to get identifier string for

    Returns:

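Taken together, these three calls enumerate the devices visible to a process. A minimal sketch:

    from cuda.bindings import driver

    err, = driver.cuInit(0)
    err, count = driver.cuDeviceGetCount()
    for ordinal in range(count):
        err, dev = driver.cuDeviceGet(ordinal)
        err, name = driver.cuDeviceGetName(256, dev)   # returned as NULL-padded bytes
        print(f"[{ordinal}] {name.decode().rstrip(chr(0))}")
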
cuda.bindings.driver.cuDeviceGetUuid(dev)#

    Return a UUID for the device.

    Note there is a later version of this API, cuDeviceGetUuid_v2, which supplants this version in CUDA 12.0; this version is retained for minor version compatibility.

    Returns 16 octets identifying the device dev in the structure pointed to by the uuid.

    Parameters:
        dev (CUdevice) – Device to get identifier string for

    Returns:

cuda.bindings.driver.cuDeviceGetUuid_v2(dev)#

    Return a UUID for the device (11.4+)

    Returns 16 octets identifying the device dev in the structure pointed to by the uuid. If the device is in MIG mode, returns its MIG UUID, which uniquely identifies the subscribed MIG compute instance.

    Parameters:
        dev (CUdevice) – Device to get identifier string for

    Returns:

cuda.bindings.driver.cuDeviceGetLuid(dev)#

    Return an LUID and device node mask for the device.

    Return identifying information (luid and deviceNodeMask) to allow matching the device with graphics APIs.

    Parameters:
        dev (CUdevice) – Device to get identifier string for

    Returns:

cuda.bindings.driver.cuDeviceTotalMem(dev)#

    Returns the total amount of memory on the device.

    Returns in *bytes the total amount of memory available on the device dev in bytes.

    Parameters:
        dev (CUdevice) – Device handle

    Returns:

cuda.bindings.driver.cuDeviceGetTexture1DLinearMaxWidth(pformat: CUarray_format, unsigned int numChannels, dev)#

    Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size.

    Returns in maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture for the given pformat and numChannels.

    Parameters:
        • pformat (CUarray_format) – Texture format.
        • numChannels (unsigned int) – Number of channels per texture element.
        • dev (CUdevice) – Device handle.

    Returns:

cuda.bindings.driver.cuDeviceGetAttribute(attrib: CUdevice_attribute, dev)#

    Returns information about the device.

    Returns in *pi the integer value of the attribute attrib on device dev. The supported attributes are:

    Parameters:

    Returns:

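A minimal sketch querying two integer-valued attributes (dev is a CUdevice from cuDeviceGet):

    from cuda.bindings import driver

    attr = driver.CUdevice_attribute
    err, sm_count = driver.cuDeviceGetAttribute(
        attr.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev)
    err, cc_major = driver.cuDeviceGetAttribute(
        attr.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, dev)
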
cuda.bindings.driver.cuDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, dev, int flags)#

    Return NvSciSync attributes that this device can support.

    Returns in nvSciSyncAttrList the properties of NvSciSync that this CUDA device, dev, can support. The returned nvSciSyncAttrList can be used to create an NvSciSync object that matches this device’s capabilities.

    If the NvSciSyncAttrKey_RequiredPerm field in nvSciSyncAttrList is already set, this API will return CUDA_ERROR_INVALID_VALUE.

    Applications should set nvSciSyncAttrList to a valid NvSciSyncAttrList, failing which this API will return CUDA_ERROR_INVALID_HANDLE.

    The flags controls how the application intends to use the NvSciSync created from the nvSciSyncAttrList. The valid flags are:

    At least one of these flags must be set, failing which the API returns CUDA_ERROR_INVALID_VALUE. The two flags are orthogonal to one another: a developer may set both flags to allow setting both wait- and signal-specific attributes in the same nvSciSyncAttrList.

    Note that this API updates the input nvSciSyncAttrList with values equivalent to the following public attribute key-values. NvSciSyncAttrKey_RequiredPerm is set to

    • NvSciSyncAccessPerm_SignalOnly if CUDA_NVSCISYNC_ATTR_SIGNAL is set in flags.
    • NvSciSyncAccessPerm_WaitOnly if CUDA_NVSCISYNC_ATTR_WAIT is set in flags.
    • NvSciSyncAccessPerm_WaitSignal if both CUDA_NVSCISYNC_ATTR_WAIT and CUDA_NVSCISYNC_ATTR_SIGNAL are set in flags.

    NvSciSyncAttrKey_PrimitiveInfo is set to

    • NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid device.
    • NvSciSyncAttrValPrimitiveType_Syncpoint if device is a Tegra device.
    • NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if device is GA10X+.

    NvSciSyncAttrKey_GpuId is set to the same UUID that is returned for this device from cuDeviceGetUuid.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_OUT_OF_MEMORY

    Parameters:
        • nvSciSyncAttrList (Any) – Return NvSciSync attributes supported.
        • dev (CUdevice) – Valid CUDA device to get NvSciSync attributes for.
        • flags (int) – Flags describing NvSciSync usage.

    Return type:
        CUresult

cuda.bindings.driver.cuDeviceSetMemPool(dev, pool)#

    Sets the current memory pool of a device.

    The memory pool must be local to the specified device. cuMemAllocAsync allocates from the current mempool of the provided stream’s device. By default, a device’s current memory pool is its default memory pool.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    Notes

    Use cuMemAllocFromPoolAsync to specify asynchronous allocations from a device different than the one the stream runs on.

cuda.bindings.driver.cuDeviceGetMemPool(dev)#

    Gets the current mempool for a device.

    Returns the last pool provided to cuDeviceSetMemPool for this device or the device’s default memory pool if cuDeviceSetMemPool has never been called. By default the current mempool is the default mempool for a device; otherwise the returned pool must have been set with cuDeviceSetMemPool.

    Parameters:
        dev (CUdevice)

    Returns:

cuda.bindings.driver.cuDeviceGetDefaultMemPool(dev)#

    Returns the default mempool of a device.

    The default mempool of a device contains device memory from that device.

    Parameters:
        dev (CUdevice)

    Returns:

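A minimal sketch tying the three mempool calls together (dev is a CUdevice; cuInit() has been called):

    from cuda.bindings import driver

    err, default_pool = driver.cuDeviceGetDefaultMemPool(dev)
    err, = driver.cuDeviceSetMemPool(dev, default_pool)   # (re)install the default pool
    err, current_pool = driver.cuDeviceGetMemPool(dev)    # reports the pool set above
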
cuda.bindings.driver.cuDeviceGetExecAffinitySupport(typename: CUexecAffinityType, dev)#

    Returns information about the execution affinity support of the device.

    Returns in *pi whether execution affinity type typename is supported by device dev. The supported types are:

    Parameters:

    Returns:

cuda.bindings.driver.cuFlushGPUDirectRDMAWrites(target: CUflushGPUDirectRDMAWritesTarget, scope: CUflushGPUDirectRDMAWritesScope)#

    Blocks until remote writes are visible to the specified scope.

    Blocks until GPUDirect RDMA writes to the target context via mappings created through APIs like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) are visible to the specified scope.

    If the scope equals or lies within the scope indicated by CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, the call will be a no-op and can be safely omitted for performance. This can be determined by comparing the numerical values between the two enums, with smaller scopes having smaller values.

    Users may query support for this API via CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

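A minimal sketch (assuming a current context and that the device reports support via the attribute above); the enum member names follow the C API:

    from cuda.bindings import driver

    target = driver.CUflushGPUDirectRDMAWritesTarget.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX
    scope = driver.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER
    err, = driver.cuFlushGPUDirectRDMAWrites(target, scope)
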
Primary Context Management#

This section describes the primary context management functions of the low-level CUDA driver application programming interface.

The primary context is unique per device and shared with the CUDA runtime API. These functions allow integration with other libraries using CUDA.

cuda.bindings.driver.cuDevicePrimaryCtxRetain(dev)#

    Retain the primary context on the GPU.

    Retains the primary context on the device. Once the user successfully retains the primary context, the primary context will be active and available to the user until the user releases it with cuDevicePrimaryCtxRelease() or resets it with cuDevicePrimaryCtxReset(). Unlike cuCtxCreate(), the newly retained context is not pushed onto the stack.

    Retaining the primary context for the first time will fail with CUDA_ERROR_UNKNOWN if the compute mode of the device is CU_COMPUTEMODE_PROHIBITED. The function cuDeviceGetAttribute() can be used with CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode of the device. The nvidia-smi tool can be used to set the compute mode for devices. Documentation for nvidia-smi can be obtained by passing a -h option to it.

    Please note that the primary context always supports pinned allocations. Other flags can be specified by cuDevicePrimaryCtxSetFlags().

    Parameters:
        dev (CUdevice) – Device for which primary context is requested

    Returns:

cuda.bindings.driver.cuDevicePrimaryCtxRelease(dev)#

    Release the primary context on the GPU.

    Releases the primary context on the device. A retained context should always be released once the user is done using it. The context is automatically reset once the last reference to it is released. This behavior is different when the primary context was retained by the CUDA runtime from CUDA 4.0 and earlier; in this case, the primary context remains always active.

    Releasing a primary context that has not been previously retained will fail with CUDA_ERROR_INVALID_CONTEXT.

    Please note that unlike cuCtxDestroy() this method does not pop the context from the stack in any circumstances.

    Parameters:
        dev (CUdevice) – Device whose primary context is released

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_INVALID_CONTEXT

    Return type:
        CUresult

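A minimal retain/release sketch, the usual pattern for libraries that must share the device with the runtime API:

    from cuda.bindings import driver

    err, = driver.cuInit(0)
    err, dev = driver.cuDeviceGet(0)
    err, ctx = driver.cuDevicePrimaryCtxRetain(dev)   # not pushed onto the stack
    err, = driver.cuCtxSetCurrent(ctx)                # make it current explicitly
    # ... issue work against the primary context ...
    err, = driver.cuDevicePrimaryCtxRelease(dev)      # one release per retain
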
cuda.bindings.driver.cuDevicePrimaryCtxSetFlags(dev, unsigned int flags)#

    Set flags for the primary context.

    Sets the flags for the primary context on the device, overwriting previously set ones.

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.
    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.
    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.
    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.
    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.
    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled.
    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial settings will be taken from the global settings at the time of context creation. The other settings that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.
    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial settings will be taken from the global settings at the time of context creation. The other settings that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.
    • CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled “API Synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    Parameters:
        • dev (CUdevice) – Device for which the primary context flags are set
        • flags (unsigned int) – New flags for the device

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

cuda.bindings.driver.cuDevicePrimaryCtxGetState(dev)#

    Get the state of the primary context.

    Returns in *flags the flags for the primary context of dev, and in *active whether it is active. See cuDevicePrimaryCtxSetFlags for flag values.

    Parameters:
        dev (CUdevice) – Device to get primary context flags for

    Returns:

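A minimal sketch; the call passes back both outputs alongside the CUresult:

    from cuda.bindings import driver

    err, flags, active = driver.cuDevicePrimaryCtxGetState(dev)
    if active:
        print(f"primary context active, flags=0x{flags:x}")
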
cuda.bindings.driver.cuDevicePrimaryCtxReset(dev)#

    Destroy all allocations and reset all state on the primary context.

    Explicitly destroys and cleans up all resources associated with the current device in the current process.

    Note that it is the responsibility of the calling function to ensure that no other module in the process is using the device any more. For that reason it is recommended to use cuDevicePrimaryCtxRelease() in most cases. However, it is safe for other modules to call cuDevicePrimaryCtxRelease() even after resetting the device. Resetting the primary context does not release it; an application that has retained the primary context should explicitly release its usage.

    Parameters:
        dev (CUdevice) – Device for which primary context is destroyed

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE

    Return type:
        CUresult

Context Management#

This section describes the context management functions of the low-level CUDA driver application programming interface.

Please note that some functions are described in the Primary Context Management section.

cuda.bindings.driver.cuCtxCreate(unsigned int flags, dev)#

    Create a CUDA context.

    Creates a new CUDA context and associates it with the calling thread. The flags parameter is described below. The context is created with a usage count of 1 and the caller of cuCtxCreate() must call cuCtxDestroy() when done using the context. If a context is already current to the thread, it is supplanted by the newly created context and may be restored by a subsequent call to cuCtxPopCurrent().

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.
    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.
    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.
    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.
    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.
    • CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. This flag must be set in order to allocate pinned host memory that is accessible to the GPU.
    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled. Instead, the per-thread stack size can be controlled with cuCtxSetLimit().
    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.
    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. Setting this flag on any context creation is equivalent to setting the CU_COREDUMP_ENABLE_USER_TRIGGER attribute to true globally.
    • CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled “API Synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    Context creation will fail with CUDA_ERROR_UNKNOWN if the compute mode of the device is CU_COMPUTEMODE_PROHIBITED. The function cuDeviceGetAttribute() can be used with CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode of the device. The nvidia-smi tool can be used to set the compute mode for devices. Documentation for nvidia-smi can be obtained by passing a -h option to it.

    Parameters:
        • flags (unsigned int) – Context creation flags
        • dev (CUdevice) – Device to create context on

    Returns:

    Notes

    In most cases it is recommended to use cuDevicePrimaryCtxRetain.

cuda.bindings.driver.cuCtxCreate_v3(paramsArray: Optional[Tuple[CUexecAffinityParam] | List[CUexecAffinityParam]], int numParams, unsigned int flags, dev)#

    Create a CUDA context with execution affinity.

    Creates a new CUDA context with execution affinity and associates it with the calling thread. The paramsArray and flags parameters are described below. The context is created with a usage count of 1 and the caller of cuCtxCreate() must call cuCtxDestroy() when done using the context. If a context is already current to the thread, it is supplanted by the newly created context and may be restored by a subsequent call to cuCtxPopCurrent().

    The type and the amount of execution resource the context can use is limited by paramsArray and numParams. paramsArray is an array of CUexecAffinityParam and numParams describes the size of the array. If two CUexecAffinityParam in the array have the same type, the latter execution affinity parameter overrides the former execution affinity parameter. The supported execution affinity types are:

    • CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion of SMs is specified as the number of SMs via CUexecAffinitySmCount. This limit will be internally rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution affinity of the context via cuCtxGetExecAffinity after context creation. Currently, this attribute is only supported under Volta+ MPS.

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.
    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.
    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.
    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.
    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.
    • CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. This flag must be set in order to allocate pinned host memory that is accessible to the GPU.
    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled. Instead, the per-thread stack size can be controlled with cuCtxSetLimit().
    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current.
    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. Setting this flag on any context creation is equivalent to setting the CU_COREDUMP_ENABLE_USER_TRIGGER attribute to true globally.

    Context creation will fail with CUDA_ERROR_UNKNOWN if the compute mode of the device is CU_COMPUTEMODE_PROHIBITED. The function cuDeviceGetAttribute() can be used with CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode of the device. The nvidia-smi tool can be used to set the compute mode for devices. Documentation for nvidia-smi can be obtained by passing a -h option to it.

    Parameters:
        • paramsArray (List[CUexecAffinityParam]) – Execution affinity parameters
        • numParams (int) – Number of execution affinity parameters
        • flags (unsigned int) – Context creation flags
        • dev (CUdevice) – Device to create context on

    Returns:

cuda.bindings.driver.cuCtxCreate_v4(CUctxCreateParams ctxCreateParams: Optional[CUctxCreateParams], unsigned int flags, dev)#

    Create a CUDA context.

    Creates a new CUDA context and associates it with the calling thread. The flags parameter is described below. The context is created with a usage count of 1 and the caller of cuCtxCreate() must call cuCtxDestroy() when done using the context. If a context is already current to the thread, it is supplanted by the newly created context and may be restored by a subsequent call to cuCtxPopCurrent().

    A CUDA context can be created with execution affinity. The type and the amount of execution resource the context can use is limited by paramsArray and numExecAffinityParams in execAffinity. paramsArray is an array of CUexecAffinityParam and numExecAffinityParams describes the size of the paramsArray. If two CUexecAffinityParam in the array have the same type, the latter execution affinity parameter overrides the former execution affinity parameter. The supported execution affinity types are:

    • CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion of SMs is specified as the number of SMs via CUexecAffinitySmCount. This limit will be internally rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution affinity of the context via cuCtxGetExecAffinity after context creation. Currently, this attribute is only supported under Volta+ MPS.

    A CUDA context can be created in CIG (CUDA in Graphics) mode by setting cigParams. Hardware support and software support for graphics clients can be determined using cuDeviceGetAttribute() with CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED. Data from the graphics client is shared with CUDA via the sharedData member in cigParams. For D3D12, sharedData is an ID3D12CommandQueue handle.

    Either execAffinityParams or cigParams can be set to a non-null value. Setting both to a non-null value will result in undefined behavior.

    The three LSBs of the flags parameter can be used to control how the OS thread, which owns the CUDA context at the time of an API call, interacts with the OS scheduler when waiting for results from the GPU. Only one of the scheduling flags can be set when creating a context.

    • CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for results from the GPU. This can decrease latency when waiting for the GPU, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.
    • CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for results from the GPU. This can increase latency when waiting for the GPU, but can increase the performance of CPU threads performing work in parallel with the GPU.
    • CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work.
    • CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the GPU to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and was replaced with CU_CTX_SCHED_BLOCKING_SYNC.
    • CU_CTX_SCHED_AUTO: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the GPU (CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while waiting for results and actively spin on the processor (CU_CTX_SCHED_SPIN). Additionally, on Tegra devices, CU_CTX_SCHED_AUTO uses a heuristic based on the power profile of the platform and may choose CU_CTX_SCHED_BLOCKING_SYNC for low-powered devices.
    • CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. This flag must be set in order to allocate pinned host memory that is accessible to the GPU.
    • CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled. Instead, the per-thread stack size can be controlled with cuCtxSetLimit().
    • CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if this context raises an exception during execution. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. This flag is not supported when the CUDA context is created in CIG (CUDA in Graphics) mode.
    • CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not been enabled globally with cuCoredumpSetAttributeGlobal or environment variables, this flag can be set during context creation to instruct CUDA to create a coredump if data is written to a certain pipe that is present in the OS space. These environment variables are described in the CUDA-GDB user guide under the “GPU core dump support” section. It is important to note that the pipe name must be set with cuCoredumpSetAttributeGlobal before creating the context if this flag is used. Setting this flag implies that CU_CTX_COREDUMP_ENABLE is set. The initial attributes will be taken from the global attributes at the time of context creation. The other attributes that control coredump output can be modified by calling cuCoredumpSetAttribute from the created context after it becomes current. Setting this flag on any context creation is equivalent to setting the CU_COREDUMP_ENABLE_USER_TRIGGER attribute to true globally. This flag is not supported when the CUDA context is created in CIG (CUDA in Graphics) mode.
    • CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled “API Synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    Context creation will fail with CUDA_ERROR_UNKNOWN if the compute mode of the device is CU_COMPUTEMODE_PROHIBITED. The function cuDeviceGetAttribute() can be used with CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode of the device. The nvidia-smi tool can be used to set the compute mode for devices. Documentation for nvidia-smi can be obtained by passing a -h option to it.

    Context creation will fail with CUDA_ERROR_INVALID_VALUE if an invalid parameter was passed by the client to create the CUDA context.

    Context creation in CIG mode will fail with CUDA_ERROR_NOT_SUPPORTED if CIG is not supported by the device or the driver.

    Parameters:
        • ctxCreateParams (CUctxCreateParams) – Context creation parameters
        • flags (unsigned int) – Context creation flags
        • dev (CUdevice) – Device to create context on

    Returns:

    cuda.bindings.driver.cuCtxDestroy(ctx)#

    Destroy a CUDA context.

    Destroys the CUDA context specified by ctx. The context ctx will be destroyed regardless of how many threads it is current to. It is the responsibility of the calling function to ensure that no API call that uses ctx is issued while cuCtxDestroy() is executing.

    Destroys and cleans up all resources associated with the context. It is the caller’s responsibility to ensure that the context or its resources are not accessed or passed in subsequent API calls; doing so will result in undefined behavior. These resources include CUDA types CUmodule, CUfunction, CUstream, CUevent, CUarray, CUmipmappedArray, CUtexObject, CUsurfObject, CUtexref, CUsurfref, CUgraphicsResource, CUlinkState, CUexternalMemory and CUexternalSemaphore. These resources also include memory allocations by cuMemAlloc(), cuMemAllocHost(), cuMemAllocManaged() and cuMemAllocPitch().

    If ctx is current to the calling thread then ctx will also be popped from the current thread’s context stack (as though cuCtxPopCurrent() were called). If ctx is current to other threads, then ctx will remain current to those threads, and attempting to access ctx from those threads will result in the error CUDA_ERROR_CONTEXT_IS_DESTROYED.

    Parameters:

    ctx (CUcontext) – Context to destroy

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    Notes

    cuCtxDestroy() will not destroy memory allocations by cuMemCreate(), cuMemAllocAsync() and cuMemAllocFromPoolAsync(). These memory allocations are not associated with any CUDA context and need to be destroyed explicitly.
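    A minimal end-to-end sketch (not part of the generated reference) of creating and destroying a context with these bindings. It assumes device 0 and uses the simple two-argument cuCtxCreate overload; error handling is reduced to asserts:

        from cuda.bindings import driver

        err, = driver.cuInit(0)                      # must precede any other driver call
        assert err == driver.CUresult.CUDA_SUCCESS
        err, dev = driver.cuDeviceGet(0)             # first visible device
        assert err == driver.CUresult.CUDA_SUCCESS
        err, ctx = driver.cuCtxCreate(0, dev)        # default scheduling flags
        assert err == driver.CUresult.CUDA_SUCCESS
        # ... allocate memory, load modules, launch work ...
        err, = driver.cuCtxDestroy(ctx)              # invalidates ctx for all threads
        assert err == driver.CUresult.CUDA_SUCCESS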
    cuda.bindings.driver.cuCtxPushCurrent(ctx)#

    Pushes a context on the current CPU thread.

    Pushes the given context ctx onto the CPU thread’s stack of current contexts. The specified context becomes the CPU thread’s current context, so all CUDA functions that operate on the current context are affected.

    The previous current context may be made current again by calling cuCtxDestroy() or cuCtxPopCurrent().

    Parameters:

    ctx (CUcontext) – Context to push

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.bindings.driver.cuCtxPopCurrent()#

    Pops the current CUDA context from the current CPU thread.

    Pops the current CUDA context from the CPU thread and passes back the old context handle in *pctx. That context may then be made current to a different CPU thread by calling cuCtxPushCurrent().

    If a context was current to the CPU thread before cuCtxCreate() or cuCtxPushCurrent() was called, this function makes that context current to the CPU thread again.

    Returns:
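    A short sketch of scoping one context between two points in code with the stack API (ctx as obtained in the earlier sketch):

        err, = driver.cuCtxPushCurrent(ctx)      # ctx becomes current on this thread
        # ... issue CUDA calls against ctx ...
        err, popped = driver.cuCtxPopCurrent()   # popped is the same handle as ctx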
    cuda.bindings.driver.cuCtxSetCurrent(ctx)#

    Binds the specified CUDA context to the calling CPU thread.

    Binds the specified CUDA context to the calling CPU thread. If ctx is NULL then the CUDA context previously bound to the calling CPU thread is unbound and CUDA_SUCCESS is returned.

    If there exists a CUDA context stack on the calling CPU thread, this will replace the top of that stack with ctx. If ctx is NULL then this will be equivalent to popping the top of the calling CPU thread’s CUDA context stack (or a no-op if the calling CPU thread’s CUDA context stack is empty).

    Parameters:

    ctx (CUcontext) – Context to bind to the calling CPU thread

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult

    cuda.bindings.driver.cuCtxGetCurrent()#

    Returns the CUDA context bound to the calling CPU thread.

    Returns in *pctx the CUDA context bound to the calling CPU thread. If no context is bound to the calling CPU thread then *pctx is set to NULL and CUDA_SUCCESS is returned.

    Returns:

    cuda.bindings.driver.cuCtxGetDevice()#

    Returns the device ID for the current context.

    Returns in *device the ordinal of the current context’s device.

    Returns:
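    For illustration (a hedged sketch, reusing ctx from the earlier example), binding a context and querying it back:

        err, = driver.cuCtxSetCurrent(ctx)
        err, cur = driver.cuCtxGetCurrent()   # cur now refers to the same context
        err, dev = driver.cuCtxGetDevice()    # ordinal of the current context’s device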
    cuda.bindings.driver.cuCtxGetFlags()#

    Returns the flags for the current context.

    Returns in *flags the flags of the current context. See cuCtxCreate for flag values.

    Returns:

    cuda.bindings.driver.cuCtxSetFlags(unsigned int flags)#

    Sets the flags for the current context.

    Sets the flags for the current context, overwriting previously set ones. See cuDevicePrimaryCtxSetFlags for flag values.

    Parameters:

    flags (unsigned int) – Flags to set on the current context

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.bindings.driver.cuCtxGetId(ctx)#

    Returns the unique Id associated with the context supplied.

    Returns in ctxId the unique Id which is associated with a given context. The Id is unique for the life of the program for this instance of CUDA. If context is supplied as NULL and there is one current, the Id of the current context is returned.

    Parameters:

    ctx (CUcontext) – Context for which to obtain the Id

    Returns:
    cuda.bindings.driver.cuCtxSynchronize()#

    Block for the current context’s tasks to complete.

    Blocks until the current context has completed all preceding requested tasks. If the current context is the primary context, green contexts that have been created will also be synchronized. cuCtxSynchronize() returns an error if one of the preceding tasks failed. If the context was created with the CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the GPU context has finished its work.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult
    cuda.bindings.driver.cuCtxSetLimit(limit: CUlimit, size_t value)#

    Set resource limits.

    Setting limit to value is a request by the application to update the current limit maintained by the context. The driver is free to modify the requested value to meet hardware requirements (this could be clamping to minimum or maximum values, rounding up to the nearest element size, etc.). The application can use cuCtxGetLimit() to find out exactly what the limit has been set to.

    Setting each CUlimit has its own specific restrictions, so each is discussed here.

    • CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread. The driver automatically increases the per-thread stack size for each kernel launch as needed. This size isn’t reset back to the original value after each launch. Setting this value will take effect immediately, and if necessary, the device will block until all preceding requested tasks are complete.

    • CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used by the printf() device system call. Setting CU_LIMIT_PRINTF_FIFO_SIZE must be performed before launching any kernel that uses the printf() device system call, otherwise CUDA_ERROR_INVALID_VALUE will be returned.

    • CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used by the malloc() and free() device system calls. Setting CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel that uses the malloc() or free() device system calls, otherwise CUDA_ERROR_INVALID_VALUE will be returned.

    • CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of a grid at which a thread can safely call cudaDeviceSynchronize(). Setting this limit must be performed before any launch of a kernel that uses the device runtime and calls cudaDeviceSynchronize() above the default sync depth, two levels of grids. Calls to cudaDeviceSynchronize() will fail with error code cudaErrorSyncDepthExceeded if the limitation is violated. This limit can be set smaller than the default or up to the maximum launch depth of 24. When setting this limit, keep in mind that additional levels of sync depth require the driver to reserve large amounts of device memory which can no longer be used for user allocations. If these reservations of device memory fail, cuCtxSetLimit() will return CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability < 9.0. Attempting to set this limit on devices of other compute capability versions will result in the error CUDA_ERROR_UNSUPPORTED_LIMIT being returned.

    • CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of outstanding device runtime launches that can be made from the current context. A grid is outstanding from the point of launch up until the grid is known to have been completed. Device runtime launches which violate this limitation fail and return cudaErrorLaunchPendingCountExceeded when cudaGetLastError() is called after launch. If more pending launches than the default (2048 launches) are needed for a module using the device runtime, this limit can be increased. Keep in mind that being able to sustain additional pending launches will require the driver to reserve larger amounts of device memory upfront which can no longer be used for allocations. If these reservations fail, cuCtxSetLimit() will return CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability 3.5 and higher. Attempting to set this limit on devices of compute capability less than 3.5 will result in the error CUDA_ERROR_UNSUPPORTED_LIMIT being returned.

    • CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity. Values can range from 0B to 128B. This is purely a performance hint and it can be ignored or clamped depending on the platform.

    • CU_LIMIT_PERSISTING_L2_CACHE_SIZE controls the size in bytes available for persisting L2 cache. This is purely a performance hint and it can be ignored or clamped depending on the platform.

    Parameters:

    • limit (CUlimit) – Limit to set

    • value (size_t) – Size of limit

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_UNSUPPORTED_LIMIT, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult

    cuda.bindings.driver.cuCtxGetLimit(limit: CUlimit)#

    Returns resource limits.

    Returns in *pvalue the current size of limit. The supported CUlimit values are those accepted by cuCtxSetLimit() above.

    Parameters:

    limit (CUlimit) – Limit to query

    Returns:
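    A small sketch (hypothetical size, assuming a current context) of requesting a limit and reading back the value the driver actually chose:

        lim = driver.CUlimit.CU_LIMIT_STACK_SIZE
        err, = driver.cuCtxSetLimit(lim, 4096)   # request 4 KiB per-thread stacks
        err, value = driver.cuCtxGetLimit(lim)   # driver may have rounded or clamped
        print("stack size limit:", value)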
    cuda.bindings.driver.cuCtxGetCacheConfig()#

    Returns the preferred cache configuration for the current context.

    On devices where the L1 cache and shared memory use the same hardware resources, this function returns through pconfig the preferred cache configuration for the current context. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute functions.

    This will return a pconfig of CU_FUNC_CACHE_PREFER_NONE on devices where the size of the L1 cache and shared memory are fixed.

    The supported cache configurations are CU_FUNC_CACHE_PREFER_NONE, CU_FUNC_CACHE_PREFER_SHARED, CU_FUNC_CACHE_PREFER_L1 and CU_FUNC_CACHE_PREFER_EQUAL.

    Returns:

    cuda.bindings.driver.cuCtxSetCacheConfig(config: CUfunc_cache)#

    Sets the preferred cache configuration for the current context.

    On devices where the L1 cache and shared memory use the same hardware resources, this sets through config the preferred cache configuration for the current context. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute the function. Any function preference set via cuFuncSetCacheConfig() or cuKernelSetCacheConfig() will be preferred over this context-wide setting. Setting the context-wide cache configuration to CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer to not change the cache configuration unless required to launch the kernel.

    This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

    Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

    The supported cache configurations are CU_FUNC_CACHE_PREFER_NONE (no preference, the default), CU_FUNC_CACHE_PREFER_SHARED (prefer larger shared memory and smaller L1 cache), CU_FUNC_CACHE_PREFER_L1 (prefer larger L1 cache and smaller shared memory) and CU_FUNC_CACHE_PREFER_EQUAL (prefer equal sized L1 cache and shared memory).

    Parameters:

    config (CUfunc_cache) – Requested cache configuration

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult
    cuda.bindings.driver.cuCtxGetApiVersion(ctx)#

    Gets the context’s API version.

    Returns a version number in version corresponding to the capabilities of the context (e.g. 3010 or 3020), which library developers can use to direct callers to a specific API version. If ctx is NULL, returns the API version used to create the currently bound context.

    Note that new API versions are only introduced when context capabilities are changed that break binary compatibility, so the API version and driver version may be different. For example, it is valid for the API version to be 3020 while the driver version is 4020.

    Parameters:

    ctx (CUcontext) – Context to check

    Returns:
    cuda.bindings.driver.cuCtxGetStreamPriorityRange()#

    Returns numerical values that correspond to the least and greatest stream priorities.

    Returns in *leastPriority and *greatestPriority the numerical values that correspond to the least and greatest stream priorities respectively. Stream priorities follow a convention where lower numbers imply greater priorities. The range of meaningful stream priorities is given by [*greatestPriority, *leastPriority]. If the user attempts to create a stream with a priority value that is outside the meaningful range as specified by this API, the priority is automatically clamped down or up to either *leastPriority or *greatestPriority respectively. See cuStreamCreateWithPriority for details on creating a priority stream. A NULL may be passed in for *leastPriority or *greatestPriority if the value is not desired.

    This function will return ‘0’ in both *leastPriority and *greatestPriority if the current context’s device does not support stream priorities (see cuDeviceGetAttribute).

    Returns:

    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    • leastPriority (int) – Pointer to an int in which the numerical value for least stream priority is returned

    • greatestPriority (int) – Pointer to an int in which the numerical value for greatest stream priority is returned
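    A sketch (assuming a current context) that clamps a requested priority into the meaningful range before creating a stream:

        err, least, greatest = driver.cuCtxGetStreamPriorityRange()
        prio = max(greatest, min(least, -5))   # lower numbers mean higher priority
        err, stream = driver.cuStreamCreateWithPriority(
            driver.CUstream_flags.CU_STREAM_NON_BLOCKING, prio)
        # ... enqueue high-priority work on stream ...
        err, = driver.cuStreamDestroy(stream)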
    cuda.bindings.driver.cuCtxResetPersistingL2Cache()#

    Resets all persisting lines in cache to normal status.

    cuCtxResetPersistingL2Cache resets all persisting lines in cache to normal status. Takes effect on function return.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_NOT_SUPPORTED

    Return type:

    CUresult

    See also

    CUaccessPolicyWindow

    cuda.bindings.driver.cuCtxGetExecAffinity(typename: CUexecAffinityType)#

    Returns the execution affinity setting for the current context.

    Returns in *pExecAffinity the current value of typename. The supported CUexecAffinityType values include CU_EXEC_AFFINITY_TYPE_SM_COUNT, which limits the portion of SMs that the context can use.

    Parameters:

    typename (CUexecAffinityType) – Execution affinity type to query

    Returns:

    See also

    CUexecAffinityParam
    cuda.bindings.driver.cuCtxRecordEvent(hCtx, hEvent)#

    Records an event.

    Captures in hEvent all the activities of the context hCtx at the time of this call. hEvent and hCtx must be from the same CUDA context, otherwise CUDA_ERROR_INVALID_HANDLE will be returned. Calls such as cuEventQuery() or cuCtxWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hCtx after this call do not modify hEvent. If the context passed to hCtx is the primary context, hEvent will capture all the activities of the primary context and its green contexts. If the context passed to hCtx is a context converted from green context via cuCtxFromGreenCtx(), hEvent will capture only the activities of the green context.

    Parameters:

    • hCtx (CUcontext) – Context to record event for

    • hEvent (CUevent) – Event to record

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED if the specified context hCtx has a stream in the capture mode. In such a case, the call will invalidate all the conflicting captures.

    cuda.bindings.driver.cuCtxWaitEvent(hCtx, hEvent)#

    Make a context wait on an event.

    Makes all future work submitted to context hCtx wait for all work captured in hEvent. The synchronization will be performed on the device and will not block the calling CPU thread. See cuCtxRecordEvent() for details on what is captured by an event. If the context passed to hCtx is the primary context, the primary context and its green contexts will wait for hEvent. If the context passed to hCtx is a context converted from green context via cuCtxFromGreenCtx(), the green context will wait for hEvent.

    Parameters:

    • hCtx (CUcontext) – Context to wait on the event

    • hEvent (CUevent) – Event to wait on

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    hEvent may be from a different context or device than hCtx.

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED and invalidate the capture if the specified event hEvent is part of an ongoing capture sequence or if the specified context hCtx has a stream in the capture mode.
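    A hedged sketch of ordering two contexts’ work with these calls (ctxA and ctxB assumed created as in the earlier example):

        err, event = driver.cuEventCreate(0)          # default event flags
        err, = driver.cuCtxRecordEvent(ctxA, event)   # capture ctxA’s pending work
        err, = driver.cuCtxWaitEvent(ctxB, event)     # future ctxB work waits on it
        err, = driver.cuEventDestroy(event)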
    Module Management#

    This section describes the module management functions of the low-level CUDA driver application programming interface.

    class cuda.bindings.driver.CUmoduleLoadingMode(value)#

    CUDA Lazy Loading status

    CU_MODULE_EAGER_LOADING = 1#

    Lazy Kernel Loading is not enabled

    CU_MODULE_LAZY_LOADING = 2#

    Lazy Kernel Loading is enabled
    cuda.bindings.driver.cuModuleLoad(char *fname)#

    Loads a compute module.

    Takes a filename fname and loads the corresponding module module into the current context. The CUDA driver API does not attempt to lazily allocate the resources needed by a module; if the memory for functions and data (constant and global) needed by the module cannot be allocated, cuModuleLoad() fails. The file should be a cubin file as output by nvcc, or a PTX file either as output by nvcc or handwritten, or a fatbin file as output by nvcc from toolchain 4.0 or later.

    Parameters:

    fname (bytes) – Filename of module to load

    Returns:

    cuda.bindings.driver.cuModuleLoadData(image)#

    Load a module’s data.

    Takes a pointer image and loads the corresponding module module into the current context. The image may be a cubin or fatbin as output by nvcc, or a NULL-terminated PTX, either as output by nvcc or hand-written.

    Parameters:

    image (Any) – Module data to load

    Returns:

    cuda.bindings.driver.cuModuleLoadDataEx(image, unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])#

    Load a module’s data with options.

    Takes a pointer image and loads the corresponding module module into the current context. The image may be a cubin or fatbin as output by nvcc, or a NULL-terminated PTX, either as output by nvcc or hand-written.

    Parameters:

    • image (Any) – Module data to load

    • numOptions (unsigned int) – Number of options

    • options (List[CUjit_option]) – Options for JIT

    • optionValues (List[Any]) – Option values for JIT

    Returns:

    cuda.bindings.driver.cuModuleLoadFatBinary(fatCubin)#

    Load a module’s data.

    Takes a pointer fatCubin and loads the corresponding module module into the current context. The pointer represents a fat binary object, which is a collection of different cubin and/or PTX files, all representing the same device code, but compiled and optimized for different architectures.

    Prior to CUDA 4.0, there was no documented API for constructing and using fat binary objects by programmers. Starting with CUDA 4.0, fat binary objects can be constructed by providing the -fatbin option to nvcc. More information can be found in the nvcc document.

    Parameters:

    fatCubin (Any) – Fat binary to load

    Returns:
    cuda.bindings.driver.cuModuleUnload(hmod)#

    Unloads a module.

    Unloads a module hmod from the current context. Attempting to unload a module which was obtained from the Library Management API such as cuLibraryGetModule will return CUDA_ERROR_NOT_PERMITTED.

    Parameters:

    hmod (CUmodule) – Module to unload

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_PERMITTED

    Return type:

    CUresult

    cuda.bindings.driver.cuModuleGetLoadingMode()#

    Query lazy loading mode.

    Returns the lazy loading mode. Module loading mode is controlled by the CUDA_MODULE_LOADING environment variable.

    Returns:

    See also

    cuModuleLoad
    cuda.bindings.driver.cuModuleGetFunction(hmod, char *name)#

    Returns a function handle.

    Returns in *hfunc the handle of the function of name name located in module hmod. If no function of that name exists, cuModuleGetFunction() returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    • hmod (CUmodule) – Module to retrieve function from

    • name (bytes) – Name of function to retrieve

    Returns:
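    Putting the module APIs together, a sketch of the NVRTC-to-module pipeline (not authoritative: the kernel source, the name “axpy”, and passing the PTX bytes directly to cuModuleLoadData are illustrative assumptions; error checks omitted):

        from cuda.bindings import driver, nvrtc

        src = b"extern \"C\" __global__ void axpy(float a, float *x, float *y) { }"
        err, prog = nvrtc.nvrtcCreateProgram(src, b"axpy.cu", 0, [], [])
        err, = nvrtc.nvrtcCompileProgram(prog, 0, [])   # no extra compile options
        err, size = nvrtc.nvrtcGetPTXSize(prog)
        ptx = b" " * size
        err, = nvrtc.nvrtcGetPTX(prog, ptx)             # fill buffer with PTX text

        err, module = driver.cuModuleLoadData(ptx)      # load into the current context
        err, kernel = driver.cuModuleGetFunction(module, b"axpy")
        # ... launch via cuLaunchKernel ...
        err, = driver.cuModuleUnload(module)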
    cuda.bindings.driver.cuModuleGetFunctionCount(mod)#

    Returns the number of functions within a module.

    Returns in count the number of functions in mod.

    Parameters:

    mod (CUmodule) – Module to query

    Returns:

    cuda.bindings.driver.cuModuleEnumerateFunctions(unsigned int numFunctions, mod)#

    Returns the function handles within a module.

    Returns in functions a maximum of numFunctions function handles within mod. When function loading mode is set to LAZY the function retrieved may be partially loaded. The loading state of a function can be queried using cuFuncIsLoaded. CUDA APIs may load the function automatically when called with a partially loaded function handle, which may incur additional latency. Alternatively, cuFuncLoad can be used to explicitly load a function. The returned function handles become invalid when the module is unloaded.

    Parameters:

    • numFunctions (unsigned int) – Maximum number of function handles that may be returned to the buffer

    • mod (CUmodule) – Module to query from

    Returns:
    cuda.bindings.driver.cuModuleGetGlobal(hmod, char *name)#

    Returns a global pointer from a module.

    Returns in *dptr and *bytes the base pointer and size of the global of name name located in module hmod. If no variable of that name exists, cuModuleGetGlobal() returns CUDA_ERROR_NOT_FOUND. One of the parameters dptr or numbytes (not both) can be NULL in which case it is ignored.

    Parameters:

    • hmod (CUmodule) – Module to retrieve global from

    • name (bytes) – Name of global to retrieve

    Returns:

    See also

    cuModuleGetFunction, cuModuleGetTexRef, cuModuleLoad, cuModuleLoadData, cuModuleLoadDataEx, cuModuleLoadFatBinary, cuModuleUnload, cudaGetSymbolAddress, cudaGetSymbolSize
    cuda.bindings.driver.cuLinkCreate(unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])#

    Creates a pending JIT linker invocation.

    If the call is successful, the caller owns the returned CUlinkState, which should eventually be destroyed with cuLinkDestroy. The device code machine size (32 or 64 bit) will match the calling application.

    Both linker and compiler options may be specified. Compiler options will be applied to inputs to this linker action which must be compiled from PTX. The options CU_JIT_WALL_TIME, CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, and CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES will accumulate data until the CUlinkState is destroyed.

    The data passed in via cuLinkAddData and cuLinkAddFile will be treated as relocatable (-rdc=true to nvcc) when linking the final cubin during cuLinkComplete and will have similar consequences as offline relocatable device code linking.

    optionValues must remain valid for the life of the CUlinkState if output options are used. No other references to inputs are maintained after this call returns.

    Parameters:

    • numOptions (unsigned int) – Size of options arrays

    • options (List[CUjit_option]) – Array of linker and compiler options

    • optionValues (List[Any]) – Array of option values, each cast to void *

    Returns:

    Notes

    For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted
    cuda.bindings.driver.cuLinkAddData(state, typename: CUjitInputType, data, size_t size, char *name, unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])#

    Add an input to a pending linker invocation.

    Ownership of data is retained by the caller. No reference is retained to any inputs after this call returns.

    This method accepts only compiler options, which are used if the data must be compiled from PTX, and does not accept any of CU_JIT_WALL_TIME, CU_JIT_INFO_LOG_BUFFER, CU_JIT_ERROR_LOG_BUFFER, CU_JIT_TARGET_FROM_CUCONTEXT, or CU_JIT_TARGET.

    Parameters:

    • state (CUlinkState) – A pending linker action.

    • typename (CUjitInputType) – The type of the input data.

    • data (Any) – The input data. PTX must be NULL-terminated.

    • size (size_t) – The length of the input data.

    • name (bytes) – An optional name for this input in log messages.

    • numOptions (unsigned int) – Size of options.

    • options (List[CUjit_option]) – Options to be applied only for this input (overrides options from cuLinkCreate).

    • optionValues (List[Any]) – Array of option values, each cast to void *.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_PTX, CUDA_ERROR_UNSUPPORTED_PTX_VERSION, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_NO_BINARY_FOR_GPU

    Return type:

    CUresult

    Notes

    For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted

    cuda.bindings.driver.cuLinkAddFile(state, typename: CUjitInputType, char *path, unsigned int numOptions, options: Optional[Tuple[CUjit_option] | List[CUjit_option]], optionValues: Optional[Tuple[Any] | List[Any]])#

    Add a file input to a pending linker invocation.

    No reference is retained to any inputs after this call returns.

    This method accepts only compiler options, which are used if the input must be compiled from PTX, and does not accept any of CU_JIT_WALL_TIME, CU_JIT_INFO_LOG_BUFFER, CU_JIT_ERROR_LOG_BUFFER, CU_JIT_TARGET_FROM_CUCONTEXT, or CU_JIT_TARGET.

    This method is equivalent to invoking cuLinkAddData on the contents of the file.

    Parameters:

    • state (CUlinkState) – A pending linker action

    • typename (CUjitInputType) – The type of the input data

    • path (bytes) – Path to the input file

    • numOptions (unsigned int) – Size of options

    • options (List[CUjit_option]) – Options to be applied only for this input (overrides options from cuLinkCreate)

    • optionValues (List[Any]) – Array of option values, each cast to void *

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_FILE_NOT_FOUND, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_PTX, CUDA_ERROR_UNSUPPORTED_PTX_VERSION, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_NO_BINARY_FOR_GPU

    Return type:

    CUresult

    Notes

    For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted
    cuda.bindings.driver.cuLinkComplete(state)#

    Complete a pending linker invocation.

    Completes the pending linker action and returns the cubin image for the linked device code, which can be used with cuModuleLoadData. The cubin is owned by state, so it should be loaded before state is destroyed via cuLinkDestroy. This call does not destroy state.

    Parameters:

    state (CUlinkState) – A pending linker invocation

    Returns:

    cuda.bindings.driver.cuLinkDestroy(state)#

    Destroys state for a JIT linker invocation.

    Parameters:

    state (CUlinkState) – State object for the linker invocation

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE

    Return type:

    CUresult

    See also

    cuLinkCreate
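    The linker calls compose as below (a sketch under stated assumptions: ptx holds NULL-terminated PTX as produced in the module-loading sketch, a context is current, and the extra byte in the size accounts for the terminator; error checks omitted):

        jit = driver.CUjitInputType.CU_JIT_INPUT_PTX
        err, state = driver.cuLinkCreate(0, [], [])
        err, = driver.cuLinkAddData(state, jit, ptx, len(ptx) + 1,
                                    b"axpy.ptx", 0, [], [])
        err, cubin, size = driver.cuLinkComplete(state)   # cubin is owned by state
        err, module = driver.cuModuleLoadData(cubin)      # load before destroying state
        err, = driver.cuLinkDestroy(state)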
    Library Management#

    This section describes the library management functions of the low-level CUDA driver application programming interface.
    cuda.bindings.driver.cuLibraryLoadData(code, jitOptions: Optional[Tuple[CUjit_option] | List[CUjit_option]], jitOptionsValues: Optional[Tuple[Any] | List[Any]], unsigned int numJitOptions, libraryOptions: Optional[Tuple[CUlibraryOption] | List[CUlibraryOption]], libraryOptionValues: Optional[Tuple[Any] | List[Any]], unsigned int numLibraryOptions)#

    Load a library with specified code and options.

    Takes a pointer code and loads the corresponding library library based on the application defined library loading mode:

    • If module loading is set to EAGER, via the environment variables described in “Module loading”, library is loaded eagerly into all contexts at the time of the call and future contexts at the time of creation until the library is unloaded with cuLibraryUnload().

    • If the environment variables are set to LAZY, library is not immediately loaded onto all existent contexts and will only be loaded when a function is needed for that context, such as a kernel launch.

    These environment variables are described in the CUDA programming guide under the “CUDA environment variables” section.

    The code may be a cubin or fatbin as output by nvcc, or a NULL-terminated PTX, either as output by nvcc or hand-written. A fatbin should also contain relocatable code when doing separate compilation.

    Options are passed as an array via jitOptions and any corresponding parameters are passed in jitOptionsValues. The number of total JIT options is supplied via numJitOptions. Any outputs will be returned via jitOptionsValues.

    Library load options are passed as an array via libraryOptions and any corresponding parameters are passed in libraryOptionValues. The number of total library load options is supplied via numLibraryOptions.

    Parameters:

    • code (Any) – Code to load

    • jitOptions (List[CUjit_option]) – Options for JIT

    • jitOptionsValues (List[Any]) – Option values for JIT

    • numJitOptions (unsigned int) – Number of options

    • libraryOptions (List[CUlibraryOption]) – Options for loading

    • libraryOptionValues (List[Any]) – Option values for loading

    • numLibraryOptions (unsigned int) – Number of options for loading

    Returns:

    Notes

    If the library contains managed variables and no device in the system supports managed variables this call is expected to return CUDA_ERROR_NOT_SUPPORTED
    cuda.bindings.driver.cuLibraryLoadFromFile(char *fileName, jitOptions: Optional[Tuple[CUjit_option] | List[CUjit_option]], jitOptionsValues: Optional[Tuple[Any] | List[Any]], unsigned int numJitOptions, libraryOptions: Optional[Tuple[CUlibraryOption] | List[CUlibraryOption]], libraryOptionValues: Optional[Tuple[Any] | List[Any]], unsigned int numLibraryOptions)#

    Load a library with specified file and options.

    Takes a filename fileName and loads the corresponding library library based on the application defined library loading mode:

    • If module loading is set to EAGER, via the environment variables described in “Module loading”, library is loaded eagerly into all contexts at the time of the call and future contexts at the time of creation until the library is unloaded with cuLibraryUnload().

    • If the environment variables are set to LAZY, library is not immediately loaded onto all existent contexts and will only be loaded when a function is needed for that context, such as a kernel launch.

    These environment variables are described in the CUDA programming guide under the “CUDA environment variables” section.

    The file should be a cubin file as output by nvcc, or a PTX file either as output by nvcc or handwritten, or a fatbin file as output by nvcc. A fatbin should also contain relocatable code when doing separate compilation.

    Options are passed as an array via jitOptions and any corresponding parameters are passed in jitOptionsValues. The number of total options is supplied via numJitOptions. Any outputs will be returned via jitOptionsValues.

    Library load options are passed as an array via libraryOptions and any corresponding parameters are passed in libraryOptionValues. The number of total library load options is supplied via numLibraryOptions.

    Parameters:

    • fileName (bytes) – File to load from

    • jitOptions (List[CUjit_option]) – Options for JIT

    • jitOptionsValues (List[Any]) – Option values for JIT

    • numJitOptions (unsigned int) – Number of options

    • libraryOptions (List[CUlibraryOption]) – Options for loading

    • libraryOptionValues (List[Any]) – Option values for loading

    • numLibraryOptions (unsigned int) – Number of options for loading

    Returns:

    Notes

    If the library contains managed variables and no device in the system supports managed variables this call is expected to return CUDA_ERROR_NOT_SUPPORTED
    cuda.bindings.driver.cuLibraryUnload(library)#

    Unloads a library.

    Unloads the library specified with library.

    Parameters:

    library (CUlibrary) – Library to unload

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult

    cuda.bindings.driver.cuLibraryGetKernel(library, char *name)#

    Returns a kernel handle.

    Returns in pKernel the handle of the kernel with name name located in library library. If the kernel handle is not found, the call returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    • library (CUlibrary) – Library to retrieve kernel from

    • name (bytes) – Name of kernel to retrieve

    Returns:
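    A brief sketch of the context-independent loading path (assumptions: ptx as in the module-loading sketch, and the kernel name “axpy” is illustrative; error checks omitted):

        err, library = driver.cuLibraryLoadData(ptx, [], [], 0, [], [], 0)
        err, kernel = driver.cuLibraryGetKernel(library, b"axpy")
        # a CUkernel can be launched directly, or turned into a per-context
        # CUfunction with cuKernelGetFunction
        err, = driver.cuLibraryUnload(library)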
    cuda.bindings.driver.cuLibraryGetKernelCount(lib)#

    Returns the number of kernels within a library.

    Returns in count the number of kernels in lib.

    Parameters:

    lib (CUlibrary) – Library to query

    Returns:

    cuda.bindings.driver.cuLibraryEnumerateKernels(unsigned int numKernels, lib)#

    Retrieve the kernel handles within a library.

    Returns in kernels a maximum of numKernels kernel handles within lib. The returned kernel handles become invalid when the library is unloaded.

    Parameters:

    • numKernels (unsigned int) – Maximum number of kernel handles that may be returned to the buffer

    • lib (CUlibrary) – Library to query from

    Returns:

    cuda.bindings.driver.cuLibraryGetModule(library)#

    Returns a module handle.

    Returns in pMod the module handle associated with the current context located in library library. If the module handle is not found, the call returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    library (CUlibrary) – Library to retrieve module from

    Returns:
    cuda.bindings.driver.cuKernelGetFunction(kernel)#

    Returns a function handle.

    Returns in pFunc the handle of the function for the requested kernel kernel and the current context. If the function handle is not found, the call returns CUDA_ERROR_NOT_FOUND.

    Parameters:

    kernel (CUkernel) – Kernel to retrieve function for the requested context

    Returns:

    cuda.bindings.driver.cuKernelGetLibrary(kernel)#

    Returns a library handle.

    Returns in pLib the handle of the library for the requested kernel kernel.

    Parameters:

    kernel (CUkernel) – Kernel to retrieve library handle for

    Returns:
    cuda.bindings.driver.cuLibraryGetGlobal(library, char *name)#

    Returns a global device pointer.

    Returns in *dptr and *bytes the base pointer and size of the global with name name for the requested library library and the current context. If no global for the requested name name exists, the call returns CUDA_ERROR_NOT_FOUND. One of the parameters dptr or numbytes (not both) can be NULL in which case it is ignored.

    Parameters:

    • library (CUlibrary) – Library to retrieve global from

    • name (bytes) – Name of global to retrieve

    Returns:

    cuda.bindings.driver.cuLibraryGetManaged(library, char *name)#

    Returns a pointer to managed memory.

    Returns in *dptr and *bytes the base pointer and size of the managed memory with name name for the requested library library. If no managed memory with the requested name name exists, the call returns CUDA_ERROR_NOT_FOUND. One of the parameters dptr or numbytes (not both) can be NULL in which case it is ignored. Note that managed memory for library library is shared across devices and is registered when the library is loaded into at least one context.

    Parameters:

    • library (CUlibrary) – Library to retrieve managed memory from

    • name (bytes) – Name of managed memory to retrieve

    Returns:

    cuda.bindings.driver.cuLibraryGetUnifiedFunction(library, char *symbol)#

    Returns a pointer to a unified function.

    Returns in *fptr the function pointer to a unified function denoted by symbol. If no unified function with name symbol exists, the call returns CUDA_ERROR_NOT_FOUND. If there is no device with attribute CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS present in the system, the call may return CUDA_ERROR_NOT_FOUND.

    Parameters:

    • library (CUlibrary) – Library to retrieve function pointer memory from

    • symbol (bytes) – Name of function pointer to retrieve

    Returns:
    cuda.bindings.driver.cuKernelGetAttribute(attrib: CUfunction_attribute, kernel, dev)#

    Returns information about a kernel.

    Returns in *pi the integer value of the attribute attrib for the kernel kernel for the requested device dev. The supported attributes are:

    • CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads per block, beyond which a launch of the kernel would fail. This number depends on both the kernel and the requested device.

    • CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of statically-allocated shared memory per block required by this kernel. This does not include dynamically-allocated shared memory requested by the user at runtime.

    • CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated constant memory required by this kernel.

    • CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory used by each thread of this kernel.

    • CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread of this kernel.

    • CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for which the kernel was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0.

    • CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for which the kernel was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version.

    • CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the kernel has been compiled with the user specified option “-Xptxas --dlcm=ca” set.

    • CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of dynamically-allocated shared memory.

    • CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 cache split ratio in percent of total shared memory.

    • CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the kernel must launch with a valid cluster size specified.

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in blocks.

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in blocks.

    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in blocks.

    • CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform. CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device. A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities. The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable.

    • CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy.

    Parameters:

    • attrib (CUfunction_attribute) – Attribute requested

    • kernel (CUkernel) – Kernel to query attribute of

    • dev (CUdevice) – Device to query attribute for

    Returns:

    Notes

    If another thread is trying to set the same attribute on the same device using cuKernelSetAttribute() simultaneously, the attribute query will give the old or new value depending on the interleavings chosen by the OS scheduler and memory consistency.
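    For example (a sketch assuming kernel from the library example and dev from the first sketch):

        attr = driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK
        err, max_threads = driver.cuKernelGetAttribute(attr, kernel, dev)
        print("max threads per block:", max_threads)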
    cuda.bindings.driver.cuKernelSetAttribute(attrib: CUfunction_attribute, int val, kernel, dev)#

    Sets information about a kernel.

    This call sets the value of a specified attribute attrib on the kernel kernel for the requested device dev to an integer value specified by val. This function returns CUDA_SUCCESS if the new value of the attribute could be successfully set. If the set fails, this call will return an error. Not all attributes can have values set. Attempting to set a value on a read-only attribute will result in an error (CUDA_ERROR_INVALID_VALUE).

    Note that attributes set using cuFuncSetAttribute() will override the attribute set by this API irrespective of whether the call to cuFuncSetAttribute() is made before or after this API call. However, cuKernelGetAttribute() will always return the attribute value set by this API.

    Supported attributes include CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES and CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT.

    Parameters:

    • attrib (CUfunction_attribute) – Attribute requested

    • val (int) – Value to set

    • kernel (CUkernel) – Kernel to set attribute of

    • dev (CUdevice) – Device to set attribute of

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:

    CUresult

    Notes

    The API has stricter locking requirements in comparison to its legacy counterpart cuFuncSetAttribute() due to device-wide semantics. If multiple threads are trying to set the same attribute on the same device simultaneously, the attribute setting will depend on the interleavings chosen by the OS scheduler and memory consistency.
    cuda.bindings.driver.cuKernelSetCacheConfig(kernel, config: CUfunc_cache, dev)#

    Sets the preferred cache configuration for a device kernel.

    On devices where the L1 cache and shared memory use the same hardware resources, this sets through config the preferred cache configuration for the device kernel kernel on the requested device dev. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute kernel. Any context-wide preference set via cuCtxSetCacheConfig() will be overridden by this per-kernel setting.

    Note that attributes set using cuFuncSetCacheConfig() will override the attribute set by this API irrespective of whether the call to cuFuncSetCacheConfig() is made before or after this API call.

    This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

    Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

    The supported cache configurations are CU_FUNC_CACHE_PREFER_NONE, CU_FUNC_CACHE_PREFER_SHARED, CU_FUNC_CACHE_PREFER_L1 and CU_FUNC_CACHE_PREFER_EQUAL.

    Parameters:

    • kernel (CUkernel) – Kernel to configure cache for

    • config (CUfunc_cache) – Requested cache configuration

    • dev (CUdevice) – Device to set attribute of

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_OUT_OF_MEMORY

    Return type:

    CUresult

    Notes

    The API has stricter locking requirements in comparison to its legacy counterpart cuFuncSetCacheConfig() due to device-wide semantics. If multiple threads are trying to set a config on the same device simultaneously, the cache config setting will depend on the interleavings chosen by the OS scheduler and memory consistency.
    cuda.bindings.driver.cuKernelGetName(hfunc)#

    Returns the function name for a CUkernel handle.

    Returns in **name the function name associated with the kernel handle hfunc. The function name is returned as a null-terminated string. The returned name is only valid when the kernel handle is valid. If the library is unloaded or reloaded, one must call the API again to get the updated name. This API may return a mangled name if the function is not declared as having C linkage. If either **name or hfunc is NULL, CUDA_ERROR_INVALID_VALUE is returned.

    Parameters:

    hfunc (CUkernel) – The function handle to retrieve the name for

    Returns:

    cuda.bindings.driver.cuKernelGetParamInfo(kernel, size_t paramIndex)#

    Returns the offset and size of a kernel parameter in the device-side parameter layout.

    Queries the kernel parameter at paramIndex into kernel’s list of parameters, and returns in paramOffset and paramSize the offset and size, respectively, where the parameter will reside in the device-side parameter layout. This information can be used to update kernel node parameters from the device via cudaGraphKernelNodeSetParam() and cudaGraphKernelNodeUpdatesApply(). paramIndex must be less than the number of parameters that kernel takes. paramSize can be set to NULL if only the parameter offset is desired.

    Parameters:

    • kernel (CUkernel) – The kernel to query

    • paramIndex (size_t) – The parameter index to query

    Returns:

    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    • paramOffset (int) – Returns the offset into the device-side parameter layout at which the parameter resides

    • paramSize (int) – Optionally returns the size of the parameter in the device-side parameter layout

    See also

    cuFuncGetParamInfo
    Memory Management#

    This section describes the memory management functions of the low-level CUDA driver application programming interface.

    cuda.bindings.driver.cuMemGetInfo()#

    Gets free and total memory.

    Returns in *total the total amount of memory available to the current context. Returns in *free the amount of memory on the device that is free according to the OS. CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free. In a multi-tenant situation, the free estimate returned is prone to a race condition: an allocation or free performed by a different process, or by a different thread in the same process, between the time free memory is estimated and reported will cause the reported free value to deviate from the actual free memory.

    The integrated GPU on Tegra shares memory with the CPU and other components of the SoC. The free and total values returned by the API exclude the SWAP memory space maintained by the OS on some platforms. The OS may move some of the memory pages into swap area as the GPU or CPU allocate or access memory. See the Tegra app note on how to calculate total and free memory on Tegra.

    Returns:
    cuda.bindings.driver.cuMemAlloc(size_t bytesize)#

    Allocates device memory.

    Allocates bytesize bytes of linear memory on the device and returns in *dptr a pointer to the allocated memory. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. If bytesize is 0, cuMemAlloc() returns CUDA_ERROR_INVALID_VALUE.

    Parameters:

    bytesize (size_t) – Requested allocation size in bytes

    Returns:
    cuda.bindings.driver.cuMemAllocPitch(size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes)#

    Allocates pitched device memory.

    Allocates at least WidthInBytes * Height bytes of linear memory on the device and returns in *dptr a pointer to the allocated memory. The function may pad the allocation to ensure that corresponding pointers in any given row will continue to meet the alignment requirements for coalescing as the address is updated from row to row. ElementSizeBytes specifies the size of the largest reads and writes that will be performed on the memory range. ElementSizeBytes may be 4, 8 or 16 (since coalesced memory transactions are not possible on other data sizes). If ElementSizeBytes is smaller than the actual read/write size of a kernel, the kernel will run correctly, but possibly at reduced speed. The pitch returned in *pPitch by cuMemAllocPitch() is the width in bytes of the allocation. The intended usage of pitch is as a separate parameter of the allocation, used to compute addresses within the 2D array. Given the row and column of an array element of type T, the address is computed as:

    View CUDA Toolkit Documentation for a C++ code example

    The pitch returned by cuMemAllocPitch() is guaranteed to work with cuMemcpy2D() under all circumstances. For allocations of 2D arrays, it is recommended that programmers consider performing pitch allocations using cuMemAllocPitch(). Due to alignment restrictions in the hardware, this is especially true if the application will be performing 2D memory copies between different regions of device memory (whether linear memory or CUDA arrays).

    The byte alignment of the pitch returned by cuMemAllocPitch() is guaranteed to match or exceed the alignment requirement for texture binding with cuTexRefSetAddress2D().

    Parameters:

    • WidthInBytes (size_t) – Requested allocation width in bytes

    • Height (size_t) – Requested allocation height in rows

    • ElementSizeBytes (unsigned int) – Size of largest reads/writes for range

    Returns:
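    The row/column address arithmetic the pitch is meant for, sketched in Python on the integer value of the device pointer (width_bytes, height, row and col are illustrative assumptions):

        err, dptr, pitch = driver.cuMemAllocPitch(width_bytes, height, 16)
        # address of element (row, col) for a 16-byte element type:
        addr = int(dptr) + row * int(pitch) + col * 16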
    cuda.bindings.driver.cuMemFree(dptr)#

    Frees device memory.

    Frees the memory space pointed to by dptr, which must have been returned by a previous call to one of the following memory allocation APIs - cuMemAlloc(), cuMemAllocPitch(), cuMemAllocManaged(), cuMemAllocAsync(), cuMemAllocFromPoolAsync()

    Note - This API will not perform any implicit synchronization when the pointer was allocated with cuMemAllocAsync or cuMemAllocFromPoolAsync. Callers must ensure that all accesses to the pointer have completed before invoking cuMemFree. For best performance and memory reuse, users should use cuMemFreeAsync to free memory allocated via the stream ordered memory allocator. For all other pointers, this API may perform implicit synchronization.

    Parameters:

    dptr (CUdeviceptr) – Pointer to memory to free

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    Return type:

    CUresult
    + +
    +
    +cuda.bindings.driver.cuMemGetAddressRange(dptr)#
    +

    Get information on memory allocations.

    +

    Returns the base address in *pbase and size in *psize of the +allocation by cuMemAlloc() or cuMemAllocPitch() +that contains the input pointer dptr. Both parameters pbase and +psize are optional. If one of them is NULL, it is ignored.

    +
    +
    Parameters:
    +

    dptr (CUdeviceptr) – Device pointer to query

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
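        As a sketch, a pointer anywhere inside an allocation can be mapped back to its base and size. This assumes the CUdeviceptr constructor accepts an integer address and that int() conversion is available, as in the earlier sketches:

            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)
            err, ctx = driver.cuCtxCreate(0, dev); check(err)

            err, dptr = driver.cuMemAlloc(1 << 20); check(err)

            # Query the allocation from a pointer 4 KiB inside it.
            inner = driver.CUdeviceptr(int(dptr) + 4096)
            err, base, size = driver.cuMemGetAddressRange(inner); check(err)
            assert int(base) == int(dptr) and size == (1 << 20)

            check(driver.cuMemFree(dptr)[0])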
    cuda.bindings.driver.cuMemAllocHost(size_t bytesize)

        Allocates page-locked host memory.

        Allocates bytesize bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cuMemcpy(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory obtained with functions such as malloc().

        On systems where CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES is true, cuMemAllocHost may not page-lock the allocated memory.

        Page-locking excessive amounts of memory with cuMemAllocHost() may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

        Note that all host memory allocated using cuMemAllocHost() will automatically be immediately accessible to all contexts on all devices which support unified addressing (as may be queried using CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). The device pointer that may be used to access this host memory from those contexts is always equal to the returned host pointer *pp. See Unified Addressing for additional details.

        Parameters:
            bytesize (size_t) – Requested allocation size in bytes

        Returns:

    cuda.bindings.driver.cuMemFreeHost(p)

        Frees page-locked host memory.

        Frees the memory space pointed to by p, which must have been returned by a previous call to cuMemAllocHost().

        Parameters:
            p (Any) – Pointer to memory to free

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
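        A minimal pinned-staging-buffer sketch, reusing the check()/init prologue from the earlier examples; the returned host pointer is treated as an opaque value and only handed back to the driver:

            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)
            err, ctx = driver.cuCtxCreate(0, dev); check(err)

            # Allocate 64 KiB of page-locked host memory for staging transfers.
            err, p = driver.cuMemAllocHost(1 << 16); check(err)

            # ... fill the buffer and use it with e.g. cuMemcpyHtoD ...

            check(driver.cuMemFreeHost(p)[0])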
    cuda.bindings.driver.cuMemHostAlloc(size_t bytesize, unsigned int Flags)

        Allocates page-locked host memory.

        Allocates bytesize bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cuMemcpyHtoD(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory obtained with functions such as malloc().

        On systems where CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES is true, cuMemHostAlloc may not page-lock the allocated memory.

        Page-locking excessive amounts of memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

        The Flags parameter enables different options to be specified that affect the allocation, as follows.

        • CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.

        • CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cuMemHostGetDevicePointer().

        • CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined (WC). WC memory can be transferred across the PCI Express bus more quickly on some system configurations, but cannot be read efficiently by most CPUs. WC memory is a good option for buffers that will be written by the CPU and read by the GPU via mapped pinned memory or host->device transfers.

        All of these flags are orthogonal to one another: a developer may allocate memory that is portable, mapped and/or write-combined with no restrictions.

        The CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cuMemHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the CU_MEMHOSTALLOC_PORTABLE flag.

        The memory allocated by this function must be freed with cuMemFreeHost().

        Note that all host memory allocated using cuMemHostAlloc() will automatically be immediately accessible to all contexts on all devices which support unified addressing (as may be queried using CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). Unless the flag CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer that may be used to access this host memory from those contexts is always equal to the returned host pointer *pp. If the flag CU_MEMHOSTALLOC_WRITECOMBINED is specified, then the function cuMemHostGetDevicePointer() must be used to query the device pointer, even if the context supports unified addressing. See Unified Addressing for additional details.

        Parameters:
            • bytesize (size_t) – Requested allocation size in bytes
            • Flags (unsigned int) – Flags for allocation request

        Returns:

    cuda.bindings.driver.cuMemHostGetDevicePointer(p, unsigned int Flags)

        Passes back device pointer of mapped pinned memory.

        Passes back the device pointer pdptr corresponding to the mapped, pinned host buffer p allocated by cuMemHostAlloc.

        cuMemHostGetDevicePointer() will fail if the CU_MEMHOSTALLOC_DEVICEMAP flag was not specified at the time the memory was allocated, or if the function is called on a GPU that does not support mapped pinned memory.

        For devices that have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory can also be accessed from the device using the host pointer p. The device pointer returned by cuMemHostGetDevicePointer() may or may not match the original host pointer p and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will match the original pointer p. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will not match the original host pointer p, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

        The Flags parameter is provided for future releases. For now, it must be set to 0.

        Parameters:
            • p (Any) – Host pointer
            • Flags (unsigned int) – Options (must be 0)

        Returns:
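        A sketch of a portable, mapped pinned allocation and its device-side alias. It assumes the CU_MEMHOSTALLOC_* constants are exposed at module level, as the bindings do for cuda.h defines:

            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)
            err, ctx = driver.cuCtxCreate(0, dev); check(err)

            # Portable + mapped pinned allocation.
            flags = driver.CU_MEMHOSTALLOC_PORTABLE | driver.CU_MEMHOSTALLOC_DEVICEMAP
            err, p = driver.cuMemHostAlloc(1 << 16, flags); check(err)

            # Device-side alias of the same physical pages; Flags must currently be 0.
            err, dptr = driver.cuMemHostGetDevicePointer(p, 0); check(err)

            check(driver.cuMemFreeHost(p)[0])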
    cuda.bindings.driver.cuMemHostGetFlags(p)

        Passes back flags that were used for a pinned allocation.

        Passes back the flags pFlags that were specified when allocating the pinned host buffer p allocated by cuMemHostAlloc.

        cuMemHostGetFlags() will fail if the pointer does not reside in an allocation performed by cuMemAllocHost() or cuMemHostAlloc().

        Parameters:
            p (Any) – Host pointer

        Returns:

    cuda.bindings.driver.cuMemAllocManaged(size_t bytesize, unsigned int flags)

        Allocates memory that will be automatically managed by the Unified Memory system.

        Allocates bytesize bytes of managed memory on the device and returns in *dptr a pointer to the allocated memory. If the device doesn’t support allocating managed memory, CUDA_ERROR_NOT_SUPPORTED is returned. Support for managed memory can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. If bytesize is 0, cuMemAllocManaged returns CUDA_ERROR_INVALID_VALUE. The pointer is valid on the CPU and on all GPUs in the system that support managed memory. All accesses to this pointer must obey the Unified Memory programming model.

        flags specifies the default stream association for this allocation. flags must be one of CU_MEM_ATTACH_GLOBAL or CU_MEM_ATTACH_HOST. If CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from any stream on any device. If CU_MEM_ATTACH_HOST is specified, then the allocation should not be accessed from devices that have a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to cuStreamAttachMemAsync will be required to enable access on such devices.

        If the association is later changed via cuStreamAttachMemAsync to a single stream, the default association as specified during cuMemAllocManaged is restored when that stream is destroyed. For managed variables, the default association is always CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won’t happen until all work in the stream has completed.

        Memory allocated with cuMemAllocManaged should be released with cuMemFree.

        Device memory oversubscription is possible for GPUs that have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on such GPUs may be evicted from device memory to host memory at any time by the Unified Memory driver in order to make room for other allocations.

        In a system where all GPUs have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this API returns and instead may be populated on access. In such systems, managed memory can migrate to any processor’s memory at any time. The Unified Memory driver will employ heuristics to maintain data locality and prevent excessive page faults to the extent possible. The application can also guide the driver about memory usage patterns via cuMemAdvise. The application can also explicitly migrate memory to a desired processor’s memory via cuMemPrefetchAsync.

        In a multi-GPU system where all of the GPUs have a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support with each other, the physical storage for managed memory is created on the GPU which is active at the time cuMemAllocManaged is called. All other GPUs will reference the data at reduced bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate memory among such GPUs.

        In a multi-GPU system where not all GPUs have peer-to-peer support with each other and where the value of the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS is zero for at least one of those GPUs, the location chosen for physical storage of managed memory is system-dependent.

        • On Linux, the location chosen will be device memory as long as the current set of active contexts are on devices that either have peer-to-peer support with each other or have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If there is an active context on a GPU that does not have a non-zero value for that device attribute and it does not have peer-to-peer support with the other devices that have active contexts on them, then the location for physical storage will be ‘zero-copy’ or host memory. Note that this means that managed memory that is located in device memory is migrated to host memory if a new context is created on a GPU that doesn’t have a non-zero value for the device attribute and does not support peer-to-peer with at least one of the other devices that has an active context. This in turn implies that context creation may fail if there is insufficient host memory to migrate all managed allocations.

        • On Windows, the physical storage is always created in ‘zero-copy’ or host memory. All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to restrict CUDA to only use those GPUs that have peer-to-peer support. Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero value to force the driver to always use device memory for physical storage. When this environment variable is set to a non-zero value, all contexts created in that process on devices that support managed memory have to be peer-to-peer compatible with each other. Context creation will fail if a context is created on a device that supports managed memory and is not peer-to-peer compatible with any of the other managed memory supporting devices on which contexts were previously created, even if those contexts have been destroyed. These environment variables are described in the CUDA programming guide under the “CUDA environment variables” section.

        • On ARM, managed memory is not available on discrete GPUs with Drive PX-2.

        Parameters:
            • bytesize (size_t) – Requested allocation size in bytes
            • flags (unsigned int) – Must be one of CU_MEM_ATTACH_GLOBAL or CU_MEM_ATTACH_HOST

        Returns:
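        A managed-allocation sketch that first queries the device attribute mentioned above; the enum spellings (CUdevice_attribute, CUmemAttach_flags) follow the bindings' generated enum classes, and the rest of the scaffolding is the same illustrative prologue as before:

            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)

            # Managed memory must be supported by the device.
            err, managed = driver.cuDeviceGetAttribute(
                driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY, dev)
            check(err)

            err, ctx = driver.cuCtxCreate(0, dev); check(err)
            if managed:
                err, dptr = driver.cuMemAllocManaged(
                    1 << 20, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL)
                check(err)
                # The pointer is valid on the CPU and on all managed-memory-capable GPUs.
                check(driver.cuMemFree(dptr)[0])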
    cuda.bindings.driver.cuDeviceRegisterAsyncNotification(device, callbackFunc, userData)

        Registers a callback function to receive async notifications.

        Registers callbackFunc to receive async notifications.

        The userData parameter is passed to the callback function at async notification time. Likewise, callback is also passed to the callback function to distinguish between multiple registered callbacks.

        The callback function being registered should be designed to return quickly (~10ms). Any long running tasks should be queued for execution on an application thread.

        Callbacks may not call cuDeviceRegisterAsyncNotification or cuDeviceUnregisterAsyncNotification. Doing so will result in CUDA_ERROR_NOT_PERMITTED. Async notification callbacks execute in an undefined order and may be serialized.

        Returns in *callback a handle representing the registered callback instance.

        Parameters:
            • device (CUdevice) – The device on which to register the callback
            • callbackFunc (CUasyncCallback) – The function to register as a callback
            • userData (Any) – A generic pointer to user data. This is passed into the callback function.

        Returns:

    cuda.bindings.driver.cuDeviceUnregisterAsyncNotification(device, callback)

        Unregisters an async notification callback.

        Unregisters callback so that the corresponding callback function will stop receiving async notifications.

        Parameters:
            • device (CUdevice) – The device from which to remove callback.
            • callback (CUasyncCallbackHandle) – The callback instance to unregister from receiving async notifications.

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_UNKNOWN

        Return type:
            CUresult
    cuda.bindings.driver.cuDeviceGetByPCIBusId(char *pciBusId)

        Returns a handle to a compute device.

        Returns in *device a device handle given a PCI bus ID string.

        Parameters:
            pciBusId (bytes) – String in one of the following forms: [domain]:[bus]:[device].[function], [domain]:[bus]:[device], [bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values

        Returns:

    cuda.bindings.driver.cuDeviceGetPCIBusId(int length, dev)

        Returns a PCI Bus Id string for the device.

        Returns an ASCII string identifying the device dev in the NULL-terminated string pointed to by pciBusId. length specifies the maximum length of the string that may be returned.

        The string is in the form [domain]:[bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values. pciBusId should be large enough to store 13 characters including the NULL-terminator.

        Parameters:
            • length (int) – Maximum length of string to store in name
            • dev (CUdevice) – Device to get identifier string for

        Returns:
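        A round-trip sketch: fetch a device's bus ID string, then look the device back up from it. The returned value is a bytes object, and 13 characters cover the common "dddd:bb:dd.f" form plus its NUL terminator:

            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)

            # 13 characters are enough for "dddd:bb:dd.f" plus the NUL terminator.
            err, bus_id = driver.cuDeviceGetPCIBusId(13, dev); check(err)

            # Round-trip: look the device back up from its bus ID string.
            err, dev_again = driver.cuDeviceGetByPCIBusId(bus_id); check(err)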
    cuda.bindings.driver.cuIpcGetEventHandle(event)

        Gets an interprocess handle for a previously allocated event.

        Takes as input a previously allocated event. This event must have been created with the CU_EVENT_INTERPROCESS and CU_EVENT_DISABLE_TIMING flags set. This opaque handle may be copied into other processes and opened with cuIpcOpenEventHandle to allow efficient hardware synchronization between GPU work in different processes.

        After the event has been opened in the importing process, cuEventRecord, cuEventSynchronize, cuStreamWaitEvent and cuEventQuery may be used in either process. Performing operations on the imported event after the exported event has been freed with cuEventDestroy will result in undefined behavior.

        IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED

        Parameters:
            event (CUevent or cudaEvent_t) – Event allocated with CU_EVENT_INTERPROCESS and CU_EVENT_DISABLE_TIMING flags.

        Returns:

    cuda.bindings.driver.cuIpcOpenEventHandle(CUipcEventHandle handle: CUipcEventHandle)

        Opens an interprocess event handle for use in the current process.

        Opens an interprocess event handle exported from another process with cuIpcGetEventHandle. This function returns a CUevent that behaves like a locally created event with the CU_EVENT_DISABLE_TIMING flag specified. This event must be freed with cuEventDestroy.

        Performing operations on the imported event after the exported event has been freed with cuEventDestroy will result in undefined behavior.

        IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED

        Parameters:
            handle (CUipcEventHandle) – Interprocess handle to open

        Returns:
    cuda.bindings.driver.cuIpcGetMemHandle(dptr)

        Gets an interprocess memory handle for an existing device memory allocation.

        Takes a pointer to the base of an existing device memory allocation created with cuMemAlloc and exports it for use in another process. This is a lightweight operation and may be called multiple times on an allocation without adverse effects.

        If a region of memory is freed with cuMemFree and a subsequent call to cuMemAlloc returns memory with the same device address, cuIpcGetMemHandle will return a unique handle for the new memory.

        IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED

        Parameters:
            dptr (CUdeviceptr) – Base pointer to previously allocated device memory

        Returns:

    cuda.bindings.driver.cuIpcOpenMemHandle(CUipcMemHandle handle: CUipcMemHandle, unsigned int Flags)

        Opens an interprocess memory handle exported from another process and returns a device pointer usable in the local process.

        Maps memory exported from another process with cuIpcGetMemHandle into the current device address space. For contexts on different devices cuIpcOpenMemHandle can attempt to enable peer access between the devices as if the user called cuCtxEnablePeerAccess. This behavior is controlled by the CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag. cuDeviceCanAccessPeer can determine if a mapping is possible.

        Contexts that may open CUipcMemHandles are restricted in the following way. CUipcMemHandles from each CUdevice in a given process may only be opened by one CUcontext per CUdevice per other process.

        If the memory handle has already been opened by the current context, the reference count on the handle is incremented by 1 and the existing device pointer is returned.

        Memory returned from cuIpcOpenMemHandle must be freed with cuIpcCloseMemHandle.

        Calling cuMemFree on an exported memory region before calling cuIpcCloseMemHandle in the importing context will result in undefined behavior.

        IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED

        Parameters:
            • handle (CUipcMemHandle) – CUipcMemHandle to open
            • Flags (unsigned int) – Flags for this operation. Must be specified as CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS

        Returns:

        Notes:
            No guarantees are made about the address returned in *pdptr. In particular, multiple processes may not receive the same address for the same handle.

    cuda.bindings.driver.cuIpcCloseMemHandle(dptr)

        Attempts to close memory mapped with cuIpcOpenMemHandle.

        Decrements the reference count of the memory returned by cuIpcOpenMemHandle by 1. When the reference count reaches 0, this API unmaps the memory. The original allocation in the exporting process as well as imported mappings in other processes will be unaffected.

        Any resources used to enable peer access will be freed if this is the last mapping using them.

        IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cuDeviceGetAttribute with CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED

        Parameters:
            dptr (CUdeviceptr) – Device pointer returned by cuIpcOpenMemHandle

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_MAP_FAILED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
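        A sketch of the two sides of the IPC memory flow. It assumes CUipcMemHandle exposes its 64 opaque bytes via a .reserved attribute and that the bytes are shipped between processes over some out-of-band channel (pipe, socket, ...); the two functions are meant to run in different processes, never both in the exporter:

            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            def exporter():
                # Runs in process A: allocate, export, and ship the opaque handle bytes.
                check(driver.cuInit(0)[0])
                err, dev = driver.cuDeviceGet(0); check(err)
                err, ctx = driver.cuCtxCreate(0, dev); check(err)
                err, dptr = driver.cuMemAlloc(1 << 20); check(err)
                err, handle = driver.cuIpcGetMemHandle(dptr); check(err)
                return bytes(handle.reserved)  # send over a pipe/socket to process B

            def importer(payload):
                # Runs in process B: rebuild the handle and map the exporter's memory.
                check(driver.cuInit(0)[0])
                err, dev = driver.cuDeviceGet(0); check(err)
                err, ctx = driver.cuCtxCreate(0, dev); check(err)
                handle = driver.CUipcMemHandle()
                handle.reserved = payload
                err, dptr = driver.cuIpcOpenMemHandle(
                    handle, driver.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS)
                check(err)
                # ... use dptr, then unmap before the exporter frees the allocation ...
                check(driver.cuIpcCloseMemHandle(dptr)[0])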
    cuda.bindings.driver.cuMemHostRegister(p, size_t bytesize, unsigned int Flags)

        Registers an existing host memory range for use by CUDA.

        Page-locks the memory range specified by p and bytesize and maps it for the device(s) as specified by Flags. This memory range also is added to the same tracking mechanism as cuMemHostAlloc to automatically accelerate calls to functions such as cuMemcpyHtoD(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory that has not been registered. Page-locking excessive amounts of memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to register staging areas for data exchange between host and device.

        On systems where CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES is true, cuMemHostRegister will not page-lock the memory range specified by p but only populate unpopulated pages.

        The Flags parameter enables different options to be specified that affect the allocation, as follows.

        • CU_MEMHOSTREGISTER_PORTABLE: The memory registered by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the registration.

        • CU_MEMHOSTREGISTER_DEVICEMAP: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cuMemHostGetDevicePointer().

        All of these flags are orthogonal to one another: a developer may page-lock memory that is portable or mapped with no restrictions.

        The CU_MEMHOSTREGISTER_DEVICEMAP flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cuMemHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the CU_MEMHOSTREGISTER_PORTABLE flag.

        For devices that have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory can also be accessed from the device using the host pointer p. The device pointer returned by cuMemHostGetDevicePointer() may or may not match the original host pointer p and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will match the original pointer p. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cuMemHostGetDevicePointer() will not match the original host pointer p, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

        The memory page-locked by this function must be unregistered with cuMemHostUnregister().

        Parameters:
            • p (Any) – Host pointer to memory to page-lock
            • bytesize (size_t) – Size in bytes of the address range to page-lock
            • Flags (unsigned int) – Flags for allocation request

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

        Return type:
            CUresult
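        A sketch that page-locks an existing NumPy buffer by its address (the .ctypes.data pattern used throughout the cuda-python examples), then unregisters it; the NumPy buffer and its size are illustrative:

            import numpy as np
            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)
            err, ctx = driver.cuCtxCreate(0, dev); check(err)

            # Page-lock an existing NumPy buffer so copies from it are accelerated.
            host = np.zeros(1 << 16, dtype=np.uint8)
            check(driver.cuMemHostRegister(host.ctypes.data, host.nbytes, 0)[0])

            # ... host now behaves like pinned memory for cuMemcpyHtoD and friends ...

            check(driver.cuMemHostUnregister(host.ctypes.data)[0])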
    cuda.bindings.driver.cuMemHostUnregister(p)

        Unregisters a memory range that was registered with cuMemHostRegister.

        Unmaps the memory range whose base address is specified by p, and makes it pageable again.

        The base address must be the same one specified to cuMemHostRegister().

        Parameters:
            p (Any) – Host pointer to memory to unregister

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED

        Return type:
            CUresult

    cuda.bindings.driver.cuMemcpy(dst, src, size_t ByteCount)

        Copies memory.

        Copies data between two pointers. dst and src are base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy. Note that this function infers the type of the transfer (host to host, host to device, device to device, or device to host) from the pointer values. This function is only allowed in contexts which support unified addressing.

        Parameters:
            • dst (CUdeviceptr) – Destination unified virtual address space pointer
            • src (CUdeviceptr) – Source unified virtual address space pointer
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyPeer(dstDevice, dstContext, srcDevice, srcContext, size_t ByteCount)

        Copies device memory between two contexts.

        Copies from device memory in one context to device memory in another context. dstDevice is the base device pointer of the destination memory and dstContext is the destination context. srcDevice is the base device pointer of the source memory and srcContext is the source context. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstDevice (CUdeviceptr) – Destination device pointer
            • dstContext (CUcontext) – Destination context
            • srcDevice (CUdeviceptr) – Source device pointer
            • srcContext (CUcontext) – Source context
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult

    cuda.bindings.driver.cuMemcpyHtoD(dstDevice, srcHost, size_t ByteCount)

        Copies memory from Host to Device.

        Copies from host memory to device memory. dstDevice and srcHost are the base addresses of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstDevice (CUdeviceptr) – Destination device pointer
            • srcHost (Any) – Source host pointer
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult

    cuda.bindings.driver.cuMemcpyDtoH(dstHost, srcDevice, size_t ByteCount)

        Copies memory from Device to Host.

        Copies from device to host memory. dstHost and srcDevice specify the base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstHost (Any) – Destination host pointer
            • srcDevice (CUdeviceptr) – Source device pointer
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
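        A host-to-device / device-to-host round trip, following the .ctypes.data address-passing pattern from the cuda-python examples; the array size and dtype are illustrative:

            import numpy as np
            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)
            err, ctx = driver.cuCtxCreate(0, dev); check(err)

            h_src = np.arange(256, dtype=np.float32)
            h_dst = np.zeros_like(h_src)

            err, dptr = driver.cuMemAlloc(h_src.nbytes); check(err)
            check(driver.cuMemcpyHtoD(dptr, h_src.ctypes.data, h_src.nbytes)[0])
            check(driver.cuMemcpyDtoH(h_dst.ctypes.data, dptr, h_dst.nbytes)[0])
            assert np.array_equal(h_src, h_dst)

            check(driver.cuMemFree(dptr)[0])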
    cuda.bindings.driver.cuMemcpyDtoD(dstDevice, srcDevice, size_t ByteCount)

        Copies memory from Device to Device.

        Copies from device memory to device memory. dstDevice and srcDevice are the base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstDevice (CUdeviceptr) – Destination device pointer
            • srcDevice (CUdeviceptr) – Source device pointer
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyDtoA(dstArray, size_t dstOffset, srcDevice, size_t ByteCount)

        Copies memory from Device to Array.

        Copies from device memory to a 1D CUDA array. dstArray and dstOffset specify the CUDA array handle and starting index of the destination data. srcDevice specifies the base pointer of the source. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstArray (CUarray) – Destination array
            • dstOffset (size_t) – Offset in bytes of destination array
            • srcDevice (CUdeviceptr) – Source device pointer
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyAtoD(dstDevice, srcArray, size_t srcOffset, size_t ByteCount)

        Copies memory from Array to Device.

        Copies from one 1D CUDA array to device memory. dstDevice specifies the base pointer of the destination and must be naturally aligned with the CUDA array elements. srcArray and srcOffset specify the CUDA array handle and the offset in bytes into the array where the copy is to begin. ByteCount specifies the number of bytes to copy and must be evenly divisible by the array element size.

        Parameters:
            • dstDevice (CUdeviceptr) – Destination device pointer
            • srcArray (CUarray) – Source array
            • srcOffset (size_t) – Offset in bytes of source array
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyHtoA(dstArray, size_t dstOffset, srcHost, size_t ByteCount)

        Copies memory from Host to Array.

        Copies from host memory to a 1D CUDA array. dstArray and dstOffset specify the CUDA array handle and starting offset in bytes of the destination data. srcHost specifies the base address of the source. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstArray (CUarray) – Destination array
            • dstOffset (size_t) – Offset in bytes of destination array
            • srcHost (Any) – Source host pointer
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyAtoH(dstHost, srcArray, size_t srcOffset, size_t ByteCount)

        Copies memory from Array to Host.

        Copies from one 1D CUDA array to host memory. dstHost specifies the base pointer of the destination. srcArray and srcOffset specify the CUDA array handle and starting offset in bytes of the source data. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstHost (Any) – Destination host pointer
            • srcArray (CUarray) – Source array
            • srcOffset (size_t) – Offset in bytes of source array
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyAtoA(dstArray, size_t dstOffset, srcArray, size_t srcOffset, size_t ByteCount)

        Copies memory from Array to Array.

        Copies from one 1D CUDA array to another. dstArray and srcArray specify the handles of the destination and source CUDA arrays for the copy, respectively. dstOffset and srcOffset specify the destination and source offsets in bytes into the CUDA arrays. ByteCount is the number of bytes to be copied. The elements of the CUDA arrays need not be the same format, but they must be the same size, and ByteCount must be evenly divisible by that size.

        Parameters:
            • dstArray (CUarray) – Destination array
            • dstOffset (size_t) – Offset in bytes of destination array
            • srcArray (CUarray) – Source array
            • srcOffset (size_t) – Offset in bytes of source array
            • ByteCount (size_t) – Size of memory copy in bytes

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpy2D(CUDA_MEMCPY2D pCopy: Optional[CUDA_MEMCPY2D])

        Copies memory for 2D arrays.

        Perform a 2D memory copy according to the parameters specified in pCopy. The CUDA_MEMCPY2D structure is defined as:

        View CUDA Toolkit Documentation for a C++ code example

        where:

        • srcMemoryType and dstMemoryType specify the type of memory of the source and destination, respectively; CUmemorytype_enum is defined as:

        View CUDA Toolkit Documentation for a C++ code example

        If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch specify the (unified virtual address space) base address of the source data and the bytes per row to apply. srcArray is ignored. This value may be used only if unified addressing is supported in the calling context.

        If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost and srcPitch specify the (host) base address of the source data and the bytes per row to apply. srcArray is ignored.

        If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice and srcPitch specify the (device) base address of the source data and the bytes per row to apply. srcArray is ignored.

        If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the handle of the source data. srcHost, srcDevice and srcPitch are ignored.

        If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost and dstPitch specify the (host) base address of the destination data and the bytes per row to apply. dstArray is ignored.

        If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch specify the (unified virtual address space) base address of the destination data and the bytes per row to apply. dstArray is ignored. This value may be used only if unified addressing is supported in the calling context.

        If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice and dstPitch specify the (device) base address of the destination data and the bytes per row to apply. dstArray is ignored.

        If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the handle of the destination data. dstHost, dstDevice and dstPitch are ignored.

        • srcXInBytes and srcY specify the base address of the source data for the copy.

        For host pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For device pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For CUDA arrays, srcXInBytes must be evenly divisible by the array element size.

        • dstXInBytes and dstY specify the base address of the destination data for the copy.

        For host pointers, the base address is

        View CUDA Toolkit Documentation for a C++ code example

        For device pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For CUDA arrays, dstXInBytes must be evenly divisible by the array element size.

        cuMemcpy2D() returns an error if any pitch is greater than the maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH). cuMemAllocPitch() passes back pitches that always work with cuMemcpy2D(). On intra-device memory copies (device to device, CUDA array to device, CUDA array to CUDA array), cuMemcpy2D() may fail for pitches not computed by cuMemAllocPitch(). cuMemcpy2DUnaligned() does not have this restriction, but may run significantly slower in the cases where cuMemcpy2D() would have returned an error code.

        Parameters:
            pCopy (CUDA_MEMCPY2D) – Parameters for the memory copy

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
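        A host-to-device 2D copy sketch that pairs cuMemcpy2D with a pitched allocation. It assumes unset CUDA_MEMCPY2D fields (offsets, unused pointers) default to zero in the bindings' struct wrapper, and that pointer fields accept integer addresses:

            import numpy as np
            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)
            err, ctx = driver.cuCtxCreate(0, dev); check(err)

            width, height, elem = 100, 64, 4  # float32 rows
            host = np.arange(width * height, dtype=np.float32).reshape(height, width)

            err, dptr, pitch = driver.cuMemAllocPitch(width * elem, height, elem); check(err)

            cpy = driver.CUDA_MEMCPY2D()
            cpy.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_HOST
            cpy.srcHost = host.ctypes.data   # tightly packed host rows
            cpy.srcPitch = width * elem
            cpy.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
            cpy.dstDevice = dptr
            cpy.dstPitch = pitch             # padded device rows from cuMemAllocPitch
            cpy.WidthInBytes = width * elem
            cpy.Height = height

            check(driver.cuMemcpy2D(cpy)[0])
            check(driver.cuMemFree(dptr)[0])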
    cuda.bindings.driver.cuMemcpy2DUnaligned(CUDA_MEMCPY2D pCopy: Optional[CUDA_MEMCPY2D])

        Copies memory for 2D arrays.

        Perform a 2D memory copy according to the parameters specified in pCopy. The CUDA_MEMCPY2D structure is defined as:

        View CUDA Toolkit Documentation for a C++ code example

        where:

        • srcMemoryType and dstMemoryType specify the type of memory of the source and destination, respectively; CUmemorytype_enum is defined as:

        View CUDA Toolkit Documentation for a C++ code example

        If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch specify the (unified virtual address space) base address of the source data and the bytes per row to apply. srcArray is ignored. This value may be used only if unified addressing is supported in the calling context.

        If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost and srcPitch specify the (host) base address of the source data and the bytes per row to apply. srcArray is ignored.

        If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice and srcPitch specify the (device) base address of the source data and the bytes per row to apply. srcArray is ignored.

        If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the handle of the source data. srcHost, srcDevice and srcPitch are ignored.

        If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch specify the (unified virtual address space) base address of the destination data and the bytes per row to apply. dstArray is ignored. This value may be used only if unified addressing is supported in the calling context.

        If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost and dstPitch specify the (host) base address of the destination data and the bytes per row to apply. dstArray is ignored.

        If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice and dstPitch specify the (device) base address of the destination data and the bytes per row to apply. dstArray is ignored.

        If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the handle of the destination data. dstHost, dstDevice and dstPitch are ignored.

        • srcXInBytes and srcY specify the base address of the source data for the copy.

        For host pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For device pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For CUDA arrays, srcXInBytes must be evenly divisible by the array element size.

        • dstXInBytes and dstY specify the base address of the destination data for the copy.

        For host pointers, the base address is

        View CUDA Toolkit Documentation for a C++ code example

        For device pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For CUDA arrays, dstXInBytes must be evenly divisible by the array element size.

        cuMemcpy2D() returns an error if any pitch is greater than the maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH). cuMemAllocPitch() passes back pitches that always work with cuMemcpy2D(). On intra-device memory copies (device to device, CUDA array to device, CUDA array to CUDA array), cuMemcpy2D() may fail for pitches not computed by cuMemAllocPitch(). cuMemcpy2DUnaligned() does not have this restriction, but may run significantly slower in the cases where cuMemcpy2D() would have returned an error code.

        Parameters:
            pCopy (CUDA_MEMCPY2D) – Parameters for the memory copy

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpy3D(CUDA_MEMCPY3D pCopy: Optional[CUDA_MEMCPY3D])

        Copies memory for 3D arrays.

        Perform a 3D memory copy according to the parameters specified in pCopy. The CUDA_MEMCPY3D structure is defined as:

        View CUDA Toolkit Documentation for a C++ code example

        where:

        • srcMemoryType and dstMemoryType specify the type of memory of the source and destination, respectively; CUmemorytype_enum is defined as:

        View CUDA Toolkit Documentation for a C++ code example

        If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch specify the (unified virtual address space) base address of the source data and the bytes per row to apply. srcArray is ignored. This value may be used only if unified addressing is supported in the calling context.

        If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost, srcPitch and srcHeight specify the (host) base address of the source data, the bytes per row, and the height of each 2D slice of the 3D array. srcArray is ignored.

        If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice, srcPitch and srcHeight specify the (device) base address of the source data, the bytes per row, and the height of each 2D slice of the 3D array. srcArray is ignored.

        If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the handle of the source data. srcHost, srcDevice, srcPitch and srcHeight are ignored.

        If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch specify the (unified virtual address space) base address of the destination data and the bytes per row to apply. dstArray is ignored. This value may be used only if unified addressing is supported in the calling context.

        If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost, dstPitch and dstHeight specify the (host) base address of the destination data, the bytes per row, and the height of each 2D slice of the 3D array. dstArray is ignored.

        If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice, dstPitch and dstHeight specify the (device) base address of the destination data, the bytes per row, and the height of each 2D slice of the 3D array. dstArray is ignored.

        If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the handle of the destination data. dstHost, dstDevice, dstPitch and dstHeight are ignored.

        • srcXInBytes, srcY and srcZ specify the base address of the source data for the copy.

        For host pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For device pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For CUDA arrays, srcXInBytes must be evenly divisible by the array element size.

        • dstXInBytes, dstY and dstZ specify the base address of the destination data for the copy.

        For host pointers, the base address is

        View CUDA Toolkit Documentation for a C++ code example

        For device pointers, the starting address is

        View CUDA Toolkit Documentation for a C++ code example

        For CUDA arrays, dstXInBytes must be evenly divisible by the array element size.

        cuMemcpy3D() returns an error if any pitch is greater than the maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH).

        The srcLOD and dstLOD members of the CUDA_MEMCPY3D structure must be set to 0.

        Parameters:
            pCopy (CUDA_MEMCPY3D) – Parameters for the memory copy

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpy3DPeer(CUDA_MEMCPY3D_PEER pCopy: Optional[CUDA_MEMCPY3D_PEER])

        Copies memory between contexts.

        Perform a 3D memory copy according to the parameters specified in pCopy. See the definition of the CUDA_MEMCPY3D_PEER structure for documentation of its parameters.

        Parameters:
            pCopy (CUDA_MEMCPY3D_PEER) – Parameters for the memory copy

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyAsync(dst, src, size_t ByteCount, hStream)

        Copies memory asynchronously.

        Copies data between two pointers. dst and src are base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy. Note that this function infers the type of the transfer (host to host, host to device, device to device, or device to host) from the pointer values. This function is only allowed in contexts which support unified addressing.

        Parameters:
            • dst (CUdeviceptr) – Destination unified virtual address space pointer
            • src (CUdeviceptr) – Source unified virtual address space pointer
            • ByteCount (size_t) – Size of memory copy in bytes
            • hStream (CUstream or cudaStream_t) – Stream identifier

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

        Return type:
            CUresult
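        A stream-ordered device-to-device copy sketch, reusing the illustrative check()/init prologue; sizes are arbitrary:

            from cuda.bindings import driver

            def check(err):
                if err != driver.CUresult.CUDA_SUCCESS:
                    raise RuntimeError(f"CUDA driver error: {err}")

            check(driver.cuInit(0)[0])
            err, dev = driver.cuDeviceGet(0); check(err)
            err, ctx = driver.cuCtxCreate(0, dev); check(err)

            nbytes = 1 << 20
            err, d_src = driver.cuMemAlloc(nbytes); check(err)
            err, d_dst = driver.cuMemAlloc(nbytes); check(err)
            err, stream = driver.cuStreamCreate(0); check(err)

            # Device-to-device copy inferred from the (unified) pointer values,
            # ordered with respect to other work in the stream.
            check(driver.cuMemcpyAsync(d_dst, d_src, nbytes, stream)[0])
            check(driver.cuStreamSynchronize(stream)[0])

            check(driver.cuStreamDestroy(stream)[0])
            check(driver.cuMemFree(d_src)[0])
            check(driver.cuMemFree(d_dst)[0])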
    cuda.bindings.driver.cuMemcpyPeerAsync(dstDevice, dstContext, srcDevice, srcContext, size_t ByteCount, hStream)

        Copies device memory between two contexts asynchronously.

        Copies from device memory in one context to device memory in another context. dstDevice is the base device pointer of the destination memory and dstContext is the destination context. srcDevice is the base device pointer of the source memory and srcContext is the source context. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstDevice (CUdeviceptr) – Destination device pointer
            • dstContext (CUcontext) – Destination context
            • srcDevice (CUdeviceptr) – Source device pointer
            • srcContext (CUcontext) – Source context
            • ByteCount (size_t) – Size of memory copy in bytes
            • hStream (CUstream or cudaStream_t) – Stream identifier

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyHtoDAsync(dstDevice, srcHost, size_t ByteCount, hStream)

        Copies memory from Host to Device.

        Copies from host memory to device memory. dstDevice and srcHost are the base addresses of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstDevice (CUdeviceptr) – Destination device pointer
            • srcHost (Any) – Source host pointer
            • ByteCount (size_t) – Size of memory copy in bytes
            • hStream (CUstream or cudaStream_t) – Stream identifier

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyDtoHAsync(dstHost, srcDevice, size_t ByteCount, hStream)

        Copies memory from Device to Host.

        Copies from device to host memory. dstHost and srcDevice specify the base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstHost (Any) – Destination host pointer
            • srcDevice (CUdeviceptr) – Source device pointer
            • ByteCount (size_t) – Size of memory copy in bytes
            • hStream (CUstream or cudaStream_t) – Stream identifier

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyDtoDAsync(dstDevice, srcDevice, size_t ByteCount, hStream)

        Copies memory from Device to Device.

        Copies from device memory to device memory. dstDevice and srcDevice are the base pointers of the destination and source, respectively. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstDevice (CUdeviceptr) – Destination device pointer
            • srcDevice (CUdeviceptr) – Source device pointer
            • ByteCount (size_t) – Size of memory copy in bytes
            • hStream (CUstream or cudaStream_t) – Stream identifier

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

        Return type:
            CUresult
    cuda.bindings.driver.cuMemcpyHtoAAsync(dstArray, size_t dstOffset, srcHost, size_t ByteCount, hStream)

        Copies memory from Host to Array.

        Copies from host memory to a 1D CUDA array. dstArray and dstOffset specify the CUDA array handle and starting offset in bytes of the destination data. srcHost specifies the base address of the source. ByteCount specifies the number of bytes to copy.

        Parameters:
            • dstArray (CUarray) – Destination array
            • dstOffset (size_t) – Offset in bytes of destination array
            • srcHost (Any) – Source host pointer
            • ByteCount (size_t) – Size of memory copy in bytes
            • hStream (CUstream or cudaStream_t) – Stream identifier

        Returns:
            CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

        Return type:
            CUresult
    +
cuda.bindings.driver.cuMemcpyAtoHAsync(dstHost, srcArray, size_t srcOffset, size_t ByteCount, hStream)

Copies memory from Array to Host.

Copies from one 1D CUDA array to host memory. dstHost specifies the base pointer of the destination. srcArray and srcOffset specify the CUDA array handle and starting offset in bytes of the source data. ByteCount specifies the number of bytes to copy.

Parameters:
• dstHost (Any) – Destination pointer
• srcArray (CUarray) – Source array
• srcOffset (size_t) – Offset in bytes of source array
• ByteCount (size_t) – Size of memory copy in bytes
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:
CUresult

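A hedged sketch of staging data through a 1D CUDA array with the two calls above, reusing the check()/context/stream setup from the earlier sketch; cuArrayCreate and CUDA_ARRAY_DESCRIPTOR are documented later in this section.

    # Reuses check(), driver, np, and stream from the earlier sketch.
    h_src = np.arange(256, dtype=np.float32)
    h_dst = np.empty_like(h_src)

    desc = driver.CUDA_ARRAY_DESCRIPTOR()
    desc.Width = h_src.size      # Height = 0 keeps the array one-dimensional
    desc.Height = 0
    desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1
    arr = check(driver.cuArrayCreate(desc))

    # Host -> array at offset 0, then array -> host, both on the stream.
    check(driver.cuMemcpyHtoAAsync(arr, 0, h_src.ctypes.data, h_src.nbytes, stream))
    check(driver.cuMemcpyAtoHAsync(h_dst.ctypes.data, arr, 0, h_src.nbytes, stream))
    check(driver.cuStreamSynchronize(stream))
    assert (h_dst == h_src).all()
    check(driver.cuArrayDestroy(arr))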
cuda.bindings.driver.cuMemcpy2DAsync(CUDA_MEMCPY2D pCopy: Optional[CUDA_MEMCPY2D], hStream)

Copies memory for 2D arrays.

Performs a 2D memory copy according to the parameters specified in pCopy. The CUDA_MEMCPY2D structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• srcMemoryType and dstMemoryType specify the type of memory of the source and destination, respectively; CUmemorytype_enum is defined as:

View CUDA Toolkit Documentation for a C++ code example

If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost and srcPitch specify the (host) base address of the source data and the bytes per row to apply. srcArray is ignored.

If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch specify the (unified virtual address space) base address of the source data and the bytes per row to apply. srcArray is ignored. This value may be used only if unified addressing is supported in the calling context.

If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice and srcPitch specify the (device) base address of the source data and the bytes per row to apply. srcArray is ignored.

If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the handle of the source data. srcHost, srcDevice and srcPitch are ignored.

If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch specify the (unified virtual address space) base address of the destination data and the bytes per row to apply. dstArray is ignored. This value may be used only if unified addressing is supported in the calling context.

If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost and dstPitch specify the (host) base address of the destination data and the bytes per row to apply. dstArray is ignored.

If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice and dstPitch specify the (device) base address of the destination data and the bytes per row to apply. dstArray is ignored.

If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the handle of the destination data. dstHost, dstDevice and dstPitch are ignored.

• srcXInBytes and srcY specify the base address of the source data for the copy.

For host pointers, the starting address is

View CUDA Toolkit Documentation for a C++ code example

For device pointers, the starting address is

View CUDA Toolkit Documentation for a C++ code example

For CUDA arrays, srcXInBytes must be evenly divisible by the array element size.

• dstXInBytes and dstY specify the base address of the destination data for the copy.

For host pointers, the base address is

View CUDA Toolkit Documentation for a C++ code example

For device pointers, the starting address is

View CUDA Toolkit Documentation for a C++ code example

For CUDA arrays, dstXInBytes must be evenly divisible by the array element size.

cuMemcpy2DAsync() returns an error if any pitch is greater than the maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH). cuMemAllocPitch() passes back pitches that always work with cuMemcpy2D(). On intra-device memory copies (device to device, CUDA array to device, CUDA array to CUDA array), cuMemcpy2DAsync() may fail for pitches not computed by cuMemAllocPitch().

Parameters:
• pCopy (CUDA_MEMCPY2D) – Parameters for the memory copy
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:
CUresult

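A sketch of filling the CUDA_MEMCPY2D structure for a host-to-device copy into a pitched allocation, under the same assumptions (and check() helper) as the earlier sketches.

    # Reuses check(), driver, np, and stream from the earlier sketch.
    width, height = 64, 32                      # width in bytes (uint8 rows)
    h_src = np.arange(width * height, dtype=np.uint8)

    # cuMemAllocPitch returns a pitch that always works with cuMemcpy2D*.
    d_buf, pitch = check(driver.cuMemAllocPitch(width, height, 4))

    cpy = driver.CUDA_MEMCPY2D()
    cpy.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_HOST
    cpy.srcHost = h_src.ctypes.data
    cpy.srcPitch = width                        # tightly packed host rows
    cpy.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
    cpy.dstDevice = d_buf
    cpy.dstPitch = pitch                        # padded device rows
    cpy.WidthInBytes = width
    cpy.Height = height

    check(driver.cuMemcpy2DAsync(cpy, stream))
    check(driver.cuStreamSynchronize(stream))
    check(driver.cuMemFree(d_buf))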
cuda.bindings.driver.cuMemcpy3DAsync(CUDA_MEMCPY3D pCopy: Optional[CUDA_MEMCPY3D], hStream)

Copies memory for 3D arrays.

Performs a 3D memory copy according to the parameters specified in pCopy. The CUDA_MEMCPY3D structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• srcMemoryType and dstMemoryType specify the type of memory of the source and destination, respectively; CUmemorytype_enum is defined as:

View CUDA Toolkit Documentation for a C++ code example

If srcMemoryType is CU_MEMORYTYPE_UNIFIED, srcDevice and srcPitch specify the (unified virtual address space) base address of the source data and the bytes per row to apply. srcArray is ignored. This value may be used only if unified addressing is supported in the calling context.

If srcMemoryType is CU_MEMORYTYPE_HOST, srcHost, srcPitch and srcHeight specify the (host) base address of the source data, the bytes per row, and the height of each 2D slice of the 3D array. srcArray is ignored.

If srcMemoryType is CU_MEMORYTYPE_DEVICE, srcDevice, srcPitch and srcHeight specify the (device) base address of the source data, the bytes per row, and the height of each 2D slice of the 3D array. srcArray is ignored.

If srcMemoryType is CU_MEMORYTYPE_ARRAY, srcArray specifies the handle of the source data. srcHost, srcDevice, srcPitch and srcHeight are ignored.

If dstMemoryType is CU_MEMORYTYPE_UNIFIED, dstDevice and dstPitch specify the (unified virtual address space) base address of the destination data and the bytes per row to apply. dstArray is ignored. This value may be used only if unified addressing is supported in the calling context.

If dstMemoryType is CU_MEMORYTYPE_HOST, dstHost, dstPitch and dstHeight specify the (host) base address of the destination data, the bytes per row, and the height of each 2D slice of the 3D array. dstArray is ignored.

If dstMemoryType is CU_MEMORYTYPE_DEVICE, dstDevice, dstPitch and dstHeight specify the (device) base address of the destination data, the bytes per row, and the height of each 2D slice of the 3D array. dstArray is ignored.

If dstMemoryType is CU_MEMORYTYPE_ARRAY, dstArray specifies the handle of the destination data. dstHost, dstDevice, dstPitch and dstHeight are ignored.

• srcXInBytes, srcY and srcZ specify the base address of the source data for the copy.

For host pointers, the starting address is

View CUDA Toolkit Documentation for a C++ code example

For device pointers, the starting address is

View CUDA Toolkit Documentation for a C++ code example

For CUDA arrays, srcXInBytes must be evenly divisible by the array element size.

• dstXInBytes, dstY and dstZ specify the base address of the destination data for the copy.

For host pointers, the base address is

View CUDA Toolkit Documentation for a C++ code example

For device pointers, the starting address is

View CUDA Toolkit Documentation for a C++ code example

For CUDA arrays, dstXInBytes must be evenly divisible by the array element size.

cuMemcpy3DAsync() returns an error if any pitch is greater than the maximum allowed (CU_DEVICE_ATTRIBUTE_MAX_PITCH).

The srcLOD and dstLOD members of the CUDA_MEMCPY3D structure must be set to 0.

Parameters:
• pCopy (CUDA_MEMCPY3D) – Parameters for the memory copy
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:
CUresult

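The 3D variant follows the same pattern; the sketch below copies a dense host volume into a linear device buffer by treating it as a tightly packed 3D region (same assumptions and check() helper as above).

    # Reuses check(), driver, np, and stream from the earlier sketch.
    w, h, d = 16, 8, 4                          # row bytes, rows, slices
    h_src = np.arange(w * h * d, dtype=np.uint8)
    d_buf = check(driver.cuMemAlloc(w * h * d))

    cpy = driver.CUDA_MEMCPY3D()
    cpy.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_HOST
    cpy.srcHost = h_src.ctypes.data
    cpy.srcPitch = w                            # bytes per row
    cpy.srcHeight = h                           # rows per 2D slice
    cpy.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
    cpy.dstDevice = d_buf
    cpy.dstPitch = w
    cpy.dstHeight = h
    cpy.WidthInBytes = w
    cpy.Height = h
    cpy.Depth = d

    check(driver.cuMemcpy3DAsync(cpy, stream))
    check(driver.cuStreamSynchronize(stream))
    check(driver.cuMemFree(d_buf))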
cuda.bindings.driver.cuMemcpy3DPeerAsync(CUDA_MEMCPY3D_PEER pCopy: Optional[CUDA_MEMCPY3D_PEER], hStream)

Copies memory between contexts asynchronously.

Performs a 3D memory copy according to the parameters specified in pCopy. See the definition of the CUDA_MEMCPY3D_PEER structure for documentation of its parameters.

Parameters:
• pCopy (CUDA_MEMCPY3D_PEER) – Parameters for the memory copy
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult

cuda.bindings.driver.cuMemsetD8(dstDevice, unsigned char uc, size_t N)

Initializes device memory.

Sets the memory range of N 8-bit values to the specified value uc.

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• uc (unsigned char) – Value to set
• N (size_t) – Number of elements

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult

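A short sketch of the memset family (same assumptions and check() helper as above): fill a buffer bytewise, then overwrite it as 32-bit words and verify on the host. cuMemsetD32 is documented just below.

    # Reuses check(), driver, and np from the earlier sketch.
    n = 1024
    d_buf = check(driver.cuMemAlloc(n * 4))

    check(driver.cuMemsetD8(d_buf, 0xAB, n * 4))   # 4096 bytes of 0xAB
    # Overwrite as 1024 32-bit words; cuMemAlloc pointers meet the alignment.
    check(driver.cuMemsetD32(d_buf, 42, n))

    h = np.empty(n, dtype=np.uint32)
    check(driver.cuMemcpyDtoH(h.ctypes.data, d_buf, n * 4))
    assert (h == 42).all()
    check(driver.cuMemFree(d_buf))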
cuda.bindings.driver.cuMemsetD16(dstDevice, unsigned short us, size_t N)

Initializes device memory.

Sets the memory range of N 16-bit values to the specified value us. The dstDevice pointer must be two-byte aligned.

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• us (unsigned short) – Value to set
• N (size_t) – Number of elements

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult


cuda.bindings.driver.cuMemsetD32(dstDevice, unsigned int ui, size_t N)

Initializes device memory.

Sets the memory range of N 32-bit values to the specified value ui. The dstDevice pointer must be four-byte aligned.

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• ui (unsigned int) – Value to set
• N (size_t) – Number of elements

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult

cuda.bindings.driver.cuMemsetD2D8(dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height)

Initializes device memory.

Sets the 2D memory range of Width 8-bit values to the specified value uc. Height specifies the number of rows to set, and dstPitch specifies the number of bytes between each row. This function performs fastest when the pitch is one that has been passed back by cuMemAllocPitch().

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
• uc (unsigned char) – Value to set
• Width (size_t) – Width of row
• Height (size_t) – Number of rows

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult

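For the pitched variants, the pitch normally comes from cuMemAllocPitch, as in this sketch (same assumptions and check() helper as above); only the Width x Height region is written, leaving the padding at the end of each pitched row untouched.

    # Reuses check() and driver from the earlier sketch.
    width, height = 100, 50                     # width in bytes
    d_buf, pitch = check(driver.cuMemAllocPitch(width, height, 4))
    check(driver.cuMemsetD2D8(d_buf, pitch, 0, width, height))
    check(driver.cuMemFree(d_buf))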
cuda.bindings.driver.cuMemsetD2D16(dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height)

Initializes device memory.

Sets the 2D memory range of Width 16-bit values to the specified value us. Height specifies the number of rows to set, and dstPitch specifies the number of bytes between each row. The dstDevice pointer and dstPitch offset must be two-byte aligned. This function performs fastest when the pitch is one that has been passed back by cuMemAllocPitch().

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
• us (unsigned short) – Value to set
• Width (size_t) – Width of row
• Height (size_t) – Number of rows

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult


cuda.bindings.driver.cuMemsetD2D32(dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height)

Initializes device memory.

Sets the 2D memory range of Width 32-bit values to the specified value ui. Height specifies the number of rows to set, and dstPitch specifies the number of bytes between each row. The dstDevice pointer and dstPitch offset must be four-byte aligned. This function performs fastest when the pitch is one that has been passed back by cuMemAllocPitch().

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
• ui (unsigned int) – Value to set
• Width (size_t) – Width of row
• Height (size_t) – Number of rows

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult

cuda.bindings.driver.cuMemsetD8Async(dstDevice, unsigned char uc, size_t N, hStream)

Sets device memory.

Sets the memory range of N 8-bit values to the specified value uc.

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• uc (unsigned char) – Value to set
• N (size_t) – Number of elements
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult


cuda.bindings.driver.cuMemsetD16Async(dstDevice, unsigned short us, size_t N, hStream)

Sets device memory.

Sets the memory range of N 16-bit values to the specified value us. The dstDevice pointer must be two-byte aligned.

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• us (unsigned short) – Value to set
• N (size_t) – Number of elements
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult


cuda.bindings.driver.cuMemsetD32Async(dstDevice, unsigned int ui, size_t N, hStream)

Sets device memory.

Sets the memory range of N 32-bit values to the specified value ui. The dstDevice pointer must be four-byte aligned.

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• ui (unsigned int) – Value to set
• N (size_t) – Number of elements
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult

cuda.bindings.driver.cuMemsetD2D8Async(dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, hStream)

Sets device memory.

Sets the 2D memory range of Width 8-bit values to the specified value uc. Height specifies the number of rows to set, and dstPitch specifies the number of bytes between each row. This function performs fastest when the pitch is one that has been passed back by cuMemAllocPitch().

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
• uc (unsigned char) – Value to set
• Width (size_t) – Width of row
• Height (size_t) – Number of rows
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult


cuda.bindings.driver.cuMemsetD2D16Async(dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, hStream)

Sets device memory.

Sets the 2D memory range of Width 16-bit values to the specified value us. Height specifies the number of rows to set, and dstPitch specifies the number of bytes between each row. The dstDevice pointer and dstPitch offset must be two-byte aligned. This function performs fastest when the pitch is one that has been passed back by cuMemAllocPitch().

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
• us (unsigned short) – Value to set
• Width (size_t) – Width of row
• Height (size_t) – Number of rows
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult


cuda.bindings.driver.cuMemsetD2D32Async(dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, hStream)

Sets device memory.

Sets the 2D memory range of Width 32-bit values to the specified value ui. Height specifies the number of rows to set, and dstPitch specifies the number of bytes between each row. The dstDevice pointer and dstPitch offset must be four-byte aligned. This function performs fastest when the pitch is one that has been passed back by cuMemAllocPitch().

Parameters:
• dstDevice (CUdeviceptr) – Destination device pointer
• dstPitch (size_t) – Pitch of destination device pointer (unused if Height is 1)
• ui (unsigned int) – Value to set
• Width (size_t) – Width of row
• Height (size_t) – Number of rows
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

Return type:
CUresult

cuda.bindings.driver.cuArrayCreate(CUDA_ARRAY_DESCRIPTOR pAllocateArray: Optional[CUDA_ARRAY_DESCRIPTOR])

Creates a 1D or 2D CUDA array.

Creates a CUDA array according to the CUDA_ARRAY_DESCRIPTOR structure pAllocateArray and returns a handle to the new CUDA array in *pHandle. The CUDA_ARRAY_DESCRIPTOR is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• Width and Height are the width and height of the CUDA array (in elements); the CUDA array is one-dimensional if height is 0, two-dimensional otherwise;
• Format specifies the format of the elements; CUarray_format is defined as:
• View CUDA Toolkit Documentation for a C++ code example
• NumChannels specifies the number of packed components per CUDA array element; it may be 1, 2, or 4;

Here are examples of CUDA array descriptions:

Description for a CUDA array of 2048 floats:

View CUDA Toolkit Documentation for a C++ code example

Description for a 64 x 64 CUDA array of floats:

View CUDA Toolkit Documentation for a C++ code example

Description for a width x height CUDA array of 64-bit, 4x16-bit float16's:

View CUDA Toolkit Documentation for a C++ code example

Description for a width x height CUDA array of 16-bit elements, each of which is two 8-bit unsigned chars:

View CUDA Toolkit Documentation for a C++ code example

Parameters:
pAllocateArray (CUDA_ARRAY_DESCRIPTOR) – Array descriptor

Returns:

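A sketch of the first descriptor example above (2048 floats, one-dimensional) using the Python bindings, with a round trip through cuArrayGetDescriptor (documented next); same assumptions and check() helper as the earlier sketches.

    # Reuses check() and driver from the earlier sketch.
    desc = driver.CUDA_ARRAY_DESCRIPTOR()
    desc.Width = 2048
    desc.Height = 0                             # 0 height makes it 1D
    desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    desc.NumChannels = 1
    arr = check(driver.cuArrayCreate(desc))

    # cuArrayGetDescriptor recovers the shape from an opaque handle.
    got = check(driver.cuArrayGetDescriptor(arr))
    assert got.Width == 2048 and got.NumChannels == 1
    check(driver.cuArrayDestroy(arr))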
cuda.bindings.driver.cuArrayGetDescriptor(hArray)

Get a 1D or 2D CUDA array descriptor.

Returns in *pArrayDescriptor a descriptor containing information on the format and dimensions of the CUDA array hArray. It is useful for subroutines that have been passed a CUDA array, but need to know the CUDA array parameters for validation or other purposes.

Parameters:
hArray (CUarray) – Array to get descriptor of

Returns:


cuda.bindings.driver.cuArrayGetSparseProperties(array)

Returns the layout properties of a sparse CUDA array.

Returns the layout properties of a sparse CUDA array in sparseProperties. If the CUDA array is not allocated with the flag CUDA_ARRAY3D_SPARSE, CUDA_ERROR_INVALID_VALUE will be returned.

If the returned value in flags contains CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, then miptailSize represents the total size of the array. Otherwise, it will be zero. Also, the returned value in miptailFirstLevel is always zero. Note that the array must have been allocated using cuArrayCreate or cuArray3DCreate. For CUDA arrays obtained using cuMipmappedArrayGetLevel, CUDA_ERROR_INVALID_VALUE will be returned. Instead, cuMipmappedArrayGetSparseProperties must be used to obtain the sparse properties of the entire CUDA mipmapped array to which array belongs.

Parameters:
array (CUarray) – CUDA array to get the sparse properties of

Returns:


cuda.bindings.driver.cuMipmappedArrayGetSparseProperties(mipmap)

Returns the layout properties of a sparse CUDA mipmapped array.

Returns the sparse array layout properties in sparseProperties. If the CUDA mipmapped array is not allocated with the flag CUDA_ARRAY3D_SPARSE, CUDA_ERROR_INVALID_VALUE will be returned.

For non-layered CUDA mipmapped arrays, miptailSize returns the size of the mip tail region. The mip tail region includes all mip levels whose width, height or depth is less than that of the tile. For layered CUDA mipmapped arrays, if flags contains CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, then miptailSize specifies the size of the mip tail of all layers combined. Otherwise, miptailSize specifies the mip tail size per layer. The returned value of miptailFirstLevel is valid only if miptailSize is non-zero.

Parameters:
mipmap (CUmipmappedArray) – CUDA mipmapped array to get the sparse properties of

Returns:


cuda.bindings.driver.cuArrayGetMemoryRequirements(array, device)

Returns the memory requirements of a CUDA array.

Returns the memory requirements of a CUDA array in memoryRequirements. If the CUDA array is not allocated with the flag CUDA_ARRAY3D_DEFERRED_MAPPING, CUDA_ERROR_INVALID_VALUE will be returned.

The returned value in size represents the total size of the CUDA array. The returned value in alignment represents the alignment necessary for mapping the CUDA array.

Parameters:
• array (CUarray) – CUDA array to get the memory requirements of
• device (CUdevice) – Device to get the memory requirements for

Returns:


cuda.bindings.driver.cuMipmappedArrayGetMemoryRequirements(mipmap, device)

Returns the memory requirements of a CUDA mipmapped array.

Returns the memory requirements of a CUDA mipmapped array in memoryRequirements. If the CUDA mipmapped array is not allocated with the flag CUDA_ARRAY3D_DEFERRED_MAPPING, CUDA_ERROR_INVALID_VALUE will be returned.

The returned value in size represents the total size of the CUDA mipmapped array. The returned value in alignment represents the alignment necessary for mapping the CUDA mipmapped array.

Parameters:
• mipmap (CUmipmappedArray) – CUDA mipmapped array to get the memory requirements of
• device (CUdevice) – Device to get the memory requirements for

Returns:


cuda.bindings.driver.cuArrayGetPlane(hArray, unsigned int planeIdx)

Gets a CUDA array plane from a CUDA array.

Returns in pPlaneArray a CUDA array that represents a single format plane of the CUDA array hArray.

If planeIdx is greater than the maximum number of planes in this array, or if the array does not have a multi-planar format (e.g. CU_AD_FORMAT_NV12), then CUDA_ERROR_INVALID_VALUE is returned.

Note that if hArray has format CU_AD_FORMAT_NV12, then passing in 0 for planeIdx returns a CUDA array of the same size as hArray but with one channel and CU_AD_FORMAT_UNSIGNED_INT8 as its format. If 1 is passed for planeIdx, then the returned CUDA array has half the height and width of hArray with two channels and CU_AD_FORMAT_UNSIGNED_INT8 as its format.

Parameters:
• hArray (CUarray) – Multiplanar CUDA array
• planeIdx (unsigned int) – Plane index

Returns:


cuda.bindings.driver.cuArrayDestroy(hArray)

Destroys a CUDA array.

Destroys the CUDA array hArray.

Parameters:
hArray (CUarray) – Array to destroy

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ARRAY_IS_MAPPED, CUDA_ERROR_CONTEXT_IS_DESTROYED

Return type:
CUresult

cuda.bindings.driver.cuArray3DCreate(CUDA_ARRAY3D_DESCRIPTOR pAllocateArray: Optional[CUDA_ARRAY3D_DESCRIPTOR])

Creates a 3D CUDA array.

Creates a CUDA array according to the CUDA_ARRAY3D_DESCRIPTOR structure pAllocateArray and returns a handle to the new CUDA array in *pHandle. The CUDA_ARRAY3D_DESCRIPTOR is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• Width, Height, and Depth are the width, height, and depth of the CUDA array (in elements); the following types of CUDA arrays can be allocated:
  • A 1D array is allocated if Height and Depth extents are both zero.
  • A 2D array is allocated if only Depth extent is zero.
  • A 3D array is allocated if all three extents are non-zero.
  • A 1D layered CUDA array is allocated if only Height is zero and the CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number of layers is determined by the depth extent.
  • A 2D layered CUDA array is allocated if all three extents are non-zero and the CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number of layers is determined by the depth extent.
  • A cubemap CUDA array is allocated if all three extents are non-zero and the CUDA_ARRAY3D_CUBEMAP flag is set. Width must be equal to Height, and Depth must be six. A cubemap is a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube. The order of the six layers in memory is the same as that listed in CUarray_cubemap_face.
  • A cubemap layered CUDA array is allocated if all three extents are non-zero, and both CUDA_ARRAY3D_CUBEMAP and CUDA_ARRAY3D_LAYERED flags are set. Width must be equal to Height, and Depth must be a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists of a collection of cubemaps. The first six layers represent the first cubemap, the next six layers form the second cubemap, and so on.
• Format specifies the format of the elements; CUarray_format is defined as:
• View CUDA Toolkit Documentation for a C++ code example
• NumChannels specifies the number of packed components per CUDA array element; it may be 1, 2, or 4;
• Flags may be set to
  • CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA arrays. If this flag is set, Depth specifies the number of layers, not the depth of a 3D array.
  • CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to the CUDA array. If this flag is not set, cuSurfRefSetArray will fail when attempting to bind the CUDA array to a surface reference.
  • CUDA_ARRAY3D_CUBEMAP to enable creation of cubemaps. If this flag is set, Width must be equal to Height, and Depth must be six. If the CUDA_ARRAY3D_LAYERED flag is also set, then Depth must be a multiple of six.
  • CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA array will be used for texture gather. Texture gather can only be performed on 2D CUDA arrays.

Width, Height and Depth must meet certain size requirements as listed in the following table. All values are specified in elements. Note that for brevity's sake, the full name of the device attribute is not specified. For example, TEXTURE1D_WIDTH refers to the device attribute CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH.

Note that 2D CUDA arrays have different size requirements if the CUDA_ARRAY3D_TEXTURE_GATHER flag is set. In that case, Width and Height must not be greater than CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH and CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT respectively.

View CUDA Toolkit Documentation for a table example

Here are examples of CUDA array descriptions:

Description for a CUDA array of 2048 floats:

View CUDA Toolkit Documentation for a C++ code example

Description for a 64 x 64 CUDA array of floats:

View CUDA Toolkit Documentation for a C++ code example

Description for a width x height x depth CUDA array of 64-bit, 4x16-bit float16's:

View CUDA Toolkit Documentation for a C++ code example

Parameters:
pAllocateArray (CUDA_ARRAY3D_DESCRIPTOR) – 3D array descriptor

Returns:

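A sketch of a 2D layered allocation with the descriptor above, verified via cuArray3DGetDescriptor (documented next); it assumes the bindings expose the CUDA_ARRAY3D_LAYERED flag as a module-level constant, plus the check() helper from the earlier sketches.

    # Reuses check() and driver from the earlier sketch.
    d3 = driver.CUDA_ARRAY3D_DESCRIPTOR()
    d3.Width = 64
    d3.Height = 64
    d3.Depth = 8                                # 8 layers because of the flag
    d3.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    d3.NumChannels = 1
    d3.Flags = driver.CUDA_ARRAY3D_LAYERED      # Depth now counts layers
    arr3 = check(driver.cuArray3DCreate(d3))

    got = check(driver.cuArray3DGetDescriptor(arr3))
    assert got.Depth == 8
    check(driver.cuArrayDestroy(arr3))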
cuda.bindings.driver.cuArray3DGetDescriptor(hArray)

Get a 3D CUDA array descriptor.

Returns in *pArrayDescriptor a descriptor containing information on the format and dimensions of the CUDA array hArray. It is useful for subroutines that have been passed a CUDA array, but need to know the CUDA array parameters for validation or other purposes.

This function may be called on 1D and 2D arrays, in which case the Height and/or Depth members of the descriptor struct will be set to 0.

Parameters:
hArray (CUarray) – 3D array to get descriptor of

Returns:

cuda.bindings.driver.cuMipmappedArrayCreate(CUDA_ARRAY3D_DESCRIPTOR pMipmappedArrayDesc: Optional[CUDA_ARRAY3D_DESCRIPTOR], unsigned int numMipmapLevels)

Creates a CUDA mipmapped array.

Creates a CUDA mipmapped array according to the CUDA_ARRAY3D_DESCRIPTOR structure pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in *pHandle. numMipmapLevels specifies the number of mipmap levels to be allocated. This value is clamped to the range [1, 1 + floor(log2(max(width, height, depth)))].

The CUDA_ARRAY3D_DESCRIPTOR is defined as:

View CUDA Toolkit Documentation for a C++ code example

where:

• Width, Height, and Depth are the width, height, and depth of the CUDA array (in elements); the following types of CUDA arrays can be allocated:
  • A 1D mipmapped array is allocated if Height and Depth extents are both zero.
  • A 2D mipmapped array is allocated if only Depth extent is zero.
  • A 3D mipmapped array is allocated if all three extents are non-zero.
  • A 1D layered CUDA mipmapped array is allocated if only Height is zero and the CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number of layers is determined by the depth extent.
  • A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and the CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number of layers is determined by the depth extent.
  • A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the CUDA_ARRAY3D_CUBEMAP flag is set. Width must be equal to Height, and Depth must be six. A cubemap is a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube. The order of the six layers in memory is the same as that listed in CUarray_cubemap_face.
  • A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, and both CUDA_ARRAY3D_CUBEMAP and CUDA_ARRAY3D_LAYERED flags are set. Width must be equal to Height, and Depth must be a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists of a collection of cubemaps. The first six layers represent the first cubemap, the next six layers form the second cubemap, and so on.
• Format specifies the format of the elements; CUarray_format is defined as:
• View CUDA Toolkit Documentation for a C++ code example
• NumChannels specifies the number of packed components per CUDA array element; it may be 1, 2, or 4;
• Flags may be set to
  • CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set, Depth specifies the number of layers, not the depth of a 3D array.
  • CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of the CUDA mipmapped array. If this flag is not set, cuSurfRefSetArray will fail when attempting to bind a mipmap level of the CUDA mipmapped array to a surface reference.
  • CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, Width must be equal to Height, and Depth must be six. If the CUDA_ARRAY3D_LAYERED flag is also set, then Depth must be a multiple of six.
  • CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather. Texture gather can only be performed on 2D CUDA mipmapped arrays.

Width, Height and Depth must meet certain size requirements as listed in the following table. All values are specified in elements. Note that for brevity's sake, the full name of the device attribute is not specified. For example, TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH.

View CUDA Toolkit Documentation for a table example

Parameters:
• pMipmappedArrayDesc (CUDA_ARRAY3D_DESCRIPTOR) – mipmapped array descriptor
• numMipmapLevels (unsigned int) – Number of mipmap levels

Returns:


cuda.bindings.driver.cuMipmappedArrayGetLevel(hMipmappedArray, unsigned int level)

Gets a mipmap level of a CUDA mipmapped array.

Returns in *pLevelArray a CUDA array that represents a single mipmap level of the CUDA mipmapped array hMipmappedArray.

If level is greater than the maximum number of levels in this mipmapped array, CUDA_ERROR_INVALID_VALUE is returned.

Parameters:
• hMipmappedArray (CUmipmappedArray) – CUDA mipmapped array
• level (unsigned int) – Mipmap level

Returns:

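A sketch of the clamping behavior described above: for a 256 x 256 2D mipmapped array the level count is clamped to 1 + floor(log2(256)) = 9, and each level is retrievable as a plain CUarray (same assumptions and check() helper as the earlier sketches).

    # Reuses check() and driver from the earlier sketch.
    d3 = driver.CUDA_ARRAY3D_DESCRIPTOR()
    d3.Width, d3.Height, d3.Depth = 256, 256, 0   # 2D mipmapped array
    d3.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
    d3.NumChannels = 1
    d3.Flags = 0

    # Requesting 16 levels is fine; the count is clamped to 9.
    mm = check(driver.cuMipmappedArrayCreate(d3, 16))
    level0 = check(driver.cuMipmappedArrayGetLevel(mm, 0))  # a plain CUarray
    check(driver.cuMipmappedArrayDestroy(mm))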
cuda.bindings.driver.cuMipmappedArrayDestroy(hMipmappedArray)

Destroys a CUDA mipmapped array.

Destroys the CUDA mipmapped array hMipmappedArray.

Parameters:
hMipmappedArray (CUmipmappedArray) – Mipmapped array to destroy

Returns:
CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ARRAY_IS_MAPPED, CUDA_ERROR_CONTEXT_IS_DESTROYED

Return type:
CUresult

cuda.bindings.driver.cuMemGetHandleForAddressRange(dptr, size_t size, handleType: CUmemRangeHandleType, unsigned long long flags)

Retrieve handle for an address range.

Get a handle of the specified type to an address range. The address range must have been obtained by a prior call to either cuMemAlloc or cuMemAddressReserve. If the address range was obtained via cuMemAddressReserve, it must also be fully mapped via cuMemMap. On Tegra, the address range must have been obtained by a prior call to either cuMemAllocHost or cuMemHostAlloc.

Users must ensure the dptr and size are aligned to the host page size.

When requesting CUmemRangeHandleType::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, users are expected to query for dma_buf support for the platform by using the CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED device attribute before calling this API. The handle will be interpreted as a pointer to an integer to store the dma_buf file descriptor. Users must ensure the entire address range is backed and mapped when the address range is allocated by cuMemAddressReserve. All the physical allocations backing the address range must be resident on the same device and have identical allocation properties. Users are also expected to retrieve a new handle every time the underlying physical allocation(s) corresponding to a previously queried VA range are changed.

Parameters:
• dptr (CUdeviceptr) – Pointer to a valid CUDA device allocation. Must be aligned to host page size.
• size (size_t) – Length of the address range. Must be aligned to host page size.
• handleType (CUmemRangeHandleType) – Type of handle requested (defines type and size of the handle output parameter)
• flags (unsigned long long) – Reserved, must be zero

Returns:
• CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED
• handle (Any) – Pointer to the location where the returned handle will be stored.

Virtual Memory Management

This section describes the virtual memory management functions of the low-level CUDA driver application programming interface.

cuda.bindings.driver.cuMemAddressReserve(size_t size, size_t alignment, addr, unsigned long long flags)

Allocate an address range reservation.

Reserves a virtual address range based on the given parameters, giving the starting address of the range in ptr. This API requires a system that supports UVA. The size and address parameters must be a multiple of the host page size, and the alignment must be a power of two or zero for default alignment.

Parameters:
• size (size_t) – Size of the reserved virtual address range requested
• alignment (size_t) – Alignment of the reserved virtual address range requested
• addr (CUdeviceptr) – Fixed starting address range requested
• flags (unsigned long long) – Currently unused, must be zero

Returns:

See also

cuMemAddressFree

cuda.bindings.driver.cuMemAddressFree(ptr, size_t size)

Free an address range reservation.

Frees a virtual address range reserved by cuMemAddressReserve. The size must match what was given to cuMemAddressReserve, and the ptr given must match what was returned from cuMemAddressReserve.

Parameters:
• ptr (CUdeviceptr) – Starting address of the virtual address range to free
• size (size_t) – Size of the virtual address region to free

Returns:
CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

Return type:
CUresult

See also

cuMemAddressReserve

cuda.bindings.driver.cuMemCreate(size_t size, CUmemAllocationProp prop: Optional[CUmemAllocationProp], unsigned long long flags)

Create a CUDA memory handle representing a memory allocation of a given size described by the given properties.

This creates a memory allocation on the target device specified through the prop structure. The created allocation will not have any device or host mappings. The generic memory handle for the allocation can be mapped to the address space of the calling process via cuMemMap. This handle cannot be transmitted directly to other processes (see cuMemExportToShareableHandle). On Windows, the caller must also pass an LPSECURITYATTRIBUTE in prop to be associated with this handle, which limits or allows access to this handle for a recipient process (see win32HandleMetaData for more). The size of this allocation must be a multiple of the value given via cuMemGetAllocationGranularity with the CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. To create a CPU allocation targeting a specific host NUMA node, applications must set CUmemAllocationProp::CUmemLocation::type to CU_MEM_LOCATION_TYPE_HOST_NUMA, and CUmemAllocationProp::CUmemLocation::id must specify the NUMA ID of the CPU. On systems where NUMA is not available, CUmemAllocationProp::CUmemLocation::id must be set to 0. Specifying CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT or CU_MEM_LOCATION_TYPE_HOST as the type will result in CUDA_ERROR_INVALID_VALUE.

Applications can set requestedHandleTypes to CU_MEM_HANDLE_TYPE_FABRIC in order to create allocations suitable for sharing within an IMEX domain. An IMEX domain is either an OS instance or a group of securely connected OS instances using the NVIDIA IMEX daemon. An IMEX channel is a global resource within the IMEX domain that represents a logical entity that aims to provide fine-grained accessibility control for the participating processes. When exporter and importer CUDA processes have been granted access to the same IMEX channel, they can securely share memory. If the allocating process does not have access set up for an IMEX channel, attempting to create a CUmemGenericAllocationHandle with CU_MEM_HANDLE_TYPE_FABRIC will result in CUDA_ERROR_NOT_PERMITTED. The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels.

If CUmemAllocationProp::allocFlags::usage contains the CU_MEM_CREATE_USAGE_TILE_POOL flag, then the memory allocation is intended only to be used as a backing tile pool for sparse CUDA arrays and sparse CUDA mipmapped arrays (see cuMemMapArrayAsync).

Parameters:
• size (size_t) – Size of the allocation requested
• prop (CUmemAllocationProp) – Properties of the allocation to create.
• flags (unsigned long long) – flags for future use, must be zero now.

Returns:

cuda.bindings.driver.cuMemRelease(handle)

Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate.

Frees the memory that was allocated on a device through cuMemCreate.

The memory allocation will be freed when all outstanding mappings to the memory are unmapped and when all outstanding references to the handle (including its shareable counterparts) are also released. The generic memory handle can be freed when there are still outstanding mappings made with this handle. Each time a recipient process imports a shareable handle, it needs to pair it with cuMemRelease for the handle to be freed. If handle is not a valid handle, the behavior is undefined.

Parameters:
handle (CUmemGenericAllocationHandle) – Value of handle which was returned previously by cuMemCreate.

Returns:
CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

Return type:
CUresult

See also

cuMemCreate

cuda.bindings.driver.cuMemMap(ptr, size_t size, size_t offset, handle, unsigned long long flags)

Maps an allocation handle to a reserved virtual address range.

Maps bytes of memory represented by handle starting from byte offset to size to address range [addr, addr + size]. This range must be an address reservation previously reserved with cuMemAddressReserve, and offset + size must be less than the size of the memory allocation. ptr, size, and offset must each be a multiple of the value given via cuMemGetAllocationGranularity with the CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. If handle represents a multicast object, ptr, size and offset must be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_MINIMUM_GRANULARITY. For best performance however, it is recommended that ptr, size and offset be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_RECOMMENDED_GRANULARITY.

Please note that calling cuMemMap does not make the address accessible; the caller needs to update accessibility of a contiguous mapped VA range by calling cuMemSetAccess.

Once a recipient process obtains a shareable memory handle from cuMemImportFromShareableHandle, the process must use cuMemMap to map the memory into its address ranges before setting accessibility with cuMemSetAccess.

cuMemMap can only create mappings on VA range reservations that are not currently mapped.

Parameters:
• ptr (CUdeviceptr) – Address where memory will be mapped.
• size (size_t) – Size of the memory mapping.
• offset (size_t) – Offset into the memory represented by handle
• handle (CUmemGenericAllocationHandle) – Handle to a shareable memory
• flags (unsigned long long) – flags for future use, must be zero now.

Returns:
CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

Return type:
CUresult

cuda.bindings.driver.cuMemMapArrayAsync(mapInfoList: Optional[Tuple[CUarrayMapInfo] | List[CUarrayMapInfo]], unsigned int count, hStream)

Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays.

Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays. Each operation is specified by a CUarrayMapInfo entry in the mapInfoList array of size count. The structure CUarrayMapInfo is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where resourceType specifies the type of resource to be operated on. If resourceType is set to CUresourcetype::CU_RESOURCE_TYPE_ARRAY then CUarrayMapInfo::resource::array must be set to a valid sparse CUDA array handle. The CUDA array must be either a 2D, 2D layered or 3D CUDA array and must have been allocated using cuArrayCreate or cuArray3DCreate with the flag CUDA_ARRAY3D_SPARSE or CUDA_ARRAY3D_DEFERRED_MAPPING. For CUDA arrays obtained using cuMipmappedArrayGetLevel, CUDA_ERROR_INVALID_VALUE will be returned. If resourceType is set to CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY then CUarrayMapInfo::resource::mipmap must be set to a valid sparse CUDA mipmapped array handle. The CUDA mipmapped array must be either a 2D, 2D layered or 3D CUDA mipmapped array and must have been allocated using cuMipmappedArrayCreate with the flag CUDA_ARRAY3D_SPARSE or CUDA_ARRAY3D_DEFERRED_MAPPING.

subresourceType specifies the type of subresource within the resource. CUarraySparseSubresourceType_enum is defined as:

View CUDA Toolkit Documentation for a C++ code example

where CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL indicates a sparse-miplevel which spans at least one tile in every dimension. The remaining miplevels which are too small to span at least one tile in any dimension constitute the mip tail region, as indicated by the CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL subresource type.

If subresourceType is set to CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL then the CUarrayMapInfo::subresource::sparseLevel struct must contain valid array subregion offsets and extents. The CUarrayMapInfo::subresource::sparseLevel::offsetX, CUarrayMapInfo::subresource::sparseLevel::offsetY and CUarrayMapInfo::subresource::sparseLevel::offsetZ must specify valid X, Y and Z offsets respectively. The CUarrayMapInfo::subresource::sparseLevel::extentWidth, CUarrayMapInfo::subresource::sparseLevel::extentHeight and CUarrayMapInfo::subresource::sparseLevel::extentDepth must specify valid width, height and depth extents respectively. These offsets and extents must be aligned to the corresponding tile dimension. For CUDA mipmapped arrays, CUarrayMapInfo::subresource::sparseLevel::level must specify a valid mip level index; otherwise, it must be zero. For layered CUDA arrays and layered CUDA mipmapped arrays, CUarrayMapInfo::subresource::sparseLevel::layer must specify a valid layer index; otherwise, it must be zero. CUarrayMapInfo::subresource::sparseLevel::offsetZ must be zero and CUarrayMapInfo::subresource::sparseLevel::extentDepth must be set to 1 for 2D and 2D layered CUDA arrays and CUDA mipmapped arrays. Tile extents can be obtained by calling cuArrayGetSparseProperties and cuMipmappedArrayGetSparseProperties.

If subresourceType is set to CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL then the CUarrayMapInfo::subresource::miptail struct must contain a valid mip tail offset in CUarrayMapInfo::subresource::miptail::offset and size in CUarrayMapInfo::subresource::miptail::size. Both the mip tail offset and mip tail size must be aligned to the tile size. For layered CUDA mipmapped arrays which don't have the flag CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL set in flags as returned by cuMipmappedArrayGetSparseProperties, CUarrayMapInfo::subresource::miptail::layer must specify a valid layer index; otherwise, it must be zero.

If CUarrayMapInfo::resource::array or CUarrayMapInfo::resource::mipmap was created with the CUDA_ARRAY3D_DEFERRED_MAPPING flag set, the subresourceType and the contents of CUarrayMapInfo::subresource will be ignored.

memOperationType specifies the type of operation. CUmemOperationType is defined as:

View CUDA Toolkit Documentation for a C++ code example

If memOperationType is set to CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP then the subresource will be mapped onto the tile pool memory specified by CUarrayMapInfo::memHandle at offset offset. The tile pool allocation has to be created by specifying the CU_MEM_CREATE_USAGE_TILE_POOL flag when calling cuMemCreate. Also, memHandleType must be set to CUmemHandleType::CU_MEM_HANDLE_TYPE_GENERIC.

If memOperationType is set to CUmemOperationType::CU_MEM_OPERATION_TYPE_UNMAP then an unmapping operation is performed. CUarrayMapInfo::memHandle must be NULL.

deviceBitMask specifies the list of devices that must map or unmap physical memory. Currently, this mask must have exactly one bit set, and the corresponding device must match the device associated with the stream. If memOperationType is set to CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP, the device must also match the device associated with the tile pool memory allocation as specified by CUarrayMapInfo::memHandle.

flags and reserved[] are unused and must be set to zero.

Parameters:
• mapInfoList (List[CUarrayMapInfo]) – List of CUarrayMapInfo
• count (unsigned int) – Count of CUarrayMapInfo in mapInfoList
• hStream (CUstream or cudaStream_t) – Stream identifier

Returns:
CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:
CUresult

cuda.bindings.driver.cuMemUnmap(ptr, size_t size)

Unmap the backing memory of a given address range.

The range must be the entire contiguous address range that was mapped to. In other words, cuMemUnmap cannot unmap a sub-range of an address range mapped by cuMemCreate / cuMemMap. Any backing memory allocations will be freed if there are no existing mappings and there are no unreleased memory handles.

When cuMemUnmap returns successfully the address range is converted to an address reservation and can be used for future calls to cuMemMap. Any new mapping to this virtual address will need to have access granted through cuMemSetAccess, as all mappings start with no accessibility setup.

Parameters:
• ptr (CUdeviceptr) – Starting address for the virtual address range to unmap
• size (size_t) – Size of the virtual address range to unmap

Returns:
CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

Return type:
CUresult

    +cuda.bindings.driver.cuMemSetAccess(ptr, size_t size, desc: Optional[Tuple[CUmemAccessDesc] | List[CUmemAccessDesc]], size_t count)#
    +

    Set the access flags for each location specified in desc for the given virtual address range.

    +

    Given the virtual address range via ptr and size, and the locations in the array given by desc and count, set the access flags for the target locations. The range must be a fully mapped address range containing all allocations created by cuMemMap / cuMemCreate. Users cannot specify CU_MEM_LOCATION_TYPE_HOST_NUMA accessibility for allocations created with other location types. Note: When CUmemAccessDesc::CUmemLocation::type is CU_MEM_LOCATION_TYPE_HOST_NUMA, CUmemAccessDesc::CUmemLocation::id is ignored. When setting the access flags for a virtual address range mapping a multicast object, ptr and size must be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_MINIMUM_GRANULARITY. For best performance however, it is recommended that ptr and size be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_RECOMMENDED_GRANULARITY.
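
    A minimal sketch of granting and reading back access, assuming ptr and size describe the fully mapped range from the cuMemUnmap sketch above and reusing its check() helper:

        from cuda.bindings import driver

        # Grant device 0 read/write access to the mapped range.
        desc = driver.CUmemAccessDesc()
        desc.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        desc.location.id = 0  # device ordinal to grant access to
        desc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
        check(driver.cuMemSetAccess(ptr, size, [desc], 1))

        # The flags in effect can be read back per location with cuMemGetAccess.
        loc = driver.CUmemLocation()
        loc.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        loc.id = 0
        flags, = check(driver.cuMemGetAccess(loc, ptr))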

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_SUPPORTED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemGetAccess(CUmemLocation location: Optional[CUmemLocation], ptr)#
    +

    Get the access flags set for the given location and ptr.

    +
    +
    Parameters:
    +
      +
    • location (CUmemLocation) – Location in which to check the flags for

    • +
    • ptr (CUdeviceptr) – Address in which to check the access flags for

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cuMemSetAccess

    +
    +
    + +
    +
    +cuda.bindings.driver.cuMemExportToShareableHandle(handle, handleType: CUmemAllocationHandleType, unsigned long long flags)#
    +

    Exports an allocation to a requested shareable handle type.

    +

    Given a CUDA memory handle, create a shareable memory allocation handle that can be used to share the memory with other processes. The recipient process can convert the shareable handle back into a CUDA memory handle using cuMemImportFromShareableHandle and map it with cuMemMap. The implementation of what this handle is and how it can be transferred is defined by the requested handle type in handleType.

    +

    Once all shareable handles are closed and the allocation is released, the allocated memory referenced will be released back to the OS and uses of the CUDA handle afterward will lead to undefined behavior.

    +

    This API can also be used in conjunction with other APIs (e.g. Vulkan, OpenGL) that support importing memory from the shareable type.
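
    A sketch of the export/import round trip on Linux, reusing check() from the earlier sketches. It assumes (unlike the first sketch) that handle was created from a CUmemAllocationProp whose requestedHandleTypes included CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR:

        import os
        from cuda.bindings import driver

        htype = driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR
        fd, = check(driver.cuMemExportToShareableHandle(handle, htype, 0))

        # ... send fd to another process (e.g. over a Unix socket with SCM_RIGHTS) ...

        # In the recipient process:
        imported, = check(driver.cuMemImportFromShareableHandle(fd, htype))
        os.close(fd)  # the fd is no longer needed once imported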

    +
    +
    Parameters:
    +
      +
    • handle (CUmemGenericAllocationHandle) – CUDA handle for the memory allocation

    • +
    • handleType (CUmemAllocationHandleType) – Type of shareable handle requested (defines type and size of the shareableHandle output parameter)

    • +
    • flags (unsigned long long) – Reserved, must be zero

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemImportFromShareableHandle(osHandle, shHandleType: CUmemAllocationHandleType)#
    +

    Imports an allocation from a requested shareable handle type.

    +

    If the current process cannot support the memory described by this shareable handle, this API will error as CUDA_ERROR_NOT_SUPPORTED.

    +

    If shHandleType is CU_MEM_HANDLE_TYPE_FABRIC and the importer process has not been granted access to the same IMEX channel as the exporter process, this API will error as CUDA_ERROR_NOT_PERMITTED.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    Importing shareable handles exported from some graphics APIs (Vulkan, OpenGL, etc.) created on devices under an SLI group may not be supported, and thus this API will return CUDA_ERROR_NOT_SUPPORTED. There is no guarantee that the contents of handle will be the same CUDA memory handle for the same given OS shareable handle, or the same underlying allocation.

    +
    + +
    +
    +cuda.bindings.driver.cuMemGetAllocationGranularity(CUmemAllocationProp prop: Optional[CUmemAllocationProp], option: CUmemAllocationGranularity_flags)#
    +

    Calculates either the minimal or recommended granularity.

    +

    Calculates either the minimal or recommended granularity for a given allocation specification and returns it in granularity. This granularity can be used as a multiple for alignment, size, or address mapping.
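
    For example, rounding an arbitrary request up to a valid allocation size (reusing prop and check() from the cuMemUnmap sketch above):

        from cuda.bindings import driver

        gran, = check(driver.cuMemGetAllocationGranularity(
            prop, driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM))
        requested = 1 << 20
        padded = ((requested + gran - 1) // gran) * gran  # a multiple of the granularity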

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cuMemCreate, cuMemMap

    +
    +
    + +
    +
    +cuda.bindings.driver.cuMemGetAllocationPropertiesFromHandle(handle)#
    +

    Retrieve the contents of the property structure defining properties for this handle.

    +
    +
    Parameters:
    +

    handle (CUmemGenericAllocationHandle) – Handle on which to perform the query

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemRetainAllocationHandle(addr)#
    +

    Given an address addr, returns the allocation handle of the backing memory allocation.

    +

    The handle is guaranteed to be the same handle value used to map the memory. If the address requested is not mapped, the function will fail. The returned handle must be released with a corresponding number of calls to cuMemRelease.

    +
    +
    Parameters:
    +

    addr (Any) – Memory address to query that has previously been mapped.

    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    The address addr can be any address in a range previously mapped by cuMemMap, and not necessarily the start address.

    +
    + +
    +
    +

    Stream Ordered Memory Allocator#

    +

    This section describes the stream ordered memory allocator exposed by the low-level CUDA driver application programming interface.

    +

    Overview

    +

    The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use-before-allocation or use-after-free error occurs and the behavior is undefined.

    +

    The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee.

    +

    Supported Platforms

    +

    Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED.
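
    A minimal sketch of that capability check, reusing dev and check() from the earlier sketches:

        from cuda.bindings import driver

        supported, = check(driver.cuDeviceGetAttribute(
            driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED, dev))
        if not supported:
            raise RuntimeError("stream ordered memory allocator not supported on this device")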

    +
    +
    +cuda.bindings.driver.cuMemFreeAsync(dptr, hStream)#
    +

    Frees memory with stream ordered semantics.

    +

    Inserts a free operation into hStream. The allocation must not be accessed after stream execution reaches the free. After this API returns, accessing the memory from any subsequent work launched on the GPU or querying its pointer attributes results in undefined behavior.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), CUDA_ERROR_NOT_SUPPORTED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    +

    Notes

    +

    During stream capture, this function results in the creation of a free node and must therefore be passed the address of a graph allocation.

    +
    + +
    +
    +cuda.bindings.driver.cuMemAllocAsync(size_t bytesize, hStream)#
    +

    Allocates memory with stream ordered semantics.

    +

    Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the memory pool current to the stream’s device.
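
    A minimal stream-ordered allocate/use/free sketch, reusing check() from the earlier sketches:

        from cuda.bindings import driver

        stream, = check(driver.cuStreamCreate(0))
        dptr, = check(driver.cuMemAllocAsync(1 << 20, stream))
        # ... launch work on `stream` that uses dptr ...
        check(driver.cuMemFreeAsync(dptr, stream))
        # Synchronize before the host assumes the free has completed.
        check(driver.cuStreamSynchronize(stream))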

    +
    +
    Parameters:
    +
      +
    • bytesize (size_t) – Number of bytes to allocate

    • +
    • hStream (CUstream or cudaStream_t) – The stream establishing the stream ordering contract and the memory pool to allocate from

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    The default memory pool of a device contains device memory from that device.

    +

    Basic stream ordering allows future work submitted into the same stream to use the allocation. Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation operation completes before work submitted in a separate stream runs.

    +

    During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool’s properties are used to set the node’s creation parameters.

    +
    + +
    +
    +cuda.bindings.driver.cuMemPoolTrimTo(pool, size_t minBytesToKeep)#
    +

    Tries to release memory back to the OS.

    +

    Releases memory back to the OS until the pool contains fewer than minBytesToKeep reserved bytes, or there is no more memory that the allocator can safely release. The allocator cannot release OS allocations that back outstanding asynchronous allocations. The OS allocations may happen at different granularity from the user allocations.
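
    For instance, after a burst of freed allocations, a sketch that returns all cached memory to the OS (reusing dev and check() from the earlier sketches):

        from cuda.bindings import driver

        pool, = check(driver.cuDeviceGetDefaultMemPool(dev))
        # minBytesToKeep = 0: release everything the allocator can safely release.
        check(driver.cuMemPoolTrimTo(pool, 0))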

    +
    +
    Parameters:
    +
      +
    • pool (CUmemoryPool or cudaMemPool_t) – The memory pool to trim

    • +
    • minBytesToKeep (size_t) – If the pool has less than minBytesToKeep reserved, the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have at least minBytesToKeep bytes reserved after the operation.

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +

    Notes

    +

    Allocations that have not been freed count as outstanding.

    +

    Allocations that have been asynchronously freed but whose completion has not been observed on the host (e.g. by a synchronize) can count as outstanding.

    +
    + +
    +
    +cuda.bindings.driver.cuMemPoolSetAttribute(pool, attr: CUmemPool_attribute, value)#
    +

    Sets attributes of a memory pool.

    +

    Supported attributes are:

    +
      +
    • CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

    • CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync. (default enabled)

    • CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) Reset the high watermark that tracks the amount of backing memory that was allocated for the memory pool. It is illegal to set this attribute to a non-zero value.

    • CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) Reset the high watermark that tracks the amount of used memory that was allocated for the memory pool.
    +
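
    For example, raising the release threshold so the pool caches memory across synchronizations (reusing pool and check() from the cuMemPoolTrimTo sketch; the cuuint64_t wrapper is how these bindings pass 64-bit attribute values):

        from cuda.bindings import driver

        # Keep up to 64 MiB cached in the pool instead of releasing it at every sync.
        check(driver.cuMemPoolSetAttribute(
            pool,
            driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
            driver.cuuint64_t(64 * 1024 * 1024)))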
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemPoolGetAttribute(pool, attr: CUmemPool_attribute)#
    +

    Gets attributes of a memory pool.

    +

    Supported attributes are:

    +
      +
    • CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

    • CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

    • CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync. (default enabled)

    • CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool.

    • CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset.

    • CU_MEMPOOL_ATTR_USED_MEM_CURRENT: (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.

    • CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application.
    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemPoolSetAccess(pool, map: Optional[Tuple[CUmemAccessDesc] | List[CUmemAccessDesc]], size_t count)#
    +

    Controls visibility of pools between devices.

    +
    +
    Parameters:
    +
      +
    • pool (CUmemoryPool or cudaMemPool_t) – The pool being modified

    • +
    • map (List[CUmemAccessDesc]) – Array of access descriptors. Each descriptor instructs the access to enable for a single GPU.

    • +
    • count (size_t) – Number of descriptors in the map array.

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemPoolGetAccess(memPool, CUmemLocation location: Optional[CUmemLocation])#
    +

    Returns the accessibility of a pool from a device.

    +

    Returns the accessibility of the pool’s memory from the specified location.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

      +
    • CUresult

    • +
    • flags (CUmemAccess_flags) – the accessibility of the pool from the specified location

    • +
    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemPoolCreate(CUmemPoolProps poolProps: Optional[CUmemPoolProps])#
    +

    Creates a memory pool.

    +

    Creates a CUDA memory pool and returns the handle in pool. The poolProps determines the properties of the pool such as the backing device and IPC capabilities.

    +

    To create a memory pool targeting a specific host NUMA node, applications must set CUmemPoolProps::CUmemLocation::type to CU_MEM_LOCATION_TYPE_HOST_NUMA and CUmemPoolProps::CUmemLocation::id must specify the NUMA ID of the host memory node. Specifying CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT or CU_MEM_LOCATION_TYPE_HOST as the CUmemPoolProps::CUmemLocation::type will result in CUDA_ERROR_INVALID_VALUE. By default, the pool’s memory will be accessible from the device it is allocated on. In the case of pools created with CU_MEM_LOCATION_TYPE_HOST_NUMA, their default accessibility will be from the host CPU. Applications can control the maximum size of the pool by specifying a non-zero value for maxSize. If set to 0, the maximum size of the pool will default to a system dependent value.

    +

    Applications can set handleTypes to CU_MEM_HANDLE_TYPE_FABRIC in order to create a CUmemoryPool suitable for sharing within an IMEX domain. An IMEX domain is either an OS instance or a group of securely connected OS instances using the NVIDIA IMEX daemon. An IMEX channel is a global resource within the IMEX domain that represents a logical entity that aims to provide fine-grained accessibility control for the participating processes. When exporter and importer CUDA processes have been granted access to the same IMEX channel, they can securely share memory. If the allocating process does not have access set up for an IMEX channel, attempting to export a CUmemoryPool with CU_MEM_HANDLE_TYPE_FABRIC will result in CUDA_ERROR_NOT_PERMITTED. The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels.
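
    A minimal sketch of creating, using and destroying an explicit pool on device 0, reusing stream and check() from the earlier sketches:

        from cuda.bindings import driver

        props = driver.CUmemPoolProps()
        props.allocType = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        props.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        props.location.id = 0   # backing device ordinal
        props.maxSize = 0       # 0: system-dependent default maximum
        pool, = check(driver.cuMemPoolCreate(props))

        dptr, = check(driver.cuMemAllocFromPoolAsync(1 << 20, pool, stream))
        # ... use dptr in work submitted to `stream` ...
        check(driver.cuMemFreeAsync(dptr, stream))
        check(driver.cuStreamSynchronize(stream))
        check(driver.cuMemPoolDestroy(pool))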

    +
    +
    Parameters:
    +

    poolProps (CUmemPoolProps) – None

    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    Specifying CU_MEM_HANDLE_TYPE_NONE creates a memory pool that will not support IPC.

    +
    + +
    +
    +cuda.bindings.driver.cuMemPoolDestroy(pool)#
    +

    Destroys the specified memory pool.

    +

    If any pointers obtained from this pool haven’t been freed or the pool has free operations that haven’t completed when cuMemPoolDestroy is invoked, the function will return immediately and the resources associated with the pool will be released automatically once there are no more outstanding allocations.

    +

    Destroying the current mempool of a device sets the default mempool of that device as the current mempool for that device.

    +
    +
    Parameters:
    +

    pool (CUmemoryPool or cudaMemPool_t) – None

    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +

    Notes

    +

    A device’s default memory pool cannot be destroyed.

    +
    + +
    +
    +cuda.bindings.driver.cuMemAllocFromPoolAsync(size_t bytesize, pool, hStream)#
    +

    Allocates memory from a specified pool with stream ordered semantics.

    +

    Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the specified memory pool.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool’s properties are used to set the node’s creation parameters.

    +
    + +
    +
    +cuda.bindings.driver.cuMemPoolExportToShareableHandle(pool, handleType: CUmemAllocationHandleType, unsigned long long flags)#
    +

    Exports a memory pool to the requested handle type.

    +

    Given an IPC capable mempool, create an OS handle to share the pool with another process. A recipient process can convert the shareable handle into a mempool with cuMemPoolImportFromShareableHandle. Individual pointers can then be shared with the cuMemPoolExportPointer and cuMemPoolImportPointer APIs. The implementation of what the shareable handle is and how it can be transferred is defined by the requested handle type.
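
    A sketch of the pool and pointer sharing flow on Linux, reusing check() from the earlier sketches. It assumes (unlike the pool sketch above) that pool was created with handleTypes set to CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR and that dptr was allocated from it:

        from cuda.bindings import driver

        htype = driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR
        pool_fd, = check(driver.cuMemPoolExportToShareableHandle(pool, htype, 0))
        export_data, = check(driver.cuMemPoolExportPointer(dptr))

        # ... transfer pool_fd (via SCM_RIGHTS) and export_data (any IPC) ...

        # In the recipient process:
        peer_pool, = check(driver.cuMemPoolImportFromShareableHandle(pool_fd, htype, 0))
        peer_ptr, = check(driver.cuMemPoolImportPointer(peer_pool, export_data))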

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than CU_MEM_HANDLE_TYPE_NONE.

    +
    + +
    +
    +cuda.bindings.driver.cuMemPoolImportFromShareableHandle(handle, handleType: CUmemAllocationHandleType, unsigned long long flags)#
    +

    Imports a memory pool from a shared handle.

    +

    Specific allocations can be imported from the imported pool with cuMemPoolImportPointer.

    +

    If handleType is CU_MEM_HANDLE_TYPE_FABRIC and the importer process has not been granted access to the same IMEX channel as the exporter process, this API will error as CUDA_ERROR_NOT_PERMITTED.

    +
    +
    Parameters:
    +
      +
    • handle (Any) – OS handle of the pool to open

    • +
    • handleType (CUmemAllocationHandleType) – The type of handle being imported

    • +
    • flags (unsigned long long) – must be 0

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    Imported memory pools do not support creating new allocations. As such, imported memory pools may not be used in cuDeviceSetMemPool or cuMemAllocFromPoolAsync calls.

    +
    + +
    +
    +cuda.bindings.driver.cuMemPoolExportPointer(ptr)#
    +

    Export data to share a memory pool allocation between processes.

    +

    Constructs shareData_out for sharing a specific allocation from an already shared memory pool. The recipient process can import the allocation with the cuMemPoolImportPointer API. The data is not a handle and may be shared through any IPC mechanism.

    +
    +
    Parameters:
    +

    ptr (CUdeviceptr) – pointer to memory being exported

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemPoolImportPointer(pool, CUmemPoolPtrExportData shareData: Optional[CUmemPoolPtrExportData])#
    +

    Import a memory pool allocation from another process.

    +

    Returns in ptr_out a pointer to the imported memory. The imported memory must not be accessed before the allocation operation completes in the exporting process. The imported memory must be freed from all importing processes before being freed in the exporting process. The pointer may be freed with cuMemFree or cuMemFreeAsync. If cuMemFreeAsync is used, the free must be completed on the importing process before the free operation on the exporting process.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    The cuMemFreeAsync API may be used in the exporting process before the cuMemFreeAsync operation completes in its stream, as long as the cuMemFreeAsync in the exporting process specifies a stream with a stream dependency on the importing process’s cuMemFreeAsync.

    +
    + +
    +
    +

    Multicast Object Management#

    +

    This section describes the CUDA multicast object operations exposed by the low-level CUDA driver application programming interface.

    +

    Overview

    +

    A multicast object created via cuMulticastCreate enables certain memory operations to be broadcast to a team of devices. Devices can be added to a multicast object via cuMulticastAddDevice. Memory can be bound on each participating device via either cuMulticastBindMem or cuMulticastBindAddr. Multicast objects can be mapped into a device’s virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess).

    +

    Supported Platforms

    +

    Support for multicast on a specific device can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED.

    +
    +
    +cuda.bindings.driver.cuMulticastCreate(CUmulticastObjectProp prop: Optional[CUmulticastObjectProp])#
    +

    Create a generic allocation handle representing a multicast object described by the given properties.

    +

    This creates a multicast object as described by prop. The number of participating devices is specified by numDevices. Devices can be added to the multicast object via cuMulticastAddDevice. All participating devices must be added to the multicast object before memory can be bound to it. Memory is bound to the multicast object via either cuMulticastBindMem or cuMulticastBindAddr, and can be unbound via cuMulticastUnbind. The total amount of memory that can be bound per device is specified by size. This size must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, the size should be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_RECOMMENDED.

    +

    After all participating devices have been added, multicast objects can also be mapped to a device’s virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess). Multicast objects can also be shared with other processes by requesting a shareable handle via cuMemExportToShareableHandle. Note that the desired types of shareable handles must be specified in the bitmask handleTypes. Multicast objects can be released using the virtual memory management API cuMemRelease.
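
    A sketch of creating a two-device multicast team, reusing check() from the earlier sketches and assuming a system with two multicast-capable devices:

        from cuda.bindings import driver

        mc_prop = driver.CUmulticastObjectProp()
        mc_prop.numDevices = 2
        mc_prop.handleTypes = driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR

        gran, = check(driver.cuMulticastGetGranularity(
            mc_prop, driver.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_MINIMUM))
        mc_prop.size = gran  # must be a multiple of the minimum granularity

        mc, = check(driver.cuMulticastCreate(mc_prop))
        for ordinal in (0, 1):
            d, = check(driver.cuDeviceGet(ordinal))
            check(driver.cuMulticastAddDevice(mc, d))
        # Memory created with cuMemCreate on each device can now be bound, e.g.:
        # check(driver.cuMulticastBindMem(mc, 0, mem_handle, 0, gran, 0))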

    +
    +
    Parameters:
    +

    prop (CUmulticastObjectProp) – Properties of the multicast object to create.

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMulticastAddDevice(mcHandle, dev)#
    +

    Associate a device to a multicast object.

    +

    Associates a device to a multicast object. The added device will be a part of the multicast team of size specified by numDevices during cuMulticastCreate. The association of the device to the multicast object is permanent during the lifetime of the multicast object. All devices must be added to the multicast team before any memory can be bound to any device in the team. Any calls to cuMulticastBindMem or cuMulticastBindAddr will block until all devices have been added. Similarly all devices must be added to the multicast team before a virtual address range can be mapped to the multicast object. A call to cuMemMap will block until all devices have been added.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMulticastBindMem(mcHandle, size_t mcOffset, memHandle, size_t memOffset, size_t size, unsigned long long flags)#
    +

    Bind a memory allocation represented by a handle to a multicast object.

    +

    Binds a memory allocation specified by memHandle and created via cuMemCreate to a multicast object represented by mcHandle and created via cuMulticastCreate. The intended size of the bind, the offset in the multicast range mcOffset as well as the offset in the memory memOffset must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, size, mcOffset and memOffset should be aligned to the granularity of the memory allocation (see cuMemGetAllocationGranularity) or to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_RECOMMENDED.

    +

    The size + memOffset cannot be larger than the size of the allocated memory. Similarly the size + mcOffset cannot be larger than the size of the multicast object. The memory allocation must have been created on one of the devices that was added to the multicast team via cuMulticastAddDevice. Externally shareable as well as imported multicast objects can be bound only to externally shareable memory. Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are insufficient resources required to perform the bind. This call may also return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not initialized or running.

    +
    +
    Parameters:
    +
      +
    • mcHandle (CUmemGenericAllocationHandle) – Handle representing a multicast object.

    • +
    • mcOffset (size_t) – Offset into the multicast object for attachment.

    • +
    • memHandle (CUmemGenericAllocationHandle) – Handle representing a memory allocation.

    • +
    • memOffset (size_t) – Offset into the memory for attachment.

    • +
    • size (size_t) – Size of the memory that will be bound to the multicast object.

    • +
    • flags (unsigned long long) – Flags for future use, must be zero for now.

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_SYSTEM_NOT_READY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMulticastBindAddr(mcHandle, size_t mcOffset, memptr, size_t size, unsigned long long flags)#
    +

    Bind a memory allocation represented by a virtual address to a multicast object.

    +

    Binds a memory allocation specified by its mapped address memptr to a multicast object represented by mcHandle. The memory must have been allocated via cuMemCreate or cudaMallocAsync. The intended size of the bind, the offset in the multicast range mcOffset and memptr must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, size, mcOffset and memptr should be aligned to the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_RECOMMENDED.

    +

    The size cannot be larger than the size of the allocated memory. Similarly the size + mcOffset cannot be larger than the total size of the multicast object. The memory allocation must have been created on one of the devices that was added to the multicast team via cuMulticastAddDevice. Externally shareable as well as imported multicast objects can be bound only to externally shareable memory. Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are insufficient resources required to perform the bind. This call may also return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not initialized or running.

    +
    +
    Parameters:
    +
      +
    • mcHandle (CUmemGenericAllocationHandle) – Handle representing a multicast object.

    • +
    • mcOffset (size_t) – Offset into the multicast VA range for attachment.

    • +
    • memptr (CUdeviceptr) – Virtual address of the memory allocation.

    • +
    • size (size_t) – Size of memory that will be bound to the multicast object.

    • +
    • flags (unsigned long long) – Flags for future use, must be zero for now.

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_OUT_OF_MEMORY, CUDA_ERROR_SYSTEM_NOT_READY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMulticastUnbind(mcHandle, dev, size_t mcOffset, size_t size)#
    +

    Unbind any memory allocations bound to a multicast object at a given offset and up to a given size.

    +

    Unbinds any memory allocations hosted on dev and bound to a multicast object at mcOffset and up to a given size. The intended size of the unbind and the offset in the multicast range (mcOffset) must be a multiple of the value returned by cuMulticastGetGranularity with the flag CU_MULTICAST_GRANULARITY_MINIMUM. The size + mcOffset cannot be larger than the total size of the multicast object.

    +
    +
    Parameters:
    +
      +
    • mcHandle (CUmemGenericAllocationHandle) – Handle representing a multicast object.

    • +
    • dev (CUdevice) – Device that hosts the memory allocation.

    • +
    • mcOffset (size_t) – Offset into the multicast object.

    • +
    • size (size_t) – Desired size to unbind.

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_PERMITTED, CUDA_ERROR_NOT_SUPPORTED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +

    Notes

    +

    Warning: The mcOffset and the size must match the corresponding values specified during the bind call. Any other values may result in undefined behavior.

    +
    + +
    +
    +cuda.bindings.driver.cuMulticastGetGranularity(CUmulticastObjectProp prop: Optional[CUmulticastObjectProp], option: CUmulticastGranularity_flags)#
    +

    Calculates either the minimal or recommended granularity for a multicast object.

    +

    Calculates either the minimal or recommended granularity for a given set of multicast object properties and returns it in granularity. This granularity can be used as a multiple for size, bind offsets and address mappings of the multicast object.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +

    Unified Addressing#

    +

    This section describes the unified addressing functions of the low-level CUDA driver application programming interface.

    +

    Overview

    +

    CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer – the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below).

    +

    Supported Platforms

    +

    Whether or not a device supports unified addressing may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING.

    +

    Unified addressing is automatically enabled in 64-bit processes.

    +

    Looking Up Information from Pointer Values

    +

    It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cuPointerGetAttribute().
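
    A minimal sketch of such a lookup, reusing dptr and check() from the earlier sketches:

        from cuda.bindings import driver

        memtype, = check(driver.cuPointerGetAttribute(
            driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, dptr))
        if memtype == driver.CUmemorytype.CU_MEMORYTYPE_DEVICE:
            # For device memory, also look up the owning device ordinal.
            ordinal, = check(driver.cuPointerGetAttribute(
                driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL, dptr))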

    +

    Since pointers are unique, it is not necessary to specify information about the pointers specified to the various copy functions in the CUDA API. The function cuMemcpy() may be used to perform a copy between two pointers, ignoring whether they point to host or device memory (making cuMemcpyHtoD(), cuMemcpyDtoD(), and cuMemcpyDtoH() unnecessary for devices supporting unified addressing). For multidimensional copies, the memory type CU_MEMORYTYPE_UNIFIED may be used to specify that the CUDA driver should infer the location of the pointer from its value.

    +

    Automatic Mapping of Host Allocated Host Memory

    +

    All host memory allocated in all contexts using cuMemAllocHost() and cuMemHostAlloc() is always directly accessible from all contexts on all devices that support unified addressing. This is the case regardless of whether or not the flags CU_MEMHOSTALLOC_PORTABLE and CU_MEMHOSTALLOC_DEVICEMAP are specified.

    +

    The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host, so it is not necessary to call cuMemHostGetDevicePointer() to get the device pointer for these allocations.

    +

    Note that this is not the case for memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below.

    +

    Automatic Registration of Peer Memory

    +

    Upon enabling direct access from a context that supports unified addressing to another peer context that supports unified addressing using cuCtxEnablePeerAccess() all memory allocated in the peer context using cuMemAlloc() and cuMemAllocPitch() will immediately be accessible by the current context. The device pointer value through which any peer memory may be accessed in the current context is the same pointer value through which that memory may be accessed in the peer context.

    +

    Exceptions, Disjoint Addressing

    +

    Not all memory may be accessed on devices through the same pointer value through which it is accessed on the host. These exceptions are host memory registered using cuMemHostRegister() and host memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all contexts that support unified addressing.

    +

    This device address may be queried using cuMemHostGetDevicePointer() when a context using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory through cuMemcpy() and similar functions using the CU_MEMORYTYPE_UNIFIED memory type.

    +
    +
    +cuda.bindings.driver.cuPointerGetAttribute(attribute: CUpointer_attribute, ptr)#
    +

    Returns information about a pointer.

    +

    The supported attributes are:

    +
      +
    • CU_POINTER_ATTRIBUTE_CONTEXT:

    • Returns in *data the CUcontext in which ptr was allocated or registered. The type of data must be CUcontext *.

    • If ptr was not allocated by, mapped by, or registered with a CUcontext which uses unified virtual addressing then CUDA_ERROR_INVALID_VALUE is returned.

    • CU_POINTER_ATTRIBUTE_MEMORY_TYPE:

    • Returns in *data the physical memory type of the memory that ptr addresses as a CUmemorytype enumerated value. The type of data must be unsigned int.

    • If ptr addresses device memory then *data is set to CU_MEMORYTYPE_DEVICE. The particular CUdevice on which the memory resides is the CUdevice of the CUcontext returned by the CU_POINTER_ATTRIBUTE_CONTEXT attribute of ptr.

    • If ptr addresses host memory then *data is set to CU_MEMORYTYPE_HOST.

    • If ptr was not allocated by, mapped by, or registered with a CUcontext which uses unified virtual addressing then CUDA_ERROR_INVALID_VALUE is returned.

    • If the current CUcontext does not support unified virtual addressing then CUDA_ERROR_INVALID_CONTEXT is returned.

    • CU_POINTER_ATTRIBUTE_DEVICE_POINTER:

    • Returns in *data the device pointer value through which ptr may be accessed by kernels running in the current CUcontext. The type of data must be CUdeviceptr *.

    • If there exists no device pointer value through which kernels running in the current CUcontext may access ptr then CUDA_ERROR_INVALID_VALUE is returned.

    • If there is no current CUcontext then CUDA_ERROR_INVALID_CONTEXT is returned.

    • Except in the exceptional disjoint addressing cases discussed below, the value returned in *data will equal the input value ptr.

    • CU_POINTER_ATTRIBUTE_HOST_POINTER:

    • Returns in *data the host pointer value through which ptr may be accessed by the host program. The type of data must be void **. If there exists no host pointer value through which the host program may directly access ptr then CUDA_ERROR_INVALID_VALUE is returned.

    • Except in the exceptional disjoint addressing cases discussed below, the value returned in *data will equal the input value ptr.

    • CU_POINTER_ATTRIBUTE_P2P_TOKENS:

    • Returns in *data two tokens for use with the nv-p2p.h Linux kernel interface. data must be a struct of type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS.

    • ptr must be a pointer to memory obtained from cuMemAlloc(). Note that p2pToken and vaSpaceToken are only valid for the lifetime of the source allocation. A subsequent allocation at the same address may return completely different tokens. Querying this attribute has a side effect of setting the attribute CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that ptr points to.

    • CU_POINTER_ATTRIBUTE_SYNC_MEMOPS:

    • A boolean attribute which, when set, ensures that synchronous memory operations initiated on the region of memory that ptr points to will always synchronize. See further documentation in the section titled “API synchronization behavior” to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    • CU_POINTER_ATTRIBUTE_BUFFER_ID:

    • Returns in *data a buffer ID which is guaranteed to be unique within the process. data must point to an unsigned long long.

    • ptr must be a pointer to memory obtained from a CUDA memory allocation API. Every memory allocation from any of the CUDA memory allocation APIs will have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs from previous freed allocations. IDs are only unique within a single process.

    • CU_POINTER_ATTRIBUTE_IS_MANAGED:

    • Returns in *data a boolean that indicates whether the pointer points to managed memory or not.

    • If ptr is not a valid CUDA pointer then CUDA_ERROR_INVALID_VALUE is returned.

    • CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL:

    • Returns in *data an integer representing a device ordinal of a device against which the memory was allocated or registered.

    • CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE:

    • Returns in *data a boolean that indicates if this pointer maps to an allocation that is suitable for cudaIpcGetMemHandle.

    • CU_POINTER_ATTRIBUTE_RANGE_START_ADDR:

    • Returns in *data the starting address for the allocation referenced by the device pointer ptr. Note that this is not necessarily the address of the mapped region, but the address of the mappable address range ptr references (e.g. from cuMemAddressReserve).

    • CU_POINTER_ATTRIBUTE_RANGE_SIZE:

    • Returns in *data the size for the allocation referenced by the device pointer ptr. Note that this is not necessarily the size of the mapped region, but the size of the mappable address range ptr references (e.g. from cuMemAddressReserve). To retrieve the size of the mapped region, see cuMemGetAddressRange.

    • CU_POINTER_ATTRIBUTE_MAPPED:

    • Returns in *data a boolean that indicates if this pointer is in a valid address range that is mapped to a backing allocation.

    • CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES:

    • Returns a bitmask of the allowed handle types for an allocation that may be passed to cuMemExportToShareableHandle.

    • CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE:

    • Returns in *data the handle to the mempool that the allocation was obtained from.
    +

    Note that for most allocations in the unified virtual address space the host and device pointer for accessing the allocation will be the same. The exceptions to this are:

    +
      +
    • user memory registered using cuMemHostRegister

    • +
    • host memory allocated using cuMemHostAlloc with the CU_MEMHOSTALLOC_WRITECOMBINED flag. For these types of allocation there will exist separate, disjoint host and device addresses for accessing the allocation. In particular:

    • The host address will correspond to an invalid unmapped device address (which will result in an exception if accessed from the device)

    • The device address will correspond to an invalid unmapped host address (which will result in an exception if accessed from the host). For these types of allocations, querying CU_POINTER_ATTRIBUTE_HOST_POINTER and CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host and device addresses from either address.

    • +
    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemPrefetchAsync(devPtr, size_t count, dstDevice, hStream)#
    +

    Prefetches memory to the specified destination device.

    +

    Note there is a later version of this API, cuMemPrefetchAsync_v2, which will supplant this version in CUDA 13.0. This version is retained for minor version compatibility.

    +

    Prefetches memory to the specified destination device. devPtr is the base device pointer of the memory to be prefetched and dstDevice is the destination device. count specifies the number of bytes to copy. hStream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables, or it may also refer to system-allocated memory on systems with non-zero CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

    +

    Passing in CU_DEVICE_CPU for dstDevice will prefetch the data to host memory. If dstDevice is a GPU, then the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. Additionally, hStream must be associated with a device that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS.
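
    A minimal sketch of prefetching a managed allocation to device 0 ahead of use, reusing dev, stream and check() from the earlier sketches:

        from cuda.bindings import driver

        managed, = check(driver.cuMemAllocManaged(
            1 << 20, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL))
        # Move the pages to `dev` before the kernel that will read them runs.
        check(driver.cuMemPrefetchAsync(managed, 1 << 20, dev, stream))
        # Passing CU_DEVICE_CPU as dstDevice would prefetch back to host memory instead.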

    +

    The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

    +

    If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cuMemAllocManaged allocations to host memory in order to make room. Device memory allocated using cuMemAlloc or cuArrayCreate will not be evicted.

    +

    By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only set up on dstDevice. The exact behavior however also depends on the settings applied to this memory range via cuMemAdvise as described below:

    +

    If CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, then that subset will create a read-only copy of the pages on dstDevice.

    +

    If CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory range, then the pages will be migrated to dstDevice even if dstDevice is not the preferred location of any pages in the memory range.

    +

    If CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

    +

    Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

    +

    Note that this function is asynchronous with respect to the host and all work on other devices.

    +
    +
    Parameters:
    +
      +
    • devPtr (CUdeviceptr) – Pointer to be prefetched

    • +
    • count (size_t) – Size in bytes

    • +
    • dstDevice (CUdevice) – Destination device to prefetch to

    • +
    • hStream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemPrefetchAsync_v2(devPtr, size_t count, CUmemLocation location: CUmemLocation, unsigned int flags, hStream)#
    +

    Prefetches memory to the specified destination location.

    +

    Prefetches memory to the specified destination location. devPtr is the base device pointer of the memory to be prefetched and location specifies the destination location. count specifies the number of bytes to copy. hStream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables.

    +

    Specifying CU_MEM_LOCATION_TYPE_DEVICE for type will prefetch memory to the GPU specified by device ordinal id, which must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Additionally, hStream must be associated with a device that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Specifying CU_MEM_LOCATION_TYPE_HOST as type will prefetch data to host memory. Applications can request prefetching memory to a specific host NUMA node by specifying CU_MEM_LOCATION_TYPE_HOST_NUMA for type and a valid host NUMA node id in id. Users can also request prefetching memory to the host NUMA node closest to the current thread’s CPU by specifying CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT for type. Note that when type is either CU_MEM_LOCATION_TYPE_HOST or CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT, id will be ignored.
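
    For example, prefetching to a specific host NUMA node with the _v2 interface (reusing managed, stream and check() from the earlier sketches; NUMA node 0 is an assumption):

        from cuda.bindings import driver

        loc = driver.CUmemLocation()
        loc.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA
        loc.id = 0  # host NUMA node id
        check(driver.cuMemPrefetchAsync_v2(managed, 1 << 20, loc, 0, stream))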

    +

    The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

    +

    If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cuMemAllocManaged allocations to host memory in order to make room. Device memory allocated using cuMemAlloc or cuArrayCreate will not be evicted.

    +

    By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only set up on the destination location. The exact behavior however also depends on the settings applied to this memory range via cuMemAdvise as described below:

    +

    If CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, then that subset will create a read-only copy of the pages on the destination location. If however the destination location is a host NUMA node, then any pages of that subset that are already in another host NUMA node will be transferred to the destination.

    +

    If CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory range, then the pages will be migrated to location even if location is not the preferred location of any pages in the memory range.

    +

    If CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

    +

    Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

    +

    Note that this function is asynchronous with respect to the host and all work on other devices.

    +
    +
    Parameters:
    +
      +
    • devPtr (CUdeviceptr) – Pointer to be prefetched

    • +
    • count (size_t) – Size in bytes

    • +
    • location (CUmemLocation) – Destination location to prefetch to

    • +
    • flags (unsigned int) – Flags for future use, must be zero for now.

    • +
    • hStream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuMemAdvise(devPtr, size_t count, advice: CUmem_advise, device)#
    +

    Advise about the usage of a given memory range.

    +

    Note there is a later version of this API, cuMemAdvise_v2, which will supplant this version in CUDA 13.0. This version is retained for minor version compatibility.

    +

    Advise the Unified Memory subsystem about the usage pattern for the memory range starting at devPtr with a size of count bytes. The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the advice is applied. The memory range must refer to managed memory allocated via cuMemAllocManaged or declared via managed variables. The memory range could also refer to system-allocated pageable memory provided it represents a valid, host-accessible region of memory and all additional constraints imposed by advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable memory range results in an error being returned.
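
    A minimal sketch of applying one of the advice values described below, reusing managed, dev and check() from the earlier sketches:

        from cuda.bindings import driver

        # Mark a mostly-read range so each accessing processor keeps a read-only copy.
        check(driver.cuMemAdvise(
            managed, 1 << 20,
            driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY,
            dev))  # the device argument is ignored for this particular advice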

    +

    The advice parameter can take the following values:

    +
      +
    • CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read from and only occasionally written to. Any read accesses from any processor to this region will create a read-only copy of at least the accessed pages in that processor’s memory. Additionally, if cuMemPrefetchAsync is called on this region, it will create a read-only copy of the data on the destination processor. If any processor writes to this region, all copies of the corresponding page will be invalidated except for the one where the write occurred. The device argument is ignored for this advice. Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU that has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Also, if a context is created on a device that does not have the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until all such contexts are destroyed. If the memory region refers to valid system-allocated pageable memory, then the accessing device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only copy to be created on that device. Note however that if the accessing device also has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice will not create a read-only copy when that device accesses this memory region.

    • CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated copies of the data will be collapsed into a single copy. The location for the collapsed copy will be the preferred location if the page has a preferred location and one of the read-duplicated copies was resident at that location. Otherwise, the location chosen is arbitrary.

    • CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the data to be the memory belonging to device. Passing in CU_DEVICE_CPU for device sets the preferred location as host memory. If device is a GPU, then it must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy when a fault occurs on that memory region. If the data is already in its preferred location and the faulting processor can establish a mapping without requiring the data to be migrated, then data migration will be avoided. On the other hand, if the data is not in its preferred location or if a direct mapping cannot be established, then it will be migrated to the processor accessing it. It is important to note that setting the preferred location does not prevent data prefetching done using cuMemPrefetchAsync. Having a preferred location can override the page thrash detection and resolution logic in the Unified Memory driver. Normally, if a page is detected to be constantly thrashing between, for example, host and device memory, the page may eventually be pinned to host memory by the Unified Memory driver. But if the preferred location is set as device memory, then the page will continue to thrash indefinitely. If CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice, unless read accesses from device will not result in a read-only copy being created on that device as outlined in description for the advice CU_MEM_ADVISE_SET_READ_MOSTLY. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

    • CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of CU_MEM_ADVISE_SET_PREFERRED_LOCATION and changes the preferred location to none.

    • CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by device. Passing in CU_DEVICE_CPU for device will set the advice for the CPU. If device is a GPU, then the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. This advice does not cause data migration and has no impact on the location of the data per se. Instead, it causes the data to always be mapped in the specified processor’s page tables, as long as the location of the data permits a mapping to be established. If the data gets migrated for any reason, the mappings are updated accordingly. This advice is recommended in scenarios where data locality is not important, but avoiding faults is. Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data over to the other GPUs is not as important because the accesses are infrequent and the overhead of migration may be too high. But preventing faults can still help improve performance, and so having a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated to host memory because the CPU typically cannot access device memory directly. Any GPU that had the CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the page in host memory. If CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice. Additionally, if the preferred location of this memory region or any subset of it is also device, then the policies associated with CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if device has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then this call has no effect.

    • CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to the data from device may be removed at any time causing accesses to result in non-fatal page faults. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if device has a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then this call has no effect.
    +
    +
    Parameters:
    +
      +
    • devPtr (CUdeviceptr) – Pointer to memory to set the advice for

    • +
    • count (size_t) – Size in bytes of the memory range

    • +
    • advice (CUmem_advise) – Advice to be applied for the specified memory range

    • +
    • device (CUdevice) – Device to apply the advice for

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
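As an illustration, here is a minimal Python sketch of this call through cuda.bindings.driver, assuming cuInit and context creation have already run on a device with concurrent managed access, with error handling elided:

    from cuda.bindings import driver

    nbytes = 1 << 20  # 1 MiB managed allocation
    err, dptr = driver.cuMemAllocManaged(
        nbytes, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value)

    # Mark the range read-mostly; the device argument is ignored
    # for this particular advice.
    err, = driver.cuMemAdvise(
        dptr, nbytes,
        driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY,
        driver.CUdevice(0))

    err, = driver.cuMemFree(dptr)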
    + +
    +
    +cuda.bindings.driver.cuMemAdvise_v2(devPtr, size_t count, advice: CUmem_advise, CUmemLocation location: CUmemLocation)#
    +

    Advise about the usage of a given memory range.

    +

    Advise the Unified Memory subsystem about the usage pattern for the +memory range starting at devPtr with a size of count bytes. The +start address and end address of the memory range will be rounded down +and rounded up respectively to be aligned to CPU page size before the +advice is applied. The memory range must refer to managed memory +allocated via cuMemAllocManaged or declared via managed +variables. The memory range could also refer to system-allocated +pageable memory provided it represents a valid, host-accessible region +of memory and all additional constraints imposed by advice as +outlined below are also satisfied. Specifying an invalid system- +allocated pageable memory range results in an error being returned.

    +

    The advice parameter can take the following values:

    +
      +
    • CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data +is mostly going to be read from and only occasionally written to. Any +read accesses from any processor to this region will create a read- +only copy of at least the accessed pages in that processor’s memory. +Additionally, if cuMemPrefetchAsync or +cuMemPrefetchAsync_v2 is called on this region, it will +create a read-only copy of the data on the destination processor. If +the target location for cuMemPrefetchAsync_v2 is a host +NUMA node and a read-only copy already exists on another host NUMA +node, that copy will be migrated to the targeted host NUMA node. If +any processor writes to this region, all copies of the corresponding +page will be invalidated except for the one where the write occurred. +If the writing processor is the CPU and the preferred location of the +page is a host NUMA node, then the page will also be migrated to that +host NUMA node. The location argument is ignored for this advice. +Note that for a page to be read-duplicated, the accessing processor +must either be the CPU or a GPU that has a non-zero value for the +device attribute +CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Also, if a +context is created on a device that does not have the device +attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS +set, then read-duplication will not occur until all such contexts are +destroyed. If the memory region refers to valid system-allocated +pageable memory, then the accessing device must have a non-zero value +for the device attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read- +only copy to be created on that device. Note however that if the +accessing device also has a non-zero value for the device attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, +then setting this advice will not create a read-only copy when that +device accesses this memory region.

    • +
    • CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of +CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the +Unified Memory driver from attempting heuristic read-duplication on +the memory range. Any read-duplicated copies of the data will be +collapsed into a single copy. The location for the collapsed copy +will be the preferred location if the page has a preferred location +and one of the read-duplicated copies was resident at that location. +Otherwise, the location chosen is arbitrary. Note: The location +argument is ignored for this advice.

    • +
    • CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets +the preferred location for the data to be the memory belonging to +location. When type is +CU_MEM_LOCATION_TYPE_HOST, id +is ignored and the preferred location is set to be host memory. To +set the preferred location to a specific host NUMA node, applications +must set type to +CU_MEM_LOCATION_TYPE_HOST_NUMA and +id must specify the NUMA ID of the host +NUMA node. If type is set to +CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT, +id will be ignored and the host NUMA +node closest to the calling thread’s CPU will be used as the +preferred location. If type is a +CU_MEM_LOCATION_TYPE_DEVICE, then +id must be a valid device ordinal and the +device must have a non-zero value for the device attribute +CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting +the preferred location does not cause data to migrate to that +location immediately. Instead, it guides the migration policy when a +fault occurs on that memory region. If the data is already in its +preferred location and the faulting processor can establish a mapping +without requiring the data to be migrated, then data migration will +be avoided. On the other hand, if the data is not in its preferred +location or if a direct mapping cannot be established, then it will +be migrated to the processor accessing it. It is important to note +that setting the preferred location does not prevent data prefetching +done using cuMemPrefetchAsync. Having a preferred +location can override the page thrash detection and resolution logic +in the Unified Memory driver. Normally, if a page is detected to be +constantly thrashing between for example host and device memory, the +page may eventually be pinned to host memory by the Unified Memory +driver. But if the preferred location is set as device memory, then +the page will continue to thrash indefinitely. If +CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory +region or any subset of it, then the policies associated with that +advice will override the policies of this advice, unless read +accesses from location will not result in a read-only copy being +created on that processor as outlined in the description for the advice +CU_MEM_ADVISE_SET_READ_MOSTLY. If the memory region +refers to valid system-allocated pageable memory, and +type is CU_MEM_LOCATION_TYPE_DEVICE then +id must be a valid device that has a non- +zero value for the device attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

    • +
    • CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect +of CU_MEM_ADVISE_SET_PREFERRED_LOCATION and changes the +preferred location to none. The location argument is ignored for +this advice.

    • +
    • CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that +the data will be accessed by processor location. The +type must be either +CU_MEM_LOCATION_TYPE_DEVICE with +id representing a valid device ordinal or +CU_MEM_LOCATION_TYPE_HOST and +id will be ignored. All other location +types are invalid. If id is a GPU, then the +device attribute +CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be +non-zero. This advice does not cause data migration and has no impact +on the location of the data per se. Instead, it causes the data to +always be mapped in the specified processor’s page tables, as long as +the location of the data permits a mapping to be established. If the +data gets migrated for any reason, the mappings are updated +accordingly. This advice is recommended in scenarios where data +locality is not important, but avoiding faults is. Consider for +example a system containing multiple GPUs with peer-to-peer access +enabled, where the data located on one GPU is occasionally accessed +by peer GPUs. In such scenarios, migrating data over to the other +GPUs is not as important because the accesses are infrequent and the +overhead of migration may be too high. But preventing faults can +still help improve performance, and so having a mapping set up in +advance is useful. Note that on CPU access of this data, the data may +be migrated to host memory because the CPU typically cannot access +device memory directly. Any GPU that had the +CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will +now have its mapping updated to point to the page in host memory. If +CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory +region or any subset of it, then the policies associated with that +advice will override the policies of this advice. Additionally, if +the preferred location of this memory region or any subset of it is +also location, then the policies associated with +CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the +policies of this advice. If the memory region refers to valid system- +allocated pageable memory, and type is +CU_MEM_LOCATION_TYPE_DEVICE then device in +id must have a non-zero value for the +device attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, +if id has a non-zero value for the device +attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, +then this call has no effect.

    • +
    • CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of +CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to the data +from location may be removed at any time causing accesses to result +in non-fatal page faults. If the memory region refers to valid +system-allocated pageable memory, and type +is CU_MEM_LOCATION_TYPE_DEVICE then device in +id must have a non-zero value for the +device attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, +if id has a non-zero value for the device +attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, +then this call has no effect.

    • +
    +
    +
    Parameters:
    +
      +
    • devPtr (CUdeviceptr) – Pointer to memory to set the advice for

    • +
    • count (size_t) – Size in bytes of the memory range

    • +
    • advice (CUmem_advise) – Advice to be applied for the specified memory range

    • +
    • location (CUmemLocation) – Location to apply the advice for

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
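A hedged sketch of the _v2 variant, which takes a CUmemLocation instead of a bare device ordinal (same assumptions as the sketch above: initialized context, error handling elided):

    from cuda.bindings import driver

    nbytes = 1 << 20
    err, dptr = driver.cuMemAllocManaged(
        nbytes, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value)

    # Describe the target location: device ordinal 0.
    loc = driver.CUmemLocation()
    loc.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    loc.id = 0

    err, = driver.cuMemAdvise_v2(
        dptr, nbytes,
        driver.CUmem_advise.CU_MEM_ADVISE_SET_PREFERRED_LOCATION,
        loc)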
    + +
    +
    +cuda.bindings.driver.cuMemRangeGetAttribute(size_t dataSize, attribute: CUmem_range_attribute, devPtr, size_t count)#
    +

    Query an attribute of a given memory range.

    +

    Query an attribute about the memory range starting at devPtr with a +size of count bytes. The memory range must refer to managed memory +allocated via cuMemAllocManaged or declared via managed +variables.

    +

    The attribute parameter can take the following values:

    +
      +
    • CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is +specified, data will be interpreted as a 32-bit integer, and +dataSize must be 4. The result returned will be 1 if all pages in +the given memory range have read-duplication enabled, or 0 otherwise.

    • +
    • CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this +attribute is specified, data will be interpreted as a 32-bit +integer, and dataSize must be 4. The result returned will be a GPU +device id if all pages in the memory range have that GPU as their +preferred location, or it will be CU_DEVICE_CPU if all pages in the +memory range have the CPU as their preferred location, or it will be +CU_DEVICE_INVALID if either all the pages don’t have the same +preferred location or some of the pages don’t have a preferred +location at all. Note that the actual location of the pages in the +memory range at the time of the query may be different from the +preferred location.

    • +
    • CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is +specified, data will be interpreted as an array of 32-bit integers, +and dataSize must be a non-zero multiple of 4. The result returned +will be a list of device ids that had +CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory +range. If any device does not have that advice set for the entire +memory range, that device will not be included. If data is larger +than the number of devices that have that advice set for that memory +range, CU_DEVICE_INVALID will be returned in all the extra space +provided. For example, if dataSize is 12 (i.e., data has 3 elements) +and only device 0 has the advice set, then the result returned will +be { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. If data is smaller +than the number of devices that have that advice set, then only as +many devices will be returned as can fit in the array. There is no +guarantee on which specific devices will be returned, however.

    • +
    • CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this +attribute is specified, data will be interpreted as a 32-bit +integer, and dataSize must be 4. The result returned will be the +last location to which all pages in the memory range were prefetched +explicitly via cuMemPrefetchAsync. This will either be a +GPU id or CU_DEVICE_CPU depending on whether the last location for +prefetch was a GPU or the CPU respectively. If any page in the memory +range was never explicitly prefetched or if all pages were not +prefetched to the same location, CU_DEVICE_INVALID will be returned. +Note that this simply returns the last location that the application +requested to prefetch the memory range to. It gives no indication as +to whether the prefetch operation to that location has completed or +even begun.

    • +
    • CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE: If this +attribute is specified, data will be interpreted as a +CUmemLocationType, and dataSize must be +sizeof(CUmemLocationType). The CUmemLocationType returned +will be CU_MEM_LOCATION_TYPE_DEVICE if all pages in the +memory range have the same GPU as their preferred location, or +CUmemLocationType will be +CU_MEM_LOCATION_TYPE_HOST if all pages in the memory +range have the CPU as their preferred location, or it will be +CU_MEM_LOCATION_TYPE_HOST_NUMA if all the pages in the +memory range have the same host NUMA node ID as their preferred +location or it will be CU_MEM_LOCATION_TYPE_INVALID if +either all the pages don’t have the same preferred location or some +of the pages don’t have a preferred location at all. Note that the +actual location type of the pages in the memory range at the time of +the query may be different from the preferred location type.

      + +
    • +
    • CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE: If +this attribute is specified, data will be interpreted as a +CUmemLocationType, and dataSize must be +sizeof(CUmemLocationType). The result returned will be the last +location to which all pages in the memory range were prefetched +explicitly via cuMemPrefetchAsync. The +CUmemLocationType returned will be +CU_MEM_LOCATION_TYPE_DEVICE if the last prefetch location +was a GPU or CU_MEM_LOCATION_TYPE_HOST if it was the CPU +or CU_MEM_LOCATION_TYPE_HOST_NUMA if the last prefetch +location was a specific host NUMA node. If any page in the memory +range was never explicitly prefetched or if all pages were not +prefetched to the same location, CUmemLocationType will +be CU_MEM_LOCATION_TYPE_INVALID. Note that this simply +returns the last location type that the application requested to +prefetch the memory range to. It gives no indication as to whether +the prefetch operation to that location has completed or even begun.

      + +
    • +
    +
    +
    Parameters:
    +
      +
    • dataSize (size_t) – Size in bytes of the result to be returned in data

    • +
    • attribute (CUmem_range_attribute) – The attribute to query

    • +
    • devPtr (CUdeviceptr) – Start of the range to query

    • +
    • count (size_t) – Size of the range to query

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
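For example, to check whether read-duplication is enabled on a managed range (dptr and nbytes as in the sketches above; this assumes the binding returns the queried value as a second tuple element):

    from cuda.bindings import driver

    # The READ_MOSTLY result is a 32-bit integer, so dataSize must be 4.
    err, read_mostly = driver.cuMemRangeGetAttribute(
        4, driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY,
        dptr, nbytes)
    print("read-mostly enabled:", read_mostly)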
    + +
    +
    +cuda.bindings.driver.cuMemRangeGetAttributes(dataSizes: Tuple[int] | List[int], attributes: Optional[Tuple[CUmem_range_attribute] | List[CUmem_range_attribute]], size_t numAttributes, devPtr, size_t count)#
    +

    Query attributes of a given memory range.

    +

    Query attributes of the memory range starting at devPtr with a size +of count bytes. The memory range must refer to managed memory +allocated via cuMemAllocManaged or declared via managed +variables. The attributes array will be interpreted to have +numAttributes entries. The dataSizes array will also be interpreted +to have numAttributes entries. The results of the query will be +stored in data.

    +

    The list of supported attributes are given below. Please refer to +cuMemRangeGetAttribute for attribute descriptions and +restrictions.

    + +
    +
    Parameters:
    +
      +
    • dataSizes (List[int]) – Array containing the sizes of each result

    • +
    • attributes (List[CUmem_range_attribute]) – An array of attributes to query (numAttributes and the number of +attributes in this array should match)

    • +
    • numAttributes (size_t) – Number of attributes to query

    • +
    • devPtr (CUdeviceptr) – Start of the range to query

    • +
    • count (size_t) – Size of the range to query

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
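A sketch querying two attributes in one call (assuming the results come back as a list parallel to attributes; dptr and nbytes as above, error handling elided):

    from cuda.bindings import driver

    attrs = [
        driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY,
        driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION,
    ]
    # Both results are 32-bit integers, hence a data size of 4 each.
    err, data = driver.cuMemRangeGetAttributes(
        [4, 4], attrs, len(attrs), dptr, nbytes)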
    + +
    +
    +cuda.bindings.driver.cuPointerSetAttribute(value, attribute: CUpointer_attribute, ptr)#
    +

    Set attributes on a previously allocated memory region.

    +

    The supported attributes are:

    +
      +
    • CU_POINTER_ATTRIBUTE_SYNC_MEMOPS:

    • +
    • A boolean attribute that can either be set (1) or unset (0). When +set, the region of memory that ptr points to is guaranteed to +always synchronize memory operations that are synchronous. If there +are some previously initiated synchronous memory operations that are +pending when this attribute is set, the function does not return +until those memory operations are complete. See further documentation +in the section titled “API synchronization behavior” to learn more +about cases when synchronous memory operations can exhibit +asynchronous behavior. value will be considered as a pointer to an +unsigned integer to which this attribute is to be set.

    • +
    +
    +
    Parameters:
    +
      +
    • value (Any) – Pointer to memory containing the value to be set

    • +
    • attribute (CUpointer_attribute) – Pointer attribute to set

    • +
    • ptr (CUdeviceptr) – Pointer to a memory region allocated using CUDA memory allocation +APIs

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_DEVICE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
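A sketch of enabling SYNC_MEMOPS on an allocation. Since value is documented as a pointer to host memory holding the new attribute value, this passes the address of a ctypes unsigned int; treating value as a raw host address is an assumption of this sketch:

    import ctypes
    from cuda.bindings import driver

    flag = ctypes.c_uint(1)  # 1 = set, 0 = unset

    err, = driver.cuPointerSetAttribute(
        ctypes.addressof(flag),  # address of the host value (assumption)
        driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS,
        dptr)  # dptr: a CUdeviceptr from a prior CUDA allocation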
    + +
    +
    +cuda.bindings.driver.cuPointerGetAttributes(unsigned int numAttributes, attributes: Optional[Tuple[CUpointer_attribute] | List[CUpointer_attribute]], ptr)#
    +

    Returns information about a pointer.

    +

    The supported attributes are (refer to +cuPointerGetAttribute for attribute descriptions and +restrictions):

    + +

    Unlike cuPointerGetAttribute, this function will not return +an error when the ptr encountered is not a valid CUDA pointer. +Instead, the attributes are assigned default NULL values and +CUDA_SUCCESS is returned.

    +

    If ptr was not allocated by, mapped by, or registered with a +CUcontext which uses UVA (Unified Virtual Addressing), +CUDA_ERROR_INVALID_CONTEXT is returned.

    +
    +
    Parameters:
    +
      +
    • numAttributes (unsigned int) – Number of attributes to query

    • +
    • attributes (List[CUpointer_attribute]) – An array of attributes to query (numAttributes and the number of +attributes in this array should match)

    • +
    • ptr (CUdeviceptr) – Pointer to query

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +

    Stream Management#

    +

    This section describes the stream management functions of the low-level CUDA driver application programming interface.

    +
    +
    +cuda.bindings.driver.cuStreamCreate(unsigned int Flags)#
    +

    Create a stream.

    +

    Creates a stream and returns a handle in phStream. The Flags +argument determines behaviors of the stream.

    +

    Valid values for Flags are:

    +
      +
    • CU_STREAM_DEFAULT: Default stream creation flag.

    • +
    • CU_STREAM_NON_BLOCKING: Specifies that work running in +the created stream may run concurrently with work in stream 0 (the +NULL stream), and that the created stream should perform no implicit +synchronization with stream 0.

    • +
    +
    +
    Parameters:
    +

    Flags (unsigned int) – Parameters for stream creation

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamCreateWithPriority(unsigned int flags, int priority)#
    +

    Create a stream with the given priority.

    +

    Creates a stream with the specified priority and returns a handle in +phStream. This affects the scheduling priority of work in the stream. +Priorities provide a hint to preferentially run work with higher +priority when possible, but do not preempt already-running work or +provide any other functional guarantee on execution order.

    +

    priority follows a convention where lower numbers represent higher +priorities. ‘0’ represents default priority. The range of meaningful +numerical priorities can be queried using +cuCtxGetStreamPriorityRange. If the specified priority is +outside the numerical range returned by +cuCtxGetStreamPriorityRange, it will automatically be +clamped to the lowest or the highest number in the range.

    +
    +
    Parameters:
    +
      +
    • flags (unsigned int) – Flags for stream creation. See cuStreamCreate for a +list of valid flags

    • +
    • priority (int) – Stream priority. Lower numbers represent higher priorities. See +cuCtxGetStreamPriorityRange for more information about +meaningful stream priorities that can be passed.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    Stream priorities are supported only on GPUs with compute capability 3.5 or higher.

    +

    In the current implementation, only compute kernels launched in priority streams are affected by the stream’s priority. Stream priorities have no effect on host-to-device and device-to-host memory operations.

    +
    + +
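Putting the two creation calls together, a minimal sketch that creates a non-blocking stream at the highest priority the current context supports (initialized context assumed, error handling elided):

    from cuda.bindings import driver

    # Lower numbers mean higher priority; `greatest` is the highest.
    err, least, greatest = driver.cuCtxGetStreamPriorityRange()

    err, stream = driver.cuStreamCreateWithPriority(
        driver.CUstream_flags.CU_STREAM_NON_BLOCKING.value, greatest)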
    +
    +cuda.bindings.driver.cuStreamGetPriority(hStream)#
    +

    Query the priority of a given stream.

    +

    Query the priority of a stream created using +cuStreamCreate, cuStreamCreateWithPriority or +cuGreenCtxStreamCreate and return the priority in +priority. Note that if the stream was created with a priority outside +the numerical range returned by +cuCtxGetStreamPriorityRange, this function returns the +clamped priority. See cuStreamCreateWithPriority for +details about priority clamping.

    +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamGetFlags(hStream)#
    +

    Query the flags of a given stream.

    +

    Query the flags of a stream created using cuStreamCreate, +cuStreamCreateWithPriority or +cuGreenCtxStreamCreate and return the flags in flags.

    +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamGetId(hStream)#
    +

    Returns the unique Id associated with the stream handle supplied.

    +

    Returns in streamId the unique Id which is associated with the given +stream handle. The Id is unique for the life of the program.

    +

    The stream handle hStream can refer to any of the following:

    + +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamGetCtx(hStream)#
    +

    Query the context associated with a stream.

    +

    Returns the CUDA context that the stream is associated with.

    +

    Note there is a later version of this API, +cuStreamGetCtx_v2. It will supplant this version in CUDA +13.0. It is recommended to use cuStreamGetCtx_v2 until then, +as this version will return CUDA_ERROR_NOT_SUPPORTED for +streams created via the API cuGreenCtxStreamCreate.

    +

    The stream handle hStream can refer to any of the following:

    + +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamGetCtx_v2(hStream)#
    +

    Query the contexts associated with a stream.

    +

    Returns the contexts that the stream is associated with.

    +

    If the stream is associated with a green context, the API returns the +green context in pGreenCtx and the primary context of the associated +device in pCtx.

    +

    If the stream is associated with a regular context, the API returns the +regular context in pCtx and NULL in pGreenCtx.

    +

    The stream handle hStream can refer to any of the following:

    + +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamWaitEvent(hStream, hEvent, unsigned int Flags)#
    +

    Make a compute stream wait on an event.

    +

    Makes all future work submitted to hStream wait for all work captured +in hEvent. See cuEventRecord() for details on what is +captured by an event. The synchronization will be performed efficiently +on the device when applicable. hEvent may be from a different context +or device than hStream.

    +

    flags include:

    +
      +
    • CU_EVENT_WAIT_DEFAULT: Default event creation flag.

    • +
    • CU_EVENT_WAIT_EXTERNAL: Event is captured in the graph as +an external event node when performing stream capture. This flag is +invalid outside of stream capture.

    • +
    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE,

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
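A typical cross-stream ordering sketch: record an event on a producer stream and make a consumer stream wait on it (initialized context assumed, error handling elided):

    from cuda.bindings import driver

    err, producer = driver.cuStreamCreate(0)
    err, consumer = driver.cuStreamCreate(0)
    err, event = driver.cuEventCreate(0)

    # ... enqueue work into `producer` here ...

    err, = driver.cuEventRecord(event, producer)
    # 0 selects CU_EVENT_WAIT_DEFAULT; work later enqueued into
    # `consumer` now waits for everything captured by `event`.
    err, = driver.cuStreamWaitEvent(consumer, event, 0)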
    + +
    +
    +cuda.bindings.driver.cuStreamAddCallback(hStream, callback, userData, unsigned int flags)#
    +

    Add a callback to a compute stream.

    +

    Adds a callback to be called on the host after all currently enqueued +items in the stream have completed. For each cuStreamAddCallback call, +the callback will be executed exactly once. The callback will block +later work in the stream until it is finished.

    +

    The callback may be passed CUDA_SUCCESS or an error code. +In the event of a device error, all subsequently executed callbacks +will receive an appropriate CUresult.

    +

    Callbacks must not make any CUDA API calls. Attempting to use a CUDA +API will result in CUDA_ERROR_NOT_PERMITTED. Callbacks must +not perform any synchronization that may depend on outstanding device +work or other callbacks that are not mandated to run earlier. Callbacks +without a mandated order (in independent streams) execute in undefined +order and may be serialized.

    +

    For the purposes of Unified Memory, callback execution makes a number +of guarantees:

    +
      +
    • The callback stream is considered idle for the duration of the +callback. Thus, for example, a callback may always use memory +attached to the callback stream.

    • +
    • The start of execution of a callback has the same effect as +synchronizing an event recorded in the same stream immediately prior +to the callback. It thus synchronizes streams which have been +“joined” prior to the callback.

    • +
    • Adding device work to any stream does not have the effect of making +the stream active until all preceding host functions and stream +callbacks have executed. Thus, for example, a callback might use +global attached memory even if work has been added to another stream, +if the work has been ordered behind the callback with an event.

    • +
    • Completion of a callback does not cause a stream to become active +except as described above. The callback stream will remain idle if no +device work follows the callback, and will remain idle across +consecutive callbacks without device work in between. Thus, for +example, stream synchronization can be done by signaling from a +callback at the end of the stream.

    • +
    +
    +
    Parameters:
    +
      +
    • hStream (CUstream or cudaStream_t) – Stream to add callback to

    • +
    • callback (CUstreamCallback) – The function to call once preceding stream operations are complete

    • +
    • userData (Any) – User specified data to be passed to the callback function

    • +
    • flags (unsigned int) – Reserved for future use, must be 0

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +

    Notes

    +

    This function is slated for eventual deprecation and removal. If you do not require the callback to execute in case of a device error, consider using cuLaunchHostFunc. Additionally, this function is not supported with cuStreamBeginCapture and cuStreamEndCapture, unlike cuLaunchHostFunc.

    +
    + +
    +
    +cuda.bindings.driver.cuStreamBeginCapture(hStream, mode: CUstreamCaptureMode)#
    +

    Begins graph capture on a stream.

    +

    Begin graph capture on hStream. When a stream is in capture mode, all +operations pushed into the stream will not be executed, but will +instead be captured into a graph, which will be returned via +cuStreamEndCapture. Capture may not be initiated if +stream is CU_STREAM_LEGACY. Capture must be ended on the same stream +in which it was initiated, and it may only be initiated if the stream +is not already in capture mode. The capture mode may be queried via +cuStreamIsCapturing. A unique id representing the capture +sequence may be queried via cuStreamGetCaptureInfo.

    +

    If mode is not CU_STREAM_CAPTURE_MODE_RELAXED, +cuStreamEndCapture must be called on this stream from the +same thread.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +

    Notes

    +

    Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.

    +
    + +
    +
    +cuda.bindings.driver.cuStreamBeginCaptureToGraph(hStream, hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], dependencyData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies, mode: CUstreamCaptureMode)#
    +

    Begins graph capture on a stream to an existing graph.

    +

    Begin graph capture on hStream, placing new nodes into an existing +graph. When a stream is in capture mode, all operations pushed into the +stream will not be executed, but will instead be captured into +hGraph. The graph will not be instantiable until the user calls +cuStreamEndCapture.

    +

    Capture may not be initiated if stream is CU_STREAM_LEGACY. Capture +must be ended on the same stream in which it was initiated, and it may +only be initiated if the stream is not already in capture mode. The +capture mode may be queried via cuStreamIsCapturing. A +unique id representing the capture sequence may be queried via +cuStreamGetCaptureInfo.

    +

    If mode is not CU_STREAM_CAPTURE_MODE_RELAXED, +cuStreamEndCapture must be called on this stream from the +same thread.

    +
    +
    Parameters:
    +
      +
    • hStream (CUstream or cudaStream_t) – Stream in which to initiate capture.

    • +
    • hGraph (CUgraph or cudaGraph_t) – Graph to capture into.

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the first node captured in the stream. Can be NULL +if numDependencies is 0.

    • +
    • dependencyData (List[CUgraphEdgeData]) – Optional array of data associated with each dependency.

    • +
    • numDependencies (size_t) – Number of dependencies.

    • +
    • mode (CUstreamCaptureMode) – Controls the interaction of this capture sequence with other API +calls that are potentially unsafe. For more details see +cuThreadExchangeStreamCaptureMode.

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +

    Notes

    +

    Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.

    +
    + +
    +
    +cuda.bindings.driver.cuThreadExchangeStreamCaptureMode(mode: CUstreamCaptureMode)#
    +

    Swaps the stream capture interaction mode for a thread.

    +

    Sets the calling thread’s stream capture interaction mode to the value +contained in *mode, and overwrites *mode with the previous mode for +the thread. To facilitate deterministic behavior across function or +module boundaries, callers are encouraged to use this API in a push-pop +fashion:

    +

    View CUDA Toolkit Documentation for a C++ code example
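A rough Python equivalent of that push-pop pattern (assuming the binding returns the previous mode alongside the CUresult rather than updating an in-out argument):

    from cuda.bindings import driver

    # Push: switch this thread to relaxed mode, remembering the old mode.
    err, old_mode = driver.cuThreadExchangeStreamCaptureMode(
        driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_RELAXED)

    # ... calls that must not be restricted by other threads' captures ...

    # Pop: restore the previous mode.
    err, _ = driver.cuThreadExchangeStreamCaptureMode(old_mode)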

    +

    During stream capture (see cuStreamBeginCapture), some +actions, such as a call to cudaMalloc, may be unsafe. In +the case of cudaMalloc, the operation is not enqueued +asynchronously to a stream, and is not observed by stream capture. +Therefore, if the sequence of operations captured via +cuStreamBeginCapture depended on the allocation being +replayed whenever the graph is launched, the captured graph would be +invalid.

    +

    Therefore, stream capture places restrictions on API calls that can be +made within or concurrently to a +cuStreamBeginCapture-cuStreamEndCapture +sequence. This behavior can be controlled via this API and flags to +cuStreamBeginCapture.

    +

    A thread’s mode is one of the following:

    +
      +
    • CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the +local thread has an ongoing capture sequence that was not initiated +with CU_STREAM_CAPTURE_MODE_RELAXED at cuStreamBeginCapture, or +if any other thread has a concurrent capture sequence initiated with +CU_STREAM_CAPTURE_MODE_GLOBAL, this thread is prohibited from +potentially unsafe API calls.

    • +
    • CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an +ongoing capture sequence not initiated with +CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited from potentially +unsafe API calls. Concurrent capture sequences in other threads are +ignored.

    • +
    • CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited +from potentially unsafe API calls. Note that the thread is still +prohibited from API calls which necessarily conflict with stream +capture, for example, attempting cuEventQuery on an event +that was last recorded inside a capture sequence.

    • +
    +
    +
    Parameters:
    +

    mode (CUstreamCaptureMode) – Pointer to mode value to swap with the current mode

    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cuStreamBeginCapture

    +
    +
    + +
    +
    +cuda.bindings.driver.cuStreamEndCapture(hStream)#
    +

    Ends capture on a stream, returning the captured graph.

    +

    End capture on hStream, returning the captured graph via phGraph. +Capture must have been initiated on hStream via a call to +cuStreamBeginCapture. If capture was invalidated, due to a +violation of the rules of stream capture, then a NULL graph will be +returned.

    +

    If the mode argument to cuStreamBeginCapture was not +CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the +same thread as cuStreamBeginCapture.

    +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Stream to query

    +
    +
    Returns:
    +

    +

    +
    +
    + +
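The begin/end pair is typically used as a bracket around ordinary stream work; a minimal sketch (stream as created in an earlier sketch, error handling elided):

    from cuda.bindings import driver

    err, = driver.cuStreamBeginCapture(
        stream, driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL)

    # ... enqueue async work into `stream`; it is captured, not executed ...

    err, graph = driver.cuStreamEndCapture(stream)
    # `graph` can now be instantiated and launched; a NULL graph means
    # the capture was invalidated.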
    + +
    +
    +cuda.bindings.driver.cuStreamIsCapturing(hStream)#
    +

    Returns a stream’s capture status.

    +

    Return the capture status of hStream via captureStatus. After a +successful call, *captureStatus will contain one of the following:

    + +

    Note that, if this is called on CU_STREAM_LEGACY (the “null +stream”) while a blocking stream in the same context is capturing, it +will return CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and +*captureStatus is unspecified after the call. The blocking stream +capture is not invalidated.

    +

    When a blocking stream is capturing, the legacy stream is in an +unusable state until the blocking stream capture is terminated. The +legacy stream is not supported for stream capture, but attempted use +would have an implicit dependency on the capturing stream(s).

    +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Stream to query

    +
    +
    Returns:
    +

    +

    +
    +
    + +
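For example (assuming the status is returned alongside the CUresult):

    from cuda.bindings import driver

    err, status = driver.cuStreamIsCapturing(stream)
    if status == driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE:
        print("stream is currently capturing")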
    + +
    +
    +cuda.bindings.driver.cuStreamGetCaptureInfo(hStream)#
    +

    Query a stream’s capture state.

    +

    Query stream state related to stream capture.

    +

    If called on CU_STREAM_LEGACY (the “null stream”) while a +stream not created with CU_STREAM_NON_BLOCKING is +capturing, returns CUDA_ERROR_STREAM_CAPTURE_IMPLICIT.

    +

    Valid data (other than capture status) is returned only if both of the +following are true:

    + +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – The stream to query

    +
    +
    Returns:
    +

      +
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_STREAM_CAPTURE_IMPLICIT

    • +
    • captureStatus_out (CUstreamCaptureStatus) – Location to return the capture status of the stream; required

    • +
    • id_out (cuuint64_t) – Optional location to return an id for the capture sequence, which +is unique over the lifetime of the process

    • +
    • graph_out (CUgraph) – Optional location to return the graph being captured into. All +operations other than destroy and node removal are permitted on the +graph while the capture sequence is in progress. This API does not +transfer ownership of the graph, which is transferred or destroyed +at cuStreamEndCapture. Note that the graph handle may +be invalidated before end of capture for certain errors. Nodes that +are or become unreachable from the original stream at +cuStreamEndCapture due to direct actions on the graph +do not trigger CUDA_ERROR_STREAM_CAPTURE_UNJOINED.

    • +
    • dependencies_out (List[CUgraphNode]) – Optional location to store a pointer to an array of nodes. The next +node to be captured in the stream will depend on this set of nodes, +absent operations such as event wait which modify this set. The +array pointer is valid until the next API call which operates on +the stream or until the capture is terminated. The node handles may +be copied out and are valid until they or the graph is destroyed. +The driver-owned array may also be passed directly to APIs that +operate on the graph (not the stream) without copying.

    • +
    • numDependencies_out (int) – Optional location to store the size of the array returned in +dependencies_out.

    • +
    +

    +
    +
    +
    +

    See also

    +
    +
    cuStreamGetCaptureInfo_v3

    cuStreamBeginCapture, cuStreamIsCapturing, cuStreamUpdateCaptureDependencies

    +
    +
    +
    +
    + +
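A sketch of unpacking the outputs listed above (assuming they are returned positionally after the CUresult):

    from cuda.bindings import driver

    err, status, seq_id, graph, deps, num_deps = \
        driver.cuStreamGetCaptureInfo(stream)
    if status == driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE:
        print(f"capture {int(seq_id)} has {num_deps} leaf node(s)")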
    +
    +cuda.bindings.driver.cuStreamGetCaptureInfo_v3(hStream)#
    +

    Query a stream’s capture state (12.3+)

    +

    Query stream state related to stream capture.

    +

    If called on CU_STREAM_LEGACY (the “null stream”) while a +stream not created with CU_STREAM_NON_BLOCKING is +capturing, returns CUDA_ERROR_STREAM_CAPTURE_IMPLICIT.

    +

    Valid data (other than capture status) is returned only if both of the +following are true:

    + +

    If edgeData_out is non-NULL then dependencies_out must be as well. +If dependencies_out is non-NULL and edgeData_out is NULL, but there +is non-zero edge data for one or more of the current stream +dependencies, the call will return CUDA_ERROR_LOSSY_QUERY.

    +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – The stream to query

    +
    +
    Returns:
    +

      +
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_STREAM_CAPTURE_IMPLICIT, CUDA_ERROR_LOSSY_QUERY

    • +
    • captureStatus_out (CUstreamCaptureStatus) – Location to return the capture status of the stream; required

    • +
    • id_out (cuuint64_t) – Optional location to return an id for the capture sequence, which +is unique over the lifetime of the process

    • +
    • graph_out (CUgraph) – Optional location to return the graph being captured into. All +operations other than destroy and node removal are permitted on the +graph while the capture sequence is in progress. This API does not +transfer ownership of the graph, which is transferred or destroyed +at cuStreamEndCapture. Note that the graph handle may +be invalidated before end of capture for certain errors. Nodes that +are or become unreachable from the original stream at +cuStreamEndCapture due to direct actions on the graph +do not trigger CUDA_ERROR_STREAM_CAPTURE_UNJOINED.

    • +
    • dependencies_out (List[CUgraphNode]) – Optional location to store a pointer to an array of nodes. The next +node to be captured in the stream will depend on this set of nodes, +absent operations such as event wait which modify this set. The +array pointer is valid until the next API call which operates on +the stream or until the capture is terminated. The node handles may +be copied out and are valid until they or the graph is destroyed. +The driver-owned array may also be passed directly to APIs that +operate on the graph (not the stream) without copying.

    • +
    • edgeData_out (List[CUgraphEdgeData]) – Optional location to store a pointer to an array of graph edge +data. This array parallels dependencies_out; the next node to be +added has an edge to dependencies_out[i] with annotation +edgeData_out[i] for each i. The array pointer is valid until +the next API call which operates on the stream or until the capture +is terminated.

    • +
    • numDependencies_out (int) – Optional location to store the size of the array returned in +dependencies_out.

    • +
    +

    +
    +
    +
    +

    See also

    +
    +
    cuStreamGetCaptureInfo

    cuStreamBeginCapture, cuStreamIsCapturing, cuStreamUpdateCaptureDependencies

    +
    +
    +
    +
    + +
    +
    +cuda.bindings.driver.cuStreamUpdateCaptureDependencies(hStream, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, unsigned int flags)#
    +

    Update the set of dependencies in a capturing stream (11.3+)

    +

    Modifies the dependency set of a capturing stream. The dependency set +is the set of nodes that the next captured node in the stream will +depend on.

    +

    Valid flags are CU_STREAM_ADD_CAPTURE_DEPENDENCIES and +CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether +the set passed to the API is added to the existing set or replaces it. +A flags value of 0 defaults to +CU_STREAM_ADD_CAPTURE_DEPENDENCIES.

    +

    Nodes that are removed from the dependency set via this API do not +result in CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are +unreachable from the stream at cuStreamEndCapture.

    +

    Returns CUDA_ERROR_ILLEGAL_STATE if the stream is not +capturing.

    +

    This API is new in CUDA 11.3. Developers requiring compatibility across +minor versions to CUDA 11.0 should not use this API or provide a +fallback.

    +
    +
    Parameters:
    +
      +
    • hStream (CUstream or cudaStream_t) – The stream to update

    • +
    • dependencies (List[CUgraphNode]) – The set of dependencies to add

    • +
    • numDependencies (size_t) – The size of the dependencies array

    • +
    • flags (unsigned int) – See above

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_ILLEGAL_STATE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamUpdateCaptureDependencies_v2(hStream, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], dependencyData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies, unsigned int flags)#
    +

    Update the set of dependencies in a capturing stream (12.3+)

    +

    Modifies the dependency set of a capturing stream. The dependency set +is the set of nodes that the next captured node in the stream will +depend on along with the edge data for those dependencies.

    +

    Valid flags are CU_STREAM_ADD_CAPTURE_DEPENDENCIES and +CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether +the set passed to the API is added to the existing set or replaces it. +A flags value of 0 defaults to +CU_STREAM_ADD_CAPTURE_DEPENDENCIES.

    +

    Nodes that are removed from the dependency set via this API do not +result in CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are +unreachable from the stream at cuStreamEndCapture.

    +

    Returns CUDA_ERROR_ILLEGAL_STATE if the stream is not +capturing.

    +
    +
    Parameters:
    +
      +
    • hStream (CUstream or cudaStream_t) – The stream to update

    • +
    • dependencies (List[CUgraphNode]) – The set of dependencies to add

    • +
    • dependencyData (List[CUgraphEdgeData]) – Optional array of data associated with each dependency.

    • +
    • numDependencies (size_t) – The size of the dependencies array

    • +
    • flags (unsigned int) – See above

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_ILLEGAL_STATE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuStreamAttachMemAsync(hStream, dptr, size_t length, unsigned int flags)#
    +

    Attach memory to a stream asynchronously.

    +

    Enqueues an operation in hStream to specify stream association of +length bytes of memory starting from dptr. This function is a +stream-ordered operation, meaning that it is dependent on, and will +only take effect when, previous work in stream has completed. Any +previous association is automatically replaced.

    +

    dptr must point to one of the following types of memories:

    +
      +
    • managed memory declared using the managed keyword or allocated with +cuMemAllocManaged.

    • +
    • a valid host-accessible region of system-allocated pageable memory. +This type of memory may only be specified if the device associated +with the stream reports a non-zero value for the device attribute +CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS.

    • +
    +

    For managed allocations, length must be either zero or the entire +allocation’s size. Both indicate that the entire allocation’s stream +association is being changed. Currently, it is not possible to change +stream association for a portion of a managed allocation.

    +

    For pageable host allocations, length must be non-zero.

    +

    The stream association is specified using flags which must be one of +CUmemAttach_flags. If the CU_MEM_ATTACH_GLOBAL +flag is specified, the memory can be accessed by any stream on any +device. If the CU_MEM_ATTACH_HOST flag is specified, the +program makes a guarantee that it won’t access the memory on the device +from any stream on a device that has a zero value for the device +attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If +the CU_MEM_ATTACH_SINGLE flag is specified and hStream is +associated with a device that has a zero value for the device attribute +CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, the program +makes a guarantee that it will only access the memory on the device +from hStream. It is illegal to attach singly to the NULL stream, +because the NULL stream is a virtual global stream and not a specific +stream. An error will be returned in this case.

    +

    When memory is associated with a single stream, the Unified Memory +system will allow CPU access to this memory region so long as all +operations in hStream have completed, regardless of whether other +streams are active. In effect, this constrains exclusive ownership of +the managed memory region by an active GPU to per-stream activity +instead of whole-GPU activity.

    +

    Accessing memory on the device from streams that are not associated +with it will produce undefined results. No error checking is performed +by the Unified Memory system to ensure that kernels launched into other +streams do not access this region.

    +

    It is a program’s responsibility to order calls to +cuStreamAttachMemAsync via events, synchronization or other +means to ensure legal access to memory at all times. Data visibility +and coherency will be changed appropriately for all kernels which +follow a stream-association change.

    +

    If hStream is destroyed while data is associated with it, the +association is removed and the association reverts to the default +visibility of the allocation as specified at +cuMemAllocManaged. For managed variables, the default +association is always CU_MEM_ATTACH_GLOBAL. Note that +destroying a stream is an asynchronous operation, and as a result, the +change to default association won’t happen until all work in the stream +has completed.

    +
    +
    Parameters:
    +
      +
    • hStream (CUstream or cudaStream_t) – Stream in which to enqueue the attach operation

    • +
    • dptr (CUdeviceptr) – Pointer to memory (must be a pointer to managed memory or to a +valid host-accessible region of system-allocated pageable memory)

    • +
    • length (size_t) – Length of memory

    • +
    • flags (unsigned int) – Must be one of CUmemAttach_flags

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
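A sketch of attaching an entire managed allocation to a single stream (this matters on devices with a zero value for CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; initialized context assumed, error handling elided):

    from cuda.bindings import driver

    nbytes = 1 << 20
    err, stream = driver.cuStreamCreate(0)
    err, dptr = driver.cuMemAllocManaged(
        nbytes, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL.value)

    # Length 0 selects the whole allocation for managed memory.
    err, = driver.cuStreamAttachMemAsync(
        stream, dptr, 0, driver.CUmemAttach_flags.CU_MEM_ATTACH_SINGLE.value)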
    + +
    +
    +cuda.bindings.driver.cuStreamQuery(hStream)#
    +

    Determine status of a compute stream.

    +

    Returns CUDA_SUCCESS if all operations in the stream +specified by hStream have completed, or +CUDA_ERROR_NOT_READY if not.

    +

    For the purposes of Unified Memory, a return value of +CUDA_SUCCESS is equivalent to having called +cuStreamSynchronize().

    +
    +
    Parameters:
    +

    hStream (CUstream or cudaStream_t) – Stream to query status of

    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_READY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
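This enables a non-blocking polling loop in place of cuStreamSynchronize, for example:

    import time
    from cuda.bindings import driver

    while True:
        err, = driver.cuStreamQuery(stream)
        if err == driver.CUresult.CUDA_ERROR_NOT_READY:
            time.sleep(0.001)  # do other host work, then poll again
            continue
        break  # CUDA_SUCCESS, or a real error to be handled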
    + +
    +
    +cuda.bindings.driver.cuStreamSynchronize(hStream)#
    +

    Wait until a stream’s tasks are completed.

    +
    +

    Waits until the device has completed all operations in the stream +specified by hStream. If the context was created with the +CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block +until the stream is finished with all of its tasks.

    +
    +

    This function uses standard default (NULL) stream semantics; see the notes on default stream behavior in the CUDA Toolkit documentation.

    +
    +
    +
    + +
    +

cuda.bindings.driver.cuStreamDestroy(hStream)#

Destroys a stream.

Destroys the stream specified by hStream.

In case the device is still doing work in the stream hStream when cuStreamDestroy() is called, the function will return immediately and the resources associated with hStream will be released automatically once the device has completed all work in hStream.

Parameters:
    hStream (CUstream or cudaStream_t) – Stream to destroy

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:
    CUresult

cuda.bindings.driver.cuStreamCopyAttributes(dst, src)#

Copies attributes from source stream to destination stream.

Copies attributes from source stream src to destination stream dst. Both streams must have the same context.

Parameters:
    • dst (CUstream or cudaStream_t) – Destination stream
    • src (CUstream or cudaStream_t) – Source stream

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

Return type:
    CUresult

See also

CUaccessPolicyWindow

cuda.bindings.driver.cuStreamGetAttribute(hStream, attr: CUstreamAttrID)#

Queries stream attribute.

Queries attribute attr from hStream and stores it in the corresponding member of value_out.

Parameters:
    • hStream (CUstream or cudaStream_t) – Stream to query
    • attr (CUstreamAttrID) – Attribute to query

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE
    • value_out (CUstreamAttrValue) – Returned attribute value

See also

CUaccessPolicyWindow

cuda.bindings.driver.cuStreamSetAttribute(hStream, attr: CUstreamAttrID, CUstreamAttrValue value: Optional[CUstreamAttrValue])#

Sets stream attribute.

Sets attribute attr on hStream from the corresponding attribute of value. The updated attribute will be applied to subsequent work submitted to the stream. It will not affect previously submitted work.

Parameters:
    • hStream (CUstream or cudaStream_t) – Stream to modify
    • attr (CUstreamAttrID) – Attribute to set
    • value (CUstreamAttrValue) – Attribute value to set

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

Return type:
    CUresult

See also

CUaccessPolicyWindow
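
One way to exercise cuStreamSetAttribute/cuStreamGetAttribute from Python is to populate the accessPolicyWindow member of a CUstreamAttrValue. A minimal sketch, assuming a current context, a device allocation dptr of nbytes bytes, and eliding error checks; the field and enum names below mirror the C structs and should be treated as illustrative:

    from cuda.bindings import driver

    err, stream = driver.cuStreamCreate(0)
    val = driver.CUstreamAttrValue()
    val.accessPolicyWindow.base_ptr = int(dptr)   # start of the persisting window
    val.accessPolicyWindow.num_bytes = nbytes     # window size in bytes
    val.accessPolicyWindow.hitRatio = 0.6
    val.accessPolicyWindow.hitProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING
    val.accessPolicyWindow.missProp = driver.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING
    attr_id = driver.CUstreamAttrID.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW
    err, = driver.cuStreamSetAttribute(stream, attr_id, val)
    err, read_back = driver.cuStreamGetAttribute(stream, attr_id)  # CUstreamAttrValue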

    Event Management#


This section describes the event management functions of the low-level CUDA driver application programming interface.

cuda.bindings.driver.cuEventCreate(unsigned int Flags)#

Creates an event.

Creates an event *phEvent for the current context with the flags specified via Flags. Valid flags include:

    • CU_EVENT_DEFAULT: Default event creation flag.
    • CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking synchronization.
    • CU_EVENT_DISABLE_TIMING: Specifies that the created event does not need to record timing data.
    • CU_EVENT_INTERPROCESS: Specifies that the created event may be used as an interprocess event; CU_EVENT_DISABLE_TIMING must also be set.

Parameters:
    Flags (unsigned int) – Event creation flags

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OUT_OF_MEMORY
    • phEvent (CUevent) – Returns newly created event

cuda.bindings.driver.cuEventRecord(hEvent, hStream)#

Records an event.

Captures in hEvent the contents of hStream at the time of this call. hEvent and hStream must be from the same context otherwise CUDA_ERROR_INVALID_HANDLE is returned. Calls such as cuEventQuery() or cuStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hStream after this call do not modify hEvent. See note on default stream behavior for what is captured in the default case.

cuEventRecord() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cuStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cuEventRecord(). Before the first call to cuEventRecord(), an event represents an empty set of work, so for example cuEventQuery() would return CUDA_SUCCESS.

Parameters:
    • hEvent (CUevent) – Event to record
    • hStream (CUstream or cudaStream_t) – Stream to record event for

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:
    CUresult

cuda.bindings.driver.cuEventRecordWithFlags(hEvent, hStream, unsigned int flags)#

Records an event.

Captures in hEvent the contents of hStream at the time of this call. hEvent and hStream must be from the same context otherwise CUDA_ERROR_INVALID_HANDLE is returned. Calls such as cuEventQuery() or cuStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hStream after this call do not modify hEvent. See note on default stream behavior for what is captured in the default case.

cuEventRecordWithFlags() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cuStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cuEventRecordWithFlags(). Before the first call to cuEventRecordWithFlags(), an event represents an empty set of work, so for example cuEventQuery() would return CUDA_SUCCESS.

flags include:

    • CU_EVENT_RECORD_DEFAULT: Default event record flag.
    • CU_EVENT_RECORD_EXTERNAL: When using stream capture, indicates that the event should be recorded as an external event node.

Parameters:
    • hEvent (CUevent) – Event to record
    • hStream (CUstream or cudaStream_t) – Stream to record event for
    • flags (unsigned int) – Event record flags

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:
    CUresult

cuda.bindings.driver.cuEventQuery(hEvent)#

Queries an event's status.

Queries the status of all work currently captured by hEvent. See cuEventRecord() for details on what is captured by an event.

Returns CUDA_SUCCESS if all captured work has been completed, or CUDA_ERROR_NOT_READY if any captured work is incomplete.

For the purposes of Unified Memory, a return value of CUDA_SUCCESS is equivalent to having called cuEventSynchronize().

Parameters:
    hEvent (CUevent or cudaEvent_t) – Event to query

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_READY

Return type:
    CUresult

cuda.bindings.driver.cuEventSynchronize(hEvent)#

Waits for an event to complete.

Waits until the completion of all work currently captured in hEvent. See cuEventRecord() for details on what is captured by an event.

Waiting for an event that was created with the CU_EVENT_BLOCKING_SYNC flag will cause the calling CPU thread to block until the event has been completed by the device. If the CU_EVENT_BLOCKING_SYNC flag has not been set, then the CPU thread will busy-wait until the event has been completed by the device.

Parameters:
    hEvent (CUevent or cudaEvent_t) – Event to wait for

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE

Return type:
    CUresult

cuda.bindings.driver.cuEventDestroy(hEvent)#

Destroys an event.

Destroys the event specified by hEvent.

An event may be destroyed before it is complete (i.e., while cuEventQuery() would return CUDA_ERROR_NOT_READY). In this case, the call does not block on completion of the event, and any associated resources will automatically be released asynchronously at completion.

Parameters:
    hEvent (CUevent or cudaEvent_t) – Event to destroy

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE

Return type:
    CUresult

cuda.bindings.driver.cuEventElapsedTime(hStart, hEnd)#

Computes the elapsed time between two events.

Computes the elapsed time between two events (in milliseconds with a resolution of around 0.5 microseconds).

If either event was last recorded in a non-NULL stream, the resulting time may be greater than expected (even if both used the same stream handle). This happens because the cuEventRecord() operation takes place asynchronously and there is no guarantee that the measured latency is actually just between the two events. Any number of other different stream operations could execute in between the two measured events, thus altering the timing in a significant way.

If cuEventRecord() has not been called on either event then CUDA_ERROR_INVALID_HANDLE is returned. If cuEventRecord() has been called on both events but one or both of them has not yet been completed (that is, cuEventQuery() would return CUDA_ERROR_NOT_READY on at least one of the events), CUDA_ERROR_NOT_READY is returned. If either event was created with the CU_EVENT_DISABLE_TIMING flag, then this function will return CUDA_ERROR_INVALID_HANDLE.

Parameters:
    • hStart (CUevent) – Starting event
    • hEnd (CUevent) – Ending event

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_READY
    • pMilliseconds (float) – Time between hStart and hEnd in milliseconds
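
A common use of these functions is coarse kernel timing. The following Python sketch (assuming a current context and a stream with work enqueued; error checking elided) creates two events, brackets the work, and reads back the elapsed time:

    from cuda.bindings import driver

    err, start = driver.cuEventCreate(driver.CUevent_flags.CU_EVENT_DEFAULT)
    err, stop = driver.cuEventCreate(driver.CUevent_flags.CU_EVENT_DEFAULT)

    err, = driver.cuEventRecord(start, stream)   # capture stream state before the work
    # ... enqueue kernels / copies on `stream` here ...
    err, = driver.cuEventRecord(stop, stream)    # capture stream state after the work
    err, = driver.cuEventSynchronize(stop)       # block until `stop` has completed

    err, ms = driver.cuEventElapsedTime(start, stop)  # elapsed time in milliseconds
    err, = driver.cuEventDestroy(start)
    err, = driver.cuEventDestroy(stop)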

    External Resource Interoperability#


    This section describes the external resource interoperability functions of the low-level CUDA driver application programming interface.

cuda.bindings.driver.cuImportExternalMemory(CUDA_EXTERNAL_MEMORY_HANDLE_DESC memHandleDesc: Optional[CUDA_EXTERNAL_MEMORY_HANDLE_DESC])#

Imports an external memory object.

Imports an externally allocated memory object and returns a handle to that in extMem_out.

The properties of the handle being imported must be described in memHandleDesc. The CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where type specifies the type of handle being imported. CUexternalMemoryHandleType is defined as:

View CUDA Toolkit Documentation for a C++ code example

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid file descriptor referencing a memory object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one of CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a memory object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a memory object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must be non-NULL and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the memory object are destroyed.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one of CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Heap object. This handle holds a reference to the underlying object. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D12Heap object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one of CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Resource object. This handle holds a reference to the underlying object. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D12Resource object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must represent a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to a ID3D11Resource object. If CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a ID3D11Resource object.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must represent a valid shared KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to a ID3D11Resource object and CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must be NULL.

If type is CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::nvSciBufObject must be non-NULL and reference a valid NvSciBuf object. If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the application must use cuWaitExternalSemaphoresAsync or cuSignalExternalSemaphoresAsync as appropriate barriers to maintain coherence between CUDA and the other drivers. See CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC and CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC for memory synchronization.

The size of the memory object must be specified in size.

Specifying the flag CUDA_EXTERNAL_MEMORY_DEDICATED in flags indicates that the resource is a dedicated resource. The definition of what a dedicated resource is, is outside the scope of this extension. This flag must be set if type is one of the following: CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT.

Parameters:
    memHandleDesc (CUDA_EXTERNAL_MEMORY_HANDLE_DESC) – Memory import handle descriptor

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OPERATING_SYSTEM
    • extMem_out (CUexternalMemory) – Returned handle to an external memory object

Notes

If the Vulkan memory imported into CUDA is mapped on the CPU then the application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges as well as appropriate Vulkan pipeline barriers to maintain coherence between CPU and GPU. For more information on these APIs, please refer to the "Synchronization and Cache Control" chapter from the Vulkan specification.

cuda.bindings.driver.cuExternalMemoryGetMappedBuffer(extMem, CUDA_EXTERNAL_MEMORY_BUFFER_DESC bufferDesc: Optional[CUDA_EXTERNAL_MEMORY_BUFFER_DESC])#

Maps a buffer onto an imported memory object.

Maps a buffer onto an imported memory object and returns a device pointer in devPtr.

The properties of the buffer being mapped must be described in bufferDesc. The CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where offset is the offset in the memory object where the buffer's base address is. size is the size of the buffer. flags must be zero.

The offset and size have to be suitably aligned to match the requirements of the external API. Mapping two buffers whose ranges overlap may or may not result in the same virtual address being returned for the overlapped portion. In such cases, the application must ensure that all accesses to that region from the GPU are volatile. Otherwise writes made via one address are not guaranteed to be visible via the other address, even if they're issued by the same thread. It is recommended that applications map the combined range instead of mapping separate buffers and then apply the appropriate offsets to the returned pointer to derive the individual buffers.

The returned pointer devPtr must be freed using cuMemFree.

Parameters:
    • extMem (CUexternalMemory) – Handle to external memory object
    • bufferDesc (CUDA_EXTERNAL_MEMORY_BUFFER_DESC) – Buffer descriptor

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE
    • devPtr (CUdeviceptr) – Returned device pointer to buffer
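
As a rough illustration of the import-then-map flow, here is a hedged Python sketch for the OPAQUE_FD path. The file descriptor fd and size nbytes are assumed to come from some other API (e.g. a Vulkan exporter); field names mirror the C descriptor structs, error checking is elided, and the fd is owned by CUDA once the import succeeds:

    from cuda.bindings import driver

    mem_desc = driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC()
    mem_desc.type = driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD
    mem_desc.handle.fd = fd          # fd exported by the other API; CUDA owns it after import
    mem_desc.size = nbytes           # total size of the memory object in bytes
    err, ext_mem = driver.cuImportExternalMemory(mem_desc)

    buf_desc = driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC()
    buf_desc.offset = 0              # map from the start of the object
    buf_desc.size = nbytes           # map the whole object
    buf_desc.flags = 0               # must be zero
    err, dptr = driver.cuExternalMemoryGetMappedBuffer(ext_mem, buf_desc)

    # ... use dptr in kernels / memcpys ...
    err, = driver.cuMemFree(dptr)                  # mapped buffers must be freed explicitly
    err, = driver.cuDestroyExternalMemory(ext_mem)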

cuda.bindings.driver.cuExternalMemoryGetMappedMipmappedArray(extMem, CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC mipmapDesc: Optional[CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC])#

Maps a CUDA mipmapped array onto an external memory object.

Maps a CUDA mipmapped array onto an external object and returns a handle to it in mipmap.

The properties of the CUDA mipmapped array being mapped must be described in mipmapDesc. The structure CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where offset is the offset in the memory object where the base level of the mipmap chain is. arrayDesc describes the format, dimensions and type of the base level of the mipmap chain. For further details on these parameters, please refer to the documentation for cuMipmappedArrayCreate. Note that if the mipmapped array is bound as a color target in the graphics API, then the flag CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags. numLevels specifies the total number of levels in the mipmap chain.

If extMem was imported from a handle of type CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then numLevels must be equal to 1.

The returned CUDA mipmapped array must be freed using cuMipmappedArrayDestroy.

Parameters:
    • extMem (CUexternalMemory) – Handle to external memory object
    • mipmapDesc (CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC) – CUDA array descriptor

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE
    • mipmap (CUmipmappedArray) – Returned CUDA mipmapped array

cuda.bindings.driver.cuDestroyExternalMemory(extMem)#

Destroys an external memory object.

Destroys the specified external memory object. Any existing buffers and CUDA mipmapped arrays mapped onto this object must no longer be used and must be explicitly freed using cuMemFree and cuMipmappedArrayDestroy respectively.

Parameters:
    extMem (CUexternalMemory) – External memory object to be destroyed

Returns:
    CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE

Return type:
    CUresult

cuda.bindings.driver.cuImportExternalSemaphore(CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC semHandleDesc: Optional[CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC])#

Imports an external semaphore.

Imports an externally allocated synchronization object and returns a handle to that in extSem_out.

The properties of the handle being imported must be described in semHandleDesc. The CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is defined as follows:

View CUDA Toolkit Documentation for a C++ code example

where type specifies the type of handle being imported. CUexternalSemaphoreHandleType is defined as:

View CUDA Toolkit Documentation for a C++ code example

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one of CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must be non-NULL and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the synchronization object are destroyed.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one of CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to a ID3D12Fence object. This handle holds a reference to the underlying object. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D12Fence object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle represents a valid shared NT handle that is returned by ID3D11Fence::CreateSharedHandle. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D11Fence object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::nvSciSyncObj represents a valid NvSciSyncObj.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle represents a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to a IDXGIKeyedMutex object. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid IDXGIKeyedMutex object.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle represents a valid shared KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to a IDXGIKeyedMutex object and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, then CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

If type is CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32, then exactly one of CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be NULL. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name is not NULL, then it must name a valid synchronization object.

Parameters:
    semHandleDesc (CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC) – Semaphore import handle descriptor

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OPERATING_SYSTEM
    • extSem_out (CUexternalSemaphore) – Returned handle to an external semaphore

cuda.bindings.driver.cuSignalExternalSemaphoresAsync(extSemArray: Optional[Tuple[CUexternalSemaphore] | List[CUexternalSemaphore]], paramsArray: Optional[Tuple[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS] | List[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS]], unsigned int numExtSems, stream)#

Signals a set of external semaphore objects.

Enqueues a signal operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

The exact semantics of signaling a semaphore depends on the type of the object.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT then signaling the semaphore will set it to the signaled state.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 then the semaphore will be set to the value specified in CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value.

If the semaphore object is of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC this API sets CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence to a value that can be used by subsequent waiters of the same NvSciSync object to order operations with those currently submitted in stream. Such an update will overwrite previous contents of CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence. By default, signaling such an external semaphore object causes appropriate memory synchronization operations to be performed over all external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for a semaphore object of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_SIGNAL, this API will return CUDA_ERROR_NOT_SUPPORTED. The NvSciSyncFence associated with a semaphore object of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC can be deterministic. For this, the NvSciSyncAttrList used to create the semaphore object must have the value of the NvSciSyncAttrKey_RequireDeterministicFences key set to true. Deterministic fences allow users to enqueue a wait over the semaphore object even before the corresponding signal is enqueued. For such a semaphore object, CUDA guarantees that each signal operation will increment the fence value by '1'. Users are expected to track the count of signals enqueued on the semaphore object and insert waits accordingly. When such a semaphore object is signaled from multiple streams, due to concurrent stream execution, it is possible that the order in which the semaphore gets signaled is indeterministic. This could lead to waiters of the semaphore getting unblocked incorrectly. Users are expected to handle such situations, either by not using the same semaphore object with deterministic fence support enabled in different streams or by adding explicit dependency amongst such streams so that the semaphore is signaled in order.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT then the keyed mutex will be released with the key specified in CUDA_EXTERNAL_SEMAPHORE_PARAMS::params::keyedmutex::key.

Parameters:
    • extSemArray (List[CUexternalSemaphore]) – Set of external semaphores to be signaled
    • paramsArray (List[CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS]) – Array of semaphore parameters
    • numExtSems (unsigned int) – Number of semaphores to signal
    • stream (CUstream or cudaStream_t) – Stream to enqueue the signal operations in

Returns:
    CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

Return type:
    CUresult

cuda.bindings.driver.cuWaitExternalSemaphoresAsync(extSemArray: Optional[Tuple[CUexternalSemaphore] | List[CUexternalSemaphore]], paramsArray: Optional[Tuple[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS] | List[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS]], unsigned int numExtSems, stream)#

Waits on a set of external semaphore objects.

Enqueues a wait operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

The exact semantics of waiting on a semaphore depends on the type of the object.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT then waiting on the semaphore will wait until the semaphore reaches the signaled state. The semaphore will then be reset to the unsignaled state. Therefore for every signal operation, there can only be one wait operation.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 then waiting on the semaphore will wait until the value of the semaphore is greater than or equal to CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value.

If the semaphore object is of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC then, waiting on the semaphore will wait until the CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence is signaled by the signaler of the NvSciSyncObj that was associated with this semaphore object. By default, waiting on such an external semaphore object causes appropriate memory synchronization operations to be performed over all external memory objects that are imported as CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for a semaphore object of the type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_WAIT, this API will return CUDA_ERROR_NOT_SUPPORTED.

If the semaphore object is any one of the following types: CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT then the keyed mutex will be acquired when it is released with the key specified in CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::key or until the timeout specified by CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::timeoutMs has lapsed. The timeout interval can either be a finite value specified in milliseconds or an infinite value. In case an infinite value is specified the timeout never elapses. The Windows INFINITE macro must be used to specify an infinite timeout.

Parameters:
    • extSemArray (List[CUexternalSemaphore]) – External semaphores to be waited on
    • paramsArray (List[CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS]) – Array of semaphore parameters
    • numExtSems (unsigned int) – Number of semaphores to wait on
    • stream (CUstream or cudaStream_t) – Stream to enqueue the wait operations in

Returns:
    CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED, CUDA_ERROR_TIMEOUT

Return type:
    CUresult
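
To sketch how the array-of-semaphores interface looks from Python for the timeline-semaphore case: ext_sem is assumed to have been imported as above, the fence value fields follow the C param structs (treat them as illustrative), and error checking is elided:

    from cuda.bindings import driver

    sig = driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS()
    sig.params.fence.value = 2            # value the timeline semaphore will be set to
    err, = driver.cuSignalExternalSemaphoresAsync([ext_sem], [sig], 1, stream)

    wait = driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS()
    wait.params.fence.value = 2           # wait until the semaphore value is >= 2
    err, = driver.cuWaitExternalSemaphoresAsync([ext_sem], [wait], 1, stream)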

cuda.bindings.driver.cuDestroyExternalSemaphore(extSem)#

Destroys an external semaphore.

Destroys an external semaphore object and releases any references to the underlying resource. Any outstanding signals or waits must have completed before the semaphore is destroyed.

Parameters:
    extSem (CUexternalSemaphore) – External semaphore to be destroyed

Returns:
    CUDA_SUCCESS, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_HANDLE

Return type:
    CUresult

    Stream Memory Operations#

This section describes the stream memory operations of the low-level CUDA driver application programming interface.

Support for the CU_STREAM_WAIT_VALUE_NOR flag can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2.

Support for the cuStreamWriteValue64() and cuStreamWaitValue64() functions, as well as for the CU_STREAM_MEM_OP_WAIT_VALUE_64 and CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.

Support for both CU_STREAM_WAIT_VALUE_FLUSH and CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform hardware features and can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.

Note that all memory pointers passed as parameters to these operations are device pointers. Where necessary, a device pointer should be obtained, for example, with cuMemHostGetDevicePointer().

None of the operations accepts pointers to managed memory buffers (cuMemAllocManaged).

Warning: Improper use of these APIs may deadlock the application. Synchronization ordering established through these APIs is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by these APIs should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order.

cuda.bindings.driver.cuStreamWaitValue32(stream, addr, value, unsigned int flags)#

Wait on a memory location.

Enqueues a synchronization of the stream on the given memory location. Work ordered after the operation will block until the given condition on the memory is satisfied. By default, the condition is to wait for (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal. Other condition types can be specified via flags.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer(). This function cannot be used with managed memory (cuMemAllocManaged).

Support for CU_STREAM_WAIT_VALUE_NOR can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2.

Parameters:
    • stream (CUstream or cudaStream_t) – The stream to synchronize on the memory location
    • addr (CUdeviceptr) – The memory location to wait on
    • value (cuuint32_t) – The value to compare with the memory location
    • flags (unsigned int) – See CUstreamWaitValue_flags

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:
    CUresult

Notes

Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order.

cuda.bindings.driver.cuStreamWaitValue64(stream, addr, value, unsigned int flags)#

Wait on a memory location.

Enqueues a synchronization of the stream on the given memory location. Work ordered after the operation will block until the given condition on the memory is satisfied. By default, the condition is to wait for (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal. Other condition types can be specified via flags.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer().

Support for this can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.

Parameters:
    • stream (CUstream or cudaStream_t) – The stream to synchronize on the memory location
    • addr (CUdeviceptr) – The memory location to wait on
    • value (cuuint64_t) – The value to compare with the memory location
    • flags (unsigned int) – See CUstreamWaitValue_flags

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:
    CUresult

Notes

Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order.

cuda.bindings.driver.cuStreamWriteValue32(stream, addr, value, unsigned int flags)#

Write a value to memory.

Write a value to memory.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer(). This function cannot be used with managed memory (cuMemAllocManaged).

Parameters:
    • stream (CUstream or cudaStream_t) – The stream to do the write in
    • addr (CUdeviceptr) – The device address to write to
    • value (cuuint32_t) – The value to write
    • flags (unsigned int) – See CUstreamWriteValue_flags

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:
    CUresult
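
A minimal host-pinned-memory sketch of the 32-bit write/wait pair, assuming a current context, an existing stream, and a device that supports these operations (error checking elided; the flag enums below follow the C API and are illustrative):

    from cuda.bindings import driver

    # Pinned host allocation that the device can also address.
    err, host_ptr = driver.cuMemHostAlloc(4, driver.CU_MEMHOSTALLOC_DEVICEMAP)
    err, dev_ptr = driver.cuMemHostGetDevicePointer(host_ptr, 0)

    # Enqueue: write 42 to the location, then wait until it is >= 42.
    err, = driver.cuStreamWriteValue32(
        stream, dev_ptr, 42, driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT)
    err, = driver.cuStreamWaitValue32(
        stream, dev_ptr, 42, driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ)
    err, = driver.cuStreamSynchronize(stream)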

cuda.bindings.driver.cuStreamWriteValue64(stream, addr, value, unsigned int flags)#

Write a value to memory.

Write a value to memory.

If the memory was registered via cuMemHostRegister(), the device pointer should be obtained with cuMemHostGetDevicePointer().

Support for this can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS.

Parameters:
    • stream (CUstream or cudaStream_t) – The stream to do the write in
    • addr (CUdeviceptr) – The device address to write to
    • value (cuuint64_t) – The value to write
    • flags (unsigned int) – See CUstreamWriteValue_flags

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:
    CUresult

cuda.bindings.driver.cuStreamBatchMemOp(stream, unsigned int count, paramArray: Optional[Tuple[CUstreamBatchMemOpParams] | List[CUstreamBatchMemOpParams]], unsigned int flags)#

Batch operations to synchronize the stream via memory operations.

This is a batch version of cuStreamWaitValue32() and cuStreamWriteValue32(). Batching operations may avoid some performance overhead in both the API call and the device execution versus adding them to the stream in separate API calls. The operations are enqueued in the order they appear in the array.

See CUstreamBatchMemOpType for the full set of supported operations, and cuStreamWaitValue32(), cuStreamWaitValue64(), cuStreamWriteValue32(), and cuStreamWriteValue64() for details of specific operations.

See related APIs for details on querying support for specific operations.

Parameters:
    • stream (CUstream or cudaStream_t) – The stream to enqueue the operations in.
    • count (unsigned int) – The number of operations in the array. Must be less than 256.
    • paramArray (List[CUstreamBatchMemOpParams]) – The types and parameters of the individual operations.
    • flags (unsigned int) – Reserved for future expansion; must be 0.

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

Return type:
    CUresult

Notes

Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. For more information, see the Stream Memory Operations section in the programming guide (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html).
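
For completeness, a hedged sketch of batching the same write through cuStreamBatchMemOp, reusing dev_ptr and stream from the previous sketch; the union-style field access mirrors the C CUstreamBatchMemOpParams layout and is illustrative:

    from cuda.bindings import driver

    op = driver.CUstreamBatchMemOpParams()
    op.operation = driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_32
    op.writeValue.address = dev_ptr   # device pointer from cuMemHostGetDevicePointer
    op.writeValue.value = 42
    op.writeValue.flags = driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT
    err, = driver.cuStreamBatchMemOp(stream, 1, [op], 0)  # flags must be 0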

    Execution Control#


This section describes the execution control functions of the low-level CUDA driver application programming interface.

class cuda.bindings.driver.CUfunctionLoadingState(value)#

    CU_FUNCTION_LOADING_STATE_UNLOADED = 0#

    CU_FUNCTION_LOADING_STATE_LOADED = 1#

    CU_FUNCTION_LOADING_STATE_MAX = 2#

cuda.bindings.driver.cuFuncGetAttribute(attrib: CUfunction_attribute, hfunc)#

Returns information about a function.

Returns in *pi the integer value of the attribute attrib on the kernel given by hfunc. The supported attributes are:

    • CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded.
    • CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of statically-allocated shared memory per block required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime.
    • CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated constant memory required by this function.
    • CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory used by each thread of this function.
    • CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread of this function.
    • CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0.
    • CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version.
    • CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the function has been compiled with the user-specified option "-Xptxas --dlcm=ca" set.
    • CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of dynamically-allocated shared memory.
    • CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 cache split ratio in percent of total shared memory.
    • CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the kernel must launch with a valid cluster size specified.
    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in blocks.
    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in blocks.
    • CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in blocks.
    • CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether the function can be launched with a non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform. The CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device. A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities. The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable.
    • CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy.

With a few exceptions, function attributes may also be queried on unloaded function handles returned from cuModuleEnumerateFunctions. CUDA_ERROR_FUNCTION_NOT_LOADED is returned if the attribute requires a fully loaded function but the function is not loaded. The loading state of a function may be queried using cuFuncIsLoaded. cuFuncLoad may be called to explicitly load a function before querying attributes that require the function to be loaded.

Parameters:
    • attrib (CUfunction_attribute) – Attribute requested
    • hfunc (CUfunction) – Function to query attribute of

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE
    • pi (int) – Returned attribute value
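
A short Python sketch of querying a couple of attributes on a kernel handle; `kernel` is assumed to be a CUfunction previously obtained, e.g. from cuModuleGetFunction, and error checking is elided. The final call pairs with cuFuncSetAttribute, documented next:

    from cuda.bindings import driver

    attr = driver.CUfunction_attribute
    err, max_threads = driver.cuFuncGetAttribute(
        attr.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel)
    err, static_smem = driver.cuFuncGetAttribute(
        attr.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel)
    print(f"max threads/block: {max_threads}, static shared mem: {static_smem} bytes")

    # Raise the dynamic shared memory limit for this kernel (a writable attribute).
    err, = driver.cuFuncSetAttribute(
        kernel, attr.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, 64 * 1024)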

cuda.bindings.driver.cuFuncSetAttribute(hfunc, attrib: CUfunction_attribute, int value)#

Sets information about a function.

This call sets the value of a specified attribute attrib on the kernel given by hfunc to the integer value specified by value. This function returns CUDA_SUCCESS if the new value of the attribute could be successfully set. If the set fails, this call will return an error. Not all attributes can have values set. Attempting to set a value on a read-only attribute will result in an error (CUDA_ERROR_INVALID_VALUE).

Supported attributes for the cuFuncSetAttribute call are listed in the CUDA Toolkit documentation.

Parameters:
    • hfunc (CUfunction) – Function to set the attribute for
    • attrib (CUfunction_attribute) – Attribute requested
    • value (int) – The value to set

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:
    CUresult

cuda.bindings.driver.cuFuncSetCacheConfig(hfunc, config: CUfunc_cache)#

Sets the preferred cache configuration for a device function.

On devices where the L1 cache and shared memory use the same hardware resources, this sets through config the preferred cache configuration for the device function hfunc. This is only a preference. The driver will use the requested configuration if possible, but it is free to choose a different configuration if required to execute hfunc. Any context-wide preference set via cuCtxSetCacheConfig() will be overridden by this per-function setting unless the per-function setting is CU_FUNC_CACHE_PREFER_NONE. In that case, the current context-wide setting will be used.

This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

The supported cache configurations are:

    • CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)
    • CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache
    • CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory
    • CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory

Parameters:
    • hfunc (CUfunction) – Kernel to configure cache for
    • config (CUfunc_cache) – Requested cache configuration

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT

Return type:
    CUresult

cuda.bindings.driver.cuFuncGetModule(hfunc)#

Returns a module handle.

Returns in *hmod the handle of the module that function hfunc is located in. The lifetime of the module corresponds to the lifetime of the context it was loaded in or until the module is explicitly unloaded.

The CUDA runtime manages its own modules loaded into the primary context. If the handle returned by this API refers to a module loaded by the CUDA runtime, calling cuModuleUnload() on that module will result in undefined behavior.

Parameters:
    hfunc (CUfunction) – Function to retrieve module for

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_FOUND
    • hmod (CUmodule) – Returned module handle

cuda.bindings.driver.cuFuncGetName(hfunc)#

Returns the function name for a CUfunction handle.

Returns in **name the function name associated with the function handle hfunc. The function name is returned as a null-terminated string. The returned name is only valid when the function handle is valid. If the module is unloaded or reloaded, one must call the API again to get the updated name. This API may return a mangled name if the function is not declared as having C linkage. If either **name or hfunc is NULL, CUDA_ERROR_INVALID_VALUE is returned.

Parameters:
    hfunc (CUfunction) – The function handle to retrieve the name for

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • name (bytes) – The returned name of the function

cuda.bindings.driver.cuFuncGetParamInfo(func, size_t paramIndex)#

Returns the offset and size of a kernel parameter in the device-side parameter layout.

Queries the kernel parameter at paramIndex into func's list of parameters, and returns in paramOffset and paramSize the offset and size, respectively, where the parameter will reside in the device-side parameter layout. This information can be used to update kernel node parameters from the device via cudaGraphKernelNodeSetParam() and cudaGraphKernelNodeUpdatesApply(). paramIndex must be less than the number of parameters that func takes. paramSize can be set to NULL if only the parameter offset is desired.

Parameters:
    • func (CUfunction) – The function to query
    • paramIndex (size_t) – The parameter index to query

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • paramOffset (int) – Returns the offset into the device-side parameter layout at which the parameter resides
    • paramSize (int) – Optionally returns the size of the parameter in the device-side parameter layout

See also

cuKernelGetParamInfo

cuda.bindings.driver.cuFuncIsLoaded(function)#

Returns whether the function is loaded.

Returns in state the loading state of function.

Parameters:
    function (CUfunction) – the function to check

Returns:
    • CUresult – CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE
    • state (CUfunctionLoadingState) – returned loading state

cuda.bindings.driver.cuFuncLoad(function)#

Loads a function.

Finalizes function loading for function. Calling this API with a fully loaded function has no effect.

Parameters:
    function (CUfunction) – the function to load

Returns:
    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_VALUE

Return type:
    CUresult
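
Tying the two calls together, a small sketch that lazily finalizes loading for a kernel handle (`kernel` assumed as before; error checking elided):

    from cuda.bindings import driver

    err, state = driver.cuFuncIsLoaded(kernel)
    if state == driver.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_UNLOADED:
        err, = driver.cuFuncLoad(kernel)  # finalize loading before queries/launches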

cuda.bindings.driver.cuLaunchKernel(f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hStream, kernelParams, void_ptr extra)#

Launches a CUDA function CUfunction or a CUDA kernel CUkernel.

Invokes the function CUfunction or the kernel CUkernel f on a gridDimX x gridDimY x gridDimZ grid of blocks. Each block contains blockDimX x blockDimY x blockDimZ threads.

sharedMemBytes sets the amount of dynamic shared memory that will be available to each thread block.

Kernel parameters to f can be specified in one of two ways:

1) Kernel parameters can be specified via kernelParams. If f has N parameters, then kernelParams needs to be an array of N pointers. Each of kernelParams[0] through kernelParams[N-1] must point to a region of memory from which the actual kernel parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified as that information is retrieved directly from the kernel's image.

2) Kernel parameters can also be packaged by the application into a single buffer that is passed in via the extra parameter. This places the burden on the application of knowing each kernel parameter's size and alignment/padding within the buffer. Here is an example of using the extra parameter in this manner:

View CUDA Toolkit Documentation for a C++ code example

The extra parameter exists to allow cuLaunchKernel to take additional less commonly used arguments. extra specifies a list of names of extra settings and their corresponding values. Each extra setting name is immediately followed by the corresponding value. The list must be terminated with either NULL or CU_LAUNCH_PARAM_END.

    • CU_LAUNCH_PARAM_END, which indicates the end of the extra array;
    • CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next value in extra will be a pointer to a buffer containing all the kernel parameters for launching kernel f;
    • CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next value in extra will be a pointer to a size_t containing the size of the buffer specified with CU_LAUNCH_PARAM_BUFFER_POINTER.

The error CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both kernelParams and extra (i.e. both kernelParams and extra are non-NULL).

Calling cuLaunchKernel() invalidates the persistent function state set through the following deprecated APIs: cuFuncSetBlockShape(), cuFuncSetSharedSize(), cuParamSetSize(), cuParamSeti(), cuParamSetf(), cuParamSetv().

Note that to use cuLaunchKernel(), the kernel f must either have been compiled with toolchain version 3.2 or later so that it will contain kernel parameter information, or have no kernel parameters. If either of these conditions is not met, then cuLaunchKernel() will return CUDA_ERROR_INVALID_IMAGE.

Note that the API can also be used to launch context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to launch the kernel on will either be taken from the specified stream hStream or the current context in case of NULL stream.

Parameters:
    • f (CUfunction) – Function CUfunction or Kernel CUkernel to launch
    • gridDimX (unsigned int) – Width of grid in blocks
    • gridDimY (unsigned int) – Height of grid in blocks
    • gridDimZ (unsigned int) – Depth of grid in blocks
    • blockDimX (unsigned int) – X dimension of each thread block
    • blockDimY (unsigned int) – Y dimension of each thread block
    • blockDimZ (unsigned int) – Z dimension of each thread block
    • sharedMemBytes (unsigned int) – Dynamic shared-memory size per thread block in bytes
    • hStream (CUstream or cudaStream_t) – Stream identifier
    • kernelParams (Any) – Array of pointers to kernel parameters
    • extra (List[Any]) – Extra options

Returns:
    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, CUDA_ERROR_NOT_FOUND

Return type:
    CUresult
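
An end-to-end launch sketch using the kernelParams path as exposed by these bindings, where arguments may be passed as a (values, types) pair; `kernel` is assumed to be a CUfunction for a kernel with signature (float* out, int n), dOut a CUdeviceptr, and error checking is elided:

    import ctypes
    from cuda.bindings import driver

    n = 1 << 20
    args = ((dOut, n),                  # argument values
            (None, ctypes.c_int))       # None lets the binding treat dOut as a device pointer
    blocks = (n + 255) // 256
    err, = driver.cuLaunchKernel(kernel,
                                 blocks, 1, 1,   # grid dimensions
                                 256, 1, 1,      # block dimensions
                                 0,              # dynamic shared memory in bytes
                                 stream,         # CUstream (or 0 for the default stream)
                                 args,           # kernelParams
                                 0)              # extra (unused here)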
    +
    +cuda.bindings.driver.cuLaunchKernelEx(CUlaunchConfig config: Optional[CUlaunchConfig], f, kernelParams, void_ptr extra)#
    +

    Launches a CUDA function CUfunction or a CUDA kernel CUkernel with launch-time configuration.

    +

    Invokes the function CUfunction or the kernel +CUkernel f with the specified launch-time configuration +config.

    +

    The CUlaunchConfig structure is defined as:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    where:

    +
      +
    • gridDimX is the width of the grid in +blocks.

    • +
    • gridDimY is the height of the grid in +blocks.

    • +
    • gridDimZ is the depth of the grid in +blocks.

    • +
    • blockDimX is the X dimension of each +thread block.

    • +
    • blockDimX is the Y dimension of each +thread block.

    • +
    • blockDimZ is the Z dimension of each +thread block.

    • +
    • sharedMemBytes is the dynamic shared- +memory size per thread block in bytes.

    • +
    • hStream is the handle to the stream to +perform the launch in. The CUDA context associated with this stream +must match that associated with function f.

    • +
    • attrs is an array of +numAttrs continguous +CUlaunchAttribute elements. The value of this pointer is +not considered if numAttrs is zero. +However, in that case, it is recommended to set the pointer to NULL.

    • +
    • numAttrs is the number of attributes +populating the first numAttrs positions of +the attrs array.

    • +
    +

    Launch-time configuration is specified by adding entries to +attrs. Each entry is an attribute ID and a +corresponding attribute value.

    +

    The CUlaunchAttribute structure is defined as:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    where:

    +
      +
    • id is a unique enum identifying the +attribute.

    • +
    • value is a union that hold the +attribute value.

    • +
    +

    An example of using the config parameter:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    The CUlaunchAttributeID enum is defined as:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    and the corresponding CUlaunchAttributeValue union as :

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    Setting CU_LAUNCH_ATTRIBUTE_COOPERATIVE to a non-zero value +causes the kernel launch to be a cooperative launch, with exactly the +same usage and semantics of cuLaunchCooperativeKernel.

    +

    Setting +CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION to a +non-zero values causes the kernel to use programmatic means to resolve +its stream dependency – enabling the CUDA runtime to opportunistically +allow the grid’s execution to overlap with the previous kernel in the +stream, if that kernel requests the overlap.

    +

    CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT records an event along with the kernel launch. An event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or the CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block’s execution if triggerAtBlockStart is set to non-0. Note that dependents (including the CPU thread calling cuEventSynchronize()) are not guaranteed to observe the release precisely when it is released. For example, cuEventSynchronize() may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. created with the CU_EVENT_DISABLE_TIMING flag set).

    +

    CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT records an event along with the kernel launch. Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example:

    +
      +
    • If B can claim execution resources unavailable to A, for example if they run on different GPUs.

    • +
    • If B has a higher priority than A.

    • +
    +

    Exercise caution if such an ordering inversion could lead to deadlock. The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the CU_EVENT_DISABLE_TIMING flag set).

    +

    Setting CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE to 1 on a captured launch causes the resulting kernel node to be device-updatable. This attribute is specific to graphs, and passing it to a launch in a non-capturing stream results in an error. Passing a value other than 0 or 1 is not allowed.

    +

    On success, a handle will be returned via CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node’s kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see cudaGraphKernelNodeUpdatesApply.

    +

    Kernel nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via cuGraphDestroyNode. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the attribute to 0 will result in an error. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation.

    +

    The effect of other attributes is consistent with their effect when set via persistent APIs.

    +

    See cuStreamSetAttribute for

    + +

    See cuFuncSetAttribute for

    + +

    Kernel parameters to f can be specified in the same ways that they can be using cuLaunchKernel.

    +

    Note that the API can also be used to launch context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to launch the kernel on will either be taken from the specified stream hStream or the current context in case of NULL stream.
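
    As a rough illustration of the shape of this call in these bindings (a sketch, not taken from the CUDA Toolkit documentation), the following launches a parameterless kernel with a cooperative-launch attribute; it assumes cuInit, context creation, module loading, and the `kernel` (CUfunction) and `stream` (CUstream) handles have already been set up:

        from cuda.bindings import driver

        config = driver.CUlaunchConfig()
        config.gridDimX = 64
        config.gridDimY = 1
        config.gridDimZ = 1
        config.blockDimX = 256
        config.blockDimY = 1
        config.blockDimZ = 1
        config.sharedMemBytes = 0
        config.hStream = stream  # assumed pre-existing CUstream

        # One launch attribute: request a cooperative launch.
        attr = driver.CUlaunchAttribute()
        attr.id = driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_COOPERATIVE
        attr.value.cooperative = 1
        config.attrs = [attr]
        config.numAttrs = 1

        # kernelParams and extra are 0 because this kernel takes no parameters.
        err, = driver.cuLaunchKernelEx(config, kernel, 0, 0)
        assert err == driver.CUresult.CUDA_SUCCESS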

    +
    +
    Parameters:
    +
      +
    • config (CUlaunchConfig) – Config to launch

    • +
    • f (CUfunction) – Function CUfunction or Kernel CUkernel to launch

    • +
    • kernelParams (Any) – Array of pointers to kernel parameters

    • +
    • extra (List[Any]) – Extra options

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, CUDA_ERROR_NOT_FOUND

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuLaunchCooperativeKernel(f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hStream, kernelParams)#
    +

    Launches a CUDA function CUfunction or a CUDA kernel CUkernel where thread blocks can cooperate and synchronize as they execute.

    +

    Invokes the function CUfunction or the kernel CUkernel f on a gridDimX x gridDimY x gridDimZ grid of blocks. Each block contains blockDimX x blockDimY x blockDimZ threads.

    +

    sharedMemBytes sets the amount of dynamic shared memory that will be available to each thread block.

    +

    The device on which this kernel is invoked must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH.

    +

    The total number of blocks launched cannot exceed the maximum number of blocks per multiprocessor as returned by cuOccupancyMaxActiveBlocksPerMultiprocessor (or cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors as specified by the device attribute CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.
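
    For example, a caller might size a cooperative grid against this limit as in the sketch below (the `kernel` handle, `dev` device, and block size of 256 are assumptions for illustration):

        # Upper bound on a cooperative grid: resident blocks per SM times SM count.
        err, blocks_per_sm = driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(kernel, 256, 0)
        err, sm_count = driver.cuDeviceGetAttribute(
            driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev)
        max_coop_blocks = blocks_per_sm * sm_count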

    +

    The kernel cannot make use of CUDA dynamic parallelism.

    +

    Kernel parameters must be specified via kernelParams. If f has N parameters, then kernelParams needs to be an array of N pointers. Each of `kernelParams`[0] through `kernelParams`[N-1] must point to a region of memory from which the actual kernel parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified as that information is retrieved directly from the kernel’s image.

    +

    Calling cuLaunchCooperativeKernel() sets persistent function state that is the same as function state set through the cuLaunchKernel API.

    +

    When the kernel f is launched via cuLaunchCooperativeKernel(), the previous block shape, shared size and parameter info associated with f is overwritten.

    +

    Note that to use cuLaunchCooperativeKernel(), the kernel f must either have been compiled with toolchain version 3.2 or later so that it will contain kernel parameter information, or have no kernel parameters. If either of these conditions is not met, then cuLaunchCooperativeKernel() will return CUDA_ERROR_INVALID_IMAGE.

    +

    Note that the API can also be used to launch context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then passing it to the API by casting to CUfunction. Here, the context to launch the kernel on will either be taken from the specified stream hStream or the current context in case of NULL stream.

    +
    +
    Parameters:
    +
      +
    • f (CUfunction) – Function CUfunction or Kernel CUkernel to launch

    • +
    • gridDimX (unsigned int) – Width of grid in blocks

    • +
    • gridDimY (unsigned int) – Height of grid in blocks

    • +
    • gridDimZ (unsigned int) – Depth of grid in blocks

    • +
    • blockDimX (unsigned int) – X dimension of each thread block

    • +
    • blockDimY (unsigned int) – Y dimension of each thread block

    • +
    • blockDimZ (unsigned int) – Z dimension of each thread block

    • +
    • sharedMemBytes (unsigned int) – Dynamic shared-memory size per thread block in bytes

    • +
    • hStream (CUstream or cudaStream_t) – Stream identifier

    • +
    • kernelParams (Any) – Array of pointers to kernel parameters

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, CUDA_ERROR_NOT_FOUND

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuLaunchCooperativeKernelMultiDevice(launchParamsList: Optional[Tuple[CUDA_LAUNCH_PARAMS] | List[CUDA_LAUNCH_PARAMS]], unsigned int numDevices, unsigned int flags)#
    +

    Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute.

    +

    [Deprecated]

    +

    Invokes kernels as specified in the launchParamsList array where each element of the array specifies all the parameters required to perform a single kernel launch. These kernels can cooperate and synchronize as they execute. The size of the array is specified by numDevices.

    +

    No two kernels can be launched on the same device. All the devices targeted by this multi-device launch must be identical. All devices must have a non-zero value for the device attribute CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH.

    +

    All kernels launched must be identical with respect to the compiled code. Note that any device, constant or managed variables present in the module that owns the kernel launched on each device are independently instantiated on every device. It is the application’s responsibility to ensure these variables are initialized and used appropriately.

    +

    The size of the grids as specified in blocks, the size of the blocks themselves and the amount of shared memory used by each thread block must also match across all launched kernels.

    +

    The streams used to launch these kernels must have been created via either cuStreamCreate or cuStreamCreateWithPriority. The NULL stream or CU_STREAM_LEGACY or CU_STREAM_PER_THREAD cannot be used.

    +

    The total number of blocks launched per kernel cannot exceed the maximum number of blocks per multiprocessor as returned by cuOccupancyMaxActiveBlocksPerMultiprocessor (or cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors as specified by the device attribute CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. Since the total number of blocks launched per device has to match across all devices, the maximum number of blocks that can be launched per device will be limited by the device with the least number of multiprocessors.

    +

    The kernels cannot make use of CUDA dynamic parallelism.

    +

    The CUDA_LAUNCH_PARAMS structure is defined as:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    where:

    +
      +
    • function specifies the kernel to be launched. All functions must be identical with respect to the compiled code. Note that you can also specify context-less kernel CUkernel by querying the handle using cuLibraryGetKernel() and then casting to CUfunction. In this case, the context to launch the kernel on will be taken from the specified stream hStream.

    • +
    • gridDimX is the width of the grid in blocks. This must match across all kernels launched.

    • +
    • gridDimY is the height of the grid in blocks. This must match across all kernels launched.

    • +
    • gridDimZ is the depth of the grid in blocks. This must match across all kernels launched.

    • +
    • blockDimX is the X dimension of each thread block. This must match across all kernels launched.

    • +
    • blockDimY is the Y dimension of each thread block. This must match across all kernels launched.

    • +
    • blockDimZ is the Z dimension of each thread block. This must match across all kernels launched.

    • +
    • sharedMemBytes is the dynamic shared-memory size per thread block in bytes. This must match across all kernels launched.

    • +
    • hStream is the handle to the stream to perform the launch in. This cannot be the NULL stream or CU_STREAM_LEGACY or CU_STREAM_PER_THREAD. The CUDA context associated with this stream must match that associated with function.

    • +
    • kernelParams is an array of pointers to kernel parameters. If function has N parameters, then kernelParams needs to be an array of N pointers. Each of `kernelParams`[0] through `kernelParams`[N-1] must point to a region of memory from which the actual kernel parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified as that information is retrieved directly from the kernel’s image.

    • +
    +

    By default, the kernel won’t begin execution on any GPU until all prior work in all the specified streams has completed. This behavior can be overridden by specifying the flag CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC. When this flag is specified, each kernel will only wait for prior work in the stream corresponding to that GPU to complete before it begins execution.

    +

    Similarly, by default, any subsequent work pushed in any of the specified streams will not begin execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying the flag CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified, any subsequent work pushed in any of the specified streams will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution.

    +

    Calling cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is the same as function state set through the cuLaunchKernel API when called individually for each element in launchParamsList.

    +

    When kernels are launched via cuLaunchCooperativeKernelMultiDevice(), the previous block shape, shared size and parameter info associated with each function in launchParamsList is overwritten.

    +

    Note that to use cuLaunchCooperativeKernelMultiDevice(), the kernels must either have been compiled with toolchain version 3.2 or later so that they contain kernel parameter information, or have no kernel parameters. If either of these conditions is not met, then cuLaunchCooperativeKernelMultiDevice() will return CUDA_ERROR_INVALID_IMAGE.

    +
    +
    Parameters:
    +
      +
    • launchParamsList (List[CUDA_LAUNCH_PARAMS]) – List of launch parameters, one per device

    • +
    • numDevices (unsigned int) – Size of the launchParamsList array

    • +
    • flags (unsigned int) – Flags to control launch behavior

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_IMAGE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_LAUNCH_FAILED, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, CUDA_ERROR_LAUNCH_TIMEOUT, CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, CUDA_ERROR_SHARED_OBJECT_INIT_FAILED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuLaunchHostFunc(hStream, fn, userData)#
    +

    Enqueues a host function call in a stream.

    +

    Enqueues a host function to run in a stream. The function will be called after currently enqueued work and will block work added after it.

    +

    The host function must not make any CUDA API calls. Attempting to use a CUDA API may result in CUDA_ERROR_NOT_PERMITTED, but this is not required. The host function must not perform any synchronization that may depend on outstanding CUDA work not mandated to run earlier. Host functions without a mandated order (such as in independent streams) execute in undefined order and may be serialized.

    +

    For the purposes of Unified Memory, execution makes a number of guarantees:

    +
      +
    • The stream is considered idle for the duration of the function’s execution. Thus, for example, the function may always use memory attached to the stream it was enqueued in.

    • +
    • The start of execution of the function has the same effect as synchronizing an event recorded in the same stream immediately prior to the function. It thus synchronizes streams which have been “joined” prior to the function.

    • +
    • Adding device work to any stream does not have the effect of making the stream active until all preceding host functions and stream callbacks have executed. Thus, for example, a function might use global attached memory even if work has been added to another stream, if the work has been ordered behind the function call with an event.

    • +
    • Completion of the function does not cause a stream to become active except as described above. The stream will remain idle if no device work follows the function, and will remain idle across consecutive host functions or stream callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a host function at the end of the stream.

    • +
    +

    Note that, in contrast to cuStreamAddCallback, the function will not be called in the event of an error in the CUDA context.

    +
    +
    Parameters:
    +
      +
    • hStream (CUstream or cudaStream_t) – Stream to enqueue function call in

    • +
    • fn (CUhostFn) – The function to call once preceding stream operations are complete

    • +
    • userData (Any) – User-specified data to be passed to the function

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_SUPPORTED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +

    Graph Management#

    +

    This section describes the graph management functions of the low-level CUDA driver application programming interface.

    +
    +
    +cuda.bindings.driver.cuGraphCreate(unsigned int flags)#
    +

    Creates a graph.

    +

    Creates an empty graph, which is returned via phGraph.
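
    In these bindings the new graph handle is returned directly alongside the status code, so a minimal call looks like the sketch below (assuming cuInit and a current context have already been set up):

        from cuda.bindings import driver

        err, graph = driver.cuGraphCreate(0)  # flags must be 0
        assert err == driver.CUresult.CUDA_SUCCESS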

    +
    +
    Parameters:
    +

    flags (unsigned int) – Graph creation flags, must be 0

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddKernelNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_KERNEL_NODE_PARAMS nodeParams: Optional[CUDA_KERNEL_NODE_PARAMS])#
    +

    Creates a kernel execution node and adds it to a graph.

    +

    Creates a new kernel execution node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    The CUDA_KERNEL_NODE_PARAMS structure is defined as:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    When the graph is launched, the node will invoke kernel func on a (gridDimX x gridDimY x gridDimZ) grid of blocks. Each block contains (blockDimX x blockDimY x blockDimZ) threads.

    +

    sharedMemBytes sets the amount of dynamic shared memory that will be available to each thread block.

    +

    Kernel parameters to func can be specified in one of two ways:

    +

    1) Kernel parameters can be specified via kernelParams. If the kernel has N parameters, then kernelParams needs to be an array of N pointers. Each pointer, from `kernelParams`[0] to `kernelParams`[N-1], points to the region of memory from which the actual parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified as that information is retrieved directly from the kernel’s image.

    +

    2) Kernel parameters for non-cooperative kernels can also be packaged by the application into a single buffer that is passed in via extra. This places the burden on the application of knowing each kernel parameter’s size and alignment/padding within the buffer. The extra parameter exists to allow this function to take additional less commonly used arguments. extra specifies a list of names of extra settings and their corresponding values. Each extra setting name is immediately followed by the corresponding value. The list must be terminated with either NULL or CU_LAUNCH_PARAM_END.

    + +

    The error CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both kernelParams and extra (i.e. both kernelParams and extra are non-NULL). CUDA_ERROR_INVALID_VALUE will be returned if extra is used for a cooperative kernel.

    +

    The kernelParams or extra array, as well as the argument values it points to, are copied during this call.
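
    A minimal sketch of adding a root-level kernel node, assuming `graph` came from cuGraphCreate and `kernel` is a CUfunction for a parameterless kernel (both names are illustrative):

        params = driver.CUDA_KERNEL_NODE_PARAMS()
        params.func = kernel
        params.gridDimX = params.gridDimY = params.gridDimZ = 1
        params.blockDimX = 128
        params.blockDimY = params.blockDimZ = 1
        params.sharedMemBytes = 0
        params.kernelParams = 0  # no kernel parameters in this sketch

        # No dependencies: the node is placed at the root of the graph.
        err, kernel_node = driver.cuGraphAddKernelNode(graph, None, 0, params)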

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • nodeParams (CUDA_KERNEL_NODE_PARAMS) – Parameters for the GPU execution node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    Kernels launched using graphs must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.

    +
    + +
    +
    +cuda.bindings.driver.cuGraphKernelNodeGetParams(hNode)#
    +

    Returns a kernel node’s parameters.

    +

    Returns the parameters of kernel node hNode in nodeParams. The kernelParams or extra array returned in nodeParams, as well as the argument values it points to, are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphKernelNodeSetParams to update the parameters of this node.

    +

    The params will contain either kernelParams or extra, according to which of these was most recently set on the node.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphKernelNodeSetParams(hNode, CUDA_KERNEL_NODE_PARAMS nodeParams: Optional[CUDA_KERNEL_NODE_PARAMS])#
    +

    Sets a kernel node’s parameters.

    +

    Sets the parameters of kernel node hNode to nodeParams.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddMemcpyNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_MEMCPY3D copyParams: Optional[CUDA_MEMCPY3D], ctx)#
    +

    Creates a memcpy node and adds it to a graph.

    +

    Creates a new memcpy node and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    When the graph is launched, the node will perform the memcpy described by copyParams. See cuMemcpy3D() for a description of the structure and its restrictions.

    +

    Memcpy nodes have some additional restrictions with regards to managed memory, if the system contains at least one device which has a zero value for the device attribute CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer to managed memory, then using the memory type CU_MEMORYTYPE_UNIFIED is disallowed for those operand(s). The managed memory will be treated as residing on either the host or the device, depending on which memory type is specified.
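
    A device-to-device sketch, assuming `graph`, device pointers `src` and `dst`, a byte count `nbytes`, and `ctx` from cuCtxGetCurrent (all illustrative names):

        cp = driver.CUDA_MEMCPY3D()
        cp.srcMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cp.srcDevice = src
        cp.dstMemoryType = driver.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cp.dstDevice = dst
        cp.WidthInBytes = nbytes  # 1-D copy expressed as width x 1 x 1
        cp.Height = 1
        cp.Depth = 1

        err, memcpy_node = driver.cuGraphAddMemcpyNode(graph, None, 0, cp, ctx)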

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • copyParams (CUDA_MEMCPY3D) – Parameters for the memory copy

    • +
    • ctx (CUcontext) – Context on which to run the node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphMemcpyNodeGetParams(hNode)#
    +

    Returns a memcpy node’s parameters.

    +

    Returns the parameters of memcpy node hNode in nodeParams.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphMemcpyNodeSetParams(hNode, CUDA_MEMCPY3D nodeParams: Optional[CUDA_MEMCPY3D])#
    +

    Sets a memcpy node’s parameters.

    +

    Sets the parameters of memcpy node hNode to nodeParams.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddMemsetNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_MEMSET_NODE_PARAMS memsetParams: Optional[CUDA_MEMSET_NODE_PARAMS], ctx)#
    +

    Creates a memset node and adds it to a graph.

    +

    Creates a new memset node and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    The element size must be 1, 2, or 4 bytes. When the graph is launched, the node will perform the memset described by memsetParams.
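
    For instance, zero-filling 1024 4-byte elements could look like the sketch below (assuming `graph`, a device pointer `dptr`, and `ctx` from cuCtxGetCurrent; all names are illustrative):

        ms = driver.CUDA_MEMSET_NODE_PARAMS()
        ms.dst = dptr
        ms.value = 0
        ms.elementSize = 4   # must be 1, 2, or 4
        ms.width = 1024      # elements per row
        ms.height = 1        # single row; pitch is unused when height is 1
        ms.pitch = 0

        err, memset_node = driver.cuGraphAddMemsetNode(graph, None, 0, ms, ctx)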

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • memsetParams (CUDA_MEMSET_NODE_PARAMS) – Parameters for the memory set

    • +
    • ctx (CUcontext) – Context on which to run the node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphMemsetNodeGetParams(hNode)#
    +

    Returns a memset node’s parameters.

    +

    Returns the parameters of memset node hNode in nodeParams.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphMemsetNodeSetParams(hNode, CUDA_MEMSET_NODE_PARAMS nodeParams: Optional[CUDA_MEMSET_NODE_PARAMS])#
    +

    Sets a memset node’s parameters.

    +

    Sets the parameters of memset node hNode to nodeParams.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddHostNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_HOST_NODE_PARAMS nodeParams: Optional[CUDA_HOST_NODE_PARAMS])#
    +

    Creates a host execution node and adds it to a graph.

    +

    Creates a new CPU execution node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    When the graph is launched, the node will invoke the specified CPU function. Host nodes are not supported under MPS with pre-Volta GPUs.

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • nodeParams (CUDA_HOST_NODE_PARAMS) – Parameters for the host node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphHostNodeGetParams(hNode)#
    +

    Returns a host node’s parameters.

    +

    Returns the parameters of host node hNode in nodeParams.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphHostNodeSetParams(hNode, CUDA_HOST_NODE_PARAMS nodeParams: Optional[CUDA_HOST_NODE_PARAMS])#
    +

    Sets a host node’s parameters.

    +

    Sets the parameters of host node hNode to nodeParams.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddChildGraphNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, childGraph)#
    +

    Creates a child graph node and adds it to a graph.

    +

    Creates a new node which executes an embedded graph, and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    If hGraph contains allocation or free nodes, this call will return an error.

    +

    The node executes an embedded child graph. The child graph is cloned in this call.
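
    A minimal sketch, assuming a parent `graph` and a separately constructed `child_graph` (both obtained from cuGraphCreate; the names are illustrative):

        # The child graph is cloned here; later changes to child_graph
        # do not affect the node.
        err, child_node = driver.cuGraphAddChildGraphNode(graph, None, 0, child_graph)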

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • childGraph (CUgraph or cudaGraph_t) – The graph to clone into this node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphChildGraphNodeGetGraph(hNode)#
    +

    Gets a handle to the embedded graph of a child graph node.

    +

    Gets a handle to the embedded graph in a child graph node. This call does not clone the graph. Changes to the graph will be reflected in the node, and the node retains ownership of the graph.

    +

    Allocation and free nodes cannot be added to the returned graph. Attempting to do so will return an error.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the embedded graph for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddEmptyNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies)#
    +

    Creates an empty node and adds it to a graph.

    +

    Creates a new node which performs no operation, and adds it to hGraph with numDependencies dependencies specified via dependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    An empty node performs no operation during execution, but can be used for transitive ordering. For example, a phased execution graph with 2 groups of n nodes with a barrier between them can be represented using an empty node and 2*n dependency edges, rather than no empty node and n^2 dependency edges.
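
    The barrier pattern above might look like the following sketch, assuming `graph` and a list `group_a` of already-added CUgraphNode handles forming the first phase (illustrative names):

        # One empty node joins all of phase A; each phase-B node then depends
        # only on the barrier (2*n edges instead of n^2).
        err, barrier = driver.cuGraphAddEmptyNode(graph, group_a, len(group_a))
        err, b0 = driver.cuGraphAddEmptyNode(graph, [barrier], 1)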

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddEventRecordNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, event)#
    +

    Creates an event record node and adds it to a graph.

    +

    Creates a new event record node and adds it to hGraph with numDependencies dependencies specified via dependencies and event specified in event. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    Each launch of the graph will record event to capture execution of the node’s dependencies.
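
    A minimal sketch, assuming `graph` already exists; the matching wait side (cuGraphAddEventWaitNode, documented below) takes the same argument shape:

        err, event = driver.cuEventCreate(driver.CUevent_flags.CU_EVENT_DISABLE_TIMING)
        err, record_node = driver.cuGraphAddEventRecordNode(graph, None, 0, event)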

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • event (CUevent or cudaEvent_t) – Event for the node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphEventRecordNodeGetEvent(hNode)#
    +

    Returns the event associated with an event record node.

    +

    Returns the event of event record node hNode in event_out.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphEventRecordNodeSetEvent(hNode, event)#
    +

    Sets an event record node’s event.

    +

    Sets the event of event record node hNode to event.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddEventWaitNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, event)#
    +

    Creates an event wait node and adds it to a graph.

    +

    Creates a new event wait node and adds it to hGraph with numDependencies dependencies specified via dependencies and event specified in event. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    The graph node will wait for all work captured in event. See cuEventRecord() for details on what is captured by an event. event may be from a different context or device than the launch stream.

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • event (CUevent or cudaEvent_t) – Event for the node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphEventWaitNodeGetEvent(hNode)#
    +

    Returns the event associated with an event wait node.

    +

    Returns the event of event wait node hNode in event_out.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphEventWaitNodeSetEvent(hNode, event)#
    +

    Sets an event wait node’s event.

    +

    Sets the event of event wait node hNode to event.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddExternalSemaphoresSignalNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS])#
    +

    Creates an external semaphore signal node and adds it to a graph.

    +

    Creates a new external semaphore signal node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    Performs a signal operation on a set of externally allocated semaphore objects when the node is launched. The operation(s) will occur after all of the node’s dependencies have completed.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphExternalSemaphoresSignalNodeGetParams(hNode)#
    +

    Returns an external semaphore signal node’s parameters.

    +

    Returns the parameters of an external semaphore signal node hNode in params_out. The extSemArray and paramsArray returned in params_out are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphExternalSemaphoresSignalNodeSetParams to update the parameters of this node.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphExternalSemaphoresSignalNodeSetParams(hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS])#
    +

    Sets an external semaphore signal node’s parameters.

    +

    Sets the parameters of an external semaphore signal node hNode to nodeParams.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddExternalSemaphoresWaitNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_EXT_SEM_WAIT_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_WAIT_NODE_PARAMS])#
    +

    Creates an external semaphore wait node and adds it to a graph.

    +

    Creates a new external semaphore wait node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    Performs a wait operation on a set of externally allocated semaphore objects when the node is launched. The node’s dependent nodes will not be launched until the wait operation has completed.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphExternalSemaphoresWaitNodeGetParams(hNode)#
    +

    Returns an external semaphore wait node’s parameters.

    +

    Returns the parameters of an external semaphore wait node hNode in params_out. The extSemArray and paramsArray returned in params_out are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphExternalSemaphoresWaitNodeSetParams to update the parameters of this node.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphExternalSemaphoresWaitNodeSetParams(hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_WAIT_NODE_PARAMS])#
    +

    Sets an external semaphore wait node’s parameters.

    +

    Sets the parameters of an external semaphore wait node hNode to nodeParams.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddBatchMemOpNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_BATCH_MEM_OP_NODE_PARAMS nodeParams: Optional[CUDA_BATCH_MEM_OP_NODE_PARAMS])#
    +

    Creates a batch memory operation node and adds it to a graph.

    +

    Creates a new batch memory operation node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    When the node is added, the paramArray inside nodeParams is copied and therefore it can be freed after the call returns.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    Warning: Improper use of this API may deadlock the application. Synchronization ordering established through this API is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by this API should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. For more information, see the Stream Memory Operations section in the programming guide (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html).

    +
    + +
    +
    +cuda.bindings.driver.cuGraphBatchMemOpNodeGetParams(hNode)#
    +

    Returns a batch mem op node’s parameters.

    +

    Returns the parameters of batch mem op node hNode in nodeParams_out. The paramArray returned in nodeParams_out is owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cuGraphBatchMemOpNodeSetParams to update the parameters of this node.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphBatchMemOpNodeSetParams(hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS nodeParams: Optional[CUDA_BATCH_MEM_OP_NODE_PARAMS])#
    +

    Sets a batch mem op node’s parameters.

    +

    Sets the parameters of batch mem op node hNode to nodeParams.

    +

    The paramArray inside nodeParams is copied and therefore it can be freed after the call returns.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_OUT_OF_MEMORY

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphExecBatchMemOpNodeSetParams(hGraphExec, hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS nodeParams: Optional[CUDA_BATCH_MEM_OP_NODE_PARAMS])#
    +

    Sets the parameters for a batch mem op node in the given graphExec.

    +

    Sets the parameters of a batch mem op node in an executable graph hGraphExec. The node is identified by the corresponding node hNode in the non-executable graph, from which the executable graph was instantiated.

    +

    The following fields on operations may be modified on an executable graph:

    +

    • op.waitValue.address

    • op.waitValue.value[64]

    • op.waitValue.flags bits corresponding to wait type (i.e. the CU_STREAM_WAIT_VALUE_FLUSH bit cannot be modified)

    • op.writeValue.address

    • op.writeValue.value[64]

    +

    Other fields, such as the context, count or type of operations, and other types of operations such as membars, may not be modified.

    +

    hNode must not have been removed from the original graph.

    +

    The modifications only affect future launches of hGraphExec. Already enqueued or running launches of hGraphExec are not affected by this call. hNode is also not modified by this call.

    +

    The paramArray inside nodeParams is copied and therefore it can be freed after the call returns.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddMemAllocNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS nodeParams: Optional[CUDA_MEM_ALLOC_NODE_PARAMS])#
    +

    Creates an allocation node and adds it to a graph.

    +

    Creates a new allocation node and adds it to hGraph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    When cuGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in nodeParams.dptr. The allocation’s address remains fixed across instantiations and launches.

    +

    If the allocation is freed in the same graph, by creating a free node using cuGraphAddMemFreeNode, the allocation can be accessed by nodes ordered after the allocation node but before the free node. These allocations cannot be freed outside the owning graph, and they can only be freed once in the owning graph.
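
    A sketch pairing an allocation node with a free node in the same graph (assuming `graph` exists and device 0 is used; field names follow CUDA_MEM_ALLOC_NODE_PARAMS, and the variable names are illustrative):

        ap = driver.CUDA_MEM_ALLOC_NODE_PARAMS()
        ap.poolProps.allocType = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
        ap.poolProps.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
        ap.poolProps.location.id = 0
        ap.bytesize = 1 << 20

        err, alloc_node = driver.cuGraphAddMemAllocNode(graph, None, 0, ap)
        dptr = ap.dptr  # fixed across instantiations and launches

        # Nodes ordered between alloc_node and free_node may access dptr.
        err, free_node = driver.cuGraphAddMemFreeNode(graph, [alloc_node], 1, dptr)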

    +

    If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the graph which are ordered after the allocation node, but also by stream operations ordered after the graph’s execution but before the allocation is freed.

    +

    Allocations which are not freed in the same graph can be freed by:

    + +

    It is not possible to free an allocation in both the owning graph and another graph. If the allocation is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed in another graph, a free node can no longer be added to the owning graph.

    +

    The following restrictions apply to graphs which contain allocation and/or memory free nodes:

    +
      +
    • Nodes and edges of the graph cannot be deleted.

    • +
    • The graph cannot be used in a child node.

    • +
    • Only one instantiation of the graph may exist at any point in time.

    • +
    • The graph cannot be cloned.

    • +
    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphMemAllocNodeGetParams(hNode)#
    +

    Returns a memory alloc node’s parameters.

    +

    Returns the parameters of a memory alloc node hNode in params_out. The poolProps and accessDescs returned in params_out are owned by the node. This memory remains valid until the node is destroyed. The returned parameters must not be modified.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphAddMemFreeNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, dptr)#
    +

    Creates a memory free node and adds it to a graph.

    +

    Creates a new memory free node and adds it to hGraph with numDependencies dependencies specified via dependencies and the address to free specified in dptr. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    +

    cuGraphAddMemFreeNode will return CUDA_ERROR_INVALID_VALUE if the user attempts to free:

    +
      +
    • an allocation twice in the same graph.

    • +
    • an address that was not returned by an allocation node.

    • +
    • an invalid address.

    • +
    +

    The following restrictions apply to graphs which contain allocation and/or memory free nodes:

    +
      +
    • Nodes and edges of the graph cannot be deleted.

    • +
    • The graph cannot be used in a child node.

    • +
    • Only one instantiation of the graph may exist at any point in time.

    • +
    • The graph cannot be cloned.

    • +
    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • dependencies (List[CUgraphNode]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • dptr (CUdeviceptr) – Address of memory to free

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphMemFreeNodeGetParams(hNode)#
    +

    Returns a memory free node’s parameters.

    +

    Returns the address of a memory free node hNode in dptr_out.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuDeviceGraphMemTrim(device)#
    +

    Free unused memory that was cached on the specified device for use with graphs back to the OS.

    +

    Blocks which are not in use by a graph that is either currently executing or scheduled to execute are freed back to the operating system.

    +
    +
    Parameters:
    +

    device (CUdevice) – The device for which cached memory should be freed.

    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_DEVICE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuDeviceGetGraphMemAttribute(device, attr: CUgraphMem_attribute)#
    +

    Query asynchronous allocation attributes related to graphs.

    +

    Valid attributes are:

    + +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuDeviceSetGraphMemAttribute(device, attr: CUgraphMem_attribute, value)#
    +

    Set asynchronous allocation attributes related to graphs.

    +

    Valid attributes are:

    + +
    +
    Parameters:
    +
      +
    • device (CUdevice) – Specifies the scope of the query

    • +
    • attr (CUgraphMem_attribute) – attribute to set

    • +
    • value (Any) – pointer to value to set

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_INVALID_DEVICE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphClone(originalGraph)#
    +

    Clones a graph.

    +

    This function creates a copy of originalGraph and returns it in phGraphClone. All parameters are copied into the cloned graph. The original graph may be modified after this call without affecting the clone.

    +

    Child graph nodes in the original graph are recursively copied into the clone.

    +
    +
    Parameters:
    +

    originalGraph (CUgraph or cudaGraph_t) – Graph to clone

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphNodeFindInClone(hOriginalNode, hClonedGraph)#
    +

    Finds a cloned version of a node.

    +

    This function returns the node in hClonedGraph corresponding to hOriginalNode in the original graph.

    +

    hClonedGraph must have been cloned from hOriginalGraph via cuGraphClone. hOriginalNode must have been in hOriginalGraph at the time of the call to cuGraphClone, and the corresponding cloned node in hClonedGraph must not have been removed. The cloned node is then returned via phClonedNode.
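
    A short sketch of the clone-then-find pattern, assuming `graph` and one of its nodes `node` already exist (illustrative names):

        err, clone = driver.cuGraphClone(graph)
        err, cloned_node = driver.cuGraphNodeFindInClone(node, clone)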

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cuGraphClone

    +
    +
    + +
    +
    +cuda.bindings.driver.cuGraphNodeGetType(hNode)#
    +

    Returns a node’s type.

    +

    Returns the node type of hNode in typename.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to query

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphGetNodes(hGraph, size_t numNodes=0)#
    +

    Returns a graph’s nodes.

    +

    Returns a list of hGraph’s nodes. nodes may be NULL, in which case this function will return the number of nodes in numNodes. Otherwise, numNodes entries will be filled in. If numNodes is higher than the actual number of nodes, the remaining entries in nodes will be set to NULL, and the number of nodes actually obtained will be returned in numNodes.
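
    In these bindings the usual two-call pattern looks roughly like the sketch below; the exact shape of the returned tuple is an assumption here, so check the generated signature before relying on it:

        # First call with numNodes=0 to query the count, then fetch the list.
        err, _, num_nodes = driver.cuGraphGetNodes(graph, 0)
        err, nodes, num_nodes = driver.cuGraphGetNodes(graph, num_nodes)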

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to query

    • +
    • numNodes (int) – See description

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphGetRootNodes(hGraph, size_t numRootNodes=0)#
    +

    Returns a graph’s root nodes.

    +

    Returns a list of hGraph’s root nodes. rootNodes may be NULL, in which case this function will return the number of root nodes in numRootNodes. Otherwise, numRootNodes entries will be filled in. If numRootNodes is higher than the actual number of root nodes, the remaining entries in rootNodes will be set to NULL, and the number of nodes actually obtained will be returned in numRootNodes.

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to query

    • +
    • numRootNodes (int) – See description

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphGetEdges(hGraph, size_t numEdges=0)#
    +

    Returns a graph’s dependency edges.

    +

    Returns a list of hGraph’s dependency edges. Edges are returned via corresponding indices in from and to; that is, the node in `to`[i] has a dependency on the node in `from`[i]. from and to may both be NULL, in which case this function only returns the number of edges in numEdges. Otherwise, numEdges entries will be filled in. If numEdges is higher than the actual number of edges, the remaining entries in from and to will be set to NULL, and the number of edges actually returned will be written to numEdges.

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to get the edges from

    • +
    • numEdges (int) – See description

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphGetEdges_v2(hGraph, size_t numEdges=0)#
    +

    Returns a graph’s dependency edges (12.3+)

    +

    Returns a list of hGraph’s dependency edges. Edges are returned via corresponding indices in from, to and edgeData; that is, the node in `to`[i] has a dependency on the node in `from`[i] with data `edgeData`[i]. from and to may both be NULL, in which case this function only returns the number of edges in numEdges. Otherwise, numEdges entries will be filled in. If numEdges is higher than the actual number of edges, the remaining entries in from and to will be set to NULL, and the number of edges actually returned will be written to numEdges. edgeData may alone be NULL, in which case the edges must all have default (zeroed) edge data. Attempting a lossy query via NULL edgeData will result in CUDA_ERROR_LOSSY_QUERY. If edgeData is non-NULL then from and to must be as well.

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph to get the edges from

    • +
    • numEdges (int) – See description

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
cuda.bindings.driver.cuGraphNodeGetDependencies(hNode, size_t numDependencies=0)#

    Returns a node’s dependencies.

    Returns a list of node’s dependencies. dependencies may be NULL, in
    which case this function will return the number of dependencies in
    numDependencies. Otherwise, numDependencies entries will be filled in.
    If numDependencies is higher than the actual number of dependencies,
    the remaining entries in dependencies will be set to NULL, and the
    number of nodes actually obtained will be returned in numDependencies.

    Parameters:

    Returns:

cuda.bindings.driver.cuGraphNodeGetDependencies_v2(hNode, size_t numDependencies=0)#

    Returns a node’s dependencies (12.3+).

    Returns a list of node’s dependencies. dependencies may be NULL, in
    which case this function will return the number of dependencies in
    numDependencies. Otherwise, numDependencies entries will be filled in.
    If numDependencies is higher than the actual number of dependencies,
    the remaining entries in dependencies will be set to NULL, and the
    number of nodes actually obtained will be returned in numDependencies.

    Note that if an edge has non-zero (non-default) edge data and edgeData
    is NULL, this API will return CUDA_ERROR_LOSSY_QUERY. If edgeData is
    non-NULL, then dependencies must be as well.

    Parameters:

    Returns:

cuda.bindings.driver.cuGraphNodeGetDependentNodes(hNode, size_t numDependentNodes=0)#

    Returns a node’s dependent nodes.

    Returns a list of node’s dependent nodes. dependentNodes may be NULL,
    in which case this function will return the number of dependent nodes
    in numDependentNodes. Otherwise, numDependentNodes entries will be
    filled in. If numDependentNodes is higher than the actual number of
    dependent nodes, the remaining entries in dependentNodes will be set to
    NULL, and the number of nodes actually obtained will be returned in
    numDependentNodes.

    Parameters:

    Returns:

cuda.bindings.driver.cuGraphNodeGetDependentNodes_v2(hNode, size_t numDependentNodes=0)#

    Returns a node’s dependent nodes (12.3+).

    Returns a list of node’s dependent nodes. dependentNodes may be NULL,
    in which case this function will return the number of dependent nodes
    in numDependentNodes. Otherwise, numDependentNodes entries will be
    filled in. If numDependentNodes is higher than the actual number of
    dependent nodes, the remaining entries in dependentNodes will be set to
    NULL, and the number of nodes actually obtained will be returned in
    numDependentNodes.

    Note that if an edge has non-zero (non-default) edge data and edgeData
    is NULL, this API will return CUDA_ERROR_LOSSY_QUERY. If edgeData is
    non-NULL, then dependentNodes must be as well.

    Parameters:

    Returns:
cuda.bindings.driver.cuGraphAddDependencies(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies)#

    Adds dependency edges to a graph.

    The number of dependencies to be added is defined by numDependencies.
    Elements in from and to at corresponding indices define a dependency.
    Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored.
    Specifying an existing dependency will return an error.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – Graph to which dependencies are added
        • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
        • to (List[CUgraphNode]) – Array of dependent nodes
        • numDependencies (size_t) – Number of dependencies to be added

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
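    A minimal sketch, assuming `a` and `b` are CUgraphNode handles already
    added to `graph`, and that the wrapper returns a one-element (CUresult,)
    tuple in the usual cuda-python convention (both assumptions for
    illustration):

        from cuda.bindings import driver

        # Make node b depend on node a (edge a -> b).
        err, = driver.cuGraphAddDependencies(graph, [a], [b], 1)
        assert err == driver.CUresult.CUDA_SUCCESS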
cuda.bindings.driver.cuGraphAddDependencies_v2(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], edgeData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies)#

    Adds dependency edges to a graph (12.3+).

    The number of dependencies to be added is defined by numDependencies.
    Elements in from and to at corresponding indices define a dependency.
    Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored.
    Specifying an existing dependency will return an error.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – Graph to which dependencies are added
        • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
        • to (List[CUgraphNode]) – Array of dependent nodes
        • edgeData (List[CUgraphEdgeData]) – Optional array of edge data. If NULL, default (zeroed) edge data is assumed.
        • numDependencies (size_t) – Number of dependencies to be added

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphRemoveDependencies(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies)#

    Removes dependency edges from a graph.

    The number of dependencies to be removed is defined by numDependencies.
    Elements in from and to at corresponding indices define a dependency.
    Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored.
    Specifying a non-existing dependency will return an error.

    Dependencies cannot be removed from graphs which contain allocation or
    free nodes. Any attempt to do so will return an error.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies
        • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
        • to (List[CUgraphNode]) – Array of dependent nodes
        • numDependencies (size_t) – Number of dependencies to be removed

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphRemoveDependencies_v2(hGraph, from_: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], to: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], edgeData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies)#

    Removes dependency edges from a graph (12.3+).

    The number of dependencies to be removed is defined by numDependencies.
    Elements in from and to at corresponding indices define a dependency.
    Each node in from and to must belong to hGraph.

    If numDependencies is 0, elements in from and to will be ignored.
    Specifying an edge that does not exist in the graph, with data matching
    edgeData, results in an error. edgeData is nullable, which is
    equivalent to passing default (zeroed) data for each edge.

    Dependencies cannot be removed from graphs which contain allocation or
    free nodes. Any attempt to do so will return an error.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies
        • from (List[CUgraphNode]) – Array of nodes that provide the dependencies
        • to (List[CUgraphNode]) – Array of dependent nodes
        • edgeData (List[CUgraphEdgeData]) – Optional array of edge data. If NULL, edge data is assumed to be default (zeroed).
        • numDependencies (size_t) – Number of dependencies to be removed

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphDestroyNode(hNode)#

    Remove a node from the graph.

    Removes hNode from its graph. This operation also severs any
    dependencies of other nodes on hNode and vice versa.

    Nodes which belong to a graph which contains allocation or free nodes
    cannot be destroyed. Any attempt to do so will return an error.

    Parameters:
        hNode (CUgraphNode or cudaGraphNode_t) – Node to remove

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphInstantiate(hGraph, unsigned long long flags)#

    Creates an executable graph from a graph.

    Instantiates hGraph as an executable graph. The graph is validated for
    any structural constraints or intra-node constraints which were not
    previously validated. If instantiation is successful, a handle to the
    instantiated graph is returned in phGraphExec.

    The flags parameter controls the behavior of instantiation and
    subsequent graph launches. Valid flags are:

    • CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a
      graph containing memory allocation nodes to automatically free any
      unfreed memory allocations before the graph is relaunched.

    • CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph
      for launch from the device. If this flag is passed, the executable
      graph handle returned can be used to launch the graph from both the
      host and device. This flag can only be used on platforms which
      support unified addressing. This flag cannot be used in conjunction
      with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH.

    • CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph
      to use the priorities from the per-node attributes rather than the
      priority of the launch stream during execution. Note that priorities
      are only available on kernel nodes, and are copied from stream
      priority during stream capture.

    If hGraph contains any allocation or free nodes, there can be at most
    one executable graph in existence for that graph at a time. An attempt
    to instantiate a second executable graph before destroying the first
    with cuGraphExecDestroy will result in an error. The same also applies
    if hGraph contains any device-updatable kernel nodes.

    If hGraph contains kernels which call device-side cudaGraphLaunch()
    from multiple contexts, this will result in an error.

    Graphs instantiated for launch on the device have additional
    restrictions which do not apply to host graphs:

    • The graph’s nodes must reside on a single context.
    • The graph can only contain kernel nodes, memcpy nodes, memset nodes,
      and child graph nodes.
    • The graph cannot be empty and must contain at least one kernel,
      memcpy, or memset node. Operation-specific restrictions are outlined
      below.
    • Kernel nodes:
        • Use of CUDA Dynamic Parallelism is not permitted.
        • Cooperative launches are permitted as long as MPS is not in use.
    • Memcpy nodes:
        • Only copies involving device memory and/or pinned device-mapped
          host memory are permitted.
        • Copies involving CUDA arrays are not permitted.
        • Both operands must be accessible from the current context, and
          the current context must match the context of other nodes in the
          graph.

    Parameters:

    Returns:
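    A hedged end-to-end sketch of instantiate/launch/destroy, assuming
    `graph` is a populated CUgraph and `stream` a valid CUstream (both
    assumptions), and that the bindings return (CUresult, ...) tuples:

        from cuda.bindings import driver

        # Instantiate with no flags, then launch and wait for completion.
        err, graph_exec = driver.cuGraphInstantiate(graph, 0)
        assert err == driver.CUresult.CUDA_SUCCESS
        err, = driver.cuGraphLaunch(graph_exec, stream)
        err, = driver.cuStreamSynchronize(stream)
        err, = driver.cuGraphExecDestroy(graph_exec)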
cuda.bindings.driver.cuGraphInstantiateWithParams(hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS instantiateParams: Optional[CUDA_GRAPH_INSTANTIATE_PARAMS])#

    Creates an executable graph from a graph.

    Instantiates hGraph as an executable graph according to the
    instantiateParams structure. The graph is validated for any structural
    constraints or intra-node constraints which were not previously
    validated. If instantiation is successful, a handle to the instantiated
    graph is returned in phGraphExec.

    instantiateParams controls the behavior of instantiation and subsequent
    graph launches, as well as returning more detailed information in the
    event of an error. CUDA_GRAPH_INSTANTIATE_PARAMS is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    The flags field controls the behavior of instantiation and subsequent
    graph launches. Valid flags are:

    If hGraph contains any allocation or free nodes, there can be at most
    one executable graph in existence for that graph at a time. An attempt
    to instantiate a second executable graph before destroying the first
    with cuGraphExecDestroy will result in an error. The same also applies
    if hGraph contains any device-updatable kernel nodes.

    If hGraph contains kernels which call device-side cudaGraphLaunch()
    from multiple contexts, this will result in an error.

    Graphs instantiated for launch on the device have additional
    restrictions which do not apply to host graphs:

    • The graph’s nodes must reside on a single context.
    • The graph can only contain kernel nodes, memcpy nodes, memset nodes,
      and child graph nodes.
    • The graph cannot be empty and must contain at least one kernel,
      memcpy, or memset node. Operation-specific restrictions are outlined
      below.
    • Kernel nodes:
        • Use of CUDA Dynamic Parallelism is not permitted.
        • Cooperative launches are permitted as long as MPS is not in use.
    • Memcpy nodes:
        • Only copies involving device memory and/or pinned device-mapped
          host memory are permitted.
        • Copies involving CUDA arrays are not permitted.
        • Both operands must be accessible from the current context, and
          the current context must match the context of other nodes in the
          graph.

    In the event of an error, the result_out and hErrNode_out fields will
    contain more information about the nature of the error. Possible error
    reporting includes:

    • CUDA_GRAPH_INSTANTIATE_ERROR, if passed an invalid value or if an
      unexpected error occurred which is described by the return value of
      the function. hErrNode_out will be set to NULL.
    • CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE, if the graph structure is
      invalid. hErrNode_out will be set to one of the offending nodes.
    • CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED, if the graph is
      instantiated for device launch but contains a node of an unsupported
      node type, or a node which performs unsupported operations, such as
      use of CUDA dynamic parallelism within a kernel node. hErrNode_out
      will be set to this node.
    • CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED, if the graph is
      instantiated for device launch but a node’s context differs from that
      of another node. This error can also be returned if a graph is not
      instantiated for device launch and it contains kernels which call
      device-side cudaGraphLaunch() from multiple contexts. hErrNode_out
      will be set to this node.

    If instantiation is successful, result_out will be set to
    CUDA_GRAPH_INSTANTIATE_SUCCESS, and hErrNode_out will be set to NULL.

    Parameters:

    Returns:
cuda.bindings.driver.cuGraphExecGetFlags(hGraphExec)#

    Query the instantiation flags of an executable graph.

    Returns the flags that were passed to instantiation for the given
    executable graph. CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD will not be
    returned by this API as it does not affect the resulting executable
    graph.

    Parameters:
        hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph to query

    Returns:
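    A one-line sketch, assuming `graph_exec` is an existing CUgraphExec and
    that the wrapper returns a (CUresult, flags) tuple (an assumption about
    the binding’s convention):

        from cuda.bindings import driver

        err, flags = driver.cuGraphExecGetFlags(graph_exec)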
cuda.bindings.driver.cuGraphExecKernelNodeSetParams(hGraphExec, hNode, CUDA_KERNEL_NODE_PARAMS nodeParams: Optional[CUDA_KERNEL_NODE_PARAMS])#

    Sets the parameters for a kernel node in the given graphExec.

    Sets the parameters of a kernel node in an executable graph hGraphExec.
    The node is identified by the corresponding node hNode in the
    non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph. All
    nodeParams fields may change, but the following restrictions apply to
    func updates:

    • The owning context of the function cannot change.
    • A node whose function originally did not use CUDA dynamic parallelism
      cannot be updated to a function which uses CDP.
    • A node whose function originally did not make device-side update
      calls cannot be updated to a function which makes device-side update
      calls.
    • If hGraphExec was not instantiated for device launch, a node whose
      function originally did not use device-side cudaGraphLaunch() cannot
      be updated to a function which uses device-side cudaGraphLaunch()
      unless the node resides on the same context as nodes which contained
      such calls at instantiate-time. If no such calls were present at
      instantiation, these updates cannot be performed at all.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    If hNode is a device-updatable kernel node, the next upload/launch of
    hGraphExec will overwrite any previous device-side updates.
    Additionally, applying host updates to a device-updatable kernel node
    while it is being updated from the device will result in undefined
    behavior.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecMemcpyNodeSetParams(hGraphExec, hNode, CUDA_MEMCPY3D copyParams: Optional[CUDA_MEMCPY3D], ctx)#

    Sets the parameters for a memcpy node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though hNode had
    contained copyParams at instantiation. hNode must remain in the graph
    which was used to instantiate hGraphExec. Changed edges to and from
    hNode are ignored.

    The source and destination memory in copyParams must be allocated from
    the same contexts as the original source and destination memory. Both
    the instantiation-time memory operands and the memory operands in
    copyParams must be 1-dimensional. Zero-length operations are not
    supported.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Returns CUDA_ERROR_INVALID_VALUE if the memory operands’ mappings
    changed or either the original or new memory operands are
    multidimensional.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecMemsetNodeSetParams(hGraphExec, hNode, CUDA_MEMSET_NODE_PARAMS memsetParams: Optional[CUDA_MEMSET_NODE_PARAMS], ctx)#

    Sets the parameters for a memset node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though hNode had
    contained memsetParams at instantiation. hNode must remain in the graph
    which was used to instantiate hGraphExec. Changed edges to and from
    hNode are ignored.

    Zero sized operations are not supported.

    The new destination pointer in memsetParams must be to the same kind of
    allocation as the original destination pointer and have the same
    context association and device mapping as the original destination
    pointer.

    Both the value and pointer address may be updated. Changing other
    aspects of the memset (width, height, element size or pitch) may cause
    the update to be rejected. Specifically, for 2d memsets, all dimension
    changes are rejected. For 1d memsets, changes in height are explicitly
    rejected and other changes are opportunistically allowed if the
    resulting work maps onto the work resources already allocated for the
    node.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecHostNodeSetParams(hGraphExec, hNode, CUDA_HOST_NODE_PARAMS nodeParams: Optional[CUDA_HOST_NODE_PARAMS])#

    Sets the parameters for a host node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though hNode had
    contained nodeParams at instantiation. hNode must remain in the graph
    which was used to instantiate hGraphExec. Changed edges to and from
    hNode are ignored.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecChildGraphNodeSetParams(hGraphExec, hNode, childGraph)#

    Updates node parameters in the child graph node in the given graphExec.

    Updates the work represented by hNode in hGraphExec as though the nodes
    contained in hNode’s graph had the parameters contained in childGraph’s
    nodes at instantiation. hNode must remain in the graph which was used
    to instantiate hGraphExec. Changed edges to and from hNode are ignored.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    The topology of childGraph, as well as the node insertion order, must
    match that of the graph contained in hNode. See cuGraphExecUpdate() for
    a list of restrictions on what can be updated in an instantiated graph.
    The update is recursive, so child graph nodes contained within the top
    level child graph will also be updated.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event)#

    Sets the event for an event record node in the given graphExec.

    Sets the event of an event record node in an executable graph
    hGraphExec. The node is identified by the corresponding node hNode in
    the non-executable graph, from which the executable graph was
    instantiated.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event)#

    Sets the event for an event wait node in the given graphExec.

    Sets the event of an event wait node in an executable graph hGraphExec.
    The node is identified by the corresponding node hNode in the
    non-executable graph, from which the executable graph was instantiated.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_SIGNAL_NODE_PARAMS])#

    Sets the parameters for an external semaphore signal node in the given graphExec.

    Sets the parameters of an external semaphore signal node in an
    executable graph hGraphExec. The node is identified by the
    corresponding node hNode in the non-executable graph, from which the
    executable graph was instantiated.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Changing nodeParams->numExtSems is not supported.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS nodeParams: Optional[CUDA_EXT_SEM_WAIT_NODE_PARAMS])#

    Sets the parameters for an external semaphore wait node in the given graphExec.

    Sets the parameters of an external semaphore wait node in an executable
    graph hGraphExec. The node is identified by the corresponding node
    hNode in the non-executable graph, from which the executable graph was
    instantiated.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Changing nodeParams->numExtSems is not supported.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphNodeSetEnabled(hGraphExec, hNode, unsigned int isEnabled)#

    Enables or disables the specified node in the given graphExec.

    Sets hNode to be either enabled or disabled. Disabled nodes are
    functionally equivalent to empty nodes until they are reenabled.
    Existing node parameters are not affected by disabling/enabling the
    node.

    The node is identified by the corresponding node hNode in the
    non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    If hNode is a device-updatable kernel node, the next upload/launch of
    hGraphExec will overwrite any previous device-side updates.
    Additionally, applying host updates to a device-updatable kernel node
    while it is being updated from the device will result in undefined
    behavior.

    Parameters:
        • hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph in which to set the specified node
        • hNode (CUgraphNode or cudaGraphNode_t) – Node from the graph from which graphExec was instantiated
        • isEnabled (unsigned int) – Node is enabled if != 0, otherwise the node is disabled

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    Notes

    Currently only kernel, memset and memcpy nodes are supported.
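    A hedged sketch of toggling a node and reading the state back with the
    companion query below, assuming `graph_exec` and `node` are existing
    handles and the usual (CUresult, ...) tuple returns (assumptions):

        from cuda.bindings import driver

        # Disable the node: future launches treat it as an empty node.
        err, = driver.cuGraphNodeSetEnabled(graph_exec, node, 0)
        # Re-enable it and confirm via cuGraphNodeGetEnabled.
        err, = driver.cuGraphNodeSetEnabled(graph_exec, node, 1)
        err, is_enabled = driver.cuGraphNodeGetEnabled(graph_exec, node)
        assert is_enabled == 1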
cuda.bindings.driver.cuGraphNodeGetEnabled(hGraphExec, hNode)#

    Query whether a node in the given graphExec is enabled.

    Sets isEnabled to 1 if hNode is enabled, or 0 if hNode is disabled.

    The node is identified by the corresponding node hNode in the
    non-executable graph, from which the executable graph was instantiated.

    hNode must not have been removed from the original graph.

    Parameters:

    Returns:

    Notes

    Currently only kernel, memset and memcpy nodes are supported.

    This function will not reflect device-side updates for device-updatable
    kernel nodes.
cuda.bindings.driver.cuGraphUpload(hGraphExec, hStream)#

    Uploads an executable graph in a stream.

    Uploads hGraphExec to the device in hStream without executing it.
    Uploads of the same hGraphExec will be serialized. Each upload is
    ordered behind both any previous work in hStream and any previous
    launches of hGraphExec. Uses memory cached by stream to back the
    allocations owned by hGraphExec.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphLaunch(hGraphExec, hStream)#

    Launches an executable graph in a stream.

    Executes hGraphExec in hStream. Only one instance of hGraphExec may be
    executing at a time. Each launch is ordered behind both any previous
    work in hStream and any previous launches of hGraphExec. To execute a
    graph concurrently, it must be instantiated multiple times into
    multiple executable graphs.

    If any allocations created by hGraphExec remain unfreed (from a
    previous launch) and hGraphExec was not instantiated with
    CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, the launch will fail
    with CUDA_ERROR_INVALID_VALUE.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecDestroy(hGraphExec)#

    Destroys an executable graph.

    Destroys the executable graph specified by hGraphExec, as well as all
    of its executable nodes. If the executable graph is in-flight, it will
    not be terminated, but rather freed asynchronously on completion.

    Parameters:
        hGraphExec (CUgraphExec or cudaGraphExec_t) – Executable graph to destroy

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphDestroy(hGraph)#

    Destroys a graph.

    Destroys the graph specified by hGraph, as well as all of its nodes.

    Parameters:
        hGraph (CUgraph or cudaGraph_t) – Graph to destroy

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    See also
        cuGraphCreate
cuda.bindings.driver.cuGraphExecUpdate(hGraphExec, hGraph)#

    Check whether an executable graph can be updated with a graph and
    perform the update if possible.

    Updates the node parameters in the instantiated graph specified by
    hGraphExec with the node parameters in a topologically identical graph
    specified by hGraph.

    Limitations:

    • Kernel nodes:
        • The owning context of the function cannot change.
        • A node whose function originally did not use CUDA dynamic
          parallelism cannot be updated to a function which uses CDP.
        • A node whose function originally did not make device-side update
          calls cannot be updated to a function which makes device-side
          update calls.
        • A cooperative node cannot be updated to a non-cooperative node,
          and vice-versa.
        • If the graph was instantiated with
          CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, the priority
          attribute cannot change. Equality is checked on the originally
          requested priority values, before they are clamped to the
          device’s supported range.
        • If hGraphExec was not instantiated for device launch, a node
          whose function originally did not use device-side
          cudaGraphLaunch() cannot be updated to a function which uses
          device-side cudaGraphLaunch() unless the node resides on the same
          context as nodes which contained such calls at instantiate-time.
          If no such calls were present at instantiation, these updates
          cannot be performed at all.
        • Neither hGraph nor hGraphExec may contain device-updatable kernel
          nodes.
    • Memset and memcpy nodes:
        • The CUDA device(s) to which the operand(s) was allocated/mapped
          cannot change.
        • The source/destination memory must be allocated from the same
          contexts as the original source/destination memory.
        • For 2d memsets, only address and assigned value may be updated.
        • For 1d memsets, updating dimensions is also allowed, but may fail
          if the resulting operation doesn’t map onto the work resources
          already allocated for the node.
    • Additional memcpy node restrictions:
        • Changing either the source or destination memory type (i.e.
          CU_MEMORYTYPE_DEVICE, CU_MEMORYTYPE_ARRAY, etc.) is not
          supported.
    • External semaphore wait nodes and record nodes:
        • Changing the number of semaphores is not supported.
    • Conditional nodes:
        • Changing node parameters is not supported.
        • Changing parameters of nodes within the conditional body graph is
          subject to the rules above.
        • Conditional handle flags and default values are updated as part
          of the graph update.

    Note: The API may add further restrictions in future releases. The
    return code should always be checked.

    cuGraphExecUpdate sets the result member of resultInfo to
    CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED under the following
    conditions:

    • The count of nodes directly in hGraphExec and hGraph differ, in which
      case resultInfo->errorNode is set to NULL.
    • hGraph has more exit nodes than hGraphExec, in which case
      resultInfo->errorNode is set to one of the exit nodes in hGraph.
    • A node in hGraph has a different number of dependencies than the node
      from hGraphExec it is paired with, in which case
      resultInfo->errorNode is set to the node from hGraph.
    • A node in hGraph has a dependency that does not match with the
      corresponding dependency of the paired node from hGraphExec.
      resultInfo->errorNode will be set to the node from hGraph.
      resultInfo->errorFromNode will be set to the mismatched dependency.
      The dependencies are paired based on edge order and a dependency does
      not match when the nodes are already paired based on other edges
      examined in the graph.

    cuGraphExecUpdate sets the result member of resultInfo to:

    • CU_GRAPH_EXEC_UPDATE_ERROR if passed an invalid value.
    • CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED if the graph topology
      changed.
    • CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED if the type of a node
      changed, in which case hErrorNode_out is set to the node from hGraph.
    • CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE if the
      function changed in an unsupported way (see note above), in which
      case hErrorNode_out is set to the node from hGraph.
    • CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED if any parameters to a
      node changed in a way that is not supported, in which case
      hErrorNode_out is set to the node from hGraph.
    • CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED if any attributes of a
      node changed in a way that is not supported, in which case
      hErrorNode_out is set to the node from hGraph.
    • CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED if something about a node is
      unsupported, like the node’s type or configuration, in which case
      hErrorNode_out is set to the node from hGraph.

    If the update fails for a reason not listed above, the result member of
    resultInfo will be set to CU_GRAPH_EXEC_UPDATE_ERROR. If the update
    succeeds, the result member will be set to
    CU_GRAPH_EXEC_UPDATE_SUCCESS.

    cuGraphExecUpdate returns CUDA_SUCCESS when the update was performed
    successfully. It returns CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE if the
    graph update was not performed because it included changes which
    violated constraints specific to instantiated graph update.

    Parameters:

    Returns:

    See also
        cuGraphInstantiate
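    A hedged sketch of the common update-or-reinstantiate pattern, assuming
    `graph_exec` and `graph` are existing handles and that the wrapper
    returns a (CUresult, resultInfo) tuple (assumptions for illustration):

        from cuda.bindings import driver

        # Try an in-place update; fall back to re-instantiation on failure.
        err, info = driver.cuGraphExecUpdate(graph_exec, graph)
        if err == driver.CUresult.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE:
            print("update rejected:", info.result)
            err, = driver.cuGraphExecDestroy(graph_exec)
            err, graph_exec = driver.cuGraphInstantiate(graph, 0)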
cuda.bindings.driver.cuGraphKernelNodeCopyAttributes(dst, src)#

    Copies attributes from source node to destination node.

    Copies attributes from source node src to destination node dst. Both
    nodes must have the same context.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult

    See also
        CUaccessPolicyWindow
cuda.bindings.driver.cuGraphKernelNodeGetAttribute(hNode, attr: CUkernelNodeAttrID)#

    Queries node attribute.

    Queries attribute attr from node hNode and stores it in corresponding
    member of value_out.

    Parameters:

    Returns:

    See also
        CUaccessPolicyWindow
cuda.bindings.driver.cuGraphKernelNodeSetAttribute(hNode, attr: CUkernelNodeAttrID, CUkernelNodeAttrValue value: Optional[CUkernelNodeAttrValue])#

    Sets node attribute.

    Sets attribute attr on node hNode from corresponding attribute of
    value.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE

    Return type:
        CUresult

    See also
        CUaccessPolicyWindow
cuda.bindings.driver.cuGraphDebugDotPrint(hGraph, char *path, unsigned int flags)#

    Write a DOT file describing graph structure.

    Using the provided hGraph, write to path a DOT formatted description of
    the graph. By default this includes the graph topology, node types,
    node id, kernel names and memcpy direction. flags can be specified to
    write more detailed information about each node type such as parameter
    values, kernel attributes, node and function handles.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – The graph to create a DOT file from
        • path (bytes) – The path to write the DOT file to
        • flags (unsigned int) – Flags from CUgraphDebugDot_flags for specifying which additional node information to write

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_OPERATING_SYSTEM

    Return type:
        CUresult
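    A minimal sketch, assuming `graph` is an existing CUgraph; note that
    path is passed as bytes, per the Parameters above:

        from cuda.bindings import driver

        # Dump the default view of the graph (topology, node types, ids).
        err, = driver.cuGraphDebugDotPrint(graph, b"graph.dot", 0)
        assert err == driver.CUresult.CUDA_SUCCESS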
cuda.bindings.driver.cuUserObjectCreate(ptr, destroy, unsigned int initialRefcount, unsigned int flags)#

    Create a user object.

    Create a user object with the specified destructor callback and initial
    reference count. The initial references are owned by the caller.

    Destructor callbacks cannot make CUDA API calls and should avoid
    blocking behavior, as they are executed by a shared internal thread.
    Another thread may be signaled to perform such actions, if it does not
    block forward progress of tasks scheduled through CUDA.

    See CUDA User Objects in the CUDA C++ Programming Guide for more
    information on user objects.

    Parameters:
        • ptr (Any) – The pointer to pass to the destroy function
        • destroy (CUhostFn) – Callback to free the user object when it is no longer in use
        • initialRefcount (unsigned int) – The initial refcount to create the object with, typically 1. The initial references are owned by the calling thread.
        • flags (unsigned int) – Currently it is required to pass CU_USER_OBJECT_NO_DESTRUCTOR_SYNC, which is the only defined flag. This indicates that the destroy callback cannot be waited on by any CUDA API. Users requiring synchronization of the callback should signal its completion manually.

    Returns:
cuda.bindings.driver.cuUserObjectRetain(object, unsigned int count)#

    Retain a reference to a user object.

    Retains new references to a user object. The new references are owned
    by the caller.

    See CUDA User Objects in the CUDA C++ Programming Guide for more
    information on user objects.

    Parameters:
        • object (CUuserObject) – The object to retain
        • count (unsigned int) – The number of references to retain, typically 1. Must be nonzero and not larger than INT_MAX.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuUserObjectRelease(object, unsigned int count)#

    Release a reference to a user object.

    Releases user object references owned by the caller. The object’s
    destructor is invoked if the reference count reaches zero.

    It is undefined behavior to release references not owned by the caller,
    or to use a user object handle after all references are released.

    See CUDA User Objects in the CUDA C++ Programming Guide for more
    information on user objects.

    Parameters:
        • object (CUuserObject) – The object to release
        • count (unsigned int) – The number of references to release, typically 1. Must be nonzero and not larger than INT_MAX.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphRetainUserObject(graph, object, unsigned int count, unsigned int flags)#

    Retain a reference to a user object from a graph.

    Creates or moves user object references that will be owned by a CUDA
    graph.

    See CUDA User Objects in the CUDA C++ Programming Guide for more
    information on user objects.

    Parameters:
        • graph (CUgraph or cudaGraph_t) – The graph to associate the reference with
        • object (CUuserObject) – The user object to retain a reference for
        • count (unsigned int) – The number of references to add to the graph, typically 1. Must be nonzero and not larger than INT_MAX.
        • flags (unsigned int) – The optional flag CU_GRAPH_USER_OBJECT_MOVE transfers references from the calling thread, rather than create new references. Pass 0 to create new references.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
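    A hedged sketch, assuming `uobj` is a CUuserObject previously created
    with cuUserObjectCreate and `graph` an existing CUgraph (both handles
    are assumptions here):

        from cuda.bindings import driver

        # Give the graph its own new reference to the user object;
        # flags=0 creates a new reference rather than moving ours.
        err, = driver.cuGraphRetainUserObject(graph, uobj, 1, 0)
        # The caller can now drop its own reference if no longer needed.
        err, = driver.cuUserObjectRelease(uobj, 1)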
cuda.bindings.driver.cuGraphReleaseUserObject(graph, object, unsigned int count)#

    Release a user object reference from a graph.

    Releases user object references owned by a graph.

    See CUDA User Objects in the CUDA C++ Programming Guide for more
    information on user objects.

    Parameters:
        • graph (CUgraph or cudaGraph_t) – The graph that will release the reference
        • object (CUuserObject) – The user object to release a reference for
        • count (unsigned int) – The number of references to release, typically 1. Must be nonzero and not larger than INT_MAX.

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE

    Return type:
        CUresult
cuda.bindings.driver.cuGraphAddNode(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], size_t numDependencies, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])#

    Adds a node of arbitrary type to a graph.

    Creates a new node in hGraph described by nodeParams with
    numDependencies dependencies specified via dependencies.
    numDependencies may be 0. dependencies may be null if numDependencies
    is 0. dependencies may not have any duplicate entries.

    nodeParams is a tagged union. The node type should be specified in the
    typename field, and type-specific parameters in the corresponding union
    member. All unused bytes - that is, reserved0 and all bytes past the
    utilized union member - must be set to zero. It is recommended to use
    brace initialization or memset to ensure all bytes are initialized.

    Note that for some node types, nodeParams may contain “out parameters”
    which are modified during the call, such as nodeParams->alloc.dptr.

    A handle to the new node will be returned in phGraphNode.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
        • dependencies (List[CUgraphNode]) – Dependencies of the node
        • numDependencies (size_t) – Number of dependencies
        • nodeParams (CUgraphNodeParams) – Specification of the node

    Returns:
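    A hedged sketch of the tagged-union usage from Python, assuming the
    CUgraphNodeParams wrapper is default-constructible with zeroed storage
    and exposes the C fields by name (assumptions about the binding), and
    that `graph` is an existing CUgraph:

        from cuda.bindings import driver

        # Describe an empty node via the generic params union.
        params = driver.CUgraphNodeParams()
        params.type = driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EMPTY
        # No dependencies: pass None with numDependencies=0.
        err, node = driver.cuGraphAddNode(graph, None, 0, params)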
cuda.bindings.driver.cuGraphAddNode_v2(hGraph, dependencies: Optional[Tuple[CUgraphNode] | List[CUgraphNode]], dependencyData: Optional[Tuple[CUgraphEdgeData] | List[CUgraphEdgeData]], size_t numDependencies, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])#

    Adds a node of arbitrary type to a graph (12.3+).

    Creates a new node in hGraph described by nodeParams with
    numDependencies dependencies specified via dependencies.
    numDependencies may be 0. dependencies may be null if numDependencies
    is 0. dependencies may not have any duplicate entries.

    nodeParams is a tagged union. The node type should be specified in the
    typename field, and type-specific parameters in the corresponding union
    member. All unused bytes - that is, reserved0 and all bytes past the
    utilized union member - must be set to zero. It is recommended to use
    brace initialization or memset to ensure all bytes are initialized.

    Note that for some node types, nodeParams may contain “out parameters”
    which are modified during the call, such as nodeParams->alloc.dptr.

    A handle to the new node will be returned in phGraphNode.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – Graph to which to add the node
        • dependencies (List[CUgraphNode]) – Dependencies of the node
        • dependencyData (List[CUgraphEdgeData]) – Optional edge data for the dependencies. If NULL, the data is assumed to be default (zeroed) for all dependencies.
        • numDependencies (size_t) – Number of dependencies
        • nodeParams (CUgraphNodeParams) – Specification of the node

    Returns:
cuda.bindings.driver.cuGraphNodeSetParams(hNode, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])#

    Updates a graph node’s parameters.

    Sets the parameters of graph node hNode to nodeParams. The node type
    specified by nodeParams->type must match the type of hNode. nodeParams
    must be fully initialized and all unused bytes (reserved, padding)
    zeroed.

    Modifying parameters is not supported for node types
    CU_GRAPH_NODE_TYPE_MEM_ALLOC and CU_GRAPH_NODE_TYPE_MEM_FREE.

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

    Return type:
        CUresult
cuda.bindings.driver.cuGraphExecNodeSetParams(hGraphExec, hNode, CUgraphNodeParams nodeParams: Optional[CUgraphNodeParams])#

    Updates a graph node’s parameters in an instantiated graph.

    Sets the parameters of a node in an executable graph hGraphExec. The
    node is identified by the corresponding node hNode in the
    non-executable graph from which the executable graph was instantiated.
    hNode must not have been removed from the original graph.

    The modifications only affect future launches of hGraphExec. Already
    enqueued or running launches of hGraphExec are not affected by this
    call. hNode is also not modified by this call.

    Allowed changes to parameters on executable graphs are as follows:

    View CUDA Toolkit Documentation for a table example

    Parameters:

    Returns:
        CUDA_SUCCESS, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_NOT_SUPPORTED

    Return type:
        CUresult
cuda.bindings.driver.cuGraphConditionalHandleCreate(hGraph, ctx, unsigned int defaultLaunchValue, unsigned int flags)#

    Create a conditional handle.

    Creates a conditional handle associated with hGraph.

    The conditional handle must be associated with a conditional node in
    this graph or one of its children.

    Handles not associated with a conditional node may cause graph
    instantiation to fail.

    Handles can only be set from the context with which they are
    associated.

    Parameters:
        • hGraph (CUgraph or cudaGraph_t) – Graph which will contain the conditional node using this handle.
        • ctx (CUcontext) – Context for the handle and associated conditional node.
        • defaultLaunchValue (unsigned int) – Optional initial value for the conditional variable.
        • flags (unsigned int) – Currently must be CU_GRAPH_COND_ASSIGN_DEFAULT or 0.

    Returns:

    See also
        cuGraphAddNode
Occupancy#

    This section describes the occupancy calculation functions of the
    low-level CUDA driver application programming interface.
cuda.bindings.driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dynamicSMemSize)#

    Returns occupancy of a function.

    Returns in *numBlocks the number of the maximum active blocks per
    streaming multiprocessor.

    Note that the API can also be used with context-less kernel CUkernel by
    querying the handle using cuLibraryGetKernel() and then passing it to
    the API by casting to CUfunction. Here, the context to use for
    calculations will be the current context.

    Parameters:
        • func (CUfunction) – Kernel for which occupancy is calculated
        • blockSize (int) – Block size the kernel is intended to be launched with
        • dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes

    Returns:
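    A minimal sketch, assuming `kernel` is a loaded CUfunction handle (an
    assumption) and the usual (CUresult, value) tuple return:

        from cuda.bindings import driver

        # Max resident blocks per SM at block size 256, no dynamic smem.
        err, num_blocks = driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(
            kernel, 256, 0)
        print("blocks per SM:", num_blocks)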
cuda.bindings.driver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, size_t dynamicSMemSize, unsigned int flags)#

    Returns occupancy of a function.

    Returns in *numBlocks the number of the maximum active blocks per
    streaming multiprocessor.

    The Flags parameter controls how special cases are handled. The valid
    flags are:

    Note that the API can also be used with context-less kernel CUkernel by
    querying the handle using cuLibraryGetKernel() and then passing it to
    the API by casting to CUfunction. Here, the context to use for
    calculations will be the current context.

    Parameters:
        • func (CUfunction) – Kernel for which occupancy is calculated
        • blockSize (int) – Block size the kernel is intended to be launched with
        • dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes
        • flags (unsigned int) – Requested behavior for the occupancy calculator

    Returns:
cuda.bindings.driver.cuOccupancyMaxPotentialBlockSize(func, blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit)#

    Suggest a launch configuration with reasonable occupancy.

    Returns in *blockSize a reasonable block size that can achieve the
    maximum occupancy (or, the maximum number of active warps with the
    fewest blocks per multiprocessor), and in *minGridSize the minimum grid
    size to achieve the maximum occupancy.

    If blockSizeLimit is 0, the configurator will use the maximum block
    size permitted by the device / function instead.

    If per-block dynamic shared memory allocation is not needed, the user
    should leave both blockSizeToDynamicSMemSize and dynamicSMemSize as 0.

    If per-block dynamic shared memory allocation is needed, then if the
    dynamic shared memory size is constant regardless of block size, the
    size should be passed through dynamicSMemSize, and
    blockSizeToDynamicSMemSize should be NULL.

    Otherwise, if the per-block dynamic shared memory size varies with
    different block sizes, the user needs to provide a unary function
    through blockSizeToDynamicSMemSize that computes the dynamic shared
    memory needed by func for any given block size. dynamicSMemSize is
    ignored. An example signature is:

    View CUDA Toolkit Documentation for a C++ code example

    Note that the API can also be used with context-less kernel CUkernel by
    querying the handle using cuLibraryGetKernel() and then passing it to
    the API by casting to CUfunction. Here, the context to use for
    calculations will be the current context.

    Parameters:
        • func (CUfunction) – Kernel for which launch configuration is calculated
        • blockSizeToDynamicSMemSize (CUoccupancyB2DSize) – A function that calculates how much per-block dynamic shared memory func uses based on the block size
        • dynamicSMemSize (size_t) – Dynamic shared memory usage intended, in bytes
        • blockSizeLimit (int) – The maximum block size func is designed to handle

    Returns:

    See also
        cudaOccupancyMaxPotentialBlockSize
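    A hedged sketch for the simple case (no dynamic shared memory), assuming
    `kernel` is a CUfunction, that None is accepted for the NULL
    blockSizeToDynamicSMemSize callback, and that the outputs come back as
    (CUresult, minGridSize, blockSize) in C parameter order (assumptions):

        from cuda.bindings import driver

        # blockSizeLimit=0 lets the device/function maximum apply.
        err, min_grid, block = driver.cuOccupancyMaxPotentialBlockSize(
            kernel, None, 0, 0)
        print("suggested block size:", block, "min grid:", min_grid)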
    +
    +cuda.bindings.driver.cuOccupancyMaxPotentialBlockSizeWithFlags(func, blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags)#
    +

    Suggest a launch configuration with reasonable occupancy.

    +

    An extended version of cuOccupancyMaxPotentialBlockSize. In +addition to arguments passed to +cuOccupancyMaxPotentialBlockSize, +cuOccupancyMaxPotentialBlockSizeWithFlags also takes a +Flags parameter.

    +

    The Flags parameter controls how special cases are handled. The valid +flags are:

    +
      +
    • CU_OCCUPANCY_DEFAULT, which maintains the default +behavior as cuOccupancyMaxPotentialBlockSize;

    • +
    • CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the default behavior on platforms where global caching affects occupancy. On such platforms, the launch configurations that produce maximal occupancy might not support global caching. Setting CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE guarantees that the produced launch configuration is global caching compatible at a potential cost of occupancy. More information about this feature can be found in the “Unified L1/Texture Cache” section of the Maxwell tuning guide.

    • +
    +

    Note that the API can also be used with context-less kernel +CUkernel by querying the handle using +cuLibraryGetKernel() and then passing it to the API by +casting to CUfunction. Here, the context to use for +calculations will be the current context.

    +
    +
    Parameters:
    +
      +
    • func (CUfunction) – Kernel for which launch configuration is calculated

    • +
    • blockSizeToDynamicSMemSize (CUoccupancyB2DSize) – A function that calculates how much per-block dynamic shared memory +func uses based on the block size

    • +
    • dynamicSMemSize (size_t) – Dynamic shared memory usage intended, in bytes

    • +
    • blockSizeLimit (int) – The maximum block size func is designed to handle

    • +
    • flags (unsigned int) – Options

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cudaOccupancyMaxPotentialBlockSizeWithFlags

    +
    +
    + +
    +
    +cuda.bindings.driver.cuOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize)#
    +

    Returns dynamic shared memory available per block when launching numBlocks blocks on SM.

    +

    Returns in *dynamicSmemSize the maximum size of dynamic shared memory +to allow numBlocks blocks per SM.

    +

    Note that the API can also be used with context-less kernel +CUkernel by querying the handle using +cuLibraryGetKernel() and then passing it to the API by +casting to CUfunction. Here, the context to use for +calculations will be the current context.

    +
    +
    Parameters:
    +
      +
    • func (CUfunction) – Kernel function for which occupancy is calculated

    • +
    • numBlocks (int) – Number of blocks to fit on SM

    • +
    • blockSize (int) – Size of the blocks

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    + +
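    For illustration (hypothetical `kernel` again), this is the inverse question to the occupancy queries above: how much dynamic shared memory may each block use if four 128-thread blocks should stay resident per SM? A minimal sketch:

        from cuda.bindings import driver

        err, smem_per_block = driver.cuOccupancyAvailableDynamicSMemPerBlock(
            kernel,  # CUfunction, assumed to exist
            4,       # desired resident blocks per SM
            128)     # threads per block
        assert err == driver.CUresult.CUDA_SUCCESS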
    +
    +cuda.bindings.driver.cuOccupancyMaxPotentialClusterSize(func, CUlaunchConfig config: Optional[CUlaunchConfig])#
    +

    Given the kernel function (func) and launch configuration (config), return the maximum cluster size in *clusterSize.

    +

    The cluster dimensions in config are ignored. If func has a required cluster size set (see cudaFuncGetAttributes / cuFuncGetAttribute), *clusterSize will reflect the required cluster size.

    +

    By default this function will always return a value that’s portable on +future hardware. A higher value may be returned if the kernel function +allows non-portable cluster sizes.

    +

    This function will respect the compile time launch bounds.

    +

    Note that the API can also be used with context-less kernel +CUkernel by querying the handle using +cuLibraryGetKernel() and then passing it to the API by +casting to CUfunction. Here, the context to use for +calculations will either be taken from the specified stream +config->hStream or the current context in case of NULL stream.

    +
    +
    Parameters:
    +
      +
    • func (CUfunction) – Kernel function for which maximum cluster size is calculated

    • +
    • config (CUlaunchConfig) – Launch configuration for the given kernel function

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
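    A hedged sketch (assuming `kernel` is a CUfunction and that CUlaunchConfig exposes the grid/block fields of its C counterpart): probe the largest portable cluster size for a given launch shape. The cluster dimensions themselves need not be filled in, since this query ignores them, and leaving hStream at its default (NULL) means the current context is used:

        from cuda.bindings import driver

        config = driver.CUlaunchConfig()
        config.gridDimX, config.gridDimY, config.gridDimZ = 256, 1, 1
        config.blockDimX, config.blockDimY, config.blockDimZ = 128, 1, 1
        # config.hStream left at its default (NULL stream -> current context)

        err, cluster_size = driver.cuOccupancyMaxPotentialClusterSize(kernel, config)
        assert err == driver.CUresult.CUDA_SUCCESS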
    +
    +cuda.bindings.driver.cuOccupancyMaxActiveClusters(func, CUlaunchConfig config: Optional[CUlaunchConfig])#
    +

    Given the kernel function (func) and launch configuration (config), return the maximum number of clusters that could co-exist on the target device in *numClusters.

    +

    If the function has required cluster size already set (see +cudaFuncGetAttributes / cuFuncGetAttribute), +the cluster size from config must either be unspecified or match the +required size. Without required sizes, the cluster size must be +specified in config, else the function will return an error.

    +

    Note that various attributes of the kernel function may affect occupancy calculation. The runtime environment may affect how the hardware schedules the clusters, so the calculated occupancy is not guaranteed to be achievable.

    +

    Note that the API can also be used with context-less kernel +CUkernel by querying the handle using +cuLibraryGetKernel() and then passing it to the API by +casting to CUfunction. Here, the context to use for +calculations will either be taken from the specified stream +config->hStream or the current context in case of NULL stream.

    +
    +
    Parameters:
    +
      +
    • func (CUfunction) – Kernel function for which maximum number of clusters are calculated

    • +
    • config (CUlaunchConfig) – Launch configuration for the given kernel function

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +

    Texture Object Management#

    +

    This section describes the texture object management functions of the low-level CUDA driver application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher.

    +
    +
    +cuda.bindings.driver.cuTexObjectCreate(CUDA_RESOURCE_DESC pResDesc: Optional[CUDA_RESOURCE_DESC], CUDA_TEXTURE_DESC pTexDesc: Optional[CUDA_TEXTURE_DESC], CUDA_RESOURCE_VIEW_DESC pResViewDesc: Optional[CUDA_RESOURCE_VIEW_DESC])#
    +

    Creates a texture object.

    +

    Creates a texture object and returns it in pTexObject. pResDesc +describes the data to texture from. pTexDesc describes how the data +should be sampled. pResViewDesc is an optional argument that +specifies an alternate format for the data described by pResDesc, and +also describes the subresource region to restrict access to when +texturing. pResViewDesc can only be specified if the type of resource +is a CUDA array or a CUDA mipmapped array not in a block compressed +format.

    +

    Texture objects are only supported on devices of compute capability 3.0 +or higher. Additionally, a texture object is an opaque value, and, as +such, should only be accessed through CUDA API calls.

    +

    The CUDA_RESOURCE_DESC structure is defined as:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    where:

    +
      +
    • resType specifies the type of resource +to texture from. CUresourceType is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    +

    If resType is set to +CU_RESOURCE_TYPE_ARRAY, +CUDA_RESOURCE_DESC::res::array::hArray must be set to a +valid CUDA array handle.

    +

    If resType is set to +CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, +CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray must be +set to a valid CUDA mipmapped array handle.

    +

    If resType is set to CU_RESOURCE_TYPE_LINEAR, CUDA_RESOURCE_DESC::res::linear::devPtr must be set to a valid device pointer that is aligned to CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. CUDA_RESOURCE_DESC::res::linear::format and CUDA_RESOURCE_DESC::res::linear::numChannels describe the format of each component and the number of components per array element. CUDA_RESOURCE_DESC::res::linear::sizeInBytes specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels)).

    +

    If resType is set to CU_RESOURCE_TYPE_PITCH2D, CUDA_RESOURCE_DESC::res::pitch2D::devPtr must be set to a valid device pointer that is aligned to CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. CUDA_RESOURCE_DESC::res::pitch2D::format and CUDA_RESOURCE_DESC::res::pitch2D::numChannels describe the format of each component and the number of components per array element. CUDA_RESOURCE_DESC::res::pitch2D::width and CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH.

    +
      +
    • flags must be set to zero.

    • +
    +

    The CUDA_TEXTURE_DESC struct is defined as

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    where

    +
      +
    • addressMode specifies the addressing +mode for each dimension of the texture data. +CUaddress_mode is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • This is ignored if resType is CU_RESOURCE_TYPE_LINEAR. Also, if the flag CU_TRSF_NORMALIZED_COORDINATES is not set, the only supported address mode is CU_TR_ADDRESS_MODE_CLAMP.

    • +
    • filterMode specifies the filtering mode +to be used when fetching from the texture. CUfilter_mode is defined +as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • This is ignored if resType is +CU_RESOURCE_TYPE_LINEAR.

    • +
    • flags can be any combination of the +following:

      +
        +
      • CU_TRSF_READ_AS_INTEGER, which suppresses the default +behavior of having the texture promote integer data to floating +point data in the range [0, 1]. Note that texture with 32-bit +integer format would not be promoted, regardless of whether or not +this flag is specified.

      • +
      • CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior of having the texture coordinates range from [0, Dim) where Dim is the width or height of the CUDA array. Instead, the texture coordinates [0, 1.0) reference the entire breadth of the array dimension. Note that for CUDA mipmapped arrays, this flag has to be set.

      • +
      • CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables +any trilinear filtering optimizations. Trilinear optimizations +improve texture filtering performance by allowing bilinear +filtering on textures in scenarios where it can closely approximate +the expected results.

      • +
      • CU_TRSF_SEAMLESS_CUBEMAP, which enables seamless cube +map filtering. This flag can only be specified if the underlying +resource is a CUDA array or a CUDA mipmapped array that was created +with the flag CUDA_ARRAY3D_CUBEMAP. When seamless cube +map filtering is enabled, texture address modes specified by +addressMode are ignored. Instead, if +the filterMode is set to +CU_TR_FILTER_MODE_POINT the address mode +CU_TR_ADDRESS_MODE_CLAMP will be applied for all +dimensions. If the filterMode is set +to CU_TR_FILTER_MODE_LINEAR seamless cube map filtering +will be performed when sampling along the cube face borders.

      • +
      +
    • +
    • maxAnisotropy specifies the maximum +anisotropy ratio to be used when doing anisotropic filtering. This +value will be clamped to the range [1,16].

    • +
    • mipmapFilterMode specifies the filter +mode when the calculated mipmap level lies between two defined mipmap +levels.

    • +
    • mipmapLevelBias specifies the offset to +be applied to the calculated mipmap level.

    • +
    • minMipmapLevelClamp specifies the lower +end of the mipmap level range to clamp access to.

    • +
    • maxMipmapLevelClamp specifies the upper +end of the mipmap level range to clamp access to.

    • +
    +

    The CUDA_RESOURCE_VIEW_DESC struct is defined as

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    where:

    +
      +
    • format specifies how the data contained in the CUDA array or CUDA mipmapped array should be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base format of CU_AD_FORMAT_UNSIGNED_INT32 with 2 or 4 channels, depending on the block compressed format. For example, BC1 and BC4 require the underlying CUDA array to have a format of CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base format but with 4 channels.

    • +
    • width specifies the new width of +the texture data. If the resource view format is a block compressed +format, this value has to be 4 times the original width of the +resource. For non block compressed formats, this value has to be +equal to that of the original resource.

    • +
    • height specifies the new height +of the texture data. If the resource view format is a block +compressed format, this value has to be 4 times the original height +of the resource. For non block compressed formats, this value has to +be equal to that of the original resource.

    • +
    • depth specifies the new depth of +the texture data. This value has to be equal to that of the original +resource.

    • +
    • firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. For non-mipmapped resources, this value has to be zero. minMipmapLevelClamp and maxMipmapLevelClamp will be relative to this value. For example, if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, then the actual minimum mipmap level clamp will be 3.2.

    • +
    • lastMipmapLevel specifies the +least detailed mipmap level. For non-mipmapped resources, this value +has to be zero.

    • +
    • firstLayer specifies the first +layer index for layered textures. This will be the new layer zero. +For non-layered resources, this value has to be zero.

    • +
    • lastLayer specifies the last +layer index for layered textures. For non-layered resources, this +value has to be zero.

    • +
    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
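    To make the descriptor plumbing concrete, a minimal sketch (assuming `cu_array` is an existing CUarray in the current context, and that the descriptor classes support plain attribute assignment as in the bundled examples):

        from cuda.bindings import driver

        # Describe the data to texture from: a CUDA array.
        res_desc = driver.CUDA_RESOURCE_DESC()
        res_desc.resType = driver.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
        res_desc.res.array.hArray = cu_array
        res_desc.flags = 0  # must be zero

        # Describe how the data is sampled: clamped, unfiltered coordinates.
        tex_desc = driver.CUDA_TEXTURE_DESC()
        tex_desc.addressMode[0] = driver.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP
        tex_desc.filterMode = driver.CUfilter_mode.CU_TR_FILTER_MODE_POINT
        tex_desc.flags = 0

        err, tex_obj = driver.cuTexObjectCreate(res_desc, tex_desc, None)  # no resource view
        assert err == driver.CUresult.CUDA_SUCCESS
        # ... pass tex_obj to kernels as a CUtexObject, then:
        err, = driver.cuTexObjectDestroy(tex_obj)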
    +
    +cuda.bindings.driver.cuTexObjectDestroy(texObject)#
    +

    Destroys a texture object.

    +

    Destroys the texture object specified by texObject.

    +
    +
    Parameters:
    +

    texObject (CUtexObject) – Texture object to destroy

    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuTexObjectGetResourceDesc(texObject)#
    +

    Returns a texture object’s resource descriptor.

    +

    Returns the resource descriptor for the texture object specified by +texObject.

    +
    +
    Parameters:
    +

    texObject (CUtexObject) – Texture object

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuTexObjectGetTextureDesc(texObject)#
    +

    Returns a texture object’s texture descriptor.

    +

    Returns the texture descriptor for the texture object specified by +texObject.

    +
    +
    Parameters:
    +

    texObject (CUtexObject) – Texture object

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuTexObjectGetResourceViewDesc(texObject)#
    +

    Returns a texture object’s resource view descriptor.

    +

    Returns the resource view descriptor for the texture object specified by texObject. If no resource view was set for texObject, CUDA_ERROR_INVALID_VALUE is returned.

    +
    +
    Parameters:
    +

    texObject (CUtexObject) – Texture object

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +

    Surface Object Management#

    +

    This section describes the surface object management functions of the low-level CUDA driver application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher.

    +
    +
    +cuda.bindings.driver.cuSurfObjectCreate(CUDA_RESOURCE_DESC pResDesc: Optional[CUDA_RESOURCE_DESC])#
    +

    Creates a surface object.

    +

    Creates a surface object and returns it in pSurfObject. pResDesc +describes the data to perform surface load/stores on. +resType must be +CU_RESOURCE_TYPE_ARRAY and +CUDA_RESOURCE_DESC::res::array::hArray must be set to a +valid CUDA array handle. flags must be +set to zero.

    +

    Surface objects are only supported on devices of compute capability 3.0 +or higher. Additionally, a surface object is an opaque value, and, as +such, should only be accessed through CUDA API calls.

    +
    +
    Parameters:
    +

    pResDesc (CUDA_RESOURCE_DESC) – Resource descriptor

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
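    A matching sketch for surfaces (again assuming `cu_array` is a CUarray, here one created with the CUDA_ARRAY3D_SURFACE_LDST flag so that it supports load/store):

        from cuda.bindings import driver

        res_desc = driver.CUDA_RESOURCE_DESC()
        res_desc.resType = driver.CUresourcetype.CU_RESOURCE_TYPE_ARRAY
        res_desc.res.array.hArray = cu_array
        res_desc.flags = 0  # must be zero

        err, surf_obj = driver.cuSurfObjectCreate(res_desc)
        assert err == driver.CUresult.CUDA_SUCCESS
        # ... use surf_obj in kernels as a CUsurfObject, then:
        err, = driver.cuSurfObjectDestroy(surf_obj)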
    +
    +cuda.bindings.driver.cuSurfObjectDestroy(surfObject)#
    +

    Destroys a surface object.

    +

    Destroys the surface object specified by surfObject.

    +
    +
    Parameters:
    +

    surfObject (CUsurfObject) – Surface object to destroy

    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuSurfObjectGetResourceDesc(surfObject)#
    +

    Returns a surface object’s resource descriptor.

    +

    Returns the resource descriptor for the surface object specified by +surfObject.

    +
    +
    Parameters:
    +

    surfObject (CUsurfObject) – Surface object

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +

    Tensor Map Object Management#

    +

    This section describes the tensor map object management functions of the low-level CUDA driver application programming interface. The tensor map object API is only supported on devices of compute capability 9.0 or higher.

    +
    +
    +cuda.bindings.driver.cuTensorMapEncodeTiled(tensorDataType: CUtensorMapDataType, tensorRank, globalAddress, globalDim: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], globalStrides: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], boxDim: Optional[Tuple[cuuint32_t] | List[cuuint32_t]], elementStrides: Optional[Tuple[cuuint32_t] | List[cuuint32_t]], interleave: CUtensorMapInterleave, swizzle: CUtensorMapSwizzle, l2Promotion: CUtensorMapL2promotion, oobFill: CUtensorMapFloatOOBfill)#
    +

    Create a tensor map descriptor object representing tiled memory region.

    +

    Creates a descriptor for a Tensor Memory Access (TMA) object specified by the parameters describing a tiled region and returns it in tensorMap.

    +

    Tensor map objects are only supported on devices of compute capability +9.0 or higher. Additionally, a tensor map object is an opaque value, +and, as such, should only be accessed through CUDA API calls.

    +

    The parameters passed are bound to the following requirements:

    +
      +
    • tensorMap address must be aligned to 64 bytes.

    • +
    • tensorDataType has to be an enum from +CUtensorMapDataType which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • tensorRank must be non-zero and less than or equal to the maximum +supported dimensionality of 5. If interleave is not +CU_TENSOR_MAP_INTERLEAVE_NONE, then tensorRank must +additionally be greater than or equal to 3.

    • +
    • globalAddress, which specifies the starting address of the memory +region described, must be 32 byte aligned when interleave is +CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned +otherwise.

    • +
    • globalDim array, which specifies tensor size of each of the +tensorRank dimensions, must be non-zero and less than or equal to +2^32.

    • +
    • globalStrides array, which specifies tensor stride of each of the +lower tensorRank - 1 dimensions in bytes, must be a multiple of 16 +and less than 2^40. Additionally, the stride must be a multiple of 32 +when interleave is CU_TENSOR_MAP_INTERLEAVE_32B. Each +following dimension specified includes previous dimension stride:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • boxDim array, which specifies the number of elements to be traversed along each of the tensorRank dimensions, must be non-zero and less than or equal to 256. When interleave is CU_TENSOR_MAP_INTERLEAVE_NONE, { boxDim[0] * elementSizeInBytes(tensorDataType) } must be a multiple of 16 bytes.

    • +
    • elementStrides array, which specifies the iteration step along each of the tensorRank dimensions, must be non-zero and less than or equal to 8. Note that when interleave is CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since TMA doesn’t support the stride for dimension zero. When all elements of the elementStrides array are one, boxDim specifies the number of elements to load. However, if elementStrides[i] is not equal to one, then TMA loads ceil(boxDim[i] / elementStrides[i]) elements along the i-th dimension. To load N elements along the i-th dimension, boxDim[i] must be set to N * elementStrides[i].

    • +
    • interleave specifies the interleaved layout of type +CUtensorMapInterleave, which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • TMA supports interleaved layouts like NC/8HWC8, where C8 utilizes 16 bytes in memory assuming 2 bytes per channel, or NC/16HWC16, where C16 uses 32 bytes. When interleave is CU_TENSOR_MAP_INTERLEAVE_NONE and swizzle is not CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension (computed as boxDim[0] multiplied by the element size derived from tensorDataType) must be less than or equal to the swizzle size.

      +
        +
      • CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension +will be <= 32.

      • +
      • CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension +will be <= 64.

      • +
      • CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension +will be <= 128.

      • +
      +
    • +
    • swizzle, which specifies the shared memory bank swizzling pattern, +has to be of type CUtensorMapSwizzle which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • Data are organized in a specific order in global memory; however, +this may not match the order in which the application accesses data +in shared memory. This difference in data organization may cause bank +conflicts when shared memory is accessed. In order to avoid this +problem, data can be loaded to shared memory with shuffling across +shared memory banks. When interleave is +CU_TENSOR_MAP_INTERLEAVE_32B, swizzle must be +CU_TENSOR_MAP_SWIZZLE_32B. Other interleave modes can +have any swizzling pattern.

    • +
    • l2Promotion specifies the L2 fetch size, which indicates the byte granularity at which L2 requests are filled from DRAM. It must be of type CUtensorMapL2promotion, which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • oobFill, which indicates whether zero or a special NaN constant +should be used to fill out-of-bound elements, must be of type +CUtensorMapFloatOOBfill which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • Note that +CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can +only be used when tensorDataType represents a floating-point data +type.

    • +
    +
    +
    Parameters:
    +
      +
    • tensorDataType (CUtensorMapDataType) – Tensor data type

    • +
    • tensorRank (Any) – Dimensionality of tensor

    • +
    • globalAddress (Any) – Starting address of memory region described by tensor

    • +
    • globalDim (List[cuuint64_t]) – Array containing tensor size (number of elements) along each of the +tensorRank dimensions

    • +
    • globalStrides (List[cuuint64_t]) – Array containing stride size (in bytes) along each of the +tensorRank - 1 dimensions

    • +
    • boxDim (List[cuuint32_t]) – Array containing the traversal box size (number of elements) along each of the tensorRank dimensions. Specifies how many elements to traverse along each tensor dimension.

    • +
    • elementStrides (List[cuuint32_t]) – Array containing traversal stride in each of the tensorRank +dimensions

    • +
    • interleave (CUtensorMapInterleave) – Type of interleaved layout the tensor addresses

    • +
    • swizzle (CUtensorMapSwizzle) – Bank swizzling pattern inside shared memory

    • +
    • l2Promotion (CUtensorMapL2promotion) – L2 promotion size

    • +
    • oobFill (CUtensorMapFloatOOBfill) – Indicate whether zero or special NaN constant must be used to fill +out-of-bound elements

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
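    The constraints above are easier to see with numbers. A hedged sketch (hypothetical setup: `d_ptr` is a 16-byte-aligned CUdeviceptr holding a 256x256 row-major float32 matrix) that encodes 64x64 tiles with no interleave, swizzle, or L2 promotion:

        from cuda.bindings import driver

        rows = cols = 256
        err, tensor_map = driver.cuTensorMapEncodeTiled(
            driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32,
            2,      # tensorRank
            d_ptr,  # globalAddress, assumed valid and 16-byte aligned
            [driver.cuuint64_t(cols), driver.cuuint64_t(rows)],  # globalDim
            [driver.cuuint64_t(cols * 4)],  # globalStrides: rank - 1 entries; 1024 is a multiple of 16
            [driver.cuuint32_t(64), driver.cuuint32_t(64)],      # boxDim: 64 * 4 bytes is a multiple of 16
            [driver.cuuint32_t(1), driver.cuuint32_t(1)],        # elementStrides
            driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_NONE,
            driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_NONE,
            driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_NONE,
            driver.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE)
        assert err == driver.CUresult.CUDA_SUCCESS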
    +
    +cuda.bindings.driver.cuTensorMapEncodeIm2col(tensorDataType: CUtensorMapDataType, tensorRank, globalAddress, globalDim: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], globalStrides: Optional[Tuple[cuuint64_t] | List[cuuint64_t]], pixelBoxLowerCorner: Optional[Tuple[int] | List[int]], pixelBoxUpperCorner: Optional[Tuple[int] | List[int]], channelsPerPixel, pixelsPerColumn, elementStrides: Optional[Tuple[cuuint32_t] | List[cuuint32_t]], interleave: CUtensorMapInterleave, swizzle: CUtensorMapSwizzle, l2Promotion: CUtensorMapL2promotion, oobFill: CUtensorMapFloatOOBfill)#
    +

    Create a tensor map descriptor object representing im2col memory region.

    +

    Creates a descriptor for a Tensor Memory Access (TMA) object specified by the parameters describing an im2col memory layout and returns it in tensorMap.

    +

    Tensor map objects are only supported on devices of compute capability +9.0 or higher. Additionally, a tensor map object is an opaque value, +and, as such, should only be accessed through CUDA API calls.

    +

    The parameters passed are bound to the following requirements:

    +
      +
    • tensorMap address must be aligned to 64 bytes.

    • +
    • tensorDataType has to be an enum from +CUtensorMapDataType which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • tensorRank, which specifies the number of tensor dimensions, must +be 3, 4, or 5.

    • +
    • globalAddress, which specifies the starting address of the memory +region described, must be 32 byte aligned when interleave is +CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned +otherwise.

    • +
    • globalDim array, which specifies tensor size of each of the +tensorRank dimensions, must be non-zero and less than or equal to +2^32.

    • +
    • globalStrides array, which specifies tensor stride of each of the +lower tensorRank - 1 dimensions in bytes, must be a multiple of 16 +and less than 2^40. Additionally, the stride must be a multiple of 32 +when interleave is CU_TENSOR_MAP_INTERLEAVE_32B. Each +following dimension specified includes previous dimension stride:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • pixelBoxLowerCorner array specifies the coordinate offsets {D, H, +W} of the bounding box from top/left/front corner. The number of +offsets and their precision depend on the tensor dimensionality:

      +
        +
      • When tensorRank is 3, one signed offset within range [-32768, +32767] is supported.

      • +
      • When tensorRank is 4, two signed offsets each within range [-128, +127] are supported.

      • +
      • When tensorRank is 5, three offsets each within range [-16, 15] +are supported.

      • +
      +
    • +
    • pixelBoxUpperCorner array specifies the coordinate offsets {D, H, +W} of the bounding box from bottom/right/back corner. The number of +offsets and their precision depend on the tensor dimensionality:

      +
        +
      • When tensorRank is 3, one signed offset within range [-32768, +32767] is supported.

      • +
      • When tensorRank is 4, two signed offsets each within range [-128, +127] are supported.

      • +
      • When tensorRank is 5, three offsets each within range [-16, 15] +are supported. The bounding box specified by pixelBoxLowerCorner +and pixelBoxUpperCorner must have non-zero area.

      • +
      +
    • +
    • channelsPerPixel, which specifies the number of elements which must +be accessed along C dimension, must be less than or equal to 256.

    • +
    • pixelsPerColumn, which specifies the number of elements that must +be accessed along the {N, D, H, W} dimensions, must be less than or +equal to 1024.

    • +
    • elementStrides array, which specifies the iteration step along each of the tensorRank dimensions, must be non-zero and less than or equal to 8. Note that when interleave is CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since TMA doesn’t support the stride for dimension zero. When all elements of the elementStrides array are one, boxDim specifies the number of elements to load. However, if elementStrides[i] is not equal to one for some i, then TMA loads ceil(boxDim[i] / elementStrides[i]) elements along the i-th dimension. To load N elements along the i-th dimension, boxDim[i] must be set to N * elementStrides[i].

    • +
    • interleave specifies the interleaved layout of type +CUtensorMapInterleave, which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • TMA supports interleaved layouts like NC/8HWC8, where C8 utilizes 16 bytes in memory assuming 2 bytes per channel, or NC/16HWC16, where C16 uses 32 bytes. When interleave is CU_TENSOR_MAP_INTERLEAVE_NONE and swizzle is not CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension (computed as boxDim[0] multiplied by the element size derived from tensorDataType) must be less than or equal to the swizzle size.

      +
        +
      • CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension +will be <= 32.

      • +
      • CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension +will be <= 64.

      • +
      • CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension +will be <= 128.

      • +
      +
    • +
    • swizzle, which specifies the shared memory bank swizzling pattern, +has to be of type CUtensorMapSwizzle which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • Data are organized in a specific order in global memory; however, +this may not match the order in which the application accesses data +in shared memory. This difference in data organization may cause bank +conflicts when shared memory is accessed. In order to avoid this +problem, data can be loaded to shared memory with shuffling across +shared memory banks. When interleave is +CU_TENSOR_MAP_INTERLEAVE_32B, swizzle must be +CU_TENSOR_MAP_SWIZZLE_32B. Other interleave modes can +have any swizzling pattern.

    • +
    • l2Promotion specifies L2 fetch size which indicates the byte +granularity at which L2 requests are filled from DRAM. It must be of +type CUtensorMapL2promotion, which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • oobFill, which indicates whether zero or a special NaN constant +should be used to fill out-of-bound elements, must be of type +CUtensorMapFloatOOBfill which is defined as:

    • +
    • View CUDA Toolkit Documentation for a C++ code example

    • +
    • Note that +CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can +only be used when tensorDataType represents a floating-point data +type.

    • +
    +
    +
    Parameters:
    +
      +
    • tensorDataType (CUtensorMapDataType) – Tensor data type

    • +
    • tensorRank (Any) – Dimensionality of tensor; must be at least 3

    • +
    • globalAddress (Any) – Starting address of memory region described by tensor

    • +
    • globalDim (List[cuuint64_t]) – Array containing tensor size (number of elements) along each of the +tensorRank dimensions

    • +
    • globalStrides (List[cuuint64_t]) – Array containing stride size (in bytes) along each of the +tensorRank - 1 dimensions

    • +
    • pixelBoxLowerCorner (List[int]) – Array containing DHW dimensions of lower box corner

    • +
    • pixelBoxUpperCorner (List[int]) – Array containing DHW dimensions of upper box corner

    • +
    • channelsPerPixel (Any) – Number of channels per pixel

    • +
    • pixelsPerColumn (Any) – Number of pixels per column

    • +
    • elementStrides (List[cuuint32_t]) – Array containing traversal stride in each of the tensorRank +dimensions

    • +
    • interleave (CUtensorMapInterleave) – Type of interleaved layout the tensor addresses

    • +
    • swizzle (CUtensorMapSwizzle) – Bank swizzling pattern inside shared memory

    • +
    • l2Promotion (CUtensorMapL2promotion) – L2 promotion size

    • +
    • oobFill (CUtensorMapFloatOOBfill) – Indicate whether zero or special NaN constant will be used to fill +out-of-bound elements

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuTensorMapReplaceAddress(CUtensorMap tensorMap: Optional[CUtensorMap], globalAddress)#
    +

    Modify an existing tensor map descriptor with an updated global address.

    +

    Modifies the descriptor for Tensor Memory Access (TMA) object passed in +tensorMap with an updated globalAddress.

    +

    Tensor map objects are only supported on devices of compute capability +9.0 or higher. Additionally, a tensor map object is an opaque value, +and, as such, should only be accessed through CUDA API calls.

    +
    +
    Parameters:
    +
      +
    • tensorMap (CUtensorMap) – Tensor map object to modify

    • +
    • globalAddress (Any) – Starting address of memory region described by tensor, must follow +previous alignment requirements

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +

    Peer Context Memory Access#

    +

    This section describes the direct peer context memory access functions of the low-level CUDA driver application programming interface.

    +
    +
    +cuda.bindings.driver.cuDeviceCanAccessPeer(dev, peerDev)#
    +

    Queries if a device may directly access a peer device’s memory.

    +

    Returns in *canAccessPeer a value of 1 if contexts on dev are +capable of directly accessing memory from contexts on peerDev and 0 +otherwise. If direct access of peerDev from dev is possible, then +access may be enabled on two specific contexts by calling +cuCtxEnablePeerAccess().

    +
    +
    Parameters:
    +
      +
    • dev (CUdevice) – Device from which allocations on peerDev are to be directly +accessed.

    • +
    • peerDev (CUdevice) – Device on which the allocations to be directly accessed by dev +reside.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuCtxEnablePeerAccess(peerContext, unsigned int Flags)#
    +

    Enables direct access to memory allocations in a peer context.

    +

    If both the current context and peerContext are on devices which support unified addressing (as may be queried using CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and the same major compute capability, then on success all allocations from peerContext will immediately be accessible by the current context. See Unified Addressing for additional details.

    +

    Note that access granted by this call is unidirectional and that in +order to access memory from the current context in peerContext, a +separate symmetric call to cuCtxEnablePeerAccess() is +required.

    +

    Note that there are both device-wide and system-wide limitations per +system configuration, as noted in the CUDA Programming Guide under the +section “Peer-to-Peer Memory Access”.

    +

    Returns CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if +cuDeviceCanAccessPeer() indicates that the +CUdevice of the current context cannot directly access +memory from the CUdevice of peerContext.

    +

    Returns CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct +access of peerContext from the current context has already been +enabled.

    +

    Returns CUDA_ERROR_TOO_MANY_PEERS if direct peer access is +not possible because hardware resources required for peer access have +been exhausted.

    +

    Returns CUDA_ERROR_INVALID_CONTEXT if there is no current +context, peerContext is not a valid context, or if the current +context is peerContext.

    +

    Returns CUDA_ERROR_INVALID_VALUE if Flags is not 0.

    +
    +
    Parameters:
    +
      +
    • peerContext (CUcontext) – Peer context to enable direct access to from the current context

    • +
    • Flags (unsigned int) – Reserved for future use and must be set to 0

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED, CUDA_ERROR_TOO_MANY_PEERS, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, CUDA_ERROR_INVALID_VALUE

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
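    Put together, the usual enable-if-possible pattern looks like this sketch (hypothetical handles: `dev_a`/`dev_b` are CUdevice values, `ctx_b` is a context on dev_b, and a context on dev_a is current):

        from cuda.bindings import driver

        err, can_access = driver.cuDeviceCanAccessPeer(dev_a, dev_b)
        assert err == driver.CUresult.CUDA_SUCCESS

        if can_access:
            # Grants the *current* context access to ctx_b's allocations only;
            # the reverse direction needs a symmetric call made from ctx_b.
            err, = driver.cuCtxEnablePeerAccess(ctx_b, 0)  # Flags must be 0
            if err not in (driver.CUresult.CUDA_SUCCESS,
                           driver.CUresult.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED):
                raise RuntimeError(f"cuCtxEnablePeerAccess failed: {err}")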
    +
    +cuda.bindings.driver.cuCtxDisablePeerAccess(peerContext)#
    +

    Disables direct access to memory allocations in a peer context and unregisters any registered allocations.

    +

    Returns CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer +access has not yet been enabled from peerContext to the current +context.

    +

    Returns CUDA_ERROR_INVALID_CONTEXT if there is no current +context, or if peerContext is not a valid context.

    +
    +
    Parameters:
    +

    peerContext (CUcontext) – Peer context to disable direct access to

    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_PEER_ACCESS_NOT_ENABLED, CUDA_ERROR_INVALID_CONTEXT

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuDeviceGetP2PAttribute(attrib: CUdevice_P2PAttribute, srcDevice, dstDevice)#
    +

    Queries attributes of the link between two devices.

    +

    Returns in *value the value of the requested attribute attrib of +the link between srcDevice and dstDevice. The supported attributes +are:

    + +

    Returns CUDA_ERROR_INVALID_DEVICE if srcDevice or +dstDevice are not valid or if they represent the same device.

    +

    Returns CUDA_ERROR_INVALID_VALUE if attrib is not valid +or if value is a null pointer.

    +
    +
    Parameters:
    +
      +
    • attrib (CUdevice_P2PAttribute) – The requested attribute of the link between srcDevice and +dstDevice.

    • +
    • srcDevice (CUdevice) – The source device of the target link.

    • +
    • dstDevice (CUdevice) – The destination device of the target link.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +

    Graphics Interoperability#

    +

    This section describes the graphics interoperability functions of the low-level CUDA driver application programming interface.

    +
    +
    +cuda.bindings.driver.cuGraphicsUnregisterResource(resource)#
    +

    Unregisters a graphics resource for access by CUDA.

    +

    Unregisters the graphics resource resource so it is not accessible by +CUDA unless registered again.

    +

    If resource is invalid then CUDA_ERROR_INVALID_HANDLE is +returned.

    +
    +
    Parameters:
    +

    resource (CUgraphicsResource) – Resource to unregister

    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_UNKNOWN

    +
    +
    Return type:
    +

    CUresult

    +
    +
    +
    +

    See also

    +

    cuGraphicsD3D9RegisterResource, cuGraphicsD3D10RegisterResource, cuGraphicsD3D11RegisterResource, cuGraphicsGLRegisterBuffer, cuGraphicsGLRegisterImage, cudaGraphicsUnregisterResource

    +
    +
    + +
    +
    +cuda.bindings.driver.cuGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsigned int mipLevel)#
    +

    Get an array through which to access a subresource of a mapped graphics resource.

    +

    Returns in *pArray an array through which the subresource of the +mapped graphics resource resource which corresponds to array index +arrayIndex and mipmap level mipLevel may be accessed. The value set +in *pArray may change every time that resource is mapped.

    +

    If resource is not a texture then it cannot be accessed via an array +and CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. If +arrayIndex is not a valid array index for resource then +CUDA_ERROR_INVALID_VALUE is returned. If mipLevel is not +a valid mipmap level for resource then +CUDA_ERROR_INVALID_VALUE is returned. If resource is not +mapped then CUDA_ERROR_NOT_MAPPED is returned.

    +
    +
    Parameters:
    +
      +
    • resource (CUgraphicsResource) – Mapped resource to access

    • +
    • arrayIndex (unsigned int) – Array index for array textures or cubemap face index as defined by +CUarray_cubemap_face for cubemap textures for the +subresource to access

    • +
    • mipLevel (unsigned int) – Mipmap level for the subresource to access

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphicsResourceGetMappedMipmappedArray(resource)#
    +

    Get a mipmapped array through which to access a mapped graphics resource.

    +

    Returns in *pMipmappedArray a mipmapped array through which the mapped graphics resource resource may be accessed. The value set in *pMipmappedArray may change every time that resource is mapped.

    +

    If resource is not a texture then it cannot be accessed via a +mipmapped array and CUDA_ERROR_NOT_MAPPED_AS_ARRAY is +returned. If resource is not mapped then +CUDA_ERROR_NOT_MAPPED is returned.

    +
    +
    Parameters:
    +

    resource (CUgraphicsResource) – Mapped resource to access

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphicsResourceGetMappedPointer(resource)#
    +

    Get a device pointer through which to access a mapped graphics resource.

    +

    Returns in *pDevPtr a pointer through which the mapped graphics resource resource may be accessed. Returns in *pSize the size of the memory in bytes which may be accessed from that pointer. The value set in *pDevPtr may change every time that resource is mapped.

    +

    If resource is not a buffer then it cannot be accessed via a pointer +and CUDA_ERROR_NOT_MAPPED_AS_POINTER is returned. If +resource is not mapped then CUDA_ERROR_NOT_MAPPED is +returned.

    +
    +
    Parameters:
    +

    resource (CUgraphicsResource) – None

    +
    +
    Returns:
    +

      +
    • CUresult

    • +
    • pDevPtr (CUdeviceptr) – None

    • +
    • pSize (int) – None

    • +
    +

    +
    +
    +
    + +
    +
    +cuda.bindings.driver.cuGraphicsResourceSetMapFlags(resource, unsigned int flags)#
    +

    Set usage flags for mapping a graphics resource.

    +

    Set flags for mapping the graphics resource resource.

    +

    Changes to flags will take effect the next time resource is mapped. +The flags argument may be any of the following:

    +
      +
    • CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints +about how this resource will be used. It is therefore assumed that +this resource will be read from and written to by CUDA kernels. This +is the default value.

    • +
    • CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY: Specifies that +CUDA kernels which access this resource will not write to this +resource.

    • +
    • CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITEDISCARD: Specifies +that CUDA kernels which access this resource will not read from this +resource and will write over the entire contents of the resource, so +none of the data previously stored in the resource will be preserved.

    • +
    +

    If resource is presently mapped for access by CUDA then +CUDA_ERROR_ALREADY_MAPPED is returned. If flags is not +one of the above values then CUDA_ERROR_INVALID_VALUE is +returned.

    +
    +
    Parameters:
    +
      +
    • resource (CUgraphicsResource) – Registered resource to set flags for

    • +
    • flags (unsigned int) – Parameters for resource mapping

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ALREADY_MAPPED

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphicsMapResources(unsigned int count, resources, hStream)#
    +

    Map graphics resources for access by CUDA.

    +

    Maps the count graphics resources in resources for access by CUDA.

    +

    The resources in resources may be accessed by CUDA until they are +unmapped. The graphics API from which resources were registered +should not access any resources while they are mapped by CUDA. If an +application does so, the results are undefined.

    +

    This function provides the synchronization guarantee that any graphics +calls issued before cuGraphicsMapResources() will complete +before any subsequent CUDA work issued in stream begins.

    +

    If resources includes any duplicate entries then +CUDA_ERROR_INVALID_HANDLE is returned. If any of +resources are presently mapped for access by CUDA then +CUDA_ERROR_ALREADY_MAPPED is returned.

    +
    +
    Parameters:
    +
      +
    • count (unsigned int) – Number of resources to map

    • +
    • resources (CUgraphicsResource) – Resources to map for CUDA usage

    • +
    • hStream (CUstream or cudaStream_t) – Stream with which to synchronize

    • +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_ALREADY_MAPPED, CUDA_ERROR_UNKNOWN

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuGraphicsUnmapResources(unsigned int count, resources, hStream)#
    +

    Unmap graphics resources.

    +

    Unmaps the count graphics resources in resources.

    +

    Once unmapped, the resources in resources may not be accessed by CUDA +until they are mapped again.

    +

    This function provides the synchronization guarantee that any CUDA work +issued in stream before cuGraphicsUnmapResources() will +complete before any subsequently issued graphics work begins.

    +

    If resources includes any duplicate entries then +CUDA_ERROR_INVALID_HANDLE is returned. If any of +resources are not presently mapped for access by CUDA then +CUDA_ERROR_NOT_MAPPED is returned.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_NOT_MAPPED, CUDA_ERROR_UNKNOWN

    +
    +
    Return type:
    +

    CUresult

    +
    +
    + +
    + +
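    A typical frame loop, sketched (hypothetical: `resource` is a CUgraphicsResource previously registered from a graphics buffer, and `stream` is a CUstream). The returns block of cuGraphicsResourceGetMappedPointer above shows the tuple shape used here:

        from cuda.bindings import driver

        err, = driver.cuGraphicsMapResources(1, resource, stream)
        assert err == driver.CUresult.CUDA_SUCCESS

        # The pointer and size are only valid while the resource stays mapped,
        # and may differ on every map.
        err, dev_ptr, size = driver.cuGraphicsResourceGetMappedPointer(resource)
        assert err == driver.CUresult.CUDA_SUCCESS
        # ... enqueue CUDA work on `stream` that reads/writes dev_ptr ...

        err, = driver.cuGraphicsUnmapResources(1, resource, stream)
        assert err == driver.CUresult.CUDA_SUCCESS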
    +
    +

    Driver Entry Point Access#

    +

    This section describes the driver entry point access functions of the low-level CUDA driver application programming interface.

    +
    +
    +cuda.bindings.driver.cuGetProcAddress(char *symbol, int cudaVersion, flags)#
    +

    Returns the requested driver API function pointer.

    +

    Returns in **pfn the address of the CUDA driver function for the +requested CUDA version and flags.

    +

    The CUDA version is specified as (1000 * major + 10 * minor), so CUDA +11.2 should be specified as 11020. For a requested driver symbol, if +the specified CUDA version is greater than or equal to the CUDA version +in which the driver symbol was introduced, this API will return the +function pointer to the corresponding versioned function.

    +

    The pointer returned by the API should be cast to a function pointer matching the requested driver function’s definition in the API header file. The function pointer typedef can be picked up from the corresponding typedefs header file. For example, cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h.

    +

    The API will return CUDA_SUCCESS and set the returned pfn +to NULL if the requested driver function is not supported on the +platform, no ABI compatible driver function exists for the specified +cudaVersion or if the driver symbol is invalid.

    +

    It will also set the optional symbolStatus to one of the values in +CUdriverProcAddressQueryResult with the following meanings:

    + +

    The requested flags can be:

    + +
    +
    Parameters:
    +
      +
    • symbol (bytes) – The base name of the driver API function to look for. As an +example, for the driver API cuMemAlloc_v2, symbol +would be cuMemAlloc and cudaVersion would be the ABI compatible +CUDA version for the _v2 variant.

    • +
    • cudaVersion (int) – The CUDA version to look for the requested driver symbol

    • +
    • flags (Any) – Flags to specify search options.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
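    A short probe, sketched under the assumption that the binding returns the pfn and symbolStatus out-parameters alongside the CUresult (the tuple convention used throughout this module):

        from cuda.bindings import driver

        # Look up the driver entry point for cuMemAlloc as of CUDA 12.0 (12000).
        err, pfn, symbol_status = driver.cuGetProcAddress(
            b"cuMemAlloc",  # base name: no _v2 suffix
            12000,          # 1000 * major + 10 * minor
            driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT.value)
        assert err == driver.CUresult.CUDA_SUCCESS

        # pfn is an opaque address; from Python the query result is the useful part.
        found = (symbol_status ==
                 driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS)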
    +
    +

    Coredump Attributes Control API#

    +

    This section describes the coredump attribute control functions of the low-level CUDA driver application programming interface.

    +
    +
    +class cuda.bindings.driver.CUcoredumpSettings(value)#
    +

    Flags for choosing a coredump attribute to get/set

    +
    +
    +CU_COREDUMP_ENABLE_ON_EXCEPTION = 1#
    +
    + +
    +
    +CU_COREDUMP_TRIGGER_HOST = 2#
    +
    + +
    +
    +CU_COREDUMP_LIGHTWEIGHT = 3#
    +
    + +
    +
    +CU_COREDUMP_ENABLE_USER_TRIGGER = 4#
    +
    + +
    +
    +CU_COREDUMP_FILE = 5#
    +
    + +
    +
    +CU_COREDUMP_PIPE = 6#
    +
    + +
    +
    +CU_COREDUMP_GENERATION_FLAGS = 7#
    +
    + +
    +
    +CU_COREDUMP_MAX = 8#
    +
    + +
    + +
    +
    +class cuda.bindings.driver.CUCoredumpGenerationFlags(value)#
    +

    Flags for controlling coredump contents

    +
    +
    +CU_COREDUMP_DEFAULT_FLAGS = 0#
    +
    + +
    +
    +CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES = 1#
    +
    + +
    +
    +CU_COREDUMP_SKIP_GLOBAL_MEMORY = 2#
    +
    + +
    +
    +CU_COREDUMP_SKIP_SHARED_MEMORY = 4#
    +
    + +
    +
    +CU_COREDUMP_SKIP_LOCAL_MEMORY = 8#
    +
    + +
    +
    +CU_COREDUMP_SKIP_ABORT = 16#
    +
    + +
    +
    +CU_COREDUMP_SKIP_CONSTBANK_MEMORY = 32#
    +
    + +
    +
    +CU_COREDUMP_LIGHTWEIGHT_FLAGS = 47#
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuCoredumpGetAttribute(attrib: CUcoredumpSettings)#
    +

    Allows caller to fetch a coredump attribute value for the current context.

    +

    Returns in *value the requested value specified by attrib. It is up to the caller to ensure that the data type and size of *value match the request.

    +

    If the caller calls this function with *value equal to NULL, the size +of the memory region (in bytes) expected for attrib will be placed in +size.

    +

    The supported attributes are:

    +
      +
    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where +true means that GPU exceptions from this context will +create a coredump at the location specified by +CU_COREDUMP_FILE. The default value is false +unless set to true globally or locally, or the +CU_CTX_USER_COREDUMP_ENABLE flag was set during context creation.

    • +
    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5 - raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • +
    • CU_COREDUMP_LIGHTWEIGHT: Bool where true +means that any resulting coredumps will not have a dump of GPU memory +or non-reloc ELF images. The default value is false +unless set to true globally or locally. This attribute is +deprecated as of CUDA 12.5, please use +CU_COREDUMP_GENERATION_FLAGS instead.

    • +
    • CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where +true means that a coredump can be created by writing to +the system pipe specified by CU_COREDUMP_PIPE. The +default value is false unless set to true +globally or locally.

    • +
    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • +
    • CU_COREDUMP_PIPE: String of up to 1023 characters that +defines the name of the pipe that will be monitored if user-triggered +coredumps are enabled. The default value is +corepipe.cuda.HOSTNAME.PID where HOSTNAME is +the host name of the machine running the CUDA application and +PID is the process ID of the CUDA application.

    • +
    • CU_COREDUMP_GENERATION_FLAGS: An integer with values that allow granular control of the data contained in a coredump, specified as a bitwise OR combination of the following values:

      + +
    • +
    +
    +
    Parameters:
    +
      +
    • attrib (CUcoredumpSettings) – The enum defining which value to fetch.

    • +
    • size (int) – The size of the memory region value points to.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.driver.cuCoredumpGetAttributeGlobal(attrib: CUcoredumpSettings)#
    +

    Allows caller to fetch a coredump attribute value for the entire application.

    +

    Returns in *value the requested value specified by attrib. It is up to the caller to ensure that the data type and size of *value match the request.

    +

    If the caller calls this function with *value equal to NULL, the size +of the memory region (in bytes) expected for attrib will be placed in +size.

    +

    The supported attributes are:

    +
      +
    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where +true means that GPU exceptions from this context will +create a coredump at the location specified by +CU_COREDUMP_FILE. The default value is false.

    • +
    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5 - raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • +
    • CU_COREDUMP_LIGHTWEIGHT: Bool where true +means that any resulting coredumps will not have a dump of GPU memory +or non-reloc ELF images. The default value is false. This +attribute is deprecated as of CUDA 12.5, please use +CU_COREDUMP_GENERATION_FLAGS instead.

    • +
    • CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where +true means that a coredump can be created by writing to +the system pipe specified by CU_COREDUMP_PIPE. The +default value is false.

    • +
    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • +
    • CU_COREDUMP_PIPE: String of up to 1023 characters that +defines the name of the pipe that will be monitored if user-triggered +coredumps are enabled. The default value is +corepipe.cuda.HOSTNAME.PID where HOSTNAME is +the host name of the machine running the CUDA application and +PID is the process ID of the CUDA application.

    • +
    • CU_COREDUMP_GENERATION_FLAGS: An integer with values that allow granular control of the data contained in a coredump, specified as a bitwise OR combination of the following values:

      + +
    • +
    +
    +
    Parameters:
    +
      +
    • attrib (CUcoredumpSettings) – The enum defining which value to fetch.

    • +
    • size (int) – The size of the memory region value points to.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
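    A minimal sketch of querying a global coredump setting follows. It assumes the usual convention of these bindings, where each call returns the CUresult followed by its output values; check the signature of your installed version.

        from cuda.bindings import driver

        # Assumed: the binding returns (CUresult, value) for this getter.
        err, value = driver.cuCoredumpGetAttributeGlobal(
            driver.CUcoredumpSettings.CU_COREDUMP_ENABLE_ON_EXCEPTION)
        if err == driver.CUresult.CUDA_SUCCESS:
            print("exception-triggered coredumps enabled:", value)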
    cuda.bindings.driver.cuCoredumpSetAttribute(attrib: CUcoredumpSettings, value)#

    Allows caller to set a coredump attribute value for the current context.

    This function should be considered an alternate interface to the CUDA-GDB environment variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump

    An important design decision to note is that any coredump environment variable values set before CUDA initializes will take permanent precedence over any values set with this function. This decision was made to ensure no change in behavior for any users that may be currently using these variables to get coredumps.

    *value shall contain the requested value specified by set. It is up to the caller to ensure that the data type and size of *value matches the request.

    If the caller calls this function with *value equal to NULL, the size of the memory region (in bytes) expected for set will be placed in size.

    Note: This function will return CUDA_ERROR_NOT_SUPPORTED if the caller attempts to set CU_COREDUMP_ENABLE_ON_EXCEPTION on a GPU with Compute Capability < 6.0. cuCoredumpSetAttributeGlobal works on those platforms as an alternative.

    Note: CU_COREDUMP_ENABLE_USER_TRIGGER and CU_COREDUMP_PIPE cannot be set on a per-context basis.

    The supported attributes are:

    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where true means that GPU exceptions from this context will create a coredump at the location specified by CU_COREDUMP_FILE. The default value is false.

    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5; raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • CU_COREDUMP_LIGHTWEIGHT: Bool where true means that any resulting coredumps will not have a dump of GPU memory or non-reloc ELF images. The default value is false. This attribute is deprecated as of CUDA 12.5; please use CU_COREDUMP_GENERATION_FLAGS instead.

    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_GENERATION_FLAGS: An integer that allows granular control over the data contained in a coredump, specified as a bitwise OR combination of the supported coredump generation flags.

    Parameters:

    • attrib (CUcoredumpSettings) – The enum defining which value to set.

    • value (Any) – void* containing the requested data.

    • size (int) – The size of the memory region value points to.

    Returns:
    cuda.bindings.driver.cuCoredumpSetAttributeGlobal(attrib: CUcoredumpSettings, value)#

    Allows caller to set a coredump attribute value globally.

    This function should be considered an alternate interface to the CUDA-GDB environment variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump

    An important design decision to note is that any coredump environment variable values set before CUDA initializes will take permanent precedence over any values set with this function. This decision was made to ensure no change in behavior for any users that may be currently using these variables to get coredumps.

    *value shall contain the requested value specified by set. It is up to the caller to ensure that the data type and size of *value matches the request.

    If the caller calls this function with *value equal to NULL, the size of the memory region (in bytes) expected for set will be placed in size.

    The supported attributes are:

    • CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where true means that GPU exceptions from this context will create a coredump at the location specified by CU_COREDUMP_FILE. The default value is false.

    • CU_COREDUMP_TRIGGER_HOST: Bool where true means that the host CPU will also create a coredump. The default value is true unless set to false globally or locally. This value is deprecated as of CUDA 12.5; raise the CU_COREDUMP_SKIP_ABORT flag to disable host device abort() if needed.

    • CU_COREDUMP_LIGHTWEIGHT: Bool where true means that any resulting coredumps will not have a dump of GPU memory or non-reloc ELF images. The default value is false. This attribute is deprecated as of CUDA 12.5; please use CU_COREDUMP_GENERATION_FLAGS instead.

    • CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where true means that a coredump can be created by writing to the system pipe specified by CU_COREDUMP_PIPE. The default value is false.

    • CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where any coredumps generated by this context will be written. The default value is core.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe that will be monitored if user-triggered coredumps are enabled. This value may not be changed after CU_COREDUMP_ENABLE_USER_TRIGGER is set to true. The default value is corepipe.cuda.HOSTNAME.PID where HOSTNAME is the host name of the machine running the CUDA application and PID is the process ID of the CUDA application.

    • CU_COREDUMP_GENERATION_FLAGS: An integer that allows granular control over the data contained in a coredump, specified as a bitwise OR combination of the supported coredump generation flags.

    Parameters:

    • attrib (CUcoredumpSettings) – The enum defining which value to set.

    • value (Any) – void* containing the requested data.

    • size (int) – The size of the memory region value points to.

    Returns:
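    Below is a hedged sketch of enabling exception-triggered coredumps process-wide; how the boolean is marshalled through the value argument is an assumption here, so verify it against your installed binding.

        from cuda.bindings import driver

        # Assumption: a Python bool is accepted for the void*-style value.
        err, = driver.cuCoredumpSetAttributeGlobal(
            driver.CUcoredumpSettings.CU_COREDUMP_ENABLE_ON_EXCEPTION, True)
        assert err == driver.CUresult.CUDA_SUCCESS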

    Green Contexts#

    This section describes the APIs for creation and manipulation of green contexts in the CUDA driver. Green contexts are a lightweight alternative to traditional contexts, with the ability to pass in a set of resources that they should be initialized with. This allows the developer to represent distinct spatial partitions of the GPU, provision resources for them, and target them via the same programming model that CUDA exposes (streams, kernel launches, etc.).

    There are four main steps to using this new set of APIs, illustrated by the sketch that follows this list.

    1. Start with an initial set of resources, for example via cuDeviceGetDevResource. Only the SM type is supported today.

    2. Partition this set of resources by providing them as input to a partition API, for example: cuDevSmResourceSplitByCount.

    3. Finalize the specification of resources by creating a descriptor via cuDevResourceGenerateDesc.

    4. Provision the resources and create a green context via cuGreenCtxCreate.
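    A minimal end-to-end sketch of those four steps is shown below. It is not an official sample: the tuple layout assumed for cuDevSmResourceSplitByCount (result groups, updated group count, remainder) follows the bindings' usual output-parameter convention, and the group and SM counts are illustrative only.

        from cuda.bindings import driver

        def check(err):
            # Every binding call returns CUresult first; fail fast on errors.
            if err != driver.CUresult.CUDA_SUCCESS:
                raise RuntimeError(f"CUDA error: {err}")

        check(driver.cuInit(0)[0])
        err, dev = driver.cuDeviceGet(0)
        check(err)

        # Step 1: query the device's SM resource (the only type supported today).
        err, sm_res = driver.cuDeviceGetDevResource(
            dev, driver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM)
        check(err)

        # Step 2: split into up to two groups of at least 8 SMs each
        # (assumed tuple layout, see note above).
        err, groups, nb_groups, remainder = driver.cuDevSmResourceSplitByCount(
            2, sm_res, 0, 8)
        check(err)

        # Step 3: generate a descriptor from the first group.
        err, desc = driver.cuDevResourceGenerateDesc([groups[0]], 1)
        check(err)

        # Step 4: create the green context; the default-stream flag is required.
        err, green_ctx = driver.cuGreenCtxCreate(
            desc, dev, driver.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM)
        check(err)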

    For CU_DEV_RESOURCE_TYPE_SM, the partitions created have minimum SM count requirements, often rounding up and aligning the minCount provided to cuDevSmResourceSplitByCount. The following is a guideline for each architecture and may be subject to change:

    • On Compute Architecture 6.X: The minimum count is 1 SM.

    • On Compute Architecture 7.X: The minimum count is 2 SMs and must be a multiple of 2.

    • On Compute Architecture 8.X: The minimum count is 4 SMs and must be a multiple of 2.

    • On Compute Architecture 9.0+: The minimum count is 8 SMs and must be a multiple of 8.

    In the future, flags can be provided to trade off functional and performance characteristics versus finer-grained SM partitions.

    Even if the green contexts have disjoint SM partitions, it is not guaranteed that the kernels launched in them will run concurrently or have forward progress guarantees. This is due to other resources (like HW connections, see ::CUDA_DEVICE_MAX_CONNECTIONS) that could cause a dependency. Additionally, in certain scenarios, it is possible for the workload to run on more SMs than were provisioned (but never fewer). The following are two scenarios which can exhibit this behavior:

    • On Volta+ MPS: When CUDA_MPS_ACTIVE_THREAD_PERCENTAGE is used, the set of SMs that are used for running kernels can be scaled up to the value of SMs used for the MPS client.

    • On Compute Architecture 9.x: When a module with dynamic parallelism (CDP) is loaded, all future kernels running under green contexts may use and share an additional set of 2 SMs.
    class cuda.bindings.driver.CUdevSmResource_st(void_ptr _ptr=0)#

        smCount#

            The number of streaming multiprocessors available in this resource. This is an output parameter only; do not write to this field.

            Type: unsigned int

        getPtr()#

            Get memory address of class instance

    class cuda.bindings.driver.CUdevResource_st(void_ptr _ptr=0)#

        type#

            Type of resource, dictates which union field was last set

            Type: CUdevResourceType

        _internal_padding#

            Type: bytes

        sm#

            Resource corresponding to the CU_DEV_RESOURCE_TYPE_SM type.

            Type: CUdevSmResource

        _oversize#

            Type: bytes

        getPtr()#

            Get memory address of class instance

    class cuda.bindings.driver.CUdevSmResource#

        smCount#

            The number of streaming multiprocessors available in this resource. This is an output parameter only; do not write to this field.

            Type: unsigned int

        getPtr()#

            Get memory address of class instance

    class cuda.bindings.driver.CUdevResource#

        type#

            Type of resource, dictates which union field was last set

            Type: CUdevResourceType

        _internal_padding#

            Type: bytes

        sm#

            Resource corresponding to the CU_DEV_RESOURCE_TYPE_SM type.

            Type: CUdevSmResource

        _oversize#

            Type: bytes

        getPtr()#

            Get memory address of class instance

    class cuda.bindings.driver.CUgreenCtxCreate_flags(value)#

        CU_GREEN_CTX_DEFAULT_STREAM = 1#

            Required. Creates a default stream to use inside the green context

    class cuda.bindings.driver.CUdevSmResourceSplit_flags(value)#

        CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING = 1#

        CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE = 2#

    class cuda.bindings.driver.CUdevResourceType(value)#

        Type of resource

        CU_DEV_RESOURCE_TYPE_INVALID = 0#

        CU_DEV_RESOURCE_TYPE_SM = 1#

            Streaming multiprocessors related information

    class cuda.bindings.driver.CUdevResourceDesc(*args, **kwargs)#

        An opaque descriptor handle. The descriptor encapsulates multiple created and configured resources. Created via cuDevResourceGenerateDesc

        getPtr()#

            Get memory address of class instance
    cuda.bindings.driver.cuGreenCtxCreate(desc, dev, unsigned int flags)#

    Creates a green context with a specified set of resources.

    This API creates a green context with the resources specified in the descriptor desc and returns it in the handle represented by phCtx. This API will retain the primary context on device dev, which is released when the green context is destroyed. It is advised to have the primary context active before calling this API to avoid the heavy cost of triggering primary context initialization and deinitialization multiple times.

    The API does not set the green context current. In order to set it current, you need to explicitly set it current by first converting the green context to a CUcontext using cuCtxFromGreenCtx and subsequently calling cuCtxSetCurrent / cuCtxPushCurrent. It should be noted that a green context can be current to only one thread at a time. There is no internal synchronization to make API calls accessing the same green context from multiple threads work.

    Note: The API is not supported on 32-bit platforms.

    The supported flags are:

    • CU_GREEN_CTX_DEFAULT_STREAM: Creates a default stream to use inside the green context. Required.

    Parameters:

    • desc (CUdevResourceDesc) – Descriptor generated via cuDevResourceGenerateDesc which contains the set of resources to be used

    • dev (CUdevice) – Device on which to create the green context.

    • flags (unsigned int) – One of the supported green context creation flags. CU_GREEN_CTX_DEFAULT_STREAM is required.

    Returns:
    cuda.bindings.driver.cuGreenCtxDestroy(hCtx)#

    Destroys a green context.

    Destroys the green context, releasing the primary context of the device that this green context was created for. Any resources provisioned for this green context (that were initially available via the resource descriptor) are released as well.

    Parameters:

    hCtx (CUgreenCtx) – Green context to be destroyed

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_CONTEXT_IS_DESTROYED

    Return type:

    CUresult
    cuda.bindings.driver.cuCtxFromGreenCtx(hCtx)#

    Converts a green context into the primary context.

    The API converts a green context into the primary context returned in pContext. It is important to note that the converted context pContext is a normal primary context but with the resources of the specified green context hCtx. Once converted, it can then be used to set the context current with cuCtxSetCurrent or with any of the CUDA APIs that accept a CUcontext parameter.

    Users are expected to call this API before calling any CUDA APIs that accept a CUcontext. Failing to do so will result in the APIs returning CUDA_ERROR_INVALID_CONTEXT.

    Parameters:

    hCtx (CUgreenCtx) – Green context to convert

    Returns:

    See also

    cuGreenCtxCreate
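    For instance, a minimal sketch reusing the green_ctx handle and check helper from the earlier example:

        # Convert, then make current; subsequent launches and allocations on
        # this thread target the green context's provisioned resources.
        err, ctx = driver.cuCtxFromGreenCtx(green_ctx)
        check(err)
        check(driver.cuCtxSetCurrent(ctx)[0])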
    cuda.bindings.driver.cuDeviceGetDevResource(device, typename: CUdevResourceType)#

    Get device resources.

    Get the typename resources available to the device. This may often be the starting point for further partitioning or configuring of resources.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    Returns:

    cuda.bindings.driver.cuCtxGetDevResource(hCtx, typename: CUdevResourceType)#

    Get context resources.

    Get the typename resources available to the context represented by hCtx.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    Returns:

    cuda.bindings.driver.cuGreenCtxGetDevResource(hCtx, typename: CUdevResourceType)#

    Get green context resources.

    Get the typename resources available to the green context represented by hCtx.

    Parameters:

    Returns:
    cuda.bindings.driver.cuDevSmResourceSplitByCount(unsigned int nbGroups, CUdevResource input_: Optional[CUdevResource], unsigned int useFlags, unsigned int minCount)#

    Splits CU_DEV_RESOURCE_TYPE_SM resources.

    Splits CU_DEV_RESOURCE_TYPE_SM resources into nbGroups, adhering to the minimum SM count specified in minCount and the usage flags in useFlags. If result is NULL, the API simulates a split and provides the number of groups that would be created in nbGroups. Otherwise, nbGroups must point to the number of elements in result and, on return, the API will overwrite nbGroups with the number actually created. The groups are written to the array in result. nbGroups can be less than the total number if a smaller number of groups is needed.

    This API is used to spatially partition the input resource. The input resource needs to come from one of cuDeviceGetDevResource, cuCtxGetDevResource, or cuGreenCtxGetDevResource. A limitation of the API is that the output results cannot be split again without first creating a descriptor and a green context with that descriptor.

    When creating the groups, the API will take into account the performance and functional characteristics of the input resource, and guarantee a split that will create a disjoint set of symmetrical partitions. This may lead to fewer groups created than purely dividing the total SM count by the minCount, due to cluster requirements or alignment and granularity requirements for the minCount.

    The remainder set does not have the same functional or performance guarantees as the groups in result. Its use should be carefully planned, and future partitions of the remainder set are discouraged.

    The following flags are supported:

    • CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING: Lower the minimum SM count and alignment, and treat each SM independent of its hierarchy. This allows more fine-grained partitions, but at the cost of advanced features (such as large clusters on compute capability 9.0+).

    • CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE: Compute Capability 9.0+ only. Attempt to create groups that may allow for maximally sized thread clusters. This can be queried post green context creation using cuOccupancyMaxPotentialClusterSize.

    A successful API call must either have:

    • A valid array of result pointers of size passed in nbGroups, with input of type CU_DEV_RESOURCE_TYPE_SM. Value of minCount must be between 0 and the SM count specified in input. remaining may be NULL.

    • NULL passed in for result, with a valid integer pointer in nbGroups and input of type CU_DEV_RESOURCE_TYPE_SM. Value of minCount must be between 0 and the SM count specified in input. remaining may be NULL. This queries the number of groups that would be created by the API.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    • nbGroups (unsigned int) – This is a pointer, specifying the number of groups that would be or should be created as described below.

    • input (CUdevResource) – Input SM resource to be split. Must be a valid CU_DEV_RESOURCE_TYPE_SM resource.

    • useFlags (unsigned int) – Flags specifying how these partitions are used or which constraints to abide by when splitting the input. Zero is valid for default behavior.

    • minCount (unsigned int) – Minimum number of SMs required

    Returns:
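    A hedged sketch of the query-then-split pattern described above follows; whether passing 0 for nbGroups triggers the simulated split in the Python binding (the C API's result=NULL case) is an assumption to verify against your version.

        # Assumption: nbGroups=0 performs the "simulate" call and returns
        # the group count a real split would produce.
        err, _, nb_groups, _ = driver.cuDevSmResourceSplitByCount(0, sm_res, 0, 8)
        check(err)
        err, groups, nb_groups, remainder = driver.cuDevSmResourceSplitByCount(
            nb_groups, sm_res, 0, 8)
        check(err)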
    cuda.bindings.driver.cuDevResourceGenerateDesc(resources: Optional[Tuple[CUdevResource] | List[CUdevResource]], unsigned int nbResources)#

    Generate a resource descriptor.

    Generates a single resource descriptor with the set of resources specified in resources. The generated resource descriptor is necessary for the creation of green contexts via the cuGreenCtxCreate API. Resources of the same type can be passed in, provided they meet the requirements as noted below.

    A successful API call must have:

    • A valid output pointer for the phDesc descriptor as well as a valid array of resources pointers, with the array size passed in nbResources. If multiple resources are provided in resources, the device they came from must be the same, otherwise CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION is returned. If multiple resources are provided in resources and they are of type CU_DEV_RESOURCE_TYPE_SM, they must be outputs (whether result or remaining) from the same split API instance, otherwise CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION is returned.

    Note: The API is not supported on 32-bit platforms.

    Parameters:

    • resources (List[CUdevResource]) – Array of resources to be included in the descriptor

    • nbResources (unsigned int) – Number of resources passed in resources

    Returns:
    cuda.bindings.driver.cuGreenCtxRecordEvent(hCtx, hEvent)#

    Records an event.

    Captures in hEvent all the activities of the green context of hCtx at the time of this call. hEvent and hCtx must be from the same primary context, otherwise CUDA_ERROR_INVALID_HANDLE is returned. Calls such as cuEventQuery() or cuGreenCtxWaitEvent() will then examine or wait for completion of the work that was captured. Uses of hCtx after this call do not modify hEvent.

    Parameters:

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED if the specified green context hCtx has a stream in the capture mode. In such a case, the call will invalidate all the conflicting captures.
    cuda.bindings.driver.cuGreenCtxWaitEvent(hCtx, hEvent)#

    Make a green context wait on an event.

    Makes all future work submitted to green context hCtx wait for all work captured in hEvent. The synchronization will be performed on the device and will not block the calling CPU thread. See cuGreenCtxRecordEvent() or cuEventRecord() for details on what is captured by an event.

    Parameters:

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_DEINITIALIZED, CUDA_ERROR_NOT_INITIALIZED, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED

    Return type:

    CUresult

    Notes

    hEvent may be from a different context or device than hCtx.

    The API will return CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED and invalidate the capture if the specified event hEvent is part of an ongoing capture sequence or if the specified green context hCtx has a stream in the capture mode.
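    A short sketch of the record/wait pairing (producer_green_ctx and consumer_green_ctx are hypothetical handles created as shown earlier):

        # Capture the producer's work in a timing-disabled event, then make
        # the consumer green context wait on it without blocking the CPU.
        err, event = driver.cuEventCreate(
            driver.CUevent_flags.CU_EVENT_DISABLE_TIMING)
        check(err)
        check(driver.cuGreenCtxRecordEvent(producer_green_ctx, event)[0])
        check(driver.cuGreenCtxWaitEvent(consumer_green_ctx, event)[0])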
    cuda.bindings.driver.cuStreamGetGreenCtx(hStream)#

    Query the green context associated with a stream.

    Returns the CUDA green context that the stream is associated with, or NULL if the stream is not associated with any green context.

    The stream handle hStream can refer to any of the following:

    Passing an invalid handle will result in undefined behavior.

    Parameters:

    hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    cuda.bindings.driver.cuGreenCtxStreamCreate(greenCtx, unsigned int flags, int priority)#

    Create a stream for use in the green context.

    Creates a stream for use in the specified green context greenCtx and returns a handle in phStream. The stream can be destroyed by calling cuStreamDestroy(). Note that the API ignores the context that is current to the calling thread and creates a stream in the specified green context greenCtx.

    The supported values for flags are:

    • CU_STREAM_NON_BLOCKING: This must be specified. It indicates that work running in the created stream may run concurrently with work in the default stream, and that the created stream should perform no implicit synchronization with the default stream.

    Specifying priority affects the scheduling priority of work in the stream. Priorities provide a hint to preferentially run work with higher priority when possible, but do not preempt already-running work or provide any other functional guarantee on execution order. priority follows a convention where lower numbers represent higher priorities. ‘0’ represents default priority. The range of meaningful numerical priorities can be queried using cuCtxGetStreamPriorityRange. If the specified priority is outside the numerical range returned by cuCtxGetStreamPriorityRange, it will automatically be clamped to the lowest or the highest number in the range.

    Parameters:

    • greenCtx (CUgreenCtx) – Green context in which to create the stream

    • flags (unsigned int) – Flags for stream creation. CU_STREAM_NON_BLOCKING must be specified.

    • priority (int) – Stream priority. Lower numbers represent higher priorities. See cuCtxGetStreamPriorityRange for more information about meaningful stream priorities that can be passed.

    Returns:

    Notes

    In the current implementation, only compute kernels launched in priority streams are affected by the stream’s priority. Stream priorities have no effect on host-to-device and device-to-host memory operations.
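    For example, reusing the green_ctx handle from the earlier sketch:

        # A non-blocking stream bound to the green context, default priority 0.
        err, stream = driver.cuGreenCtxStreamCreate(
            green_ctx, driver.CUstream_flags.CU_STREAM_NON_BLOCKING, 0)
        check(err)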
    driver.RESOURCE_ABI_VERSION = 1#

    driver.RESOURCE_ABI_EXTERNAL_BYTES = 48#

    EGL Interoperability#

    This section describes the EGL interoperability functions of the low-level CUDA driver application programming interface.

    cuda.bindings.driver.cuGraphicsEGLRegisterImage(image, unsigned int flags)#

    Registers an EGL image.

    Registers the EGLImageKHR specified by image for access by CUDA. A handle to the registered object is returned as pCudaResource. Additional mapping/unmapping is not required for the registered resource and cuGraphicsResourceGetMappedEglFrame can be directly called on the pCudaResource.

    The application will be responsible for synchronizing access to shared objects. The application must ensure that any pending operations which access the objects have completed before passing control to CUDA. This may be accomplished by issuing and waiting for the glFinish command on all GLcontexts (for OpenGL and likewise for other APIs). The application will also be responsible for ensuring that any pending operation on the registered CUDA resource has completed prior to executing subsequent commands in other APIs accessing the same memory objects. This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).

    The surface’s intended usage is specified using flags.

    The EGLImageKHR is an object which can be used to create an EGLImage target resource. It is defined as a void pointer: typedef void* EGLImageKHR.

    Parameters:

    • image (EGLImageKHR) – An EGLImageKHR image which can be used to create target resource.

    • flags (unsigned int) – Map flags

    Returns:
    cuda.bindings.driver.cuEGLStreamConsumerConnect(stream)#

    Connect CUDA to EGLStream as a consumer.

    Connect CUDA as a consumer to the EGLStreamKHR specified by stream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:

    stream (EGLStreamKHR) – EGLStreamKHR handle

    Returns:
    cuda.bindings.driver.cuEGLStreamConsumerConnectWithFlags(stream, unsigned int flags)#

    Connect CUDA to EGLStream as a consumer with given flags.

    Connect CUDA as a consumer to the EGLStreamKHR specified by stream, with the flags defined by CUeglResourceLocationFlags.

    The flags specify whether the consumer wants to access frames from system memory or video memory. The default is CU_EGL_RESOURCE_LOCATION_VIDMEM.

    Parameters:

    • stream (EGLStreamKHR) – EGLStreamKHR handle

    • flags (unsigned int) – Flags denoting the intended location - system or video.

    Returns:
    cuda.bindings.driver.cuEGLStreamConsumerDisconnect(conn)#

    Disconnect CUDA as a consumer from EGLStream.

    Disconnect CUDA as a consumer from EGLStreamKHR.

    Parameters:

    conn (CUeglStreamConnection) – Connection to disconnect.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult
    cuda.bindings.driver.cuEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, unsigned int timeout)#

    Acquire an image frame from the EGLStream with CUDA as a consumer.

    Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented by the producer unless explicitly disabled by setting the EGL_SUPPORT_REUSE_NV flag to EGL_FALSE during stream initialization. By default, EGLStream is created with this flag set to EGL_TRUE. cuGraphicsResourceGetMappedEglFrame can be called on pCudaResource to get the CUeglFrame.

    Parameters:

    • conn (CUeglStreamConnection) – Connection on which to acquire

    • pCudaResource (CUgraphicsResource) – CUDA resource on which the stream frame will be mapped for use.

    • pStream (CUstream) – CUDA stream for synchronization and any data migrations implied by CUeglResourceLocationFlags.

    • timeout (unsigned int) – Desired timeout in usec for a new frame to be acquired. If set to CUDA_EGL_INFINITE_TIMEOUT, acquire waits infinitely. After a timeout occurs, the CUDA consumer tries to acquire an old frame if available and the EGL_SUPPORT_REUSE_NV flag is set.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_LAUNCH_TIMEOUT

    Return type:

    CUresult
    cuda.bindings.driver.cuEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream)#

    Releases the last frame acquired from the EGLStream.

    Release the acquired image frame specified by pCudaResource to EGLStreamKHR. If the EGL_SUPPORT_REUSE_NV flag is set to EGL_TRUE at the time of EGL creation, this API doesn’t release the last frame acquired on the EGLStream. By default, EGLStream is created with this flag set to EGL_TRUE.

    Parameters:

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE

    Return type:

    CUresult
    cuda.bindings.driver.cuEGLStreamProducerConnect(stream, width, height)#

    Connect CUDA to EGLStream as a producer.

    Connect CUDA as a producer to the EGLStreamKHR specified by stream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:

    • stream (EGLStreamKHR) – EGLStreamKHR handle

    • width (EGLint) – width of the image to be submitted to the stream

    • height (EGLint) – height of the image to be submitted to the stream

    Returns:
    cuda.bindings.driver.cuEGLStreamProducerDisconnect(conn)#

    Disconnect CUDA as a producer from EGLStream.

    Disconnect CUDA as a producer from EGLStreamKHR.

    Parameters:

    conn (CUeglStreamConnection) – Connection to disconnect.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult
    cuda.bindings.driver.cuEGLStreamProducerPresentFrame(conn, CUeglFrame eglframe: CUeglFrame, pStream)#

    Present a CUDA eglFrame to the EGLStream with CUDA as a producer.

    When a frame is presented by the producer, it gets associated with the EGLStream and thus it is illegal to free the frame before the producer is disconnected. If a frame is freed and reused, it may lead to undefined behavior.

    If producer and consumer are on different GPUs (iGPU and dGPU), then the frametype CU_EGL_FRAME_TYPE_ARRAY is not supported. CU_EGL_FRAME_TYPE_PITCH can be used for such cross-device applications.

    The CUeglFrame is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    For a CUeglFrame of type CU_EGL_FRAME_TYPE_PITCH, the application may present a sub-region of a memory allocation. In that case, the pitched pointer will specify the start address of the sub-region in the allocation, and the corresponding CUeglFrame fields will specify the dimensions of the sub-region.

    Parameters:

    • conn (CUeglStreamConnection) – Connection on which to present the CUDA array

    • eglframe (CUeglFrame) – CUDA EGLStream Producer Frame handle to be sent to the consumer over EGLStream.

    • pStream (CUstream) – CUDA stream on which to present the frame.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE

    Return type:

    CUresult
    cuda.bindings.driver.cuEGLStreamProducerReturnFrame(conn, CUeglFrame eglframe: Optional[CUeglFrame], pStream)#

    Return the CUDA eglFrame to the EGLStream released by the consumer.

    This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not returned a frame to the EGL stream. If a timeout is returned, the application can retry.

    Parameters:

    • conn (CUeglStreamConnection) – Connection on which to return

    • eglframe (CUeglFrame) – CUDA EGLStream Producer Frame handle returned from the consumer over EGLStream.

    • pStream (CUstream) – CUDA stream on which to return the frame.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_HANDLE, CUDA_ERROR_LAUNCH_TIMEOUT

    Return type:

    CUresult
    cuda.bindings.driver.cuGraphicsResourceGetMappedEglFrame(resource, unsigned int index, unsigned int mipLevel)#

    Get an eglFrame through which to access a registered EGL graphics resource.

    Returns in *eglFrame an eglFrame pointer through which the registered graphics resource resource may be accessed. This API can only be called for registered EGL graphics resources.

    The CUeglFrame is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If resource is not registered then CUDA_ERROR_NOT_MAPPED is returned.

    Parameters:

    • resource (CUgraphicsResource) – None

    • index (unsigned int) – None

    • mipLevel (unsigned int) – None

    Returns:
    cuda.bindings.driver.cuEventCreateFromEGLSync(eglSync, unsigned int flags)#

    Creates an event from an EGLSync object.

    Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified via flags. Valid flags include:

    • CU_EVENT_DEFAULT: Default event creation flag.

    • CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking synchronization. A CPU thread that uses cuEventSynchronize() to wait on an event created with this flag will block until the event has actually been completed.

    Once the eglSync gets destroyed, cuEventDestroy is the only API that can be invoked on the event.

    cuEventRecord and TimingData are not supported for events created from EGLSync.

    The EGLSyncKHR is an opaque handle to an EGL sync object: typedef void* EGLSyncKHR.

    Parameters:

    • eglSync (EGLSyncKHR) – Opaque handle to EGLSync object

    • flags (unsigned int) – Event creation flags

    Returns:

    OpenGL Interoperability#

    This section describes the OpenGL interoperability functions of the low-level CUDA driver application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability.

    class cuda.bindings.driver.CUGLDeviceList(value)#

    CUDA devices corresponding to an OpenGL device

        CU_GL_DEVICE_LIST_ALL = 1#

            The CUDA devices for all GPUs used by the current OpenGL context

        CU_GL_DEVICE_LIST_CURRENT_FRAME = 2#

            The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame

        CU_GL_DEVICE_LIST_NEXT_FRAME = 3#

            The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame
    cuda.bindings.driver.cuGraphicsGLRegisterBuffer(buffer, unsigned int Flags)#

    Registers an OpenGL buffer object.

    Registers the buffer object specified by buffer for access by CUDA. A handle to the registered object is returned as pCudaResource. The register flags Flags specify the intended usage.

    Parameters:

    • buffer (GLuint) – name of buffer object to be registered

    • Flags (unsigned int) – Register flags

    Returns:
    cuda.bindings.driver.cuGraphicsGLRegisterImage(image, target, unsigned int Flags)#

    Register an OpenGL texture or renderbuffer object.

    Registers the texture or renderbuffer object specified by image for access by CUDA. A handle to the registered object is returned as pCudaResource.

    target must match the type of the object, and must be one of GL_TEXTURE_2D, GL_TEXTURE_RECTANGLE, GL_TEXTURE_CUBE_MAP, GL_TEXTURE_3D, GL_TEXTURE_2D_ARRAY, or GL_RENDERBUFFER.

    The register flags Flags specify the intended usage.

    The following image formats are supported. For brevity’s sake, the list is abbreviated. For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats {GL_R8, GL_R16, GL_RG8, GL_RG16}:

    • GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY

    • {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}

    • {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}

    The following image classes are currently disallowed:

    • Textures with borders

    • Multisampled renderbuffers

    Parameters:

    • image (GLuint) – name of texture or renderbuffer object to be registered

    • target (GLenum) – Identifies the type of object specified by image

    • Flags (unsigned int) – Register flags

    Returns:
    cuda.bindings.driver.cuGLGetDevices(unsigned int cudaDeviceCount, deviceList: CUGLDeviceList)#

    Gets the CUDA devices associated with the current OpenGL context.

    Returns in *pCudaDeviceCount the number of CUDA-compatible devices corresponding to the current OpenGL context. Also returns in *pCudaDevices at most cudaDeviceCount of the CUDA-compatible devices corresponding to the current OpenGL context. If any of the GPUs being used by the current OpenGL context are not CUDA capable, then the call will return CUDA_ERROR_NO_DEVICE.

    The deviceList argument may be any of the following: CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context. CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to render the current frame (in SLI). CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to render the next frame (in SLI). Note that this is a prediction and it is not guaranteed to be correct in all cases.

    Parameters:

    • cudaDeviceCount (unsigned int) – The size of the output device array pCudaDevices.

    • deviceList (CUGLDeviceList) – The set of devices to return.

    Returns:

    • CUresult – CUDA_SUCCESS, CUDA_ERROR_NO_DEVICE, CUDA_ERROR_INVALID_VALUE, CUDA_ERROR_INVALID_CONTEXT, CUDA_ERROR_INVALID_GRAPHICS_CONTEXT

    • pCudaDeviceCount (unsigned int) – Returned number of CUDA devices.

    • pCudaDevices (List[CUdevice]) – Returned CUDA devices.

    See also

    cudaGLGetDevices

    Notes

    This function is not supported on Mac OS X.
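    A hedged sketch, assuming an OpenGL context is already current on the calling thread; the three-element return follows the Returns list above.

        from cuda.bindings import driver

        err, count, devices = driver.cuGLGetDevices(
            8, driver.CUGLDeviceList.CU_GL_DEVICE_LIST_ALL)
        if err == driver.CUresult.CUDA_SUCCESS:
            print(f"{count} CUDA device(s) back the current OpenGL context")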

    Profiler Control#

    This section describes the profiler control functions of the low-level CUDA driver application programming interface.

    cuda.bindings.driver.cuProfilerStart()#

    Enable profiling.

    Enables profile collection by the active profiling tool for the current context. If profiling is already enabled, then cuProfilerStart() has no effect.

    The cuProfilerStart and cuProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selective pieces of code.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult

    See also

    cuProfilerInitialize, cuProfilerStop, cudaProfilerStart
    cuda.bindings.driver.cuProfilerStop()#

    Disable profiling.

    Disables profile collection by the active profiling tool for the current context. If profiling is already disabled, then cuProfilerStop() has no effect.

    The cuProfilerStart and cuProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selective pieces of code.

    Returns:

    CUDA_SUCCESS, CUDA_ERROR_INVALID_CONTEXT

    Return type:

    CUresult

    See also

    cuProfilerInitialize, cuProfilerStart, cudaProfilerStop
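    A minimal sketch of bracketing a region of interest (launch_workload is a hypothetical stand-in for the launches you want profiled, run under an active context):

        from cuda.bindings import driver

        err, = driver.cuProfilerStart()
        launch_workload()
        err, = driver.cuProfilerStop()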

    VDPAU Interoperability#

    This section describes the VDPAU interoperability functions of the low-level CUDA driver application programming interface.

    cuda.bindings.driver.cuVDPAUGetDevice(vdpDevice, vdpGetProcAddress)#

    Gets the CUDA device associated with a VDPAU device.

    Returns in *pDevice the CUDA device associated with a vdpDevice, if applicable.

    Parameters:

    • vdpDevice (VdpDevice) – A VdpDevice handle

    • vdpGetProcAddress (VdpGetProcAddress) – VDPAU’s VdpGetProcAddress function pointer

    Returns:
    cuda.bindings.driver.cuVDPAUCtxCreate(unsigned int flags, device, vdpDevice, vdpGetProcAddress)#

    Create a CUDA context for interoperability with VDPAU.

    Creates a new CUDA context, initializes VDPAU interoperability, and associates the CUDA context with the calling thread. It must be called before performing any other VDPAU interoperability operations. It may fail if the needed VDPAU driver facilities are not available. For usage of the flags parameter, see cuCtxCreate().

    Parameters:

    • flags (unsigned int) – Options for CUDA context creation

    • device (CUdevice) – Device on which to create the context

    • vdpDevice (VdpDevice) – The VdpDevice to interop with

    • vdpGetProcAddress (VdpGetProcAddress) – VDPAU’s VdpGetProcAddress function pointer

    Returns:
    cuda.bindings.driver.cuGraphicsVDPAURegisterVideoSurface(vdpSurface, unsigned int flags)#

    Registers a VDPAU VdpVideoSurface object.

    Registers the VdpVideoSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as pCudaResource. The surface’s intended usage is specified using flags.

    The VdpVideoSurface is presented as an array of subresources that may be accessed using pointers returned by cuGraphicsSubResourceGetMappedArray. The exact number of valid arrayIndex values depends on the VDPAU surface format; mipLevel must be 0.

    Parameters:

    • vdpSurface (VdpVideoSurface) – The VdpVideoSurface to be registered

    • flags (unsigned int) – Map flags

    Returns:
    cuda.bindings.driver.cuGraphicsVDPAURegisterOutputSurface(vdpSurface, unsigned int flags)#

    Registers a VDPAU VdpOutputSurface object.

    Registers the VdpOutputSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as pCudaResource. The surface’s intended usage is specified using flags.

    The VdpOutputSurface is presented as an array of subresources that may be accessed using pointers returned by cuGraphicsSubResourceGetMappedArray. The exact number of valid arrayIndex values depends on the VDPAU surface format; mipLevel must be 0.

    Parameters:

    • vdpSurface (VdpOutputSurface) – The VdpOutputSurface to be registered

    • flags (unsigned int) – Map flags

    Returns:
diff --git a/docs/module/nvrtc.html b/docs/module/nvrtc.html
index 01d479e4..d4321f1f 100644
--- a/docs/module/nvrtc.html
+++ b/docs/module/nvrtc.html
-    nvrtc - CUDA Python 12.6.0 documentation
+    nvrtc - CUDA Python 12.6.1 documentation

nvrtc#

Error Handling#

NVRTC defines the following enumeration type and function for API call error handling.

-class cuda.nvrtc.nvrtcResult(value)#
+class cuda.bindings.nvrtc.nvrtcResult(value)#

    The enumerated type nvrtcResult defines API call result codes. NVRTC API functions return nvrtcResult to indicate the call result.

    NVRTC_SUCCESS = 0#

    NVRTC_ERROR_OUT_OF_MEMORY = 1#

    NVRTC_ERROR_PROGRAM_CREATION_FAILURE = 2#

    NVRTC_ERROR_INVALID_INPUT = 3#

    NVRTC_ERROR_INVALID_PROGRAM = 4#

    NVRTC_ERROR_INVALID_OPTION = 5#

    NVRTC_ERROR_COMPILATION = 6#

    NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = 7#

    NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = 8#

    NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = 9#

    NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = 10#

    NVRTC_ERROR_INTERNAL_ERROR = 11#

    NVRTC_ERROR_TIME_FILE_WRITE_FAILED = 12#
-cuda.nvrtc.nvrtcGetErrorString(result: nvrtcResult)#
+cuda.bindings.nvrtc.nvrtcGetErrorString(result: nvrtcResult)#

    nvrtcGetErrorString is a helper function that returns a string describing the given nvrtcResult code, e.g., NVRTC_SUCCESS to “NVRTC_SUCCESS”. For unrecognized enumeration values, it returns “NVRTC_ERROR unknown”.

    Parameters:

    result (nvrtcResult) – CUDA Runtime Compilation API result code.

    Returns:

    • nvrtcResult.NVRTC_SUCCESS – nvrtcResult.NVRTC_SUCCESS

    • bytes – Message string for the given nvrtcResult code.
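    In Python, the result code is the first element of every returned tuple, so a small checker is a common pattern (a sketch, not part of the documented API):

        from cuda.bindings import nvrtc

        def check_nvrtc(err):
            # Raise with the human-readable message for any non-success code.
            if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
                _, msg = nvrtc.nvrtcGetErrorString(err)
                raise RuntimeError(f"NVRTC error: {msg.decode()}")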


General Information Query#

    NVRTC defines the following function for general information query.

    -
-cuda.nvrtc.nvrtcVersion()#
+cuda.bindings.nvrtc.nvrtcVersion()#

    nvrtcVersion sets the output parameters major and minor with the CUDA Runtime Compilation version number.

    Returns:
-cuda.nvrtc.nvrtcGetPTX(prog, char *ptx)#
+cuda.bindings.nvrtc.nvrtcGetPTX(prog, char *ptx)#

    nvrtcGetPTX stores the PTX generated by the previous compilation of prog in the memory pointed by ptx.

    Parameters:

    • prog (nvrtcProgram) – CUDA Runtime Compilation program.

    • ptx (bytes) – Compiled result.

    Returns:

    Return type:

    nvrtcResult
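    A minimal compile-to-PTX sketch follows. It leans on nvrtcCreateProgram, nvrtcCompileProgram and nvrtcGetPTXSize, which belong to this module even though they are not shown in this hunk; as in the C API, the caller allocates the output buffer.

        from cuda.bindings import nvrtc

        src = b'extern "C" __global__ void noop() {}'
        err, prog = nvrtc.nvrtcCreateProgram(src, b"noop.cu", 0, [], [])
        err, = nvrtc.nvrtcCompileProgram(prog, 1, [b"--gpu-architecture=compute_70"])
        err, size = nvrtc.nvrtcGetPTXSize(prog)
        ptx = b" " * size          # caller-allocated buffer, filled in place
        err, = nvrtc.nvrtcGetPTX(prog, ptx)
        err, = nvrtc.nvrtcDestroyProgram(prog)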

-cuda.nvrtc.nvrtcGetCUBINSize(prog)#
+cuda.bindings.nvrtc.nvrtcGetCUBINSize(prog)#

    nvrtcGetCUBINSize sets the value of cubinSizeRet with the size of the cubin generated by the previous compilation of prog. The value of cubinSizeRet is set to 0 if the value specified to -arch is a virtual architecture instead of an actual architecture.

    Parameters:

    prog (nvrtcProgram) – CUDA Runtime Compilation program.

    Returns:

-cuda.nvrtc.nvrtcGetCUBIN(prog, char *cubin)#
+cuda.bindings.nvrtc.nvrtcGetCUBIN(prog, char *cubin)#

    nvrtcGetCUBIN stores the cubin generated by the previous compilation of prog in the memory pointed by cubin. No cubin is available if the value specified to -arch is a virtual architecture instead of an actual architecture.

    Parameters:

    • prog (nvrtcProgram) – CUDA Runtime Compilation program.

    • cubin (bytes) – Compiled and assembled result.

    Returns:

    Return type:

    nvrtcResult
-cuda.nvrtc.nvrtcGetNVVMSize(prog)#
+cuda.bindings.nvrtc.nvrtcGetNVVMSize(prog)#

    DEPRECATION NOTICE: This function will be removed in a future release. Please use nvrtcGetLTOIRSize (and nvrtcGetLTOIR) instead.

    Parameters:

    prog (nvrtcProgram) – None

    Returns:

-cuda.nvrtc.nvrtcGetLTOIRSize(prog)#
+cuda.bindings.nvrtc.nvrtcGetLTOIRSize(prog)#

    nvrtcGetLTOIRSize sets the value of LTOIRSizeRet with the size of the LTO IR generated by the previous compilation of prog. The value of LTOIRSizeRet is set to 0 if the program was not compiled with -dlto.

    Parameters:

    prog (nvrtcProgram) – CUDA Runtime Compilation program.

    Returns:

-cuda.nvrtc.nvrtcGetLTOIR(prog, char *LTOIR)#
+cuda.bindings.nvrtc.nvrtcGetLTOIR(prog, char *LTOIR)#

    nvrtcGetLTOIR stores the LTO IR generated by the previous compilation of prog in the memory pointed by LTOIR. No LTO IR is available if the program was compiled without -dlto.

    Parameters:

    • prog (nvrtcProgram) – CUDA Runtime Compilation program.

    • LTOIR (bytes) – Compiled result.

    Returns:

    Return type:

    nvrtcResult
-cuda.nvrtc.nvrtcGetOptiXIRSize(prog)#
+cuda.bindings.nvrtc.nvrtcGetOptiXIRSize(prog)#

    nvrtcGetOptiXIRSize sets the value of optixirSizeRet with the size of the OptiX IR generated by the previous compilation of prog. The value of optixirSizeRet is set to 0 if the program was compiled with options incompatible with OptiX IR generation.

    Parameters:

    prog (nvrtcProgram) – CUDA Runtime Compilation program.

    Returns:

-cuda.nvrtc.nvrtcGetOptiXIR(prog, char *optixir)#
+cuda.bindings.nvrtc.nvrtcGetOptiXIR(prog, char *optixir)#

    nvrtcGetOptiXIR stores the OptiX IR generated by the previous compilation of prog in the memory pointed by optixir. No OptiX IR is available if the program was compiled with options incompatible with OptiX IR generation.

    Parameters:

    • prog (nvrtcProgram) – CUDA Runtime Compilation program.

    • optixir (bytes) – Compiled result.

    Returns:

    Return type:

    nvrtcResult
    cuda.bindings.nvrtc.nvrtcGetProgramLogSize(prog)#

    nvrtcGetProgramLogSize sets logSizeRet with the size of the log generated by the previous compilation of prog (including the trailing NULL).

    Note that the compilation log may be generated with warnings and informative messages, even when the compilation of prog succeeds.

    Parameters:
    prog (nvrtcProgram) – CUDA Runtime Compilation program.

    Returns:

    cuda.bindings.nvrtc.nvrtcGetProgramLog(prog, char *log)#

    nvrtcGetProgramLog stores the log generated by the previous compilation of prog in the memory pointed to by log.

    Parameters:
    • prog (nvrtcProgram) – CUDA Runtime Compilation program.

    • log (bytes) – Compilation log.

    Returns:

    Return type:
    nvrtcResult
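
    Example (a minimal sketch, assuming prog has just been compiled, successfully or not; the same size-then-buffer pattern as the other getters applies):

        from cuda.bindings import nvrtc

        err, log_size = nvrtc.nvrtcGetProgramLogSize(prog)  # size includes the trailing NULL
        log = b" " * log_size
        err, = nvrtc.nvrtcGetProgramLog(prog, log)
        print(log.decode())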

    cuda.bindings.nvrtc.nvrtcAddNameExpression(prog, char *name_expression)#

    nvrtcAddNameExpression notes the given name expression denoting the address of a __global__ function or __device__/__constant__ variable.

    The identical name expression string must be provided on a subsequent call to nvrtcGetLoweredName to extract the lowered name.

    Parameters:
    • prog (nvrtcProgram) – CUDA Runtime Compilation program.

    • name_expression (bytes) – constant expression denoting the address of a __global__ function or __device__/__constant__ variable.

    Returns:

    Return type:
    nvrtcResult

    cuda.bindings.nvrtc.nvrtcGetLoweredName(prog, char *name_expression)#

    nvrtcGetLoweredName extracts the lowered (mangled) name for a __global__ function or __device__/__constant__ variable, and updates lowered_name to point to it. The memory containing the name is released when the NVRTC program is destroyed by nvrtcDestroyProgram. The identical name expression must have been previously provided to nvrtcAddNameExpression.

    Parameters:
    • prog (nvrtcProgram) – CUDA Runtime Compilation program.

    • name_expression (bytes) – constant expression denoting the address of a __global__ function or __device__/__constant__ variable.

    See also

    nvrtcAddNameExpression
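
    Example (a hypothetical end-to-end sketch of the name-expression workflow; the kernel source and option string are invented for illustration):

        from cuda.bindings import nvrtc

        src = b"template<typename T> __global__ void axpy(T a, T *x) { }"
        err, prog = nvrtc.nvrtcCreateProgram(src, b"axpy.cu", 0, [], [])
        err, = nvrtc.nvrtcAddNameExpression(prog, b"&axpy<float>")  # before compiling
        err, = nvrtc.nvrtcCompileProgram(prog, 1, [b"--gpu-architecture=compute_80"])
        # The identical expression string yields the lowered (mangled) name:
        err, lowered = nvrtc.nvrtcGetLoweredName(prog, b"&axpy<float>")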

    diff --git a/docs/module/runtime.html b/docs/module/runtime.html
    new file mode 100644
    index 00000000..1886b95e
    --- /dev/null
    +++ b/docs/module/runtime.html
    @@ -0,0 +1,23596 @@

    runtime - CUDA Python 12.6.1 documentation

    runtime#

    Profiler Control#

    This section describes the profiler control functions of the CUDA runtime application programming interface.

    cuda.bindings.runtime.cudaProfilerStart()#

    Enable profiling.

    Enables profile collection by the active profiling tool for the current context. If profiling is already enabled, then cudaProfilerStart() has no effect.

    cudaProfilerStart and cudaProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selective pieces of code.

    Returns:
    cudaSuccess

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaProfilerStop()#

    Disable profiling.

    Disables profile collection by the active profiling tool for the current context. If profiling is already disabled, then cudaProfilerStop() has no effect.

    cudaProfilerStart and cudaProfilerStop APIs are used to programmatically control the profiling granularity by allowing profiling to be done only on selective pieces of code.

    Returns:
    cudaSuccess

    Return type:
    cudaError_t
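
    Example (a minimal sketch: bracket only the region of interest so an externally attached profiler that starts with collection disabled records just this section; do_gpu_work is a placeholder):

        from cuda.bindings import runtime

        err, = runtime.cudaProfilerStart()
        do_gpu_work()
        err, = runtime.cudaProfilerStop()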
    Device Management#

    This section describes the device management functions of the CUDA runtime application programming interface.
    cuda.bindings.runtime.cudaDeviceReset()#

    Destroy all allocations and reset all state on the current device in the current process.

    Explicitly destroys and cleans up all resources associated with the current device in the current process. It is the caller's responsibility to ensure that the resources are not accessed or passed in subsequent API calls; doing so will result in undefined behavior. These resources include CUDA types cudaStream_t, cudaEvent_t, cudaArray_t, cudaMipmappedArray_t, cudaPitchedPtr, cudaTextureObject_t, cudaSurfaceObject_t, textureReference, surfaceReference, cudaExternalMemory_t, cudaExternalSemaphore_t and cudaGraphicsResource_t. These resources also include memory allocations by cudaMalloc, cudaMallocHost, cudaMallocManaged and cudaMallocPitch. Any subsequent API call to this device will reinitialize the device.

    Note that this function will reset the device immediately. It is the caller's responsibility to ensure that the device is not being accessed by any other host threads from the process when this function is called.

    Returns:
    cudaSuccess

    Return type:
    cudaError_t

    See also

    cudaDeviceSynchronize

    Notes

    cudaDeviceReset() will not destroy memory allocations by cudaMallocAsync() and cudaMallocFromPoolAsync(). These memory allocations need to be destroyed explicitly.

    If a non-primary CUcontext is current to the thread, cudaDeviceReset() will destroy only the internal CUDA RT state for that CUcontext.
    cuda.bindings.runtime.cudaDeviceSynchronize()#

    Wait for compute device to finish.

    Blocks until the device has completed all preceding requested tasks. cudaDeviceSynchronize() returns an error if one of the preceding tasks has failed. If the cudaDeviceScheduleBlockingSync flag was set for this device, the host thread will block until the device has finished its work.

    Returns:
    cudaSuccess

    Return type:
    cudaError_t
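
    Example (a sketch: synchronize to surface errors from earlier asynchronous work, then tear the device state down; the caller must ensure no other host thread is still using the device):

        from cuda.bindings import runtime

        err, = runtime.cudaDeviceSynchronize()
        if err != runtime.cudaError_t.cudaSuccess:
            print("a preceding task failed:", err)
        err, = runtime.cudaDeviceReset()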
    cuda.bindings.runtime.cudaDeviceSetLimit(limit: cudaLimit, size_t value)#

    Set resource limits.

    Setting limit to value is a request by the application to update the current limit maintained by the device. The driver is free to modify the requested value to meet h/w requirements (this could be clamping to minimum or maximum values, rounding up to nearest element size, etc). The application can use cudaDeviceGetLimit() to find out exactly what the limit has been set to.

    Setting each cudaLimit has its own specific restrictions, so each is discussed here.

    • cudaLimitStackSize controls the stack size in bytes of each GPU thread.

    • cudaLimitPrintfFifoSize controls the size in bytes of the shared FIFO used by the printf() device system call. Setting cudaLimitPrintfFifoSize must not be performed after launching any kernel that uses the printf() device system call - in such case cudaErrorInvalidValue will be returned.

    • cudaLimitMallocHeapSize controls the size in bytes of the heap used by the malloc() and free() device system calls. Setting cudaLimitMallocHeapSize must not be performed after launching any kernel that uses the malloc() or free() device system calls - in such case cudaErrorInvalidValue will be returned.

    • cudaLimitDevRuntimeSyncDepth controls the maximum nesting depth of a grid at which a thread can safely call cudaDeviceSynchronize(). Setting this limit must be performed before any launch of a kernel that uses the device runtime and calls cudaDeviceSynchronize() above the default sync depth, two levels of grids. Calls to cudaDeviceSynchronize() will fail with error code cudaErrorSyncDepthExceeded if the limitation is violated. This limit can be set smaller than the default or up to the maximum launch depth of 24. When setting this limit, keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory which can no longer be used for user allocations. If these reservations of device memory fail, cudaDeviceSetLimit will return cudaErrorMemoryAllocation, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability < 9.0. Attempting to set this limit on devices of other compute capability will result in the error cudaErrorUnsupportedLimit being returned.

    • cudaLimitDevRuntimePendingLaunchCount controls the maximum number of outstanding device runtime launches that can be made from the current device. A grid is outstanding from the point of launch up until the grid is known to have been completed. Device runtime launches which violate this limitation fail and return cudaErrorLaunchPendingCountExceeded when cudaGetLastError() is called after launch. If more pending launches than the default (2048 launches) are needed for a module using the device runtime, this limit can be increased. Keep in mind that being able to sustain additional pending launches will require the runtime to reserve larger amounts of device memory upfront which can no longer be used for allocations. If these reservations fail, cudaDeviceSetLimit will return cudaErrorMemoryAllocation, and the limit can be reset to a lower value. This limit is only applicable to devices of compute capability 3.5 and higher. Attempting to set this limit on devices of compute capability less than 3.5 will result in the error cudaErrorUnsupportedLimit being returned.

    • cudaLimitMaxL2FetchGranularity controls the L2 cache fetch granularity. Values can range from 0B to 128B. This is purely a performance hint and it can be ignored or clamped depending on the platform.

    • cudaLimitPersistingL2CacheSize controls the size in bytes available for persisting L2 cache. This is purely a performance hint and it can be ignored or clamped depending on the platform.

    Parameters:
    • limit (cudaLimit) – Limit to set

    • value (size_t) – Size of limit

    Returns:
    cudaSuccess, cudaErrorUnsupportedLimit, cudaErrorInvalidValue, cudaErrorMemoryAllocation

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaDeviceGetLimit(limit: cudaLimit)#

    Return resource limits.

    Returns in *pValue the current size of limit. The following cudaLimit values are supported.

    Parameters:
    limit (cudaLimit) – Limit to query

    Returns:
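
    Example (a minimal sketch of the set/query round trip; because the driver may clamp or round the request, the limit is read back rather than assumed):

        from cuda.bindings import runtime

        err, = runtime.cudaDeviceSetLimit(runtime.cudaLimit.cudaLimitStackSize, 8192)
        err, value = runtime.cudaDeviceGetLimit(runtime.cudaLimit.cudaLimitStackSize)
        print("per-thread stack size:", value, "bytes")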
    cuda.bindings.runtime.cudaDeviceGetTexture1DLinearMaxWidth(cudaChannelFormatDesc fmtDesc: Optional[cudaChannelFormatDesc], int device)#

    Returns the maximum number of elements allocatable in a 1D linear texture for a given element size.

    Returns in maxWidthInElements the maximum number of elements allocatable in a 1D linear texture for given format descriptor fmtDesc.

    Parameters:

    Returns:
    cuda.bindings.runtime.cudaDeviceGetCacheConfig()#

    Returns the preferred cache configuration for the current device.

    On devices where the L1 cache and shared memory use the same hardware resources, this returns through pCacheConfig the preferred cache configuration for the current device. This is only a preference. The runtime will use the requested configuration if possible, but it is free to choose a different configuration if required to execute functions.

    This will return a pCacheConfig of cudaFuncCachePreferNone on devices where the size of the L1 cache and shared memory are fixed.

    The supported cache configurations are:

    Returns:

    See also

    cudaDeviceSetCacheConfig, cudaFuncSetCacheConfig (C API), cudaFuncSetCacheConfig (C++ API), cuCtxGetCacheConfig
    cuda.bindings.runtime.cudaDeviceGetStreamPriorityRange()#

    Returns numerical values that correspond to the least and greatest stream priorities.

    Returns in *leastPriority and *greatestPriority the numerical values that correspond to the least and greatest stream priorities respectively. Stream priorities follow a convention where lower numbers imply greater priorities. The range of meaningful stream priorities is given by [*greatestPriority, *leastPriority]. If the user attempts to create a stream with a priority value that is outside the meaningful range as specified by this API, the priority is automatically clamped down or up to either *leastPriority or *greatestPriority respectively. See cudaStreamCreateWithPriority for details on creating a priority stream. A NULL may be passed in for *leastPriority or *greatestPriority if the value is not desired.

    This function will return '0' in both *leastPriority and *greatestPriority if the current context's device does not support stream priorities (see cudaDeviceGetAttribute).

    Returns:
    • cudaError_t – cudaSuccess

    • leastPriority (int) – Pointer to an int in which the numerical value for least stream priority is returned

    • greatestPriority (int) – Pointer to an int in which the numerical value for greatest stream priority is returned
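
    Example (a sketch: query the valid range, then create a stream at the greatest priority; lower numbers mean greater priority, and out-of-range values would be clamped):

        from cuda.bindings import runtime

        err, least, greatest = runtime.cudaDeviceGetStreamPriorityRange()
        err, stream = runtime.cudaStreamCreateWithPriority(
            runtime.cudaStreamNonBlocking, greatest)
        # ... launch work into `stream` ...
        err, = runtime.cudaStreamDestroy(stream)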
    cuda.bindings.runtime.cudaDeviceSetCacheConfig(cacheConfig: cudaFuncCache)#

    Sets the preferred cache configuration for the current device.

    On devices where the L1 cache and shared memory use the same hardware resources, this sets through cacheConfig the preferred cache configuration for the current device. This is only a preference. The runtime will use the requested configuration if possible, but it is free to choose a different configuration if required to execute the function. Any function preference set via cudaFuncSetCacheConfig (C API) or cudaFuncSetCacheConfig (C++ API) will be preferred over this device-wide setting. Setting the device-wide cache configuration to cudaFuncCachePreferNone will cause subsequent kernel launches to prefer to not change the cache configuration unless required to launch the kernel.

    This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

    Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

    The supported cache configurations are:

    Parameters:
    cacheConfig (cudaFuncCache) – Requested cache configuration

    Returns:
    cudaSuccess

    Return type:
    cudaError_t

    See also

    cudaDeviceGetCacheConfig, cudaFuncSetCacheConfig (C API), cudaFuncSetCacheConfig (C++ API), cuCtxSetCacheConfig
    cuda.bindings.runtime.cudaDeviceGetByPCIBusId(char *pciBusId)#

    Returns a handle to a compute device.

    Returns in *device a device ordinal given a PCI bus ID string.

    Parameters:
    pciBusId (bytes) – String in one of the following forms: [domain]:[bus]:[device].[function], [domain]:[bus]:[device], [bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values

    Returns:
    cuda.bindings.runtime.cudaDeviceGetPCIBusId(int length, int device)#

    Returns a PCI Bus Id string for the device.

    Returns an ASCII string identifying the device dev in the NULL-terminated string pointed to by pciBusId. length specifies the maximum length of the string that may be returned. The string takes the form [domain]:[bus]:[device].[function], where domain, bus, device, and function are all hexadecimal values. pciBusId should be large enough to store 13 characters including the NULL-terminator.

    Parameters:
    • length (int) – Maximum length of string to store in name

    • device (int) – Device to get identifier string for

    Returns:
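
    Example (a sketch that round-trips device 0 through its PCI bus ID string; 13 bytes covers the identifier plus the NULL terminator, as noted above):

        from cuda.bindings import runtime

        err, bus_id = runtime.cudaDeviceGetPCIBusId(13, 0)
        err, device = runtime.cudaDeviceGetByPCIBusId(bus_id)
        assert device == 0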
    cuda.bindings.runtime.cudaIpcGetEventHandle(event)#

    Gets an interprocess handle for a previously allocated event.

    Takes as input a previously allocated event. This event must have been created with the cudaEventInterprocess and cudaEventDisableTiming flags set. This opaque handle may be copied into other processes and opened with cudaIpcOpenEventHandle to allow efficient hardware synchronization between GPU work in different processes.

    After the event has been opened in the importing process, cudaEventRecord, cudaEventSynchronize, cudaStreamWaitEvent and cudaEventQuery may be used in either process. Performing operations on the imported event after the exported event has been freed with cudaEventDestroy will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
    event (CUevent or cudaEvent_t) – Event allocated with cudaEventInterprocess and cudaEventDisableTiming flags.

    Returns:
    cuda.bindings.runtime.cudaIpcOpenEventHandle(cudaIpcEventHandle_t handle: cudaIpcEventHandle_t)#

    Opens an interprocess event handle for use in the current process.

    Opens an interprocess event handle exported from another process with cudaIpcGetEventHandle. This function returns a cudaEvent_t that behaves like a locally created event with the cudaEventDisableTiming flag specified. This event must be freed with cudaEventDestroy.

    Performing operations on the imported event after the exported event has been freed with cudaEventDestroy will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
    handle (cudaIpcEventHandle_t) – Interprocess handle to open

    Returns:
    cuda.bindings.runtime.cudaIpcGetMemHandle(devPtr)#

    Gets an interprocess memory handle for an existing device memory allocation.

    Takes a pointer to the base of an existing device memory allocation created with cudaMalloc and exports it for use in another process. This is a lightweight operation and may be called multiple times on an allocation without adverse effects.

    If a region of memory is freed with cudaFree and a subsequent call to cudaMalloc returns memory with the same device address, cudaIpcGetMemHandle will return a unique handle for the new memory.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
    devPtr (Any) – Base pointer to previously allocated device memory

    Returns:
    cuda.bindings.runtime.cudaIpcOpenMemHandle(cudaIpcMemHandle_t handle: cudaIpcMemHandle_t, unsigned int flags)#

    Opens an interprocess memory handle exported from another process and returns a device pointer usable in the local process.

    Maps memory exported from another process with cudaIpcGetMemHandle into the current device address space. For contexts on different devices cudaIpcOpenMemHandle can attempt to enable peer access between the devices as if the user called cudaDeviceEnablePeerAccess. This behavior is controlled by the cudaIpcMemLazyEnablePeerAccess flag. cudaDeviceCanAccessPeer can determine if a mapping is possible.

    cudaIpcOpenMemHandle can open handles to devices that may not be visible in the process calling the API.

    Contexts that may open cudaIpcMemHandles are restricted in the following way. cudaIpcMemHandles from each device in a given process may only be opened by one context per device per other process.

    If the memory handle has already been opened by the current context, the reference count on the handle is incremented by 1 and the existing device pointer is returned.

    Memory returned from cudaIpcOpenMemHandle must be freed with cudaIpcCloseMemHandle.

    Calling cudaFree on an exported memory region before calling cudaIpcCloseMemHandle in the importing context will result in undefined behavior.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:

    Returns:

    Notes

    No guarantees are made about the address returned in *devPtr. In particular, multiple processes may not receive the same address for the same handle.
    cuda.bindings.runtime.cudaIpcCloseMemHandle(devPtr)#

    Attempts to close memory mapped with cudaIpcOpenMemHandle.

    Decrements the reference count of the memory returned by cudaIpcOpenMemHandle by 1. When the reference count reaches 0, this API unmaps the memory. The original allocation in the exporting process as well as imported mappings in other processes will be unaffected.

    Any resources used to enable peer access will be freed if this is the last mapping using them.

    IPC functionality is restricted to devices with support for unified addressing on Linux and Windows operating systems. IPC functionality on Windows is supported for compatibility purposes but not recommended as it comes with performance cost. Users can test their device for IPC functionality by calling cudaDeviceGetAttribute with cudaDevAttrIpcEventSupport.

    Parameters:
    devPtr (Any) – Device pointer returned by cudaIpcOpenMemHandle

    Returns:
    cudaSuccess, cudaErrorMapBufferObjectFailed, cudaErrorNotSupported, cudaErrorInvalidValue

    Return type:
    cudaError_t
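
    Example (an illustrative two-process sketch; transporting the handle bytes between processes is up to the application, and error handling is elided):

        from cuda.bindings import runtime

        # Exporting process:
        err, dptr = runtime.cudaMalloc(1 << 20)
        err, handle = runtime.cudaIpcGetMemHandle(dptr)
        # ... send the handle to the other process ...

        # Importing process:
        err, peer_ptr = runtime.cudaIpcOpenMemHandle(
            handle, runtime.cudaIpcMemLazyEnablePeerAccess)
        # ... use peer_ptr ...
        err, = runtime.cudaIpcCloseMemHandle(peer_ptr)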
    cuda.bindings.runtime.cudaDeviceFlushGPUDirectRDMAWrites(target: cudaFlushGPUDirectRDMAWritesTarget, scope: cudaFlushGPUDirectRDMAWritesScope)#

    Blocks until remote writes are visible to the specified scope.

    Blocks until remote writes to the target context via mappings created through GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information), are visible to the specified scope.

    If the scope equals or lies within the scope indicated by cudaDevAttrGPUDirectRDMAWritesOrdering, the call will be a no-op and can be safely omitted for performance. This can be determined by comparing the numerical values between the two enums, with smaller scopes having smaller values.

    Users may query support for this API via cudaDevAttrGPUDirectRDMAFlushWritesOptions.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorNotSupported

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaDeviceRegisterAsyncNotification(int device, callbackFunc, userData)#

    Registers a callback function to receive async notifications.

    Registers callbackFunc to receive async notifications.

    The userData parameter is passed to the callback function at async notification time. Likewise, callback is also passed to the callback function to distinguish between multiple registered callbacks.

    The callback function being registered should be designed to return quickly (~10ms). Any long running tasks should be queued for execution on an application thread.

    Callbacks may not call cudaDeviceRegisterAsyncNotification or cudaDeviceUnregisterAsyncNotification. Doing so will result in cudaErrorNotPermitted. Async notification callbacks execute in an undefined order and may be serialized.

    Returns in *callback a handle representing the registered callback instance.

    Parameters:
    • device (int) – The device on which to register the callback

    • callbackFunc (cudaAsyncCallback) – The function to register as a callback

    • userData (Any) – A generic pointer to user data. This is passed into the callback function.

    Returns:
    cuda.bindings.runtime.cudaDeviceUnregisterAsyncNotification(int device, callback)#

    Unregisters an async notification callback.

    Unregisters callback so that the corresponding callback function will stop receiving async notifications.

    Parameters:
    • device (int) – The device from which to remove callback.

    • callback (cudaAsyncCallbackHandle_t) – The callback instance to unregister from receiving async notifications.

    Returns:
    cudaSuccess, cudaErrorNotSupported, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorNotPermitted, cudaErrorUnknown

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGetDeviceCount()#

    Returns the number of compute-capable devices.

    Returns in *count the number of devices with compute capability greater than or equal to 2.0 that are available for execution.

    Returns:
    • cudaError_t – cudaSuccess

    • count (int) – Returns the number of devices with compute capability greater than or equal to 2.0
    cuda.bindings.runtime.cudaGetDeviceProperties(int device)#

    Returns information about the compute-device.

    Returns in *prop the properties of device dev. The cudaDeviceProp structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:

    • name[256] is an ASCII string identifying the device.

    • uuid is a 16-byte unique identifier.

    • totalGlobalMem is the total amount of global memory available on the device in bytes.

    • sharedMemPerBlock is the maximum amount of shared memory available to a thread block in bytes.

    • regsPerBlock is the maximum number of 32-bit registers available to a thread block.

    • warpSize is the warp size in threads.

    • memPitch is the maximum pitch in bytes allowed by the memory copy functions that involve memory regions allocated through cudaMallocPitch().

    • maxThreadsPerBlock is the maximum number of threads per block.

    • maxThreadsDim[3] contains the maximum size of each dimension of a block.

    • maxGridSize[3] contains the maximum size of each dimension of a grid.

    • clockRate is the clock frequency in kilohertz.

    • totalConstMem is the total amount of constant memory available on the device in bytes.

    • major, minor are the major and minor revision numbers defining the device's compute capability.

    • textureAlignment is the alignment requirement; texture base addresses that are aligned to textureAlignment bytes do not need an offset applied to texture fetches.

    • texturePitchAlignment is the pitch alignment requirement for 2D texture references that are bound to pitched memory.

    • deviceOverlap is 1 if the device can concurrently copy memory between host and device while executing a kernel, or 0 if not. Deprecated, use instead asyncEngineCount.

    • multiProcessorCount is the number of multiprocessors on the device.

    • kernelExecTimeoutEnabled is 1 if there is a run time limit for kernels executed on the device, or 0 if not.

    • integrated is 1 if the device is an integrated (motherboard) GPU and 0 if it is a discrete (card) component.

    • canMapHostMemory is 1 if the device can map host memory into the CUDA address space for use with cudaHostAlloc()/cudaHostGetDevicePointer(), or 0 if not.

    • computeMode is the compute mode that the device is currently in. Available modes are as follows:

      • cudaComputeModeDefault: Default mode - Device is not restricted and multiple threads can use cudaSetDevice() with this device.

      • cudaComputeModeProhibited: Compute-prohibited mode - No threads can use cudaSetDevice() with this device.

      • cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many threads in one process will be able to use cudaSetDevice() with this device. When an occupied exclusive mode device is chosen with cudaSetDevice, all subsequent non-device management runtime functions will return cudaErrorDevicesUnavailable.

    • maxTexture1D is the maximum 1D texture size.

    • maxTexture1DMipmap is the maximum 1D mipmapped texture size.

    • maxTexture1DLinear is the maximum 1D texture size for textures bound to linear memory.

    • maxTexture2D[2] contains the maximum 2D texture dimensions.

    • maxTexture2DMipmap[2] contains the maximum 2D mipmapped texture dimensions.

    • maxTexture2DLinear[3] contains the maximum 2D texture dimensions for 2D textures bound to pitch linear memory.

    • maxTexture2DGather[2] contains the maximum 2D texture dimensions if texture gather operations have to be performed.

    • maxTexture3D[3] contains the maximum 3D texture dimensions.

    • maxTexture3DAlt[3] contains the maximum alternate 3D texture dimensions.

    • maxTextureCubemap is the maximum cubemap texture width or height.

    • maxTexture1DLayered[2] contains the maximum 1D layered texture dimensions.

    • maxTexture2DLayered[3] contains the maximum 2D layered texture dimensions.

    • maxTextureCubemapLayered[2] contains the maximum cubemap layered texture dimensions.

    • maxSurface1D is the maximum 1D surface size.

    • maxSurface2D[2] contains the maximum 2D surface dimensions.

    • maxSurface3D[3] contains the maximum 3D surface dimensions.

    • maxSurface1DLayered[2] contains the maximum 1D layered surface dimensions.

    • maxSurface2DLayered[3] contains the maximum 2D layered surface dimensions.

    • maxSurfaceCubemap is the maximum cubemap surface width or height.

    • maxSurfaceCubemapLayered[2] contains the maximum cubemap layered surface dimensions.

    • surfaceAlignment specifies the alignment requirements for surfaces.

    • concurrentKernels is 1 if the device supports executing multiple kernels within the same context simultaneously, or 0 if not. It is not guaranteed that multiple kernels will be resident on the device concurrently so this feature should not be relied upon for correctness.

    • ECCEnabled is 1 if the device has ECC support turned on, or 0 if not.

    • pciBusID is the PCI bus identifier of the device.

    • pciDeviceID is the PCI device (sometimes called slot) identifier of the device.

    • pciDomainID is the PCI domain identifier of the device.

    • tccDriver is 1 if the device is using a TCC driver or 0 if not.

    • asyncEngineCount is 1 when the device can concurrently copy memory between host and device while executing a kernel. It is 2 when the device can concurrently copy memory between host and device in both directions and execute a kernel at the same time. It is 0 if neither of these is supported.

    • unifiedAddressing is 1 if the device shares a unified address space with the host and 0 otherwise.

    • memoryClockRate is the peak memory clock frequency in kilohertz.

    • memoryBusWidth is the memory bus width in bits.

    • l2CacheSize is L2 cache size in bytes.

    • persistingL2CacheMaxSize is L2 cache's maximum persisting lines size in bytes.

    • maxThreadsPerMultiProcessor is the number of maximum resident threads per multiprocessor.

    • streamPrioritiesSupported is 1 if the device supports stream priorities, or 0 if it is not supported.

    • globalL1CacheSupported is 1 if the device supports caching of globals in L1 cache, or 0 if it is not supported.

    • localL1CacheSupported is 1 if the device supports caching of locals in L1 cache, or 0 if it is not supported.

    • sharedMemPerMultiprocessor is the maximum amount of shared memory available to a multiprocessor in bytes; this amount is shared by all thread blocks simultaneously resident on a multiprocessor.

    • regsPerMultiprocessor is the maximum number of 32-bit registers available to a multiprocessor; this number is shared by all thread blocks simultaneously resident on a multiprocessor.

    • managedMemory is 1 if the device supports allocating managed memory on this system, or 0 if it is not supported.

    • isMultiGpuBoard is 1 if the device is on a multi-GPU board (e.g. Gemini cards), and 0 if not.

    • multiGpuBoardGroupID is a unique identifier for a group of devices associated with the same board. Devices on the same multi-GPU board will share the same identifier.

    • hostNativeAtomicSupported is 1 if the link between the device and the host supports native atomic operations, or 0 if it is not supported.

    • singleToDoublePrecisionPerfRatio is the ratio of single precision performance (in floating-point operations per second) to double precision performance.

    • pageableMemoryAccess is 1 if the device supports coherently accessing pageable memory without calling cudaHostRegister on it, and 0 otherwise.

    • concurrentManagedAccess is 1 if the device can coherently access managed memory concurrently with the CPU, and 0 otherwise.

    • computePreemptionSupported is 1 if the device supports Compute Preemption, and 0 otherwise.

    • canUseHostPointerForRegisteredMem is 1 if the device can access host registered memory at the same virtual address as the CPU, and 0 otherwise.

    • cooperativeLaunch is 1 if the device supports launching cooperative kernels via cudaLaunchCooperativeKernel, and 0 otherwise.

    • cooperativeMultiDeviceLaunch is 1 if the device supports launching cooperative kernels via cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise.

    • sharedMemPerBlockOptin is the per device maximum shared memory per block usable by special opt in

    • pageableMemoryAccessUsesHostPageTables is 1 if the device accesses pageable memory via the host's page tables, and 0 otherwise.

    • directManagedMemAccessFromHost is 1 if the host can directly access managed memory on the device without migration, and 0 otherwise.

    • maxBlocksPerMultiProcessor is the maximum number of thread blocks that can reside on a multiprocessor.

    • accessPolicyMaxWindowSize is the maximum value of num_bytes.

    • reservedSharedMemPerBlock is the shared memory reserved by CUDA driver per block in bytes

    • hostRegisterSupported is 1 if the device supports host memory registration via cudaHostRegister, and 0 otherwise.

    • sparseCudaArraySupported is 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise

    • hostRegisterReadOnlySupported is 1 if the device supports using the cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU

    • timelineSemaphoreInteropSupported is 1 if external timeline semaphore interop is supported on the device, 0 otherwise

    • memoryPoolsSupported is 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise

    • gpuDirectRDMASupported is 1 if the device supports GPUDirect RDMA APIs, 0 otherwise

    • gpuDirectRDMAFlushWritesOptions is a bitmask to be interpreted according to the cudaFlushGPUDirectRDMAWritesOptions enum

    • gpuDirectRDMAWritesOrdering – see the cudaGPUDirectRDMAWritesOrdering enum for numerical values

    • memoryPoolSupportedHandleTypes is a bitmask of handle types supported with mempool-based IPC

    • deferredMappingCudaArraySupported is 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays

    • ipcEventSupported is 1 if the device supports IPC Events, and 0 otherwise

    • unifiedFunctionPointers is 1 if the device supports unified pointers, and 0 otherwise

    Parameters:
    device (int) – Device number to get properties for

    Returns:
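
    Example (a sketch that enumerates devices and prints a few of the fields listed above; in these bindings name is NUL-padded bytes, hence the split):

        from cuda.bindings import runtime

        err, count = runtime.cudaGetDeviceCount()
        for dev in range(count):
            err, prop = runtime.cudaGetDeviceProperties(dev)
            name = prop.name.split(b"\x00", 1)[0].decode()
            print(dev, name, f"sm_{prop.major}{prop.minor}",
                  prop.totalGlobalMem // (1 << 20), "MiB")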
    cuda.bindings.runtime.cudaDeviceGetAttribute(attr: cudaDeviceAttr, int device)#

    Returns information about the device.

    Returns in *value the integer value of the attribute attr on device device. The supported attributes are:

    Parameters:
    • attr (cudaDeviceAttr) – Device attribute to query

    • device (int) – Device number to query

    Returns:
    cuda.bindings.runtime.cudaDeviceGetDefaultMemPool(int device)#

    Returns the default mempool of a device.

    The default mempool of a device contains device memory from that device.

    Parameters:
    device (int) – None

    Returns:
    cuda.bindings.runtime.cudaDeviceSetMemPool(int device, memPool)#

    Sets the current memory pool of a device.

    The memory pool must be local to the specified device. Unless a mempool is specified in the cudaMallocAsync call, cudaMallocAsync allocates from the current mempool of the provided stream's device. By default, a device's current memory pool is its default memory pool.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice, cudaErrorNotSupported

    Return type:
    cudaError_t

    Notes

    Use cudaMallocFromPoolAsync to specify asynchronous allocations from a device different than the one the stream runs on.
    cuda.bindings.runtime.cudaDeviceGetMemPool(int device)#

    Gets the current mempool for a device.

    Returns the last pool provided to cudaDeviceSetMemPool for this device or the device's default memory pool if cudaDeviceSetMemPool has never been called. By default the current mempool is the default mempool for a device, otherwise the returned pool must have been set with cuDeviceSetMemPool or cudaDeviceSetMemPool.

    Parameters:
    device (int) – None

    Returns:
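
    Example (a minimal sketch: fetch device 0's default pool, set it as the current pool explicitly, and read the current pool back):

        from cuda.bindings import runtime

        err, pool = runtime.cudaDeviceGetDefaultMemPool(0)
        err, = runtime.cudaDeviceSetMemPool(0, pool)
        err, current = runtime.cudaDeviceGetMemPool(0)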
    cuda.bindings.runtime.cudaDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, int device, int flags)#

    Return NvSciSync attributes that this device can support.

    Returns in nvSciSyncAttrList the properties of NvSciSync that this CUDA device, dev, can support. The returned nvSciSyncAttrList can be used to create an NvSciSync that matches this device's capabilities.

    If the NvSciSyncAttrKey_RequiredPerm field in nvSciSyncAttrList is already set, this API will return cudaErrorInvalidValue.

    Applications should set nvSciSyncAttrList to a valid NvSciSyncAttrList, failing which this API will return cudaErrorInvalidHandle.

    The flags parameter controls how the application intends to use the NvSciSync created from the nvSciSyncAttrList. The valid flags are:

    • cudaNvSciSyncAttrSignal, specifies that the application intends to signal an NvSciSync on this CUDA device.

    • cudaNvSciSyncAttrWait, specifies that the application intends to wait on an NvSciSync on this CUDA device.

    At least one of these flags must be set, failing which the API returns cudaErrorInvalidValue. Both flags are orthogonal to one another: a developer may set both, which allows setting both wait and signal specific attributes in the same nvSciSyncAttrList.

    Note that this API updates the input nvSciSyncAttrList with values equivalent to the following public attribute key-values: NvSciSyncAttrKey_RequiredPerm is set to

    • NvSciSyncAccessPerm_SignalOnly if cudaNvSciSyncAttrSignal is set in flags.

    • NvSciSyncAccessPerm_WaitOnly if cudaNvSciSyncAttrWait is set in flags.

    • NvSciSyncAccessPerm_WaitSignal if both cudaNvSciSyncAttrWait and cudaNvSciSyncAttrSignal are set in flags. NvSciSyncAttrKey_PrimitiveInfo is set to

    • NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid device.

    • NvSciSyncAttrValPrimitiveType_Syncpoint if device is a Tegra device.

    • NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if device is GA10X+. NvSciSyncAttrKey_GpuId is set to the same UUID that is returned in None from cudaDeviceGetProperties for this device.

    cudaSuccess, cudaErrorDeviceUninitialized, cudaErrorInvalidValue, cudaErrorInvalidHandle, cudaErrorInvalidDevice, cudaErrorNotSupported, cudaErrorMemoryAllocation

    Parameters:
    • nvSciSyncAttrList (Any) – Return NvSciSync attributes supported.

    • device (int) – Valid Cuda Device to get NvSciSync attributes for.

    • flags (int) – flags describing NvSciSync usage.

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaDeviceGetP2PAttribute(attr: cudaDeviceP2PAttr, int srcDevice, int dstDevice)#

    Queries attributes of the link between two devices.

    Returns in *value the value of the requested attribute attrib of the link between srcDevice and dstDevice. The supported attributes are:

    Returns cudaErrorInvalidDevice if srcDevice or dstDevice are not valid or if they represent the same device.

    Returns cudaErrorInvalidValue if attrib is not valid or if value is a null pointer.

    Parameters:
    • attrib (cudaDeviceP2PAttr) – The requested attribute of the link between srcDevice and dstDevice.

    • srcDevice (int) – The source device of the target link.

    • dstDevice (int) – The destination device of the target link.

    Returns:
    cuda.bindings.runtime.cudaChooseDevice(cudaDeviceProp prop: Optional[cudaDeviceProp])#

    Select compute-device which best matches criteria.

    Returns in *device the device which has properties that best match *prop.

    Parameters:
    prop (cudaDeviceProp) – Desired device properties

    Returns:
    cuda.bindings.runtime.cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags)#

    Initialize device to be used for GPU executions.

    This function will initialize the CUDA Runtime structures and primary context on device when called, but the context will not be made current to device.

    When cudaInitDeviceFlagsAreValid is set in flags, deviceFlags are applied to the requested device. The values of deviceFlags match those of the flags parameters in cudaSetDeviceFlags. The effect may be verified by cudaGetDeviceFlags.

    This function will return an error if the device is in cudaComputeModeExclusiveProcess and is occupied by another process or if the device is in cudaComputeModeProhibited.

    Parameters:
    • device (int) – Device on which the runtime will initialize itself.

    • deviceFlags (unsigned int) – Parameters for device operation.

    • flags (unsigned int) – Flags for controlling the device initialization.

    Returns:
    cudaSuccess, cudaErrorInvalidDevice

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaSetDevice(int device)#

    Set device to be used for GPU executions.

    Sets device as the current device for the calling host thread. Valid device IDs are 0 to (cudaGetDeviceCount() - 1).

    Any device memory subsequently allocated from this host thread using cudaMalloc(), cudaMallocPitch() or cudaMallocArray() will be physically resident on device. Any host memory allocated from this host thread using cudaMallocHost() or cudaHostAlloc() or cudaHostRegister() will have its lifetime associated with device. Any streams or events created from this host thread will be associated with device. Any kernels launched from this host thread using the <<<>>> operator or cudaLaunchKernel() will be executed on device.

    This call may be made from any host thread, to any device, and at any time. This function will do no synchronization with the previous or new device, and should only take significant time when it initializes the runtime's context state. This call will bind the primary context of the specified device to the calling thread and all the subsequent memory allocations, stream and event creations, and kernel launches will be associated with the primary context. This function will also immediately initialize the runtime state on the primary context, and the context will be current on device immediately. This function will return an error if the device is in cudaComputeModeExclusiveProcess and is occupied by another process or if the device is in cudaComputeModeProhibited.

    It is not required to call cudaInitDevice before using this function.

    Parameters:
    device (int) – Device on which the active host thread should execute the device code.

    Returns:
    cudaSuccess, cudaErrorInvalidDevice, cudaErrorDeviceUnavailable

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGetDevice()#

    Returns which device is currently being used.

    Returns in *device the current device for the calling host thread.

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorDeviceUnavailable

    • device (int) – Returns the device on which the active host thread executes the device code.
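
    Example (a sketch: bind device 0 to the calling host thread and read the selection back):

        from cuda.bindings import runtime

        err, = runtime.cudaSetDevice(0)
        err, device = runtime.cudaGetDevice()
        assert device == 0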
    cuda.bindings.runtime.cudaSetDeviceFlags(unsigned int flags)#

    Sets flags to be used for device executions.

    Records flags as the flags for the current device. If the current device has been set and that device has already been initialized, the previous flags are overwritten. If the current device has not been initialized, it is initialized with the provided flags. If no device has been made current to the calling thread, a default device is selected and initialized with the provided flags.

    The three LSBs of the flags parameter can be used to control how the CPU thread interacts with the OS scheduler when waiting for results from the device.

    • cudaDeviceScheduleAuto: The default value if the flags parameter is zero, uses a heuristic based on the number of active CUDA contexts in the process C and the number of logical processors in the system P. If C > P, then CUDA will yield to other OS threads when waiting for the device, otherwise CUDA will not yield while waiting for results and actively spin on the processor. Additionally, on Tegra devices, cudaDeviceScheduleAuto uses a heuristic based on the power profile of the platform and may choose cudaDeviceScheduleBlockingSync for low-powered devices.

    • cudaDeviceScheduleSpin: Instruct CUDA to actively spin when waiting for results from the device. This can decrease latency when waiting for the device, but may lower the performance of CPU threads if they are performing work in parallel with the CUDA thread.

    • cudaDeviceScheduleYield: Instruct CUDA to yield its thread when waiting for results from the device. This can increase latency when waiting for the device, but can increase the performance of CPU threads performing work in parallel with the device.

    • cudaDeviceScheduleBlockingSync: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the device to finish work.

    • cudaDeviceBlockingSync: Instruct CUDA to block the CPU thread on a synchronization primitive when waiting for the device to finish work. Deprecated: This flag was deprecated as of CUDA 4.0 and replaced with cudaDeviceScheduleBlockingSync.

    • cudaDeviceMapHost: This flag enables allocating pinned host memory that is accessible to the device. It is implicit for the runtime but may be absent if a context is created using the driver API. If this flag is not set, cudaHostGetDevicePointer() will always return a failure code.

    • cudaDeviceLmemResizeToMax: Instruct CUDA to not reduce local memory after resizing local memory for a kernel. This can prevent thrashing by local memory allocations when launching many kernels with high local memory usage at the cost of potentially increased memory usage. Deprecated: This flag is deprecated and the behavior enabled by this flag is now the default and cannot be disabled.

    • cudaDeviceSyncMemops: Ensures that synchronous memory operations initiated on this context will always synchronize. See further documentation in the section titled "API Synchronization behavior" to learn more about cases when synchronous memory operations can exhibit asynchronous behavior.

    Parameters:
    flags (unsigned int) – Parameters for device operation

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGetDeviceFlags()#

    Gets the flags for the current device.

    Returns in flags the flags for the current device. If there is a current device for the calling thread, the flags for the device are returned. If there is no current device, the flags for the first device are returned, which may be the default flags. Compare to the behavior of cudaSetDeviceFlags.

    Typically, the flags returned should match the behavior that will be seen if the calling thread uses a device after this call, without any change to the flags or current device in between by this or another thread. Note that if the device is not initialized, it is possible for another thread to change the flags for the current device before it is initialized. Additionally, when using exclusive mode, if this thread has not requested a specific device, it may use a device other than the first device, contrary to the assumption made by this function.

    If a context has been created via the driver API and is current to the calling thread, the flags for that context are always returned.

    Flags returned by this function may specifically include cudaDeviceMapHost even though it is not accepted by cudaSetDeviceFlags because it is implicit in runtime API flags. The reason for this is that the current context may have been created via the driver API in which case the flag is not implicit and may be unset.

    Returns:

    Error Handling#

    This section describes the error handling functions of the CUDA runtime application programming interface.

    cuda.bindings.runtime.cudaGetLastError()#

    Returns the last error from a runtime call.

    Returns the last error that has been produced by any of the runtime calls in the same instance of the CUDA Runtime library in the host thread and resets it to cudaSuccess.

    Note: Multiple instances of the CUDA Runtime library can be present in an application when using a library that statically links the CUDA Runtime.

    Returns:
    cudaSuccess, cudaErrorMissingConfiguration, cudaErrorMemoryAllocation, cudaErrorInitializationError, cudaErrorLaunchFailure, cudaErrorLaunchTimeout, cudaErrorLaunchOutOfResources, cudaErrorInvalidDeviceFunction, cudaErrorInvalidConfiguration, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidSymbol, cudaErrorUnmapBufferObjectFailed, cudaErrorInvalidDevicePointer, cudaErrorInvalidTexture, cudaErrorInvalidTextureBinding, cudaErrorInvalidChannelDescriptor, cudaErrorInvalidMemcpyDirection, cudaErrorInvalidFilterSetting, cudaErrorInvalidNormSetting, cudaErrorUnknown, cudaErrorInvalidResourceHandle, cudaErrorInsufficientDriver, cudaErrorNoDevice, cudaErrorSetOnActiveProcess, cudaErrorStartupFailure, cudaErrorInvalidPtx, cudaErrorUnsupportedPtxVersion, cudaErrorNoKernelImageForDevice, cudaErrorJitCompilerNotFound, cudaErrorJitCompilationDisabled

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaPeekAtLastError()#

    Returns the last error from a runtime call.

    Returns the last error that has been produced by any of the runtime calls in the same instance of the CUDA Runtime library in the host thread. This call does not reset the error to cudaSuccess like cudaGetLastError().

    Note: Multiple instances of the CUDA Runtime library can be present in an application when using a library that statically links the CUDA Runtime.

    Returns:
    cudaSuccess, cudaErrorMissingConfiguration, cudaErrorMemoryAllocation, cudaErrorInitializationError, cudaErrorLaunchFailure, cudaErrorLaunchTimeout, cudaErrorLaunchOutOfResources, cudaErrorInvalidDeviceFunction, cudaErrorInvalidConfiguration, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidSymbol, cudaErrorUnmapBufferObjectFailed, cudaErrorInvalidDevicePointer, cudaErrorInvalidTexture, cudaErrorInvalidTextureBinding, cudaErrorInvalidChannelDescriptor, cudaErrorInvalidMemcpyDirection, cudaErrorInvalidFilterSetting, cudaErrorInvalidNormSetting, cudaErrorUnknown, cudaErrorInvalidResourceHandle, cudaErrorInsufficientDriver, cudaErrorNoDevice, cudaErrorSetOnActiveProcess, cudaErrorStartupFailure, cudaErrorInvalidPtx, cudaErrorUnsupportedPtxVersion, cudaErrorNoKernelImageForDevice, cudaErrorJitCompilerNotFound, cudaErrorJitCompilationDisabled

    Return type:
    cudaError_t
    +
    cuda.bindings.runtime.cudaGetErrorName(error: cudaError_t)

    Returns the string representation of an error code enum name.

    Returns a string containing the name of an error code in the enum. If the error code is not recognized, “unrecognized error code” is returned.

    Parameters:
        error (cudaError_t) – Error code to convert to string

    Returns:
        • cudaError_t.cudaSuccess – cudaError_t.cudaSuccess
        • bytes – char* pointer to a NULL-terminated string
    cuda.bindings.runtime.cudaGetErrorString(error: cudaError_t)

    Returns the description string for an error code.

    Returns the description string for an error code. If the error code is not recognized, “unrecognized error code” is returned.

    Parameters:
        error (cudaError_t) – Error code to convert to string

    Returns:
        • cudaError_t.cudaSuccess – cudaError_t.cudaSuccess
        • bytes – char* pointer to a NULL-terminated string
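    The four functions above compose into a small error-checking helper. The following is a minimal sketch, assuming the tuple-return convention of these bindings, where the first element of every result tuple is a cudaError_t; the helper name check is our own, and cudaGetDeviceCount is used only as a representative runtime call.

        from cuda.bindings import runtime

        def check(err):
            # Raise on anything other than cudaSuccess, with name and description.
            if err != runtime.cudaError_t.cudaSuccess:
                _, name = runtime.cudaGetErrorName(err)
                _, desc = runtime.cudaGetErrorString(err)
                raise RuntimeError(f"{name.decode()}: {desc.decode()}")

        err, count = runtime.cudaGetDeviceCount()  # any runtime call works here
        check(err)

        err, = runtime.cudaPeekAtLastError()  # inspect without clearing
        err, = runtime.cudaGetLastError()     # inspect and reset to cudaSuccess
        check(err)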
Stream Management

    This section describes the stream management functions of the CUDA runtime application programming interface.
    class cuda.bindings.runtime.cudaStreamCallback_t(*args, **kwargs)

        getPtr()

            Get memory address of class instance.
    cuda.bindings.runtime.cudaStreamCreate()

    Create an asynchronous stream.

    Creates a new asynchronous stream on the context that is current to the calling host thread. If no context is current to the calling host thread, then the primary context for a device is selected, made current to the calling thread, and initialized before creating a stream on it.

    Returns:
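    A minimal usage sketch, assuming the tuple-return convention of these bindings (the stream handle is the second element of the result tuple):

        from cuda.bindings import runtime

        # Create a stream on the current device's primary context.
        err, stream = runtime.cudaStreamCreate()
        assert err == runtime.cudaError_t.cudaSuccess

        # ... enqueue work on `stream` here ...

        err, = runtime.cudaStreamSynchronize(stream)  # wait for completion
        err, = runtime.cudaStreamDestroy(stream)      # freed once work drains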
    cuda.bindings.runtime.cudaStreamCreateWithFlags(unsigned int flags)

    Create an asynchronous stream.

    Creates a new asynchronous stream on the context that is current to the calling host thread. If no context is current to the calling host thread, then the primary context for a device is selected, made current to the calling thread, and initialized before creating a stream on it. The flags argument determines the behaviors of the stream. Valid values for flags are:

    • cudaStreamDefault: Default stream creation flag.
    • cudaStreamNonBlocking: Specifies that work running in the created stream may run concurrently with work in stream 0 (the NULL stream), and that the created stream should perform no implicit synchronization with stream 0.

    Parameters:
        flags (unsigned int) – Parameters for stream creation

    Returns:
    cuda.bindings.runtime.cudaStreamCreateWithPriority(unsigned int flags, int priority)

    Create an asynchronous stream with the specified priority.

    Creates a stream with the specified priority and returns a handle in pStream. The stream is created on the context that is current to the calling host thread. If no context is current to the calling host thread, then the primary context for a device is selected, made current to the calling thread, and initialized before creating a stream on it. This affects the scheduling priority of work in the stream. Priorities provide a hint to preferentially run work with higher priority when possible, but do not preempt already-running work or provide any other functional guarantee on execution order.

    priority follows a convention where lower numbers represent higher priorities. ‘0’ represents default priority. The range of meaningful numerical priorities can be queried using cudaDeviceGetStreamPriorityRange. If the specified priority is outside the numerical range returned by cudaDeviceGetStreamPriorityRange, it will automatically be clamped to the lowest or the highest number in the range.

    Parameters:
        • flags (unsigned int) – Flags for stream creation. See cudaStreamCreateWithFlags for a list of valid flags that can be passed
        • priority (int) – Priority of the stream. Lower numbers represent higher priorities. See cudaDeviceGetStreamPriorityRange for more information about the meaningful stream priorities that can be passed.

    Returns:

    Notes

    Stream priorities are supported only on GPUs with compute capability 3.5 or higher.

    In the current implementation, only compute kernels launched in priority streams are affected by the stream’s priority. Stream priorities have no effect on host-to-device and device-to-host memory operations.
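    A sketch of the query-then-create pattern described above, assuming cudaDeviceGetStreamPriorityRange returns the least and greatest meaningful priorities after the error code, and that cudaStreamNonBlocking is exposed as a module-level constant:

        from cuda.bindings import runtime

        # Lower numbers mean higher priority; 0 is the default.
        err, least, greatest = runtime.cudaDeviceGetStreamPriorityRange()

        # Create a non-blocking stream at the highest meaningful priority.
        err, stream = runtime.cudaStreamCreateWithPriority(
            runtime.cudaStreamNonBlocking, greatest)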
    cuda.bindings.runtime.cudaStreamGetPriority(hStream)

    Query the priority of a stream.

    Query the priority of a stream. The priority is returned in priority. Note that if the stream was created with a priority outside the meaningful numerical range returned by cudaDeviceGetStreamPriorityRange, this function returns the clamped priority. See cudaStreamCreateWithPriority for details about priority clamping.

    Parameters:
        hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    cuda.bindings.runtime.cudaStreamGetFlags(hStream)

    Query the flags of a stream.

    Query the flags of a stream. The flags are returned in flags. See cudaStreamCreateWithFlags for a list of valid flags.

    Parameters:
        hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
    cuda.bindings.runtime.cudaStreamGetId(hStream)

    Query the Id of a stream.

    Query the Id of a stream. The Id is returned in streamId. The Id is unique for the life of the program.

    The stream handle hStream can refer to any of the following:

    Parameters:
        hStream (CUstream or cudaStream_t) – Handle to the stream to be queried

    Returns:
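    The three getters above share the same shape; a minimal sketch, assuming each returns its value after the error code:

        from cuda.bindings import runtime

        err, stream = runtime.cudaStreamCreateWithFlags(runtime.cudaStreamNonBlocking)

        err, priority = runtime.cudaStreamGetPriority(stream)  # clamped value
        err, flags = runtime.cudaStreamGetFlags(stream)        # creation flags
        err, stream_id = runtime.cudaStreamGetId(stream)       # unique for process life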
    cuda.bindings.runtime.cudaCtxResetPersistingL2Cache()

    Resets all persisting lines in cache to normal status.

    Resets all persisting lines in cache to normal status. Takes effect on function return.

    Returns:
        cudaSuccess

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaStreamCopyAttributes(dst, src)

    Copies attributes from source stream to destination stream.

    Copies attributes from source stream src to destination stream dst. Both streams must have the same context.

    Parameters:

    Returns:
        cudaSuccess, cudaErrorNotSupported

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaStreamGetAttribute(hStream, attr: cudaStreamAttrID)

    Queries stream attribute.

    Queries attribute attr from hStream and stores it in corresponding member of value_out.

    Parameters:

    Returns:
    cuda.bindings.runtime.cudaStreamSetAttribute(hStream, attr: cudaStreamAttrID, cudaStreamAttrValue value: Optional[cudaStreamAttrValue])

    Sets stream attribute.

    Sets attribute attr on hStream from corresponding attribute of value. The updated attribute will be applied to subsequent work submitted to the stream. It will not affect previously submitted work.

    Parameters:

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaStreamDestroy(stream)

    Destroys and cleans up an asynchronous stream.

    Destroys and cleans up the asynchronous stream specified by stream.

    In case the device is still doing work in the stream stream when cudaStreamDestroy() is called, the function will return immediately and the resources associated with stream will be released automatically once the device has completed all work in stream.

    Parameters:
        stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaStreamWaitEvent(stream, event, unsigned int flags)

    Make a compute stream wait on an event.

    Makes all future work submitted to stream wait for all work captured in event. See cudaEventRecord() for details on what is captured by an event. The synchronization will be performed efficiently on the device when applicable. event may be from a different device than stream.

    flags include:

    Parameters:

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
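    A sketch of the classic cross-stream dependency built from cudaEventRecord plus cudaStreamWaitEvent, assuming flags=0 (the default wait behavior) and the tuple-return convention:

        from cuda.bindings import runtime

        err, producer = runtime.cudaStreamCreate()
        err, consumer = runtime.cudaStreamCreate()
        err, event = runtime.cudaEventCreateWithFlags(runtime.cudaEventDisableTiming)

        # ... enqueue producer work ...
        err, = runtime.cudaEventRecord(event, producer)

        # Work submitted to `consumer` after this call waits for the recorded work.
        err, = runtime.cudaStreamWaitEvent(consumer, event, 0)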
    cuda.bindings.runtime.cudaStreamAddCallback(stream, callback, userData, unsigned int flags)

    Add a callback to a compute stream.

    Adds a callback to be called on the host after all currently enqueued items in the stream have completed. For each cudaStreamAddCallback call, a callback will be executed exactly once. The callback will block later work in the stream until it is finished.

    The callback may be passed cudaSuccess or an error code. In the event of a device error, all subsequently executed callbacks will receive an appropriate cudaError_t.

    Callbacks must not make any CUDA API calls. Attempting to use CUDA APIs may result in cudaErrorNotPermitted. Callbacks must not perform any synchronization that may depend on outstanding device work or other callbacks that are not mandated to run earlier. Callbacks without a mandated order (in independent streams) execute in undefined order and may be serialized.

    For the purposes of Unified Memory, callback execution makes a number of guarantees:

    • The callback stream is considered idle for the duration of the callback. Thus, for example, a callback may always use memory attached to the callback stream.
    • The start of execution of a callback has the same effect as synchronizing an event recorded in the same stream immediately prior to the callback. It thus synchronizes streams which have been “joined” prior to the callback.
    • Adding device work to any stream does not have the effect of making the stream active until all preceding callbacks have executed. Thus, for example, a callback might use global attached memory even if work has been added to another stream, if it has been properly ordered with an event.
    • Completion of a callback does not cause a stream to become active except as described above. The callback stream will remain idle if no device work follows the callback, and will remain idle across consecutive callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a callback at the end of the stream.

    Parameters:
        • stream (CUstream or cudaStream_t) – Stream to add callback to
        • callback (cudaStreamCallback_t) – The function to call once preceding stream operations are complete
        • userData (Any) – User specified data to be passed to the callback function
        • flags (unsigned int) – Reserved for future use, must be 0

    Returns:
        cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorInvalidValue, cudaErrorNotSupported

    Return type:
        cudaError_t

    Notes

    This function is slated for eventual deprecation and removal. If you do not require the callback to execute in case of a device error, consider using cudaLaunchHostFunc. Additionally, this function is not supported with cudaStreamBeginCapture and cudaStreamEndCapture, unlike cudaLaunchHostFunc.
    cuda.bindings.runtime.cudaStreamSynchronize(stream)

    Waits for stream tasks to complete.

    Blocks until stream has completed all operations. If the cudaDeviceScheduleBlockingSync flag was set for this device, the host thread will block until the stream is finished with all of its tasks.

    Parameters:
        stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
        cudaSuccess, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaStreamQuery(stream)

    Queries an asynchronous stream for completion status.

    Returns cudaSuccess if all operations in stream have completed, or cudaErrorNotReady if not.

    For the purposes of Unified Memory, a return value of cudaSuccess is equivalent to having called cudaStreamSynchronize().

    Parameters:
        stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
        cudaSuccess, cudaErrorNotReady, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
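    A sketch of the non-blocking poll-then-wait pattern these two calls enable, assuming a stream handle from cudaStreamCreate:

        from cuda.bindings import runtime

        err, stream = runtime.cudaStreamCreate()
        # ... enqueue work on `stream` ...

        err, = runtime.cudaStreamQuery(stream)
        if err == runtime.cudaError_t.cudaErrorNotReady:
            # Work still pending; do other host work, or block until it drains.
            err, = runtime.cudaStreamSynchronize(stream)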
    cuda.bindings.runtime.cudaStreamAttachMemAsync(stream, devPtr, size_t length, unsigned int flags)

    Attach memory to a stream asynchronously.

    Enqueues an operation in stream to specify stream association of length bytes of memory starting from devPtr. This function is a stream-ordered operation, meaning that it is dependent on, and will only take effect when, previous work in stream has completed. Any previous association is automatically replaced.

    devPtr must point to one of the following types of memories:

    • managed memory declared using the managed keyword or allocated with cudaMallocManaged.
    • a valid host-accessible region of system-allocated pageable memory. This type of memory may only be specified if the device associated with the stream reports a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess.

    For managed allocations, length must be either zero or the entire allocation’s size. Both indicate that the entire allocation’s stream association is being changed. Currently, it is not possible to change stream association for a portion of a managed allocation.

    For pageable allocations, length must be non-zero.

    The stream association is specified using flags which must be one of cudaMemAttachGlobal, cudaMemAttachHost or cudaMemAttachSingle. The default value for flags is cudaMemAttachSingle. If the cudaMemAttachGlobal flag is specified, the memory can be accessed by any stream on any device. If the cudaMemAttachHost flag is specified, the program makes a guarantee that it won’t access the memory on the device from any stream on a device that has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess. If the cudaMemAttachSingle flag is specified and stream is associated with a device that has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess, the program makes a guarantee that it will only access the memory on the device from stream. It is illegal to attach singly to the NULL stream, because the NULL stream is a virtual global stream and not a specific stream. An error will be returned in this case.

    When memory is associated with a single stream, the Unified Memory system will allow CPU access to this memory region so long as all operations in stream have completed, regardless of whether other streams are active. In effect, this constrains exclusive ownership of the managed memory region by an active GPU to per-stream activity instead of whole-GPU activity.

    Accessing memory on the device from streams that are not associated with it will produce undefined results. No error checking is performed by the Unified Memory system to ensure that kernels launched into other streams do not access this region.

    It is a program’s responsibility to order calls to cudaStreamAttachMemAsync via events, synchronization or other means to ensure legal access to memory at all times. Data visibility and coherency will be changed appropriately for all kernels which follow a stream-association change.

    If stream is destroyed while data is associated with it, the association is removed and the association reverts to the default visibility of the allocation as specified at cudaMallocManaged. For managed variables, the default association is always cudaMemAttachGlobal. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won’t happen until all work in the stream has completed.

    Parameters:

    Returns:
        cudaSuccess, cudaErrorNotReady, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
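    A minimal sketch of attaching a managed allocation to a single stream, assuming cudaMallocManaged returns the pointer after the error code and that the attach-mode constants are exposed at module level:

        from cuda.bindings import runtime

        nbytes = 1 << 20
        err, ptr = runtime.cudaMallocManaged(nbytes, runtime.cudaMemAttachGlobal)
        err, stream = runtime.cudaStreamCreate()

        # Restrict the allocation to `stream`; length 0 means the whole allocation.
        err, = runtime.cudaStreamAttachMemAsync(
            stream, ptr, 0, runtime.cudaMemAttachSingle)
        err, = runtime.cudaStreamSynchronize(stream)  # CPU access is now safe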
    cuda.bindings.runtime.cudaStreamBeginCapture(stream, mode: cudaStreamCaptureMode)

    Begins graph capture on a stream.

    Begin graph capture on stream. When a stream is in capture mode, all operations pushed into the stream will not be executed, but will instead be captured into a graph, which will be returned via cudaStreamEndCapture. Capture may not be initiated if stream is cudaStreamLegacy. Capture must be ended on the same stream in which it was initiated, and it may only be initiated if the stream is not already in capture mode. The capture mode may be queried via cudaStreamIsCapturing. A unique id representing the capture sequence may be queried via cudaStreamGetCaptureInfo.

    If mode is not cudaStreamCaptureModeRelaxed, cudaStreamEndCapture must be called on this stream from the same thread.

    Parameters:

    Returns:
        cudaSuccess, cudaErrorInvalidValue

    Return type:
        cudaError_t

    Notes

    Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.
    cuda.bindings.runtime.cudaStreamBeginCaptureToGraph(stream, graph, dependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], dependencyData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies, mode: cudaStreamCaptureMode)

    Begins graph capture on a stream to an existing graph.

    Begin graph capture on stream. When a stream is in capture mode, all operations pushed into the stream will not be executed, but will instead be captured into graph, which will be returned via cudaStreamEndCapture.

    Capture may not be initiated if stream is cudaStreamLegacy. Capture must be ended on the same stream in which it was initiated, and it may only be initiated if the stream is not already in capture mode. The capture mode may be queried via cudaStreamIsCapturing. A unique id representing the capture sequence may be queried via cudaStreamGetCaptureInfo.

    If mode is not cudaStreamCaptureModeRelaxed, cudaStreamEndCapture must be called on this stream from the same thread.

    Parameters:

    Returns:
        cudaSuccess, cudaErrorInvalidValue

    Return type:
        cudaError_t

    Notes

    Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.
    cuda.bindings.runtime.cudaThreadExchangeStreamCaptureMode(mode: cudaStreamCaptureMode)

    Swaps the stream capture interaction mode for a thread.

    Sets the calling thread’s stream capture interaction mode to the value contained in *mode, and overwrites *mode with the previous mode for the thread. To facilitate deterministic behavior across function or module boundaries, callers are encouraged to use this API in a push-pop fashion:

    View CUDA Toolkit Documentation for a C++ code example

    During stream capture (see cudaStreamBeginCapture), some actions, such as a call to cudaMalloc, may be unsafe. In the case of cudaMalloc, the operation is not enqueued asynchronously to a stream, and is not observed by stream capture. Therefore, if the sequence of operations captured via cudaStreamBeginCapture depended on the allocation being replayed whenever the graph is launched, the captured graph would be invalid.

    Therefore, stream capture places restrictions on API calls that can be made within or concurrently to a cudaStreamBeginCapture-cudaStreamEndCapture sequence. This behavior can be controlled via this API and flags to cudaStreamBeginCapture.

    A thread’s mode is one of the following:

    • cudaStreamCaptureModeGlobal: This is the default mode. If the local thread has an ongoing capture sequence that was not initiated with cudaStreamCaptureModeRelaxed at cudaStreamBeginCapture, or if any other thread has a concurrent capture sequence initiated with cudaStreamCaptureModeGlobal, this thread is prohibited from potentially unsafe API calls.
    • cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture sequence not initiated with cudaStreamCaptureModeRelaxed, it is prohibited from potentially unsafe API calls. Concurrent capture sequences in other threads are ignored.
    • cudaStreamCaptureModeRelaxed: The local thread is not prohibited from potentially unsafe API calls. Note that the thread is still prohibited from API calls which necessarily conflict with stream capture, for example, attempting cudaEventQuery on an event that was last recorded inside a capture sequence.

    Parameters:
        mode (cudaStreamCaptureMode) – Pointer to mode value to swap with the current mode

    Returns:
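    A Python sketch of the push-pop pattern recommended above, assuming the binding mirrors the C API's in/out *mode parameter by returning the previous mode as the second element of the result tuple:

        from cuda.bindings import runtime

        # "Push": set relaxed mode for this thread, keeping the previous mode.
        err, prev_mode = runtime.cudaThreadExchangeStreamCaptureMode(
            runtime.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed)
        try:
            ...  # calls that must not be restricted by concurrent captures
        finally:
            # "Pop": restore whatever mode the caller had.
            err, _ = runtime.cudaThreadExchangeStreamCaptureMode(prev_mode)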
    cuda.bindings.runtime.cudaStreamEndCapture(stream)

    Ends capture on a stream, returning the captured graph.

    End capture on stream, returning the captured graph via pGraph. Capture must have been initiated on stream via a call to cudaStreamBeginCapture. If capture was invalidated, due to a violation of the rules of stream capture, then a NULL graph will be returned.

    If the mode argument to cudaStreamBeginCapture was not cudaStreamCaptureModeRelaxed, this call must be from the same thread as cudaStreamBeginCapture.

    Parameters:
        stream (CUstream or cudaStream_t) – Stream to query

    Returns:
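    A minimal begin/end capture sketch, assuming the captured cudaGraph_t is the second element of the cudaStreamEndCapture result tuple and that cudaGraphDestroy is available to release it:

        from cuda.bindings import runtime

        err, stream = runtime.cudaStreamCreateWithFlags(runtime.cudaStreamNonBlocking)

        err, = runtime.cudaStreamBeginCapture(
            stream, runtime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
        # ... enqueue the work to be captured into the graph ...
        err, graph = runtime.cudaStreamEndCapture(stream)

        # Ownership of the returned graph is transferred to the caller.
        err, = runtime.cudaGraphDestroy(graph)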
    cuda.bindings.runtime.cudaStreamIsCapturing(stream)

    Returns a stream’s capture status.

    Return the capture status of stream via pCaptureStatus. After a successful call, *pCaptureStatus will contain one of the following:

    Note that, if this is called on cudaStreamLegacy (the “null stream”) while a blocking stream on the same device is capturing, it will return cudaErrorStreamCaptureImplicit and *pCaptureStatus is unspecified after the call. The blocking stream capture is not invalidated.

    When a blocking stream is capturing, the legacy stream is in an unusable state until the blocking stream capture is terminated. The legacy stream is not supported for stream capture, but attempted use would have an implicit dependency on the capturing stream(s).

    Parameters:
        stream (CUstream or cudaStream_t) – Stream to query

    Returns:
    cuda.bindings.runtime.cudaStreamGetCaptureInfo(stream)

    Query a stream’s capture state.

    Query stream state related to stream capture.

    If called on cudaStreamLegacy (the “null stream”) while a stream not created with cudaStreamNonBlocking is capturing, returns cudaErrorStreamCaptureImplicit.

    Valid data (other than capture status) is returned only if both of the following are true:

    Parameters:
        stream (CUstream or cudaStream_t) – The stream to query

    Returns:
        • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorStreamCaptureImplicit
        • captureStatus_out (cudaStreamCaptureStatus) – Location to return the capture status of the stream; required
        • id_out (unsigned long long) – Optional location to return an id for the capture sequence, which is unique over the lifetime of the process
        • graph_out (cudaGraph_t) – Optional location to return the graph being captured into. All operations other than destroy and node removal are permitted on the graph while the capture sequence is in progress. This API does not transfer ownership of the graph, which is transferred or destroyed at cudaStreamEndCapture. Note that the graph handle may be invalidated before end of capture for certain errors. Nodes that are or become unreachable from the original stream at cudaStreamEndCapture due to direct actions on the graph do not trigger cudaErrorStreamCaptureUnjoined.
        • dependencies_out (List[cudaGraphNode_t]) – Optional location to store a pointer to an array of nodes. The next node to be captured in the stream will depend on this set of nodes, absent operations such as event wait which modify this set. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated. The node handles may be copied out and are valid until they or the graph is destroyed. The driver-owned array may also be passed directly to APIs that operate on the graph (not the stream) without copying.
        • numDependencies_out (int) – Optional location to store the size of the array returned in dependencies_out.
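    A sketch of unpacking the result tuple, matching the return fields listed above and assuming stream is a handle from cudaStreamCreate:

        from cuda.bindings import runtime

        (err, status, seq_id, graph,
         deps, num_deps) = runtime.cudaStreamGetCaptureInfo(stream)
        if status == runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive:
            print(f"capture {seq_id} is active with {num_deps} dependencies")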
    cuda.bindings.runtime.cudaStreamGetCaptureInfo_v3(stream)

    Query a stream’s capture state (12.3+)

    Query stream state related to stream capture.

    If called on cudaStreamLegacy (the “null stream”) while a stream not created with cudaStreamNonBlocking is capturing, returns cudaErrorStreamCaptureImplicit.

    Valid data (other than capture status) is returned only if both of the following are true:

    If edgeData_out is non-NULL then dependencies_out must be as well. If dependencies_out is non-NULL and edgeData_out is NULL, but there is non-zero edge data for one or more of the current stream dependencies, the call will return cudaErrorLossyQuery.

    Parameters:
        stream (CUstream or cudaStream_t) – The stream to query

    Returns:
        • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorStreamCaptureImplicit, cudaErrorLossyQuery
        • captureStatus_out (cudaStreamCaptureStatus) – Location to return the capture status of the stream; required
        • id_out (unsigned long long) – Optional location to return an id for the capture sequence, which is unique over the lifetime of the process
        • graph_out (cudaGraph_t) – Optional location to return the graph being captured into. All operations other than destroy and node removal are permitted on the graph while the capture sequence is in progress. This API does not transfer ownership of the graph, which is transferred or destroyed at cudaStreamEndCapture. Note that the graph handle may be invalidated before end of capture for certain errors. Nodes that are or become unreachable from the original stream at cudaStreamEndCapture due to direct actions on the graph do not trigger cudaErrorStreamCaptureUnjoined.
        • dependencies_out (List[cudaGraphNode_t]) – Optional location to store a pointer to an array of nodes. The next node to be captured in the stream will depend on this set of nodes, absent operations such as event wait which modify this set. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated. The node handles may be copied out and are valid until they or the graph is destroyed. The driver-owned array may also be passed directly to APIs that operate on the graph (not the stream) without copying.
        • edgeData_out (List[cudaGraphEdgeData]) – Optional location to store a pointer to an array of graph edge data. This array parallels dependencies_out; the next node to be added has an edge to dependencies_out[i] with annotation edgeData_out[i] for each i. The array pointer is valid until the next API call which operates on the stream or until the capture is terminated.
        • numDependencies_out (int) – Optional location to store the size of the array returned in dependencies_out.
    cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies(stream, dependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, unsigned int flags)

    Update the set of dependencies in a capturing stream (11.3+)

    Modifies the dependency set of a capturing stream. The dependency set is the set of nodes that the next captured node in the stream will depend on.

    Valid flags are cudaStreamAddCaptureDependencies and cudaStreamSetCaptureDependencies. These control whether the set passed to the API is added to the existing set or replaces it. A flags value of 0 defaults to cudaStreamAddCaptureDependencies.

    Nodes that are removed from the dependency set via this API do not result in cudaErrorStreamCaptureUnjoined if they are unreachable from the stream at cudaStreamEndCapture.

    Returns cudaErrorIllegalState if the stream is not capturing.

    This API is new in CUDA 11.3. Developers requiring compatibility across minor versions of the CUDA driver to 11.0 should not use this API or provide a fallback.

    Parameters:
        • stream (CUstream or cudaStream_t) – The stream to update
        • dependencies (List[cudaGraphNode_t]) – The set of dependencies to add
        • numDependencies (size_t) – The size of the dependencies array
        • flags (unsigned int) – See above

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorIllegalState

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies_v2(stream, dependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], dependencyData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies, unsigned int flags)

    Update the set of dependencies in a capturing stream (12.3+)

    Modifies the dependency set of a capturing stream. The dependency set is the set of nodes that the next captured node in the stream will depend on.

    Valid flags are cudaStreamAddCaptureDependencies and cudaStreamSetCaptureDependencies. These control whether the set passed to the API is added to the existing set or replaces it. A flags value of 0 defaults to cudaStreamAddCaptureDependencies.

    Nodes that are removed from the dependency set via this API do not result in cudaErrorStreamCaptureUnjoined if they are unreachable from the stream at cudaStreamEndCapture.

    Returns cudaErrorIllegalState if the stream is not capturing.

    Parameters:
        • stream (CUstream or cudaStream_t) – The stream to update
        • dependencies (List[cudaGraphNode_t]) – The set of dependencies to add
        • dependencyData (List[cudaGraphEdgeData]) – Optional array of data associated with each dependency.
        • numDependencies (size_t) – The size of the dependencies array
        • flags (unsigned int) – See above

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorIllegalState

    Return type:
        cudaError_t

Event Management

    This section describes the event management functions of the CUDA runtime application programming interface.
    cuda.bindings.runtime.cudaEventCreate()

    Creates an event object.

    Creates an event object for the current device using cudaEventDefault.

    Returns:
    cuda.bindings.runtime.cudaEventCreateWithFlags(unsigned int flags)

    Creates an event object with the specified flags.

    Creates an event object for the current device with the specified flags. Valid flags include:

    Parameters:
        flags (unsigned int) – Flags for new event

    Returns:
    cuda.bindings.runtime.cudaEventRecord(event, stream)

    Records an event.

    Captures in event the contents of stream at the time of this call. event and stream must be on the same CUDA context. Calls such as cudaEventQuery() or cudaStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of stream after this call do not modify event. See note on default stream behavior for what is captured in the default case.

    cudaEventRecord() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cudaStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cudaEventRecord(). Before the first call to cudaEventRecord(), an event represents an empty set of work, so for example cudaEventQuery() would return cudaSuccess.

    Parameters:

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaEventRecordWithFlags(event, stream, unsigned int flags)

    Records an event.

    Captures in event the contents of stream at the time of this call. event and stream must be on the same CUDA context. Calls such as cudaEventQuery() or cudaStreamWaitEvent() will then examine or wait for completion of the work that was captured. Uses of stream after this call do not modify event. See note on default stream behavior for what is captured in the default case.

    cudaEventRecordWithFlags() can be called multiple times on the same event and will overwrite the previously captured state. Other APIs such as cudaStreamWaitEvent() use the most recently captured state at the time of the API call, and are not affected by later calls to cudaEventRecordWithFlags(). Before the first call to cudaEventRecordWithFlags(), an event represents an empty set of work, so for example cudaEventQuery() would return cudaSuccess.

    flags include:

    Parameters:
        • event (CUevent or cudaEvent_t) – Event to record
        • stream (CUstream or cudaStream_t) – Stream in which to record event
        • flags (unsigned int) – Parameters for the operation (see above)

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaEventQuery(event)

    Queries an event’s status.

    Queries the status of all work currently captured by event. See cudaEventRecord() for details on what is captured by an event.

    Returns cudaSuccess if all captured work has been completed, or cudaErrorNotReady if any captured work is incomplete.

    For the purposes of Unified Memory, a return value of cudaSuccess is equivalent to having called cudaEventSynchronize().

    Parameters:
        event (CUevent or cudaEvent_t) – Event to query

    Returns:
        cudaSuccess, cudaErrorNotReady, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaEventSynchronize(event)

    Waits for an event to complete.

    Waits until the completion of all work currently captured in event. See cudaEventRecord() for details on what is captured by an event.

    Waiting for an event that was created with the cudaEventBlockingSync flag will cause the calling CPU thread to block until the event has been completed by the device. If the cudaEventBlockingSync flag has not been set, then the CPU thread will busy-wait until the event has been completed by the device.

    Parameters:
        event (CUevent or cudaEvent_t) – Event to wait for

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaEventDestroy(event)

    Destroys an event object.

    Destroys the event specified by event.

    An event may be destroyed before it is complete (i.e., while cudaEventQuery() would return cudaErrorNotReady). In this case, the call does not block on completion of the event, and any associated resources will automatically be released asynchronously at completion.

    Parameters:
        event (CUevent or cudaEvent_t) – Event to destroy

    Returns:
        cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorLaunchFailure

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaEventElapsedTime(start, end)

    Computes the elapsed time between events.

    Computes the elapsed time between two events (in milliseconds with a resolution of around 0.5 microseconds).

    If either event was last recorded in a non-NULL stream, the resulting time may be greater than expected (even if both used the same stream handle). This happens because the cudaEventRecord() operation takes place asynchronously and there is no guarantee that the measured latency is actually just between the two events. Any number of other different stream operations could execute in between the two measured events, thus altering the timing in a significant way.

    If cudaEventRecord() has not been called on either event, then cudaErrorInvalidResourceHandle is returned. If cudaEventRecord() has been called on both events but one or both of them has not yet been completed (that is, cudaEventQuery() would return cudaErrorNotReady on at least one of the events), cudaErrorNotReady is returned. If either event was created with the cudaEventDisableTiming flag, then this function will return cudaErrorInvalidResourceHandle.

    Parameters:

    Returns:
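    The event functions in this section combine into the standard GPU timing pattern. A minimal sketch, assuming the elapsed milliseconds are returned after the error code:

        from cuda.bindings import runtime

        err, start = runtime.cudaEventCreate()
        err, stop = runtime.cudaEventCreate()
        err, stream = runtime.cudaStreamCreate()

        err, = runtime.cudaEventRecord(start, stream)
        # ... enqueue the work to be timed on `stream` ...
        err, = runtime.cudaEventRecord(stop, stream)

        err, = runtime.cudaEventSynchronize(stop)  # wait until `stop` completes
        err, ms = runtime.cudaEventElapsedTime(start, stop)
        print(f"elapsed: {ms:.3f} ms")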
External Resource Interoperability

    This section describes the external resource interoperability functions of the CUDA runtime application programming interface.
    cuda.bindings.runtime.cudaImportExternalMemory(cudaExternalMemoryHandleDesc memHandleDesc: Optional[cudaExternalMemoryHandleDesc])

    Imports an external memory object.

    Imports an externally allocated memory object and returns a handle to that in extMem_out.

    The properties of the handle being imported must be described in memHandleDesc. The cudaExternalMemoryHandleDesc structure is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where type specifies the type of handle being imported. cudaExternalMemoryHandleType is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If type is cudaExternalMemoryHandleTypeOpaqueFd, then cudaExternalMemoryHandleDesc::handle::fd must be a valid file descriptor referencing a memory object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

    If type is cudaExternalMemoryHandleTypeOpaqueWin32, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a memory object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to a memory object.

    If type is cudaExternalMemoryHandleTypeOpaqueWin32Kmt, then cudaExternalMemoryHandleDesc::handle::win32::handle must be non-NULL and cudaExternalMemoryHandleDesc::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the memory object are destroyed.

    If type is cudaExternalMemoryHandleTypeD3D12Heap, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to an ID3D12Heap object. This handle holds a reference to the underlying object. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to an ID3D12Heap object.

    If type is cudaExternalMemoryHandleTypeD3D12Resource, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to an ID3D12Resource object. This handle holds a reference to the underlying object. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to an ID3D12Resource object.

    If type is cudaExternalMemoryHandleTypeD3D11Resource, then exactly one of cudaExternalMemoryHandleDesc::handle::win32::handle and cudaExternalMemoryHandleDesc::handle::win32::name must not be NULL. If cudaExternalMemoryHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to an ID3D11Resource object. If cudaExternalMemoryHandleDesc::handle::win32::name is not NULL, then it must point to a NULL-terminated array of UTF-16 characters that refers to an ID3D11Resource object.

    If type is cudaExternalMemoryHandleTypeD3D11ResourceKmt, then cudaExternalMemoryHandleDesc::handle::win32::handle must be non-NULL and cudaExternalMemoryHandleDesc::handle::win32::name must be NULL. The handle specified must be a valid shared KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to an ID3D11Resource object.

    If type is cudaExternalMemoryHandleTypeNvSciBuf, then cudaExternalMemoryHandleDesc::handle::nvSciBufObject must be non-NULL and reference a valid NvSciBuf object. If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the application must use cudaWaitExternalSemaphoresAsync or cudaSignalExternalSemaphoresAsync as appropriate barriers to maintain coherence between CUDA and the other drivers. See cudaExternalSemaphoreWaitSkipNvSciBufMemSync and cudaExternalSemaphoreSignalSkipNvSciBufMemSync for memory synchronization.

    The size of the memory object must be specified in size.

    Specifying the flag cudaExternalMemoryDedicated in flags indicates that the resource is a dedicated resource. The definition of what constitutes a dedicated resource is outside the scope of this extension. This flag must be set if type is one of the following: cudaExternalMemoryHandleTypeD3D12Resource, cudaExternalMemoryHandleTypeD3D11Resource, cudaExternalMemoryHandleTypeD3D11ResourceKmt.

    Parameters:
        memHandleDesc (cudaExternalMemoryHandleDesc) – Memory import handle descriptor

    Returns:

    Notes

    If the Vulkan memory imported into CUDA is mapped on the CPU then the application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges as well as appropriate Vulkan pipeline barriers to maintain coherence between CPU and GPU. For more information on these APIs, please refer to the “Synchronization and Cache Control” chapter from the Vulkan specification.
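    A heavily hedged sketch of importing an opaque file descriptor and mapping a buffer onto it. fd and nbytes are placeholders for a descriptor and size exported by the external API (e.g. Vulkan); the field names follow the descriptor structures described in this section, assuming the bindings expose them as attribute-based classes:

        from cuda.bindings import runtime

        desc = runtime.cudaExternalMemoryHandleDesc()
        desc.type = runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd
        desc.handle.fd = fd       # ownership moves to the CUDA driver on success
        desc.size = nbytes

        err, ext_mem = runtime.cudaImportExternalMemory(desc)

        buf = runtime.cudaExternalMemoryBufferDesc()
        buf.offset = 0
        buf.size = nbytes
        buf.flags = 0             # must be zero
        err, dev_ptr = runtime.cudaExternalMemoryGetMappedBuffer(ext_mem, buf)
        # ... use dev_ptr; free with cudaFree, then cudaDestroyExternalMemory ...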
    cuda.bindings.runtime.cudaExternalMemoryGetMappedBuffer(extMem, cudaExternalMemoryBufferDesc bufferDesc: Optional[cudaExternalMemoryBufferDesc])

    Maps a buffer onto an imported memory object.

    Maps a buffer onto an imported memory object and returns a device pointer in devPtr.

    The properties of the buffer being mapped must be described in bufferDesc. The cudaExternalMemoryBufferDesc structure is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where offset is the offset in the memory object where the buffer’s base address is. size is the size of the buffer. flags must be zero.

    The offset and size have to be suitably aligned to match the requirements of the external API. Mapping two buffers whose ranges overlap may or may not result in the same virtual address being returned for the overlapped portion. In such cases, the application must ensure that all accesses to that region from the GPU are volatile. Otherwise writes made via one address are not guaranteed to be visible via the other address, even if they’re issued by the same thread. It is recommended that applications map the combined range instead of mapping separate buffers and then apply the appropriate offsets to the returned pointer to derive the individual buffers.

    The returned pointer devPtr must be freed using cudaFree.

    Parameters:

    Returns:
    cuda.bindings.runtime.cudaExternalMemoryGetMappedMipmappedArray(extMem, cudaExternalMemoryMipmappedArrayDesc mipmapDesc: Optional[cudaExternalMemoryMipmappedArrayDesc])

    Maps a CUDA mipmapped array onto an external memory object.

    Maps a CUDA mipmapped array onto an external object and returns a handle to it in mipmap.

    The properties of the CUDA mipmapped array being mapped must be described in mipmapDesc. The structure cudaExternalMemoryMipmappedArrayDesc is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where offset is the offset in the memory object where the base level of the mipmap chain is. formatDesc describes the format of the data. extent specifies the dimensions of the base level of the mipmap chain. flags are flags associated with CUDA mipmapped arrays. For further details, please refer to the documentation for cudaMalloc3DArray. Note that if the mipmapped array is bound as a color target in the graphics API, then the flag cudaArrayColorAttachment must be specified in flags. numLevels specifies the total number of levels in the mipmap chain.

    The returned CUDA mipmapped array must be freed using cudaFreeMipmappedArray.

    Parameters:

    Returns:

    Notes

    If type is cudaExternalMemoryHandleTypeNvSciBuf, then numLevels must not be greater than 1.
    cuda.bindings.runtime.cudaDestroyExternalMemory(extMem)

    Destroys an external memory object.

    Destroys the specified external memory object. Any existing buffers and CUDA mipmapped arrays mapped onto this object must no longer be used and must be explicitly freed using cudaFree and cudaFreeMipmappedArray respectively.

    Parameters:
        extMem (cudaExternalMemory_t) – External memory object to be destroyed

    Returns:
        cudaSuccess, cudaErrorInvalidResourceHandle

    Return type:
        cudaError_t
    cuda.bindings.runtime.cudaImportExternalSemaphore(cudaExternalSemaphoreHandleDesc semHandleDesc: Optional[cudaExternalSemaphoreHandleDesc])

    Imports an external semaphore.

    Imports an externally allocated synchronization object and returns a handle to that in extSem_out.

    The properties of the handle being imported must be described in semHandleDesc. The cudaExternalSemaphoreHandleDesc is defined as follows:

    View CUDA Toolkit Documentation for a C++ code example

    where type specifies the type of handle being imported. cudaExternalSemaphoreHandleType is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    If type is cudaExternalSemaphoreHandleTypeOpaqueFd, then cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

    If type is cudaExternalSemaphoreHandleTypeOpaqueWin32, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object.

    If type is cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then cudaExternalSemaphoreHandleDesc::handle::win32::handle must be non-NULL and cudaExternalSemaphoreHandleDesc::handle::win32::name must be NULL. The handle specified must be a globally shared KMT handle. This handle does not hold a reference to the underlying object, and thus will be invalid when all references to the synchronization object are destroyed.

    If type is cudaExternalSemaphoreHandleTypeD3D12Fence, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D12Device::CreateSharedHandle when referring to an ID3D12Fence object. This handle holds a reference to the underlying object. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D12Fence object.

    If type is cudaExternalSemaphoreHandleTypeD3D11Fence, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by ID3D11Fence::CreateSharedHandle. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object that refers to a valid ID3D11Fence object.

    If type is cudaExternalSemaphoreHandleTypeNvSciSync, then cudaExternalSemaphoreHandleDesc::handle::nvSciSyncObj represents a valid NvSciSyncObj.

    If type is cudaExternalSemaphoreHandleTypeKeyedMutex, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that is returned by IDXGIResource1::CreateSharedHandle when referring to an IDXGIKeyedMutex object.

    If type is cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then cudaExternalSemaphoreHandleDesc::handle::win32::handle must be non-NULL and cudaExternalSemaphoreHandleDesc::handle::win32::name must be NULL. The handle specified must represent a valid KMT handle that is returned by IDXGIResource::GetSharedHandle when referring to an IDXGIKeyedMutex object.

    If type is cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, then cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file descriptor referencing a synchronization object. Ownership of the file descriptor is transferred to the CUDA driver when the handle is imported successfully. Performing any operations on the file descriptor after it is imported results in undefined behavior.

    If type is cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32, then exactly one of cudaExternalSemaphoreHandleDesc::handle::win32::handle and cudaExternalSemaphoreHandleDesc::handle::win32::name must not be NULL. If cudaExternalSemaphoreHandleDesc::handle::win32::handle is not NULL, then it must represent a valid shared NT handle that references a synchronization object. Ownership of this handle is not transferred to CUDA after the import operation, so the application must release the handle using the appropriate system call. If cudaExternalSemaphoreHandleDesc::handle::win32::name is not NULL, then it must name a valid synchronization object.

    Parameters:
        semHandleDesc (cudaExternalSemaphoreHandleDesc) – Semaphore import handle descriptor

    Returns:
    cuda.bindings.runtime.cudaSignalExternalSemaphoresAsync(extSemArray: Optional[Tuple[cudaExternalSemaphore_t] | List[cudaExternalSemaphore_t]], paramsArray: Optional[Tuple[cudaExternalSemaphoreSignalParams] | List[cudaExternalSemaphoreSignalParams]], unsigned int numExtSems, stream)

    Signals a set of external semaphore objects.

    Enqueues a signal operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

    The exact semantics of signaling a semaphore depend on the type of the object.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeOpaqueFd, cudaExternalSemaphoreHandleTypeOpaqueWin32, cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then signaling the semaphore will set it to the signaled state.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeD3D12Fence, cudaExternalSemaphoreHandleTypeD3D11Fence, cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32, then the semaphore will be set to the value specified in cudaExternalSemaphoreSignalParams::params::fence::value.

    If the semaphore object is of the type cudaExternalSemaphoreHandleTypeNvSciSync, this API sets cudaExternalSemaphoreSignalParams::params::nvSciSync::fence to a value that can be used by subsequent waiters of the same NvSciSync object to order operations with those currently submitted in stream. Such an update will overwrite previous contents of cudaExternalSemaphoreSignalParams::params::nvSciSync::fence. By default, signaling such an external semaphore object causes appropriate memory synchronization operations to be performed over all the external memory objects that are imported as cudaExternalMemoryHandleTypeNvSciBuf. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag cudaExternalSemaphoreSignalSkipNvSciBufMemSync, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for semaphore object of the type cudaExternalSemaphoreHandleTypeNvSciSync, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrSignal, this API will return cudaErrorNotSupported.

    cudaExternalSemaphoreSignalParams::params::nvSciSync::fence associated with semaphore object of the type cudaExternalSemaphoreHandleTypeNvSciSync can be deterministic. For this the NvSciSyncAttrList used to create the semaphore object must have the value of the NvSciSyncAttrKey_RequireDeterministicFences key set to true. Deterministic fences allow users to enqueue a wait over the semaphore object even before the corresponding signal is enqueued. For such a semaphore object, CUDA guarantees that each signal operation will increment the fence value by ‘1’. Users are expected to track the count of signals enqueued on the semaphore object and insert waits accordingly. When such a semaphore object is signaled from multiple streams, due to concurrent stream execution, it is possible that the order in which the semaphore gets signaled is nondeterministic. This could lead to waiters of the semaphore getting unblocked incorrectly. Users are expected to handle such situations, either by not using the same semaphore object with deterministic fence support enabled in different streams or by adding explicit dependency amongst such streams so that the semaphore is signaled in order.

    If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeKeyedMutex, cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then the keyed mutex will be released with the key specified in cudaExternalSemaphoreSignalParams::params::keyedmutex::key.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidResourceHandle

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
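A minimal sketch of signaling a fence-type semaphore from Python. It assumes `ext_sem` was obtained earlier via cudaImportExternalSemaphore and that `stream` is an existing cudaStream_t; the struct-field spelling follows the cudaExternalSemaphoreSignalParams layout described above.

```python
from cuda.bindings import runtime

# Hedged sketch: `ext_sem` (an imported cudaExternalSemaphore_t) and
# `stream` (a cudaStream_t) are assumed to exist already.
params = runtime.cudaExternalSemaphoreSignalParams()
params.params.fence.value = 2  # target value for fence/timeline-type semaphores

(err,) = runtime.cudaSignalExternalSemaphoresAsync([ext_sem], [params], 1, stream)
assert err == runtime.cudaError_t.cudaSuccess
```

A matching cudaWaitExternalSemaphoresAsync call takes the same shape with cudaExternalSemaphoreWaitParams instead.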
cuda.bindings.runtime.cudaWaitExternalSemaphoresAsync(extSemArray: Optional[Tuple[cudaExternalSemaphore_t] | List[cudaExternalSemaphore_t]], paramsArray: Optional[Tuple[cudaExternalSemaphoreWaitParams] | List[cudaExternalSemaphoreWaitParams]], unsigned int numExtSems, stream)

Waits on a set of external semaphore objects.

Enqueues a wait operation on a set of externally allocated semaphore objects in the specified stream. The operations will be executed when all prior operations in the stream complete.

The exact semantics of waiting on a semaphore depends on the type of the object.

If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeOpaqueFd, cudaExternalSemaphoreHandleTypeOpaqueWin32, cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt then waiting on the semaphore will wait until the semaphore reaches the signaled state. The semaphore will then be reset to the unsignaled state. Therefore for every signal operation, there can only be one wait operation.

If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeD3D12Fence, cudaExternalSemaphoreHandleTypeD3D11Fence, cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd, cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 then waiting on the semaphore will wait until the value of the semaphore is greater than or equal to cudaExternalSemaphoreWaitParams::params::fence::value.

If the semaphore object is of the type cudaExternalSemaphoreHandleTypeNvSciSync then, waiting on the semaphore will wait until the cudaExternalSemaphoreSignalParams::params::nvSciSync::fence is signaled by the signaler of the NvSciSyncObj that was associated with this semaphore object. By default, waiting on such an external semaphore object causes appropriate memory synchronization operations to be performed over all external memory objects that are imported as cudaExternalMemoryHandleTypeNvSciBuf. This ensures that any subsequent accesses made by other importers of the same set of NvSciBuf memory object(s) are coherent. These operations can be skipped by specifying the flag cudaExternalSemaphoreWaitSkipNvSciBufMemSync, which can be used as a performance optimization when data coherency is not required. But specifying this flag in scenarios where data coherency is required results in undefined behavior. Also, for semaphore object of the type cudaExternalSemaphoreHandleTypeNvSciSync, if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in cudaDeviceGetNvSciSyncAttributes to cudaNvSciSyncAttrWait, this API will return cudaErrorNotSupported.

If the semaphore object is any one of the following types: cudaExternalSemaphoreHandleTypeKeyedMutex, cudaExternalSemaphoreHandleTypeKeyedMutexKmt, then the keyed mutex will be acquired when it is released with the key specified in cudaExternalSemaphoreWaitParams::params::keyedmutex::key or until the timeout specified by cudaExternalSemaphoreWaitParams::params::keyedmutex::timeoutMs has elapsed. The timeout interval can either be a finite value specified in milliseconds or an infinite value. In case an infinite value is specified the timeout never elapses. The Windows INFINITE macro must be used to specify an infinite timeout.

Parameters:

• extSemArray (List[cudaExternalSemaphore_t]) – External semaphores to be waited on
• paramsArray (List[cudaExternalSemaphoreWaitParams]) – Array of semaphore parameters
• numExtSems (unsigned int) – Number of semaphores to wait on
• stream (CUstream or cudaStream_t) – Stream to enqueue the wait operations in

Returns:

cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorTimeout

Return type:

cudaError_t
cuda.bindings.runtime.cudaDestroyExternalSemaphore(extSem)

Destroys an external semaphore.

Destroys an external semaphore object and releases any references to the underlying resource. Any outstanding signals or waits must have completed before the semaphore is destroyed.

Parameters:

extSem (cudaExternalSemaphore_t) – External semaphore to be destroyed

Returns:

cudaSuccess, cudaErrorInvalidResourceHandle

Return type:

cudaError_t
Execution Control

This section describes the execution control functions of the CUDA runtime application programming interface.

Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module.
cuda.bindings.runtime.cudaFuncSetCacheConfig(func, cacheConfig: cudaFuncCache)

Sets the preferred cache configuration for a device function.

On devices where the L1 cache and shared memory use the same hardware resources, this sets through cacheConfig the preferred cache configuration for the function specified via func. This is only a preference. The runtime will use the requested configuration if possible, but it is free to choose a different configuration if required to execute func.

func is a device function symbol and must be declared as a __global__ function. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned. For templated functions, pass the function symbol as follows: func_name<template_arg_0,…,template_arg_N>

This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.

Launching a kernel with a different preference than the most recent preference setting may insert a device-side synchronization point.

The supported cache configurations are:

• cudaFuncCachePreferNone: no preference for shared memory or L1 (default)
• cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
• cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
• cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory

Parameters:

• func (Any) – Device function symbol
• cacheConfig (cudaFuncCache) – Requested cache configuration

Returns:

cudaSuccess, cudaErrorInvalidDeviceFunction

Return type:

cudaError_t

See also

cudaFuncSetCacheConfig (C++ API), cudaFuncGetAttributes (C API), cudaLaunchKernel (C API), cuFuncSetCacheConfig
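For illustration, a hedged sketch of requesting a shared-memory-preferring configuration. `kernel` stands in for a device function handle (for example, one built with NVRTC and the driver API) and is an assumption, not something this entry provides.

```python
from cuda.bindings import runtime

# `kernel` is a hypothetical device-function handle obtained elsewhere.
(err,) = runtime.cudaFuncSetCacheConfig(
    kernel, runtime.cudaFuncCache.cudaFuncCachePreferShared
)
# Only a preference: the runtime may still pick another configuration.
assert err == runtime.cudaError_t.cudaSuccess
```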
cuda.bindings.runtime.cudaFuncGetAttributes(func)

Find out attributes for a given function.

This function obtains the attributes of a function specified via func. func is a device function symbol and must be declared as a __global__ function. The fetched attributes are placed in attr. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned. For templated functions, pass the function symbol as follows: func_name<template_arg_0,…,template_arg_N>

Note that some function attributes such as maxThreadsPerBlock may vary based on the device that is currently being used.

Parameters:

func (Any) – Device function symbol

Returns:

See also

cudaFuncSetCacheConfig (C API), cudaFuncGetAttributes (C++ API), cudaLaunchKernel (C API), cuFuncGetAttribute
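A hedged sketch of querying attributes. In these bindings the call is assumed to return the error code together with a cudaFuncAttributes object; `kernel` is again an assumed, pre-built device function handle.

```python
from cuda.bindings import runtime

# `kernel` is a hypothetical device-function handle obtained elsewhere.
err, attrs = runtime.cudaFuncGetAttributes(kernel)
if err == runtime.cudaError_t.cudaSuccess:
    # maxThreadsPerBlock may vary with the currently selected device.
    print(attrs.maxThreadsPerBlock, attrs.sharedSizeBytes)
```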
cuda.bindings.runtime.cudaFuncSetAttribute(func, attr: cudaFuncAttribute, int value)

Set attributes for a given function.

This function sets the attributes of a function specified via func. The parameter func must be a pointer to a function that executes on the device. The parameter specified by func must be declared as a __global__ function. The enumeration defined by attr is set to the value defined by value. If the specified function does not exist, then cudaErrorInvalidDeviceFunction is returned. If the specified attribute cannot be written, or if the value is incorrect, then cudaErrorInvalidValue is returned.

Valid values for attr are:

• cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. The sum of this value and the function attribute sharedSizeBytes cannot exceed the device attribute cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture.
• cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. See cudaDevAttrMaxSharedMemoryPerMultiprocessor. This is only a hint, and the driver can choose a different ratio if required to execute the function.
• cudaFuncAttributeRequiredClusterWidth: The required cluster width in blocks. The width, height, and depth values must either all be 0 or all be positive. The validity of the cluster dimensions is checked at launch time. If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return cudaErrorNotPermitted.
• cudaFuncAttributeRequiredClusterHeight: The required cluster height in blocks. The width, height, and depth values must either all be 0 or all be positive. The validity of the cluster dimensions is checked at launch time. If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return cudaErrorNotPermitted.
• cudaFuncAttributeRequiredClusterDepth: The required cluster depth in blocks. The width, height, and depth values must either all be 0 or all be positive. The validity of the cluster dimensions is checked at launch time. If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return cudaErrorNotPermitted.
• cudaFuncAttributeNonPortableClusterSizeAllowed: Indicates whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed.
• cudaFuncAttributeClusterSchedulingPolicyPreference: The block scheduling policy of a function. The value type is cudaClusterSchedulingPolicy.

See also

cudaLaunchKernel (C++ API), cudaFuncSetCacheConfig (C++ API), cudaFuncGetAttributes (C API)

Parameters:

• func (Any) – Function to set attributes for
• attr (cudaFuncAttribute) – Attribute to set
• value (int) – Value to set

Returns:

cudaSuccess, cudaErrorInvalidDeviceFunction, cudaErrorInvalidValue

Return type:

cudaError_t
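As a hedged example, opting a kernel into a larger dynamic shared-memory limit (the 64 KiB figure is arbitrary and device-dependent); `kernel` is an assumed device-function handle.

```python
from cuda.bindings import runtime

# `kernel` is a hypothetical device-function handle obtained elsewhere.
(err,) = runtime.cudaFuncSetAttribute(
    kernel,
    runtime.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize,
    64 * 1024,  # must stay within cudaDevAttrMaxSharedMemoryPerBlockOptin
)
assert err == runtime.cudaError_t.cudaSuccess
```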
cuda.bindings.runtime.cudaLaunchHostFunc(stream, fn, userData)

Enqueues a host function call in a stream.

Enqueues a host function to run in a stream. The function will be called after currently enqueued work and will block work added after it.

The host function must not make any CUDA API calls. Attempting to use a CUDA API may result in cudaErrorNotPermitted, but this is not required. The host function must not perform any synchronization that may depend on outstanding CUDA work not mandated to run earlier. Host functions without a mandated order (such as in independent streams) execute in undefined order and may be serialized.

For the purposes of Unified Memory, execution makes a number of guarantees:

• The stream is considered idle for the duration of the function's execution. Thus, for example, the function may always use memory attached to the stream it was enqueued in.
• The start of execution of the function has the same effect as synchronizing an event recorded in the same stream immediately prior to the function. It thus synchronizes streams which have been "joined" prior to the function.
• Adding device work to any stream does not have the effect of making the stream active until all preceding host functions and stream callbacks have executed. Thus, for example, a function might use global attached memory even if work has been added to another stream, if the work has been ordered behind the function call with an event.
• Completion of the function does not cause a stream to become active except as described above. The stream will remain idle if no device work follows the function, and will remain idle across consecutive host functions or stream callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a host function at the end of the stream.

Note that, in contrast to cuStreamAddCallback, the function will not be called in the event of an error in the CUDA context.

Parameters:

• hStream (CUstream or cudaStream_t) – Stream to enqueue function call in
• fn (cudaHostFn_t) – The function to call once preceding stream operations are complete
• userData (Any) – User-specified data to be passed to the function

Returns:

cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorInvalidValue, cudaErrorNotSupported

Return type:

cudaError_t
Occupancy

This section describes the occupancy calculation functions of the CUDA runtime application programming interface.

Besides the occupancy calculator functions (cudaOccupancyMaxActiveBlocksPerMultiprocessor and cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags), there are also C++ only occupancy-based launch configuration functions documented in the C++ API Routines module.

See cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), cudaOccupancyAvailableDynamicSMemPerBlock (C++ API).
cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dynamicSMemSize)

Returns occupancy for a device function.

Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor for the device function.

Parameters:

• func (Any) – Kernel function for which occupancy is calculated
• blockSize (int) – Block size the kernel is intended to be launched with
• dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes

Returns:

See also

cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, cudaOccupancyMaxPotentialBlockSize, cudaOccupancyMaxPotentialBlockSizeWithFlags, cudaOccupancyMaxPotentialBlockSizeVariableSMem, cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags, cudaOccupancyAvailableDynamicSMemPerBlock, cuOccupancyMaxActiveBlocksPerMultiprocessor
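A hedged occupancy-query sketch: the binding is assumed to return the error code plus the block count, `kernel` is an assumed device-function handle, and 256 threads with zero dynamic shared memory are illustrative values.

```python
from cuda.bindings import runtime

# `kernel` is a hypothetical device-function handle obtained elsewhere.
block_size = 256
dyn_smem = 0
err, num_blocks = runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    kernel, block_size, dyn_smem
)
if err == runtime.cudaError_t.cudaSuccess:
    print(f"max active blocks per SM at blockSize={block_size}: {num_blocks}")
```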
cuda.bindings.runtime.cudaOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize)

Returns dynamic shared memory available per block when launching numBlocks blocks on SM.

Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow numBlocks blocks per SM.

Parameters:

• func (Any) – Kernel function for which occupancy is calculated
• numBlocks (int) – Number of blocks to fit on SM
• blockSize (int) – Size of the block

Returns:

See also

cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, cudaOccupancyMaxPotentialBlockSize, cudaOccupancyMaxPotentialBlockSizeWithFlags, cudaOccupancyMaxPotentialBlockSizeVariableSMem, cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags, cudaOccupancyAvailableDynamicSMemPerBlock
cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, size_t dynamicSMemSize, unsigned int flags)

Returns occupancy for a device function with the specified flags.

Returns in *numBlocks the maximum number of active blocks per streaming multiprocessor for the device function.

The flags parameter controls how special cases are handled. Valid flags include:

• cudaOccupancyDefault: keeps the default behavior as cudaOccupancyMaxActiveBlocksPerMultiprocessor
• cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior on platforms where global caching affects occupancy. On such platforms, if caching is enabled, but per-block SM resource usage would result in zero occupancy, the occupancy calculator will calculate the occupancy as if caching is disabled. Setting this flag makes the occupancy calculator return 0 in such cases. More information can be found about this feature in the "Unified L1/Texture Cache" section of the Maxwell tuning guide.

Parameters:

• func (Any) – Kernel function for which occupancy is calculated
• blockSize (int) – Block size the kernel is intended to be launched with
• dynamicSMemSize (size_t) – Per-block dynamic shared memory usage intended, in bytes
• flags (unsigned int) – Requested behavior for the occupancy calculator

Returns:

See also

cudaOccupancyMaxActiveBlocksPerMultiprocessor, cudaOccupancyMaxPotentialBlockSize, cudaOccupancyMaxPotentialBlockSizeWithFlags, cudaOccupancyMaxPotentialBlockSizeVariableSMem, cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags, cudaOccupancyAvailableDynamicSMemPerBlock, cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
Memory Management

This section describes the memory management functions of the CUDA runtime application programming interface.

Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module.
cuda.bindings.runtime.cudaMallocManaged(size_t size, unsigned int flags)

Allocates memory that will be automatically managed by the Unified Memory system.

Allocates size bytes of managed memory on the device and returns in *devPtr a pointer to the allocated memory. If the device doesn't support allocating managed memory, cudaErrorNotSupported is returned. Support for managed memory can be queried using the device attribute cudaDevAttrManagedMemory. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. If size is 0, cudaMallocManaged returns cudaErrorInvalidValue. The pointer is valid on the CPU and on all GPUs in the system that support managed memory. All accesses to this pointer must obey the Unified Memory programming model.

flags specifies the default stream association for this allocation. flags must be one of cudaMemAttachGlobal or cudaMemAttachHost. The default value for flags is cudaMemAttachGlobal. If cudaMemAttachGlobal is specified, then this memory is accessible from any stream on any device. If cudaMemAttachHost is specified, then the allocation should not be accessed from devices that have a zero value for the device attribute cudaDevAttrConcurrentManagedAccess; an explicit call to cudaStreamAttachMemAsync will be required to enable access on such devices.

If the association is later changed via cudaStreamAttachMemAsync to a single stream, the default association, as specified during cudaMallocManaged, is restored when that stream is destroyed. For managed variables, the default association is always cudaMemAttachGlobal. Note that destroying a stream is an asynchronous operation, and as a result, the change to default association won't happen until all work in the stream has completed.

Memory allocated with cudaMallocManaged should be released with cudaFree.

Device memory oversubscription is possible for GPUs that have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Managed memory on such GPUs may be evicted from device memory to host memory at any time by the Unified Memory driver in order to make room for other allocations.

In a system where all GPUs have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this API returns and instead may be populated on access. In such systems, managed memory can migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to maintain data locality and prevent excessive page faults to the extent possible. The application can also guide the driver about memory usage patterns via cudaMemAdvise. The application can also explicitly migrate memory to a desired processor's memory via cudaMemPrefetchAsync.

In a multi-GPU system where all of the GPUs have a zero value for the device attribute cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support with each other, the physical storage for managed memory is created on the GPU which is active at the time cudaMallocManaged is called. All other GPUs will reference the data at reduced bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate memory among such GPUs.

In a multi-GPU system where not all GPUs have peer-to-peer support with each other and where the value of the device attribute cudaDevAttrConcurrentManagedAccess is zero for at least one of those GPUs, the location chosen for physical storage of managed memory is system-dependent.

• On Linux, the location chosen will be device memory as long as the current set of active contexts are on devices that either have peer-to-peer support with each other or have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. If there is an active context on a GPU that does not have a non-zero value for that device attribute and it does not have peer-to-peer support with the other devices that have active contexts on them, then the location for physical storage will be 'zero-copy' or host memory. Note that this means that managed memory that is located in device memory is migrated to host memory if a new context is created on a GPU that doesn't have a non-zero value for the device attribute and does not support peer-to-peer with at least one of the other devices that has an active context. This in turn implies that context creation may fail if there is insufficient host memory to migrate all managed allocations.
• On Windows, the physical storage is always created in 'zero-copy' or host memory. All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to restrict CUDA to only use those GPUs that have peer-to-peer support. Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero value to force the driver to always use device memory for physical storage. When this environment variable is set to a non-zero value, all devices used in that process that support managed memory have to be peer-to-peer compatible with each other. The error cudaErrorInvalidDevice will be returned if a device that supports managed memory is used and it is not peer-to-peer compatible with any of the other managed memory supporting devices that were previously used in that process, even if cudaDeviceReset has been called on those devices. These environment variables are described in the CUDA programming guide under the "CUDA environment variables" section.

Parameters:

• size (size_t) – Requested allocation size in bytes
• flags (unsigned int) – Must be either cudaMemAttachGlobal or cudaMemAttachHost (defaults to cudaMemAttachGlobal)

Returns:
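A minimal managed-allocation sketch. It assumes runtime.cudaMemAttachGlobal is exposed as a module-level constant, the way other flag constants are in these bindings.

```python
from cuda.bindings import runtime

nbytes = 1 << 20  # 1 MiB
# Assumption: cudaMemAttachGlobal is a module-level flag constant.
err, ptr = runtime.cudaMallocManaged(nbytes, runtime.cudaMemAttachGlobal)
assert err == runtime.cudaError_t.cudaSuccess
# ... the pointer is usable from the host and any managed-memory-capable GPU ...
(err,) = runtime.cudaFree(ptr)  # managed memory is released with cudaFree
```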
cuda.bindings.runtime.cudaMalloc(size_t size)

Allocate memory on the device.

Allocates size bytes of linear memory on the device and returns in *devPtr a pointer to the allocated memory. The allocated memory is suitably aligned for any kind of variable. The memory is not cleared. cudaMalloc() returns cudaErrorMemoryAllocation in case of failure.

The device version of cudaFree cannot be used with a *devPtr allocated using the host API, and vice versa.

Parameters:

size (size_t) – Requested allocation size in bytes

Returns:
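A round-trip sketch with cudaMalloc and cudaMemcpy, assuming NumPy for the host buffers; raw host addresses are passed as integers via ctypes.data.

```python
import numpy as np
from cuda.bindings import runtime

host_src = np.arange(1024, dtype=np.float32)
host_dst = np.empty_like(host_src)

err, dptr = runtime.cudaMalloc(host_src.nbytes)
assert err == runtime.cudaError_t.cudaSuccess

kind = runtime.cudaMemcpyKind
(err,) = runtime.cudaMemcpy(dptr, host_src.ctypes.data, host_src.nbytes,
                            kind.cudaMemcpyHostToDevice)
(err,) = runtime.cudaMemcpy(host_dst.ctypes.data, dptr, host_dst.nbytes,
                            kind.cudaMemcpyDeviceToHost)
(err,) = runtime.cudaFree(dptr)
assert (host_src == host_dst).all()
```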
cuda.bindings.runtime.cudaMallocHost(size_t size)

Allocates page-locked memory on the host.

Allocates size bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cudaMemcpy().

On systems where pageableMemoryAccessUsesHostPageTables is true, cudaMallocHost may not page-lock the allocated memory.

Page-locking excessive amounts of memory with cudaMallocHost() may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

Parameters:

size (size_t) – Requested allocation size in bytes

Returns:
cuda.bindings.runtime.cudaMallocPitch(size_t width, size_t height)

Allocates pitched memory on the device.

Allocates at least width (in bytes) * height bytes of linear memory on the device and returns in *devPtr a pointer to the allocated memory. The function may pad the allocation to ensure that corresponding pointers in any given row will continue to meet the alignment requirements for coalescing as the address is updated from row to row. The pitch returned in *pitch by cudaMallocPitch() is the width in bytes of the allocation. The intended usage of pitch is as a separate parameter of the allocation, used to compute addresses within the 2D array. Given the row and column of an array element of type T, the address is computed as:

View CUDA Toolkit Documentation for a C++ code example

For allocations of 2D arrays, it is recommended that programmers consider performing pitch allocations using cudaMallocPitch(). Due to pitch alignment restrictions in the hardware, this is especially true if the application will be performing 2D memory copies between different regions of device memory (whether linear memory or CUDA arrays).

Parameters:

• width (size_t) – Requested pitched allocation width (in bytes)
• height (size_t) – Requested pitched allocation height

Returns:
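Since the C++ snippet is only referenced above, here is a hedged Python sketch of the same address arithmetic: element (row, column) of a pitched allocation lives at baseAddress + row * pitch + column * sizeof(T).

```python
from cuda.bindings import runtime

width_bytes, height = 640 * 4, 480  # 640 float32 columns, 480 rows
err, dptr, pitch = runtime.cudaMallocPitch(width_bytes, height)
assert err == runtime.cudaError_t.cudaSuccess

def element_addr(row: int, col: int, elem_size: int = 4) -> int:
    # Device address of element (row, col); note pitch >= width_bytes.
    return int(dptr) + row * int(pitch) + col * elem_size

(err,) = runtime.cudaFree(dptr)
```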
cuda.bindings.runtime.cudaMallocArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], size_t width, size_t height, unsigned int flags)

Allocate an array on the device.

Allocates a CUDA array according to the cudaChannelFormatDesc structure desc and returns a handle to the new CUDA array in *array.

The cudaChannelFormatDesc is defined as:

View CUDA Toolkit Documentation for a C++ code example

where cudaChannelFormatKind is one of cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

The flags parameter enables different options to be specified that affect the allocation, as follows.

• cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation
• cudaArraySurfaceLoadStore: Allocates an array that can be read from or written to using a surface reference
• cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the array.
• cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.
• cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.

width and height must meet certain size requirements. See cudaMalloc3DArray() for more details.

Parameters:

• desc (cudaChannelFormatDesc) – Requested channel format
• width (size_t) – Requested array allocation width
• height (size_t) – Requested array allocation height
• flags (unsigned int) – Requested properties of allocated array

Returns:
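A hedged sketch of describing and allocating a 2D single-channel float array; it assumes cudaChannelFormatDesc fields can be set attribute-style, as other struct wrappers in these bindings allow.

```python
from cuda.bindings import runtime

# Assumption: struct fields are settable as attributes.
desc = runtime.cudaChannelFormatDesc()
desc.x, desc.y, desc.z, desc.w = 32, 0, 0, 0  # one 32-bit channel
desc.f = runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat

err, arr = runtime.cudaMallocArray(desc, 1024, 768, 0)  # flags=0: cudaArrayDefault
assert err == runtime.cudaError_t.cudaSuccess
(err,) = runtime.cudaFreeArray(arr)
```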
cuda.bindings.runtime.cudaFree(devPtr)

Frees memory on the device.

Frees the memory space pointed to by devPtr, which must have been returned by a previous call to one of the following memory allocation APIs - cudaMalloc(), cudaMallocPitch(), cudaMallocManaged(), cudaMallocAsync(), cudaMallocFromPoolAsync().

Note - This API will not perform any implicit synchronization when the pointer was allocated with cudaMallocAsync or cudaMallocFromPoolAsync. Callers must ensure that all accesses to these pointers have completed before invoking cudaFree. For best performance and memory reuse, users should use cudaFreeAsync to free memory allocated via the stream ordered memory allocator. For all other pointers, this API may perform implicit synchronization.

If cudaFree(devPtr) has already been called before, an error is returned. If devPtr is 0, no operation is performed. cudaFree() returns cudaErrorInvalidValue in case of failure.

The device version of cudaFree cannot be used with a *devPtr allocated using the host API, and vice versa.

Parameters:

devPtr (Any) – Device pointer to memory to free

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t
cuda.bindings.runtime.cudaFreeHost(ptr)

Frees page-locked memory.

Frees the memory space pointed to by hostPtr, which must have been returned by a previous call to cudaMallocHost() or cudaHostAlloc().

Parameters:

ptr (Any) – Pointer to memory to free

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t
cuda.bindings.runtime.cudaFreeArray(array)

Frees an array on the device.

Frees the CUDA array array, which must have been returned by a previous call to cudaMallocArray(). If array is 0, no operation is performed.

Parameters:

array (cudaArray_t) – Pointer to array to free

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t
cuda.bindings.runtime.cudaFreeMipmappedArray(mipmappedArray)

Frees a mipmapped array on the device.

Frees the CUDA mipmapped array mipmappedArray, which must have been returned by a previous call to cudaMallocMipmappedArray(). If mipmappedArray is 0, no operation is performed.

Parameters:

mipmappedArray (cudaMipmappedArray_t) – Pointer to mipmapped array to free

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t
cuda.bindings.runtime.cudaHostAlloc(size_t size, unsigned int flags)

Allocates page-locked memory on the host.

Allocates size bytes of host memory that is page-locked and accessible to the device. The driver tracks the virtual memory ranges allocated with this function and automatically accelerates calls to functions such as cudaMemcpy(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory obtained with functions such as malloc(). Allocating excessive amounts of pinned memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to allocate staging areas for data exchange between host and device.

The flags parameter enables different options to be specified that affect the allocation, as follows.

• cudaHostAllocDefault: This flag's value is defined to be 0 and causes cudaHostAlloc() to emulate cudaMallocHost().
• cudaHostAllocPortable: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.
• cudaHostAllocMapped: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cudaHostGetDevicePointer().
• cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). WC memory can be transferred across the PCI Express bus more quickly on some system configurations, but cannot be read efficiently by most CPUs. WC memory is a good option for buffers that will be written by the CPU and read by the device via mapped pinned memory or host->device transfers.

All of these flags are orthogonal to one another: a developer may allocate memory that is portable, mapped and/or write-combined with no restrictions.

In order for the cudaHostAllocMapped flag to have any effect, the CUDA context must support the cudaDeviceMapHost flag, which can be checked via cudaGetDeviceFlags(). The cudaDeviceMapHost flag is implicitly set for contexts created via the runtime API.

The cudaHostAllocMapped flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cudaHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the cudaHostAllocPortable flag.

Memory allocated by this function must be freed with cudaFreeHost().

Parameters:

• size (size_t) – Requested allocation size in bytes
• flags (unsigned int) – Requested properties of allocated memory

Returns:

See also

cudaSetDeviceFlags, cudaMallocHost (C API), cudaFreeHost, cudaGetDeviceFlags, cuMemHostAlloc
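A hedged sketch combining cudaHostAlloc with cudaHostGetDevicePointer for mapped pinned memory; it assumes the flag constants are exposed as module-level attributes, as elsewhere in these bindings.

```python
from cuda.bindings import runtime

nbytes = 1 << 16
# Assumption: flag constants are module-level attributes.
flags = runtime.cudaHostAllocMapped | runtime.cudaHostAllocPortable
err, host_ptr = runtime.cudaHostAlloc(nbytes, flags)
assert err == runtime.cudaError_t.cudaSuccess

# Device-visible alias of the same buffer; flags must currently be 0.
err, dev_ptr = runtime.cudaHostGetDevicePointer(host_ptr, 0)

(err,) = runtime.cudaFreeHost(host_ptr)  # must be freed with cudaFreeHost
```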
cuda.bindings.runtime.cudaHostRegister(ptr, size_t size, unsigned int flags)

Registers an existing host memory range for use by CUDA.

Page-locks the memory range specified by ptr and size and maps it for the device(s) as specified by flags. This memory range also is added to the same tracking mechanism as cudaHostAlloc() to automatically accelerate calls to functions such as cudaMemcpy(). Since the memory can be accessed directly by the device, it can be read or written with much higher bandwidth than pageable memory that has not been registered. Page-locking excessive amounts of memory may degrade system performance, since it reduces the amount of memory available to the system for paging. As a result, this function is best used sparingly to register staging areas for data exchange between host and device.

On systems where pageableMemoryAccessUsesHostPageTables is true, cudaHostRegister will not page-lock the memory range specified by ptr but only populate unpopulated pages.

cudaHostRegister is supported only on I/O coherent devices that have a non-zero value for the device attribute cudaDevAttrHostRegisterSupported.

The flags parameter enables different options to be specified that affect the allocation, as follows.

• cudaHostRegisterDefault: On a system with unified virtual addressing, the memory will be both mapped and portable. On a system with no unified virtual addressing, the memory will be neither mapped nor portable.
• cudaHostRegisterPortable: The memory returned by this call will be considered as pinned memory by all CUDA contexts, not just the one that performed the allocation.
• cudaHostRegisterMapped: Maps the allocation into the CUDA address space. The device pointer to the memory may be obtained by calling cudaHostGetDevicePointer().
• cudaHostRegisterIoMemory: The passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device, and it will be marked as non-cache-coherent and contiguous.
• cudaHostRegisterReadOnly: The passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without cudaDevAttrPageableMemoryAccessUsesHostPageTables, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute cudaDeviceAttrReadOnlyHostRegisterSupported. Using this flag with a current context associated with a device that does not have this attribute set will cause cudaHostRegister to error with cudaErrorNotSupported.

All of these flags are orthogonal to one another: a developer may page-lock memory that is portable or mapped with no restrictions.

The CUDA context must have been created with the cudaDeviceMapHost flag in order for the cudaHostRegisterMapped flag to have any effect.

The cudaHostRegisterMapped flag may be specified on CUDA contexts for devices that do not support mapped pinned memory. The failure is deferred to cudaHostGetDevicePointer() because the memory may be mapped into other CUDA contexts via the cudaHostRegisterPortable flag.

For devices that have a non-zero value for the device attribute cudaDevAttrCanUseHostPointerForRegisteredMem, the memory can also be accessed from the device using the host pointer ptr. The device pointer returned by cudaHostGetDevicePointer() may or may not match the original host pointer ptr and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will match the original pointer ptr. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will not match the original host pointer ptr, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

The memory page-locked by this function must be unregistered with cudaHostUnregister().

Parameters:

• ptr (Any) – Host pointer to memory to page-lock
• size (size_t) – Size in bytes of the address range to page-lock
• flags (unsigned int) – Flags for allocation request

Returns:

cudaSuccess, cudaErrorInvalidValue, cudaErrorMemoryAllocation, cudaErrorHostMemoryAlreadyRegistered, cudaErrorNotSupported

Return type:

cudaError_t
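A hedged sketch of page-locking an existing NumPy buffer; the base address is passed as an integer, and cudaHostRegisterDefault is assumed to be exposed as a module-level constant.

```python
import numpy as np
from cuda.bindings import runtime

buf = np.zeros(1 << 20, dtype=np.uint8)  # existing host allocation
(err,) = runtime.cudaHostRegister(buf.ctypes.data, buf.nbytes,
                                  runtime.cudaHostRegisterDefault)
assert err == runtime.cudaError_t.cudaSuccess
# ... cudaMemcpy to/from buf is now accelerated ...
(err,) = runtime.cudaHostUnregister(buf.ctypes.data)  # same base address
```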
cuda.bindings.runtime.cudaHostUnregister(ptr)

Unregisters a memory range that was registered with cudaHostRegister.

Unmaps the memory range whose base address is specified by ptr, and makes it pageable again.

The base address must be the same one specified to cudaHostRegister().

Parameters:

ptr (Any) – Host pointer to memory to unregister

Returns:

cudaSuccess, cudaErrorInvalidValue, cudaErrorHostMemoryNotRegistered

Return type:

cudaError_t
cuda.bindings.runtime.cudaHostGetDevicePointer(pHost, unsigned int flags)

Passes back device pointer of mapped host memory allocated by cudaHostAlloc or registered by cudaHostRegister.

Passes back the device pointer corresponding to the mapped, pinned host buffer allocated by cudaHostAlloc() or registered by cudaHostRegister().

cudaHostGetDevicePointer() will fail if the cudaDeviceMapHost flag was not specified before deferred context creation occurred, or if called on a device that does not support mapped, pinned memory.

For devices that have a non-zero value for the device attribute cudaDevAttrCanUseHostPointerForRegisteredMem, the memory can also be accessed from the device using the host pointer pHost. The device pointer returned by cudaHostGetDevicePointer() may or may not match the original host pointer pHost and depends on the devices visible to the application. If all devices visible to the application have a non-zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will match the original pointer pHost. If any device visible to the application has a zero value for the device attribute, the device pointer returned by cudaHostGetDevicePointer() will not match the original host pointer pHost, but it will be suitable for use on all devices provided Unified Virtual Addressing is enabled. In such systems, it is valid to access the memory using either pointer on devices that have a non-zero value for the device attribute. Note however that such devices should access the memory using only one of the two pointers and not both.

flags provides for future releases. For now, it must be set to 0.

Parameters:

• pHost (Any) – Requested host pointer mapping
• flags (unsigned int) – Flags for extensions (must be 0 for now)

Returns:
cuda.bindings.runtime.cudaHostGetFlags(pHost)

Passes back flags used to allocate pinned host memory allocated by cudaHostAlloc.

cudaHostGetFlags() will fail if the input pointer does not reside in an address range allocated by cudaHostAlloc().

Parameters:

pHost (Any) – Host pointer

Returns:
cuda.bindings.runtime.cudaMalloc3D(cudaExtent extent: cudaExtent)

Allocates logical 1D, 2D, or 3D memory objects on the device.

Allocates at least width * height * depth bytes of linear memory on the device and returns a cudaPitchedPtr in which ptr is a pointer to the allocated memory. The function may pad the allocation to ensure hardware alignment requirements are met. The pitch returned in the pitch field of pitchedDevPtr is the width in bytes of the allocation.

The returned cudaPitchedPtr contains additional fields xsize and ysize, the logical width and height of the allocation, which are equivalent to the width and height extent parameters provided by the programmer during allocation.

For allocations of 2D and 3D objects, it is highly recommended that programmers perform allocations using cudaMalloc3D() or cudaMallocPitch(). Due to alignment restrictions in the hardware, this is especially true if the application will be performing memory copies involving 2D or 3D objects (whether linear memory or CUDA arrays).

Parameters:

extent (cudaExtent) – Requested allocation size (width field in bytes)

Returns:
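A hedged sketch of a 3D allocation; it assumes cudaExtent fields are settable attribute-style, like other struct wrappers in these bindings. The pitched pointer's pitch, xsize, and ysize fields are read back as described above.

```python
from cuda.bindings import runtime

# Assumption: struct fields are settable as attributes.
extent = runtime.cudaExtent()
extent.width = 256 * 4   # width is given in bytes
extent.height = 128
extent.depth = 32

err, pitched = runtime.cudaMalloc3D(extent)
assert err == runtime.cudaError_t.cudaSuccess
print(pitched.pitch, pitched.xsize, pitched.ysize)
(err,) = runtime.cudaFree(pitched.ptr)
```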
cuda.bindings.runtime.cudaMalloc3DArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], cudaExtent extent: cudaExtent, unsigned int flags)

Allocate an array on the device.

Allocates a CUDA array according to the cudaChannelFormatDesc structure desc and returns a handle to the new CUDA array in *array.

The cudaChannelFormatDesc is defined as:

View CUDA Toolkit Documentation for a C++ code example

where cudaChannelFormatKind is one of cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

cudaMalloc3DArray() can allocate the following:

• A 1D array is allocated if the height and depth extents are both zero.
• A 2D array is allocated if only the depth extent is zero.
• A 3D array is allocated if all three extents are non-zero.
• A 1D layered CUDA array is allocated if only the height extent is zero and the cudaArrayLayered flag is set. Each layer is a 1D array. The number of layers is determined by the depth extent.
• A 2D layered CUDA array is allocated if all three extents are non-zero and the cudaArrayLayered flag is set. Each layer is a 2D array. The number of layers is determined by the depth extent.
• A cubemap CUDA array is allocated if all three extents are non-zero and the cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. A cubemap is a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube. The order of the six layers in memory is the same as that listed in cudaGraphicsCubeFace.
• A cubemap layered CUDA array is allocated if all three extents are non-zero, and both cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists of a collection of cubemaps. The first six layers represent the first cubemap, the next six layers form the second cubemap, and so on.

The flags parameter enables different options to be specified that affect the allocation, as follows.

• cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation
• cudaArrayLayered: Allocates a layered CUDA array, with the depth extent indicating the number of layers
• cudaArrayCubemap: Allocates a cubemap CUDA array. Width must be equal to height, and depth must be six. If the cudaArrayLayered flag is also set, depth must be a multiple of six.
• cudaArraySurfaceLoadStore: Allocates a CUDA array that could be read from or written to using a surface reference.
• cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA array. Texture gather can only be performed on 2D CUDA arrays.
• cudaArraySparse: Allocates a CUDA array without physical backing memory. The subregions within this sparse array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. This flag can only be used for creating 2D, 3D or 2D layered sparse CUDA arrays. The physical backing memory must be allocated via cuMemCreate.
• cudaArrayDeferredMapping: Allocates a CUDA array without physical backing memory. The entire array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.

The width, height and depth extents must meet certain size requirements as listed in the following table. All values are specified in elements.

Note that 2D CUDA arrays have different size requirements if the cudaArrayTextureGather flag is set. In that case, the valid range for (width, height, depth) is ((1,maxTexture2DGather[0]), (1,maxTexture2DGather[1]), 0).

View CUDA Toolkit Documentation for a table example

Parameters:

• desc (cudaChannelFormatDesc) – Requested channel format
• extent (cudaExtent) – Requested allocation size (width field in elements)
• flags (unsigned int) – Flags for extensions

Returns:
cuda.bindings.runtime.cudaMallocMipmappedArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], cudaExtent extent: cudaExtent, unsigned int numLevels, unsigned int flags)

Allocate a mipmapped array on the device.

Allocates a CUDA mipmapped array according to the cudaChannelFormatDesc structure desc and returns a handle to the new CUDA mipmapped array in *mipmappedArray. numLevels specifies the number of mipmap levels to be allocated. This value is clamped to the range [1, 1 + floor(log2(max(width, height, depth)))].

The cudaChannelFormatDesc is defined as:

View CUDA Toolkit Documentation for a C++ code example

where cudaChannelFormatKind is one of cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

cudaMallocMipmappedArray() can allocate the following:

• A 1D mipmapped array is allocated if the height and depth extents are both zero.
• A 2D mipmapped array is allocated if only the depth extent is zero.
• A 3D mipmapped array is allocated if all three extents are non-zero.
• A 1D layered CUDA mipmapped array is allocated if only the height extent is zero and the cudaArrayLayered flag is set. Each layer is a 1D mipmapped array. The number of layers is determined by the depth extent.
• A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and the cudaArrayLayered flag is set. Each layer is a 2D mipmapped array. The number of layers is determined by the depth extent.
• A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. The order of the six layers in memory is the same as that listed in cudaGraphicsCubeFace.
• A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, and both cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be a multiple of six. A cubemap layered CUDA mipmapped array is a special type of 2D layered CUDA mipmapped array that consists of a collection of cubemap mipmapped arrays. The first six layers represent the first cubemap mipmapped array, the next six layers form the second cubemap mipmapped array, and so on.

The flags parameter enables different options to be specified that affect the allocation, as follows.

• cudaArrayDefault: This flag's value is defined to be 0 and provides default mipmapped array allocation
• cudaArrayLayered: Allocates a layered CUDA mipmapped array, with the depth extent indicating the number of layers
• cudaArrayCubemap: Allocates a cubemap CUDA mipmapped array. Width must be equal to height, and depth must be six. If the cudaArrayLayered flag is also set, depth must be a multiple of six.
• cudaArraySurfaceLoadStore: This flag indicates that individual mipmap levels of the CUDA mipmapped array will be read from or written to using a surface reference.
• cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA array. Texture gather can only be performed on 2D CUDA mipmapped arrays, and the gather operations are performed only on the most detailed mipmap level.
• cudaArraySparse: Allocates a CUDA mipmapped array without physical backing memory. The subregions within this sparse array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. This flag can only be used for creating 2D, 3D or 2D layered sparse CUDA mipmapped arrays. The physical backing memory must be allocated via cuMemCreate.
• cudaArrayDeferredMapping: Allocates a CUDA mipmapped array without physical backing memory. The entire array can later be mapped onto a physical memory allocation by calling cuMemMapArrayAsync. The physical backing memory must be allocated via cuMemCreate.

The width, height and depth extents must meet certain size requirements as listed in the following table. All values are specified in elements.

View CUDA Toolkit Documentation for a table example

Parameters:

• desc (cudaChannelFormatDesc) – Requested channel format
• extent (cudaExtent) – Requested allocation size (width field in elements)
• numLevels (unsigned int) – Number of mipmap levels to allocate
• flags (unsigned int) – Flags for extensions

Returns:
    +
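    As the generated text carries no Python example, here is a minimal sketch of allocating and releasing a 2D mipmapped array through these bindings. The check helper is hypothetical, and the make_cudaExtent / cudaCreateChannelDesc calls are assumed to be the bindings of the corresponding runtime helpers:

        from cuda.bindings import runtime

        def check(err):  # hypothetical helper: fail fast on any CUDA error
            if err != runtime.cudaError_t.cudaSuccess:
                raise RuntimeError(f"CUDA error: {err}")

        # 8-bit, single-channel, unsigned channel format.
        err, desc = runtime.cudaCreateChannelDesc(
            8, 0, 0, 0, runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsigned)
        check(err)

        # height > 0 and depth == 0 selects a 2D mipmapped array (see the list above).
        extent = runtime.make_cudaExtent(256, 256, 0)
        num_levels = 9  # full chain for 256x256: log2(256) + 1

        err, mm_array = runtime.cudaMallocMipmappedArray(
            desc, extent, num_levels, runtime.cudaArrayDefault)
        check(err)

        # ... use the array ...
        err, = runtime.cudaFreeMipmappedArray(mm_array)
        check(err)
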
    cuda.bindings.runtime.cudaGetMipmappedArrayLevel(mipmappedArray, unsigned int level)

    Gets a mipmap level of a CUDA mipmapped array.

    Returns in *levelArray a CUDA array that represents a single mipmap level of the CUDA mipmapped array mipmappedArray.

    If level is greater than the maximum number of levels in this mipmapped array, cudaErrorInvalidValue is returned.

    If mipmappedArray is NULL, cudaErrorInvalidResourceHandle is returned.

    Parameters:
    • mipmappedArray – CUDA mipmapped array
    • level (unsigned int) – Mipmap level

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle
    • levelArray (cudaArray_t) – Returned mipmap level CUDA array

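    Continuing the hypothetical sketch above, a single level can be retrieved and then used wherever a cudaArray_t is accepted:

        # mm_array is the cudaMipmappedArray_t from the earlier sketch.
        err, level0 = runtime.cudaGetMipmappedArrayLevel(mm_array, 0)
        check(err)
        # level0 is a cudaArray_t representing the most detailed mip level.
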
    cuda.bindings.runtime.cudaMemcpy3D(cudaMemcpy3DParms p: Optional[cudaMemcpy3DParms])

    Copies data between 3D objects.

    View CUDA Toolkit Documentation for a C++ code example

    cudaMemcpy3D() copies data between two 3D objects. The source and destination objects may be in either host memory, device memory, or a CUDA array. The source, destination, extent, and kind of copy performed is specified by the cudaMemcpy3DParms struct, which should be initialized to zero before use:

    View CUDA Toolkit Documentation for a C++ code example

    The struct passed to cudaMemcpy3D() must specify one of srcArray or srcPtr and one of dstArray or dstPtr. Passing more than one non-zero source or destination will cause cudaMemcpy3D() to return an error.

    The srcPos and dstPos fields are optional offsets into the source and destination objects and are defined in units of each object’s elements. The element for a host or device pointer is assumed to be unsigned char.

    The extent field defines the dimensions of the transferred area in elements. If a CUDA array is participating in the copy, the extent is defined in terms of that array’s elements. If no CUDA array is participating in the copy then the extents are defined in elements of unsigned char.

    The kind field defines the direction of the copy. It must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. If cudaMemcpyHostToHost, cudaMemcpyHostToDevice, or cudaMemcpyDeviceToHost is passed as kind and a cudaArray is passed as source or destination, then if the kind implies that the cudaArray is on the host, cudaMemcpy3D() will disregard that implication and silently correct the kind, because a cudaArray can only be present on the device.

    If the source and destination are both arrays, cudaMemcpy3D() will return an error if they do not have the same element size.

    The source and destination object may not overlap. If overlapping source and destination objects are specified, undefined behavior will result.

    The source object must entirely contain the region defined by srcPos and extent. The destination object must entirely contain the region defined by dstPos and extent.

    cudaMemcpy3D() returns an error if the pitch of srcPtr or dstPtr exceeds the maximum allowed. The pitch of a cudaPitchedPtr allocated with cudaMalloc3D() will always be valid.

    Parameters:
    p (cudaMemcpy3DParms) – 3D memory copy parameters

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

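    A minimal host-to-device sketch under these rules, assuming the cudaMemcpy3DParms constructor zero-initializes the struct and that make_cudaPitchedPtr / make_cudaExtent are the bound runtime helpers (numpy is used only to stage host data):

        import numpy as np
        from cuda.bindings import runtime

        w, h, d = 64, 32, 8  # extents in elements (unsigned char here, so bytes)
        host_src = np.arange(w * h * d, dtype=np.uint8).reshape(d, h, w)

        # Pitched 3D device allocation; its pitch is always a valid one.
        err, dev_pitched = runtime.cudaMalloc3D(runtime.make_cudaExtent(w, h, d))
        assert err == runtime.cudaError_t.cudaSuccess

        p = runtime.cudaMemcpy3DParms()  # zero-initialized
        p.srcPtr = runtime.make_cudaPitchedPtr(host_src.ctypes.data, w, w, h)
        p.dstPtr = dev_pitched
        p.extent = runtime.make_cudaExtent(w, h, d)
        p.kind = runtime.cudaMemcpyKind.cudaMemcpyHostToDevice

        err, = runtime.cudaMemcpy3D(p)
        assert err == runtime.cudaError_t.cudaSuccess
        err, = runtime.cudaFree(dev_pitched.ptr)
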
    cuda.bindings.runtime.cudaMemcpy3DPeer(cudaMemcpy3DPeerParms p: Optional[cudaMemcpy3DPeerParms])

    Copies memory between devices.

    Perform a 3D memory copy according to the parameters specified in p. See the definition of the cudaMemcpy3DPeerParms structure for documentation of its parameters.

    Note that this function is synchronous with respect to the host only if the source or destination of the transfer is host memory. Note also that this copy is serialized with respect to all pending and future asynchronous work into the current device, the copy’s source device, and the copy’s destination device (use cudaMemcpy3DPeerAsync to avoid this synchronization).

    Parameters:
    p (cudaMemcpy3DPeerParms) – Parameters for the memory copy

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice, cudaErrorInvalidPitchValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemcpy3DAsync(cudaMemcpy3DParms p: Optional[cudaMemcpy3DParms], stream)

    Copies data between 3D objects.

    View CUDA Toolkit Documentation for a C++ code example

    cudaMemcpy3DAsync() copies data between two 3D objects. The source and destination objects may be in either host memory, device memory, or a CUDA array. The source, destination, extent, and kind of copy performed is specified by the cudaMemcpy3DParms struct, which should be initialized to zero before use:

    View CUDA Toolkit Documentation for a C++ code example

    The struct passed to cudaMemcpy3DAsync() must specify one of srcArray or srcPtr and one of dstArray or dstPtr. Passing more than one non-zero source or destination will cause cudaMemcpy3DAsync() to return an error.

    The srcPos and dstPos fields are optional offsets into the source and destination objects and are defined in units of each object’s elements. The element for a host or device pointer is assumed to be unsigned char. For CUDA arrays, positions must be in the range [0, 2048) for any dimension.

    The extent field defines the dimensions of the transferred area in elements. If a CUDA array is participating in the copy, the extent is defined in terms of that array’s elements. If no CUDA array is participating in the copy then the extents are defined in elements of unsigned char.

    The kind field defines the direction of the copy. It must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. If cudaMemcpyHostToHost, cudaMemcpyHostToDevice, or cudaMemcpyDeviceToHost is passed as kind and a cudaArray is passed as source or destination, then if the kind implies that the cudaArray is on the host, cudaMemcpy3DAsync() will disregard that implication and silently correct the kind, because a cudaArray can only be present on the device.

    If the source and destination are both arrays, cudaMemcpy3DAsync() will return an error if they do not have the same element size.

    The source and destination object may not overlap. If overlapping source and destination objects are specified, undefined behavior will result.

    The source object must lie entirely within the region defined by srcPos and extent. The destination object must lie entirely within the region defined by dstPos and extent.

    cudaMemcpy3DAsync() returns an error if the pitch of srcPtr or dstPtr exceeds the maximum allowed. The pitch of a cudaPitchedPtr allocated with cudaMalloc3D() will always be valid.

    cudaMemcpy3DAsync() is asynchronous with respect to the host, so the call may return before the copy is complete. The copy can optionally be associated to a stream by passing a non-zero stream argument. If kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and stream is non-zero, the copy may overlap with operations in other streams.

    The device version of this function only handles device to device copies and cannot be given local or shared pointers.

    Parameters:
    • p (cudaMemcpy3DParms) – 3D memory copy parameters
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemcpy3DPeerAsync(cudaMemcpy3DPeerParms p: Optional[cudaMemcpy3DPeerParms], stream)

    Copies memory between devices asynchronously.

    Perform a 3D memory copy according to the parameters specified in p. See the definition of the cudaMemcpy3DPeerParms structure for documentation of its parameters.

    Parameters:
    • p (cudaMemcpy3DPeerParms) – Parameters for the memory copy
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice, cudaErrorInvalidPitchValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemGetInfo()

    Gets free and total device memory.

    Returns in *total the total amount of memory available to the current context. Returns in *free the amount of memory on the device that is free according to the OS. CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free. In a multi-tenant situation, the free estimate returned is prone to a race condition: an allocation or free performed by a different process, or by a different thread in the same process, between the time free memory is estimated and reported will cause the reported free value to deviate from the actual free memory.

    The integrated GPU on Tegra shares memory with the CPU and other components of the SoC. The free and total values returned by the API exclude the SWAP memory space maintained by the OS on some platforms. The OS may move some of the memory pages into the swap area as the GPU or CPU allocate or access memory. See the Tegra app note on how to calculate total and free memory on Tegra.

    Returns:
    • cudaError_t
    • free (int) – Returned free memory in bytes
    • total (int) – Returned total memory in bytes

    See also

    cuMemGetInfo

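    For instance, a quick query through the bindings (the tuple-unpacking convention below matches the signatures shown throughout this reference):

        from cuda.bindings import runtime

        err, free, total = runtime.cudaMemGetInfo()
        if err == runtime.cudaError_t.cudaSuccess:
            print(f"free: {free / 2**20:.0f} MiB of {total / 2**20:.0f} MiB")
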
    cuda.bindings.runtime.cudaArrayGetInfo(array)

    Gets info about the specified cudaArray.

    Returns in *desc, *extent and *flags respectively, the type, shape and flags of array.

    Any of *desc, *extent and *flags may be specified as NULL.

    Parameters:
    array (cudaArray_t) – The cudaArray to get info for

    Returns:
    • cudaError_t
    • desc (cudaChannelFormatDesc) – Returned array type
    • extent (cudaExtent) – Returned array shape
    • flags (unsigned int) – Returned array flags

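    A short sketch, assuming cudaMallocArray and cudaCreateChannelDesc are bound with the signatures used elsewhere in this reference:

        from cuda.bindings import runtime

        # 32-bit float, single channel.
        err, desc = runtime.cudaCreateChannelDesc(
            32, 0, 0, 0, runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat)
        err, arr = runtime.cudaMallocArray(desc, 128, 64, runtime.cudaArrayDefault)

        err, got_desc, extent, flags = runtime.cudaArrayGetInfo(arr)
        print(extent.width, extent.height, extent.depth, flags)  # expect 128 64 0 0

        err, = runtime.cudaFreeArray(arr)
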
    cuda.bindings.runtime.cudaArrayGetPlane(hArray, unsigned int planeIdx)

    Gets a CUDA array plane from a CUDA array.

    Returns in pPlaneArray a CUDA array that represents a single format plane of the CUDA array hArray.

    If planeIdx is greater than the maximum number of planes in this array, or if the array does not have a multi-planar format (e.g. cudaChannelFormatKindNV12), then cudaErrorInvalidValue is returned.

    Note that if the hArray has format cudaChannelFormatKindNV12, then passing in 0 for planeIdx returns a CUDA array of the same size as hArray but with one 8-bit channel and cudaChannelFormatKindUnsigned as its format kind. If 1 is passed for planeIdx, then the returned CUDA array has half the height and width of hArray with two 8-bit channels and cudaChannelFormatKindUnsigned as its format kind.

    Parameters:
    • hArray (cudaArray_t) – CUDA array
    • planeIdx (unsigned int) – Plane index

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • pPlaneArray (cudaArray_t) – Returned CUDA array referencing the requested plane of hArray

    See also

    cuArrayGetPlane

    cuda.bindings.runtime.cudaArrayGetMemoryRequirements(array, int device)

    Returns the memory requirements of a CUDA array.

    Returns the memory requirements of a CUDA array in memoryRequirements. If the CUDA array is not allocated with the flag cudaArrayDeferredMapping, cudaErrorInvalidValue will be returned.

    The returned value in size represents the total size of the CUDA array. The returned value in alignment represents the alignment necessary for mapping the CUDA array.

    Parameters:
    • array (cudaArray_t) – CUDA array to get the memory requirements of
    • device (int) – Device to get the memory requirements for

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • memoryRequirements – Returned memory requirements

    cuda.bindings.runtime.cudaMipmappedArrayGetMemoryRequirements(mipmap, int device)

    Returns the memory requirements of a CUDA mipmapped array.

    Returns the memory requirements of a CUDA mipmapped array in memoryRequirements. If the CUDA mipmapped array is not allocated with the flag cudaArrayDeferredMapping, cudaErrorInvalidValue will be returned.

    The returned value in size represents the total size of the CUDA mipmapped array. The returned value in alignment represents the alignment necessary for mapping the CUDA mipmapped array.

    Parameters:
    • mipmap (cudaMipmappedArray_t) – CUDA mipmapped array to get the memory requirements of
    • device (int) – Device to get the memory requirements for

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • memoryRequirements – Returned memory requirements

    cuda.bindings.runtime.cudaArrayGetSparseProperties(array)

    Returns the layout properties of a sparse CUDA array.

    Returns the layout properties of a sparse CUDA array in sparseProperties. If the CUDA array is not allocated with the flag cudaArraySparse, cudaErrorInvalidValue will be returned.

    If the returned value in flags contains cudaArraySparsePropertiesSingleMipTail, then miptailSize represents the total size of the array. Otherwise, it will be zero. Also, the returned value in miptailFirstLevel is always zero. Note that the array must have been allocated using cudaMallocArray or cudaMalloc3DArray. For CUDA arrays obtained using cudaMipmappedArrayGetLevel, cudaErrorInvalidValue will be returned. Instead, cudaMipmappedArrayGetSparseProperties must be used to obtain the sparse properties of the entire CUDA mipmapped array to which array belongs.

    Parameters:
    array (cudaArray_t) – The CUDA array to get the sparse properties of

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • sparseProperties – Returned sparse array layout properties

    cuda.bindings.runtime.cudaMipmappedArrayGetSparseProperties(mipmap)

    Returns the layout properties of a sparse CUDA mipmapped array.

    Returns the sparse array layout properties in sparseProperties. If the CUDA mipmapped array is not allocated with the flag cudaArraySparse, cudaErrorInvalidValue will be returned.

    For non-layered CUDA mipmapped arrays, miptailSize returns the size of the mip tail region. The mip tail region includes all mip levels whose width, height or depth is less than that of the tile. For layered CUDA mipmapped arrays, if flags contains cudaArraySparsePropertiesSingleMipTail, then miptailSize specifies the size of the mip tail of all layers combined. Otherwise, miptailSize specifies the mip tail size per layer. The returned value of miptailFirstLevel is valid only if miptailSize is non-zero.

    Parameters:
    mipmap (cudaMipmappedArray_t) – The CUDA mipmapped array to get the sparse properties of

    Returns:
    • cudaError_t – cudaSuccess, cudaErrorInvalidValue
    • sparseProperties – Returned sparse array layout properties

    cuda.bindings.runtime.cudaMemcpy(dst, src, size_t count, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies count bytes from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. Calling cudaMemcpy() with dst and src pointers that do not match the direction of the copy results in an undefined behavior.

    Note: this function exhibits synchronous behavior for most use cases.

    Parameters:
    • dst (Any) – Destination memory address
    • src (Any) – Source memory address
    • count (size_t) – Size in bytes to copy
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpyDtoH, cuMemcpyHtoD, cuMemcpyDtoD, cuMemcpy

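    A minimal host-to-device-to-host round trip, staging host data with numpy and passing raw addresses via .ctypes.data (error checks condensed to assertions for brevity):

        import numpy as np
        from cuda.bindings import runtime

        host_src = np.arange(1024, dtype=np.float32)
        host_dst = np.empty_like(host_src)
        nbytes = host_src.nbytes

        err, dptr = runtime.cudaMalloc(nbytes)
        assert err == runtime.cudaError_t.cudaSuccess

        err, = runtime.cudaMemcpy(dptr, host_src.ctypes.data, nbytes,
                                  runtime.cudaMemcpyKind.cudaMemcpyHostToDevice)
        err, = runtime.cudaMemcpy(host_dst.ctypes.data, dptr, nbytes,
                                  runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost)
        assert (host_dst == host_src).all()

        err, = runtime.cudaFree(dptr)
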
    cuda.bindings.runtime.cudaMemcpyPeer(dst, int dstDevice, src, int srcDevice, size_t count)

    Copies memory between two devices.

    Copies memory from one device to memory on another device. dst is the base device pointer of the destination memory and dstDevice is the destination device. src is the base device pointer of the source memory and srcDevice is the source device. count specifies the number of bytes to copy.

    Note that this function is asynchronous with respect to the host, but serialized with respect to all pending and future asynchronous work into the current device, srcDevice, and dstDevice (use cudaMemcpyPeerAsync to avoid this synchronization).

    Parameters:
    • dst (Any) – Destination device pointer
    • dstDevice (int) – Destination device
    • src (Any) – Source device pointer
    • srcDevice (int) – Source device
    • count (size_t) – Size of memory copy in bytes

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemcpy2D(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. dpitch and spitch are the widths in memory in bytes of the 2D arrays pointed to by dst and src, including any padding added to the end of each row. The memory areas may not overlap. width must not exceed either dpitch or spitch. Calling cudaMemcpy2D() with dst and src pointers that do not match the direction of the copy results in an undefined behavior. cudaMemcpy2D() returns an error if dpitch or spitch exceeds the maximum allowed.

    Parameters:
    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

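    A sketch of copying a densely packed host matrix into a pitched device allocation; cudaMallocPitch is assumed to return the device pointer and its pitch as shown:

        import numpy as np
        from cuda.bindings import runtime

        height, width = 64, 256  # width is in bytes (uint8 elements)
        host = np.arange(height * width, dtype=np.uint8).reshape(height, width)

        err, dptr, dpitch = runtime.cudaMallocPitch(width, height)
        assert err == runtime.cudaError_t.cudaSuccess

        # Host rows are densely packed (spitch == width); device rows are padded
        # to dpitch. width must not exceed either pitch.
        err, = runtime.cudaMemcpy2D(dptr, dpitch, host.ctypes.data, width,
                                    width, height,
                                    runtime.cudaMemcpyKind.cudaMemcpyHostToDevice)
        assert err == runtime.cudaError_t.cudaSuccess
        err, = runtime.cudaFree(dptr)
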
    cuda.bindings.runtime.cudaMemcpy2DToArray(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area pointed to by src to the CUDA array dst starting at hOffset rows and wOffset bytes from the upper left corner, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. spitch is the width in memory in bytes of the 2D array pointed to by src, including any padding added to the end of each row. wOffset + width must not exceed the width of the CUDA array dst. width must not exceed spitch. cudaMemcpy2DToArray() returns an error if spitch exceeds the maximum allowed.

    Parameters:
    • dst (cudaArray_t) – Destination memory address
    • wOffset (size_t) – Destination starting X offset (columns in bytes)
    • hOffset (size_t) – Destination starting Y offset (rows)
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

    cuda.bindings.runtime.cudaMemcpy2DFromArray(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the CUDA array src starting at hOffset rows and wOffset bytes from the upper left corner to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. dpitch is the width in memory in bytes of the 2D array pointed to by dst, including any padding added to the end of each row. wOffset + width must not exceed the width of the CUDA array src. width must not exceed dpitch. cudaMemcpy2DFromArray() returns an error if dpitch exceeds the maximum allowed.

    Parameters:
    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (cudaArray_const_t) – Source memory address
    • wOffset (size_t) – Source starting X offset (columns in bytes)
    • hOffset (size_t) – Source starting Y offset (rows)
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

    cuda.bindings.runtime.cudaMemcpy2DArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, kind: cudaMemcpyKind)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the CUDA array src starting at hOffsetSrc rows and wOffsetSrc bytes from the upper left corner to the CUDA array dst starting at hOffsetDst rows and wOffsetDst bytes from the upper left corner, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. wOffsetDst + width must not exceed the width of the CUDA array dst. wOffsetSrc + width must not exceed the width of the CUDA array src.

    Parameters:
    • dst (cudaArray_t) – Destination memory address
    • wOffsetDst (size_t) – Destination starting X offset (columns in bytes)
    • hOffsetDst (size_t) – Destination starting Y offset (rows)
    • src (cudaArray_const_t) – Source memory address
    • wOffsetSrc (size_t) – Source starting X offset (columns in bytes)
    • hOffsetSrc (size_t) – Source starting Y offset (rows)
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2D, cuMemcpy2DUnaligned

    cuda.bindings.runtime.cudaMemcpyAsync(dst, src, size_t count, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies count bytes from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing.

    The memory areas may not overlap. Calling cudaMemcpyAsync() with dst and src pointers that do not match the direction of the copy results in an undefined behavior.

    cudaMemcpyAsync() is asynchronous with respect to the host, so the call may return before the copy is complete. The copy can optionally be associated to a stream by passing a non-zero stream argument. If kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and the stream is non-zero, the copy may overlap with operations in other streams.

    The device version of this function only handles device to device copies and cannot be given local or shared pointers.

    Parameters:
    • dst (Any) – Destination memory address
    • src (Any) – Source memory address
    • count (size_t) – Size in bytes to copy
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

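    A stream-based sketch. Pinned host memory (cudaMallocHost) is used because asynchronous copies from pageable memory generally cannot overlap; the raw-address return convention of cudaMallocHost is an assumption here:

        from cuda.bindings import runtime

        nbytes = 1 << 20

        err, hptr = runtime.cudaMallocHost(nbytes)   # pinned host buffer
        err, dptr = runtime.cudaMalloc(nbytes)
        err, stream = runtime.cudaStreamCreate()

        err, = runtime.cudaMemcpyAsync(dptr, hptr, nbytes,
                                       runtime.cudaMemcpyKind.cudaMemcpyHostToDevice,
                                       stream)
        err, = runtime.cudaStreamSynchronize(stream)  # wait before touching buffers

        err, = runtime.cudaStreamDestroy(stream)
        err, = runtime.cudaFree(dptr)
        err, = runtime.cudaFreeHost(hptr)
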
    cuda.bindings.runtime.cudaMemcpyPeerAsync(dst, int dstDevice, src, int srcDevice, size_t count, stream)

    Copies memory between two devices asynchronously.

    Copies memory from one device to memory on another device. dst is the base device pointer of the destination memory and dstDevice is the destination device. src is the base device pointer of the source memory and srcDevice is the source device. count specifies the number of bytes to copy.

    Note that this function is asynchronous with respect to the host and all work on other devices.

    Parameters:
    • dst (Any) – Destination device pointer
    • dstDevice (int) – Destination device
    • src (Any) – Source device pointer
    • srcDevice (int) – Source device
    • count (size_t) – Size of memory copy in bytes
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemcpy2DAsync(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. dpitch and spitch are the widths in memory in bytes of the 2D arrays pointed to by dst and src, including any padding added to the end of each row. The memory areas may not overlap. width must not exceed either dpitch or spitch.

    Calling cudaMemcpy2DAsync() with dst and src pointers that do not match the direction of the copy results in an undefined behavior. cudaMemcpy2DAsync() returns an error if dpitch or spitch is greater than the maximum allowed.

    cudaMemcpy2DAsync() is asynchronous with respect to the host, so the call may return before the copy is complete. The copy can optionally be associated to a stream by passing a non-zero stream argument. If kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and stream is non-zero, the copy may overlap with operations in other streams.

    The device version of this function only handles device to device copies and cannot be given local or shared pointers.

    Parameters:
    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpy, cudaMemcpy2D, cudaMemcpy2DToArray, cudaMemcpy2DFromArray, cudaMemcpy2DArrayToArray, cudaMemcpyToSymbol, cudaMemcpyFromSymbol, cudaMemcpyAsync, cudaMemcpy2DToArrayAsync, cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2DAsync

    cuda.bindings.runtime.cudaMemcpy2DToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the memory area pointed to by src to the CUDA array dst starting at hOffset rows and wOffset bytes from the upper left corner, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. spitch is the width in memory in bytes of the 2D array pointed to by src, including any padding added to the end of each row. wOffset + width must not exceed the width of the CUDA array dst. width must not exceed spitch. cudaMemcpy2DToArrayAsync() returns an error if spitch exceeds the maximum allowed.

    cudaMemcpy2DToArrayAsync() is asynchronous with respect to the host, so the call may return before the copy is complete. The copy can optionally be associated to a stream by passing a non-zero stream argument. If kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and stream is non-zero, the copy may overlap with operations in other streams.

    Parameters:
    • dst (cudaArray_t) – Destination memory address
    • wOffset (size_t) – Destination starting X offset (columns in bytes)
    • hOffset (size_t) – Destination starting Y offset (rows)
    • src (Any) – Source memory address
    • spitch (size_t) – Pitch of source memory
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpy2DFromArrayAsync, cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2DAsync

    cuda.bindings.runtime.cudaMemcpy2DFromArrayAsync(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind: cudaMemcpyKind, stream)

    Copies data between host and device.

    Copies a matrix (height rows of width bytes each) from the CUDA array src starting at hOffset rows and wOffset bytes from the upper left corner to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. dpitch is the width in memory in bytes of the 2D array pointed to by dst, including any padding added to the end of each row. wOffset + width must not exceed the width of the CUDA array src. width must not exceed dpitch. cudaMemcpy2DFromArrayAsync() returns an error if dpitch exceeds the maximum allowed.

    cudaMemcpy2DFromArrayAsync() is asynchronous with respect to the host, so the call may return before the copy is complete. The copy can optionally be associated to a stream by passing a non-zero stream argument. If kind is cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost and stream is non-zero, the copy may overlap with operations in other streams.

    Parameters:
    • dst (Any) – Destination memory address
    • dpitch (size_t) – Pitch of destination memory
    • src (cudaArray_const_t) – Source memory address
    • wOffset (size_t) – Source starting X offset (columns in bytes)
    • hOffset (size_t) – Source starting Y offset (rows)
    • width (size_t) – Width of matrix transfer (columns in bytes)
    • height (size_t) – Height of matrix transfer (rows)
    • kind (cudaMemcpyKind) – Type of transfer
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidPitchValue, cudaErrorInvalidMemcpyDirection

    Return type:
    cudaError_t

    See also

    cudaMemcpyToSymbolAsync, cudaMemcpyFromSymbolAsync, cuMemcpy2DAsync

    cuda.bindings.runtime.cudaMemset(devPtr, int value, size_t count)

    Initializes or sets device memory to a value.

    Fills the first count bytes of the memory area pointed to by devPtr with the constant byte value value.

    Note that this function is asynchronous with respect to the host unless devPtr refers to pinned host memory.

    Parameters:
    • devPtr (Any) – Pointer to device memory
    • value (int) – Value to set for each byte of specified memory
    • count (size_t) – Size in bytes to set

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

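    A short sketch; the explicit synchronization reflects the note above that the fill may be asynchronous with respect to the host:

        from cuda.bindings import runtime

        err, dptr = runtime.cudaMalloc(256)
        err, = runtime.cudaMemset(dptr, 0xAB, 256)  # fill all 256 bytes with 0xAB
        err, = runtime.cudaDeviceSynchronize()      # ensure the fill has completed
        err, = runtime.cudaFree(dptr)
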
    cuda.bindings.runtime.cudaMemset2D(devPtr, size_t pitch, int value, size_t width, size_t height)

    Initializes or sets device memory to a value.

    Sets to the specified value value a matrix (height rows of width bytes each) pointed to by devPtr. pitch is the width in bytes of the 2D array pointed to by devPtr, including any padding added to the end of each row. This function performs fastest when the pitch is one that has been passed back by cudaMallocPitch().

    Note that this function is asynchronous with respect to the host unless devPtr refers to pinned host memory.

    Parameters:
    • devPtr (Any) – Pointer to 2D device memory
    • pitch (size_t) – Pitch in bytes of 2D device memory (unused if height is 1)
    • value (int) – Value to set for each byte of specified memory
    • width (size_t) – Width of matrix set (columns in bytes)
    • height (size_t) – Height of matrix set (rows)

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemset3D(cudaPitchedPtr pitchedDevPtr: cudaPitchedPtr, int value, cudaExtent extent: cudaExtent)

    Initializes or sets device memory to a value.

    Initializes each element of a 3D array to the specified value value. The object to initialize is defined by pitchedDevPtr. The pitch field of pitchedDevPtr is the width in memory in bytes of the 3D array pointed to by pitchedDevPtr, including any padding added to the end of each row. The xsize field specifies the logical width of each row in bytes, while the ysize field specifies the height of each 2D slice in rows. The pitch field of pitchedDevPtr is ignored when height and depth are both equal to 1.

    The extents of the initialized region are specified as a width in bytes, a height in rows, and a depth in slices.

    Extents with width greater than or equal to the xsize of pitchedDevPtr may perform significantly faster than extents narrower than the xsize. Secondarily, extents with height equal to the ysize of pitchedDevPtr will perform faster than when the height is shorter than the ysize.

    This function performs fastest when the pitchedDevPtr has been allocated by cudaMalloc3D().

    Note that this function is asynchronous with respect to the host unless pitchedDevPtr refers to pinned host memory.

    Parameters:
    • pitchedDevPtr (cudaPitchedPtr) – Pointer to pitched device memory
    • value (int) – Value to set for each byte of specified memory
    • extent (cudaExtent) – Size parameters for where to set device memory (width field in bytes)

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemsetAsync(devPtr, int value, size_t count, stream)

    Initializes or sets device memory to a value.

    Fills the first count bytes of the memory area pointed to by devPtr with the constant byte value value.

    cudaMemsetAsync() is asynchronous with respect to the host, so the call may return before the memset is complete. The operation can optionally be associated to a stream by passing a non-zero stream argument. If stream is non-zero, the operation may overlap with operations in other streams.

    The device version of this function only handles device to device copies and cannot be given local or shared pointers.

    Parameters:
    • devPtr (Any) – Pointer to device memory
    • value (int) – Value to set for each byte of specified memory
    • count (size_t) – Size in bytes to set
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemset2DAsync(devPtr, size_t pitch, int value, size_t width, size_t height, stream)

    Initializes or sets device memory to a value.

    Sets to the specified value value a matrix (height rows of width bytes each) pointed to by devPtr. pitch is the width in bytes of the 2D array pointed to by devPtr, including any padding added to the end of each row. This function performs fastest when the pitch is one that has been passed back by cudaMallocPitch().

    cudaMemset2DAsync() is asynchronous with respect to the host, so the call may return before the memset is complete. The operation can optionally be associated to a stream by passing a non-zero stream argument. If stream is non-zero, the operation may overlap with operations in other streams.

    The device version of this function only handles device to device copies and cannot be given local or shared pointers.

    Parameters:
    • devPtr (Any) – Pointer to 2D device memory
    • pitch (size_t) – Pitch in bytes of 2D device memory (unused if height is 1)
    • value (int) – Value to set for each byte of specified memory
    • width (size_t) – Width of matrix set (columns in bytes)
    • height (size_t) – Height of matrix set (rows)
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemset3DAsync(cudaPitchedPtr pitchedDevPtr: cudaPitchedPtr, int value, cudaExtent extent: cudaExtent, stream)

    Initializes or sets device memory to a value.

    Initializes each element of a 3D array to the specified value value. The object to initialize is defined by pitchedDevPtr. The pitch field of pitchedDevPtr is the width in memory in bytes of the 3D array pointed to by pitchedDevPtr, including any padding added to the end of each row. The xsize field specifies the logical width of each row in bytes, while the ysize field specifies the height of each 2D slice in rows. The pitch field of pitchedDevPtr is ignored when height and depth are both equal to 1.

    The extents of the initialized region are specified as a width in bytes, a height in rows, and a depth in slices.

    Extents with width greater than or equal to the xsize of pitchedDevPtr may perform significantly faster than extents narrower than the xsize. Secondarily, extents with height equal to the ysize of pitchedDevPtr will perform faster than when the height is shorter than the ysize.

    This function performs fastest when the pitchedDevPtr has been allocated by cudaMalloc3D().

    cudaMemset3DAsync() is asynchronous with respect to the host, so the call may return before the memset is complete. The operation can optionally be associated to a stream by passing a non-zero stream argument. If stream is non-zero, the operation may overlap with operations in other streams.

    The device version of this function only handles device to device copies and cannot be given local or shared pointers.

    Parameters:
    • pitchedDevPtr (cudaPitchedPtr) – Pointer to pitched device memory
    • value (int) – Value to set for each byte of specified memory
    • extent (cudaExtent) – Size parameters for where to set device memory (width field in bytes)
    • stream (CUstream or cudaStream_t) – Stream identifier

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaMemPrefetchAsync(devPtr, size_t count, int dstDevice, stream)

    Prefetches memory to the specified destination device.

    Prefetches memory to the specified destination device. devPtr is the base device pointer of the memory to be prefetched and dstDevice is the destination device. count specifies the number of bytes to copy. stream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables, or it may also refer to system-allocated memory on systems with non-zero cudaDevAttrPageableMemoryAccess.

    Passing in cudaCpuDeviceId for dstDevice will prefetch the data to host memory. If dstDevice is a GPU, then the device attribute cudaDevAttrConcurrentManagedAccess must be non-zero. Additionally, stream must be associated with a device that has a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess.

    The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

    If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cudaMallocManaged allocations to host memory in order to make room. Device memory allocated using cudaMalloc or cudaMallocArray will not be evicted.

    By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only setup on dstDevice. The exact behavior however also depends on the settings applied to this memory range via cudaMemAdvise as described below:

    If cudaMemAdviseSetReadMostly was set on any subset of this memory range, then that subset will create a read-only copy of the pages on dstDevice.

    If cudaMemAdviseSetPreferredLocation was called on any subset of this memory range, then the pages will be migrated to dstDevice even if dstDevice is not the preferred location of any pages in the memory range.

    If cudaMemAdviseSetAccessedBy was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

    Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

    Note that this function is asynchronous with respect to the host and all work on other devices.

    Parameters:
    • devPtr (Any) – Pointer to be prefetched
    • count (size_t) – Size in bytes
    • dstDevice (int) – Destination device to prefetch to
    • stream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

    Return type:
    cudaError_t

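    A sketch of prefetching a managed allocation to a GPU and back to the host, gated on the concurrentManagedAccess attribute as the text above requires (the attribute check and the cudaMemAttachGlobal flag are assumed to be bound as shown):

        from cuda.bindings import runtime

        nbytes = 1 << 20
        dev = 0

        err, attr = runtime.cudaDeviceGetAttribute(
            runtime.cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess, dev)
        if attr:
            err, mptr = runtime.cudaMallocManaged(nbytes, runtime.cudaMemAttachGlobal)
            err, stream = runtime.cudaStreamCreate()

            # Migrate toward the GPU ahead of a kernel launch ...
            err, = runtime.cudaMemPrefetchAsync(mptr, nbytes, dev, stream)
            # ... and back to host memory afterwards.
            err, = runtime.cudaMemPrefetchAsync(mptr, nbytes,
                                                runtime.cudaCpuDeviceId, stream)

            err, = runtime.cudaStreamSynchronize(stream)
            err, = runtime.cudaStreamDestroy(stream)
            err, = runtime.cudaFree(mptr)
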
    cuda.bindings.runtime.cudaMemPrefetchAsync_v2(devPtr, size_t count, cudaMemLocation location: cudaMemLocation, unsigned int flags, stream)

    Prefetches memory to the specified destination location.

    Prefetches memory to the specified destination location. devPtr is the base device pointer of the memory to be prefetched and location specifies the destination location. count specifies the number of bytes to copy. stream is the stream in which the operation is enqueued. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables, or it may also refer to system-allocated memory on systems with non-zero cudaDevAttrPageableMemoryAccess.

    Specifying cudaMemLocationTypeDevice for location.type will prefetch memory to the GPU specified by the device ordinal location.id, which must have a non-zero value for the device attribute concurrentManagedAccess. Additionally, stream must be associated with a device that has a non-zero value for the device attribute concurrentManagedAccess. Specifying cudaMemLocationTypeHost as location.type will prefetch data to host memory. Applications can request prefetching memory to a specific host NUMA node by specifying cudaMemLocationTypeHostNuma for location.type and a valid host NUMA node id in location.id. Users can also request prefetching memory to the host NUMA node closest to the current thread’s CPU by specifying cudaMemLocationTypeHostNumaCurrent for location.type. Note that when location.type is either cudaMemLocationTypeHost or cudaMemLocationTypeHostNumaCurrent, location.id will be ignored.

    The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the prefetch operation is enqueued in the stream.

    If no physical memory has been allocated for this region, then this memory region will be populated and mapped on the destination device. If there’s insufficient memory to prefetch the desired region, the Unified Memory driver may evict pages from other cudaMallocManaged allocations to host memory in order to make room. Device memory allocated using cudaMalloc or cudaMallocArray will not be evicted.

    By default, any mappings to the previous location of the migrated pages are removed and mappings for the new location are only setup on the destination location. The exact behavior however also depends on the settings applied to this memory range via cuMemAdvise as described below:

    If cudaMemAdviseSetReadMostly was set on any subset of this memory range, then that subset will create a read-only copy of the pages on the destination location. If however the destination location is a host NUMA node, then any pages of that subset that are already in another host NUMA node will be transferred to the destination.

    If cudaMemAdviseSetPreferredLocation was called on any subset of this memory range, then the pages will be migrated to location even if location is not the preferred location of any pages in the memory range.

    If cudaMemAdviseSetAccessedBy was called on any subset of this memory range, then mappings to those pages from all the appropriate processors are updated to refer to the new location if establishing such a mapping is possible. Otherwise, those mappings are cleared.

    Note that this API is not required for functionality and only serves to improve performance by allowing the application to migrate data to a suitable location before it is accessed. Memory accesses to this range are always coherent and are allowed even when the data is actively being migrated.

    Note that this function is asynchronous with respect to the host and all work on other devices.

    Parameters:
    • devPtr (Any) – Pointer to be prefetched
    • count (size_t) – Size in bytes
    • location (cudaMemLocation) – Location to prefetch to
    • flags (unsigned int) – Flags for future use, must be zero now
    • stream (CUstream or cudaStream_t) – Stream to enqueue prefetch operation

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

    Return type:
    cudaError_t

    +cuda.bindings.runtime.cudaMemAdvise(devPtr, size_t count, advice: cudaMemoryAdvise, int device)#
    +

    Advise about the usage of a given memory range.

    +

    Advise the Unified Memory subsystem about the usage pattern for the +memory range starting at devPtr with a size of count bytes. The +start address and end address of the memory range will be rounded down +and rounded up respectively to be aligned to CPU page size before the +advice is applied. The memory range must refer to managed memory +allocated via cudaMallocManaged or declared via managed +variables. The memory range could also refer to system-allocated +pageable memory provided it represents a valid, host-accessible region +of memory and all additional constraints imposed by advice as +outlined below are also satisfied. Specifying an invalid system- +allocated pageable memory range results in an error being returned.

    +

    The advice parameter can take the following values:

    +
      +
• cudaMemAdviseSetReadMostly: This implies that the data is mostly going to be read from and only occasionally written to. Any read accesses from any processor to this region will create a read-only copy of at least the accessed pages in that processor's memory. Additionally, if cudaMemPrefetchAsync is called on this region, it will create a read-only copy of the data on the destination processor. If any processor writes to this region, all copies of the corresponding page will be invalidated except for the one where the write occurred. The device argument is ignored for this advice. Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU that has a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Also, if a context is created on a device that does not have the device attribute cudaDevAttrConcurrentManagedAccess set, then read-duplication will not occur until all such contexts are destroyed. If the memory region refers to valid system-allocated pageable memory, then the accessing device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess for a read-only copy to be created on that device. Note however that if the accessing device also has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then setting this advice will not create a read-only copy when that device accesses this memory region.

• cudaMemAdviseUnsetReadMostly: Undoes the effect of cudaMemAdviseSetReadMostly and also prevents the Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated copies of the data will be collapsed into a single copy. The location for the collapsed copy will be the preferred location if the page has a preferred location and one of the read-duplicated copies was resident at that location. Otherwise, the location chosen is arbitrary.

• cudaMemAdviseSetPreferredLocation: This advice sets the preferred location for the data to be the memory belonging to device. Passing in cudaCpuDeviceId for device sets the preferred location as host memory. If device is a GPU, then it must have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy when a fault occurs on that memory region. If the data is already in its preferred location and the faulting processor can establish a mapping without requiring the data to be migrated, then data migration will be avoided. On the other hand, if the data is not in its preferred location or if a direct mapping cannot be established, then it will be migrated to the processor accessing it. It is important to note that setting the preferred location does not prevent data prefetching done using cudaMemPrefetchAsync. Having a preferred location can override the page thrash detection and resolution logic in the Unified Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device memory, the page may eventually be pinned to host memory by the Unified Memory driver. But if the preferred location is set as device memory, then the page will continue to thrash indefinitely. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice, unless read accesses from device will not result in a read-only copy being created on that device as outlined in the description for the advice cudaMemAdviseSetReadMostly. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess.

• cudaMemAdviseUnsetPreferredLocation: Undoes the effect of cudaMemAdviseSetPreferredLocation and changes the preferred location to none.

• cudaMemAdviseSetAccessedBy: This advice implies that the data will be accessed by device. Passing in cudaCpuDeviceId for device will set the advice for the CPU. If device is a GPU, then the device attribute cudaDevAttrConcurrentManagedAccess must be non-zero. This advice does not cause data migration and has no impact on the location of the data per se. Instead, it causes the data to always be mapped in the specified processor's page tables, as long as the location of the data permits a mapping to be established. If the data gets migrated for any reason, the mappings are updated accordingly. This advice is recommended in scenarios where data locality is not important, but avoiding faults is. Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data over to the other GPUs is not as important because the accesses are infrequent and the overhead of migration may be too high. But preventing faults can still help improve performance, and so having a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated to host memory because the CPU typically cannot access device memory directly. Any GPU that had the cudaMemAdviseSetAccessedBy flag set for this data will now have its mapping updated to point to the page in host memory. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice. Additionally, if the preferred location of this memory region or any subset of it is also device, then the policies associated with cudaMemAdviseSetPreferredLocation will override the policies of this advice. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if device has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.

• cudaMemAdviseUnsetAccessedBy: Undoes the effect of cudaMemAdviseSetAccessedBy. Any mappings to the data from device may be removed at any time causing accesses to result in non-fatal page faults. If the memory region refers to valid system-allocated pageable memory, then device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if device has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.
Parameters:

• devPtr (Any) – Pointer to memory to set the advice for

• count (size_t) – Size in bytes of the memory range

• advice (cudaMemoryAdvise) – Advice to be applied for the specified memory range

• device (int) – Device to apply the advice for

Returns:

cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

Return type:

cudaError_t
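A minimal sketch of applying advice with the device-ordinal variant, assuming dptr and nbytes refer to the managed buffer from the prefetch example above:

    from cuda.bindings import runtime

    # Sketch: mark the range read-mostly, prefer device 0, and keep a CPU mapping.
    advise = runtime.cudaMemoryAdvise
    err, = runtime.cudaMemAdvise(dptr, nbytes, advise.cudaMemAdviseSetReadMostly, 0)
    err, = runtime.cudaMemAdvise(dptr, nbytes, advise.cudaMemAdviseSetPreferredLocation, 0)
    # The CPU is named with the cudaCpuDeviceId sentinel rather than an ordinal.
    err, = runtime.cudaMemAdvise(dptr, nbytes, advise.cudaMemAdviseSetAccessedBy,
                                 runtime.cudaCpuDeviceId)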
cuda.bindings.runtime.cudaMemAdvise_v2(devPtr, size_t count, advice: cudaMemoryAdvise, cudaMemLocation location: cudaMemLocation)

Advise about the usage of a given memory range.

Advise the Unified Memory subsystem about the usage pattern for the memory range starting at devPtr with a size of count bytes. The start address and end address of the memory range will be rounded down and rounded up respectively to be aligned to CPU page size before the advice is applied. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables. The memory range could also refer to system-allocated pageable memory provided it represents a valid, host-accessible region of memory and all additional constraints imposed by advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable memory range results in an error being returned.

The advice parameter can take the following values:
• cudaMemAdviseSetReadMostly: This implies that the data is mostly going to be read from and only occasionally written to. Any read accesses from any processor to this region will create a read-only copy of at least the accessed pages in that processor's memory. Additionally, if cudaMemPrefetchAsync or cudaMemPrefetchAsync_v2 is called on this region, it will create a read-only copy of the data on the destination processor. If the target location for cudaMemPrefetchAsync_v2 is a host NUMA node and a read-only copy already exists on another host NUMA node, that copy will be migrated to the targeted host NUMA node. If any processor writes to this region, all copies of the corresponding page will be invalidated except for the one where the write occurred. If the writing processor is the CPU and the preferred location of the page is a host NUMA node, then the page will also be migrated to that host NUMA node. The location argument is ignored for this advice. Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU that has a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Also, if a context is created on a device that does not have the device attribute cudaDevAttrConcurrentManagedAccess set, then read-duplication will not occur until all such contexts are destroyed. If the memory region refers to valid system-allocated pageable memory, then the accessing device must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess for a read-only copy to be created on that device. Note however that if the accessing device also has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then setting this advice will not create a read-only copy when that device accesses this memory region.

• cudaMemAdviseUnsetReadMostly: Undoes the effect of cudaMemAdviseSetReadMostly and also prevents the Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated copies of the data will be collapsed into a single copy. The location for the collapsed copy will be the preferred location if the page has a preferred location and one of the read-duplicated copies was resident at that location. Otherwise, the location chosen is arbitrary. Note: The location argument is ignored for this advice.

• cudaMemAdviseSetPreferredLocation: This advice sets the preferred location for the data to be the memory belonging to location. When type is cudaMemLocationTypeHost, id is ignored and the preferred location is set to be host memory. To set the preferred location to a specific host NUMA node, applications must set type to cudaMemLocationTypeHostNuma and id must specify the NUMA ID of the host NUMA node. If type is set to cudaMemLocationTypeHostNumaCurrent, id will be ignored and the host NUMA node closest to the calling thread's CPU will be used as the preferred location. If type is cudaMemLocationTypeDevice, then id must be a valid device ordinal and the device must have a non-zero value for the device attribute cudaDevAttrConcurrentManagedAccess. Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy when a fault occurs on that memory region. If the data is already in its preferred location and the faulting processor can establish a mapping without requiring the data to be migrated, then data migration will be avoided. On the other hand, if the data is not in its preferred location or if a direct mapping cannot be established, then it will be migrated to the processor accessing it. It is important to note that setting the preferred location does not prevent data prefetching done using cudaMemPrefetchAsync. Having a preferred location can override the page thrash detection and resolution logic in the Unified Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device memory, the page may eventually be pinned to host memory by the Unified Memory driver. But if the preferred location is set as device memory, then the page will continue to thrash indefinitely. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice, unless read accesses from location will not result in a read-only copy being created on that processor as outlined in the description for the advice cudaMemAdviseSetReadMostly. If the memory region refers to valid system-allocated pageable memory, and type is cudaMemLocationTypeDevice, then id must be a valid device that has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess.

• cudaMemAdviseUnsetPreferredLocation: Undoes the effect of cudaMemAdviseSetPreferredLocation and changes the preferred location to none. The location argument is ignored for this advice.

• cudaMemAdviseSetAccessedBy: This advice implies that the data will be accessed by processor location. The type must be either cudaMemLocationTypeDevice with id representing a valid device ordinal or cudaMemLocationTypeHost and id will be ignored. All other location types are invalid. If id is a GPU, then the device attribute cudaDevAttrConcurrentManagedAccess must be non-zero. This advice does not cause data migration and has no impact on the location of the data per se. Instead, it causes the data to always be mapped in the specified processor's page tables, as long as the location of the data permits a mapping to be established. If the data gets migrated for any reason, the mappings are updated accordingly. This advice is recommended in scenarios where data locality is not important, but avoiding faults is. Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data over to the other GPUs is not as important because the accesses are infrequent and the overhead of migration may be too high. But preventing faults can still help improve performance, and so having a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated to host memory because the CPU typically cannot access device memory directly. Any GPU that had the cudaMemAdviseSetAccessedBy flag set for this data will now have its mapping updated to point to the page in host memory. If cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the policies associated with that advice will override the policies of this advice. Additionally, if the preferred location of this memory region or any subset of it is also location, then the policies associated with cudaMemAdviseSetPreferredLocation will override the policies of this advice. If the memory region refers to valid system-allocated pageable memory, and type is cudaMemLocationTypeDevice, then the device in id must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if id has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.

• cudaMemAdviseUnsetAccessedBy: Undoes the effect of cudaMemAdviseSetAccessedBy. Any mappings to the data from location may be removed at any time causing accesses to result in non-fatal page faults. If the memory region refers to valid system-allocated pageable memory, and type is cudaMemLocationTypeDevice, then the device in id must have a non-zero value for the device attribute cudaDevAttrPageableMemoryAccess. Additionally, if id has a non-zero value for the device attribute cudaDevAttrPageableMemoryAccessUsesHostPageTables, then this call has no effect.
Parameters:

• devPtr (Any) – Pointer to memory to set the advice for

• count (size_t) – Size in bytes of the memory range

• advice (cudaMemoryAdvise) – Advice to be applied for the specified memory range

• location (cudaMemLocation) – Location to apply the advice for

Returns:

cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevice

Return type:

cudaError_t
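A minimal sketch of the cudaMemLocation-based variant, setting the preferred location of the same managed range to a host NUMA node (node 0 is an assumption about the system):

    from cuda.bindings import runtime

    # Sketch: prefer host NUMA node 0 for the managed range.
    loc = runtime.cudaMemLocation()
    loc.type = runtime.cudaMemLocationType.cudaMemLocationTypeHostNuma
    loc.id = 0  # assumed NUMA node id
    err, = runtime.cudaMemAdvise_v2(
        dptr, nbytes, runtime.cudaMemoryAdvise.cudaMemAdviseSetPreferredLocation, loc)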
cuda.bindings.runtime.cudaMemRangeGetAttribute(size_t dataSize, attribute: cudaMemRangeAttribute, devPtr, size_t count)

Query an attribute of a given memory range.

Query an attribute about the memory range starting at devPtr with a size of count bytes. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables.

The attribute parameter can take the following values:
• cudaMemRangeAttributeReadMostly: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be 1 if all pages in the given memory range have read-duplication enabled, or 0 otherwise.

• cudaMemRangeAttributePreferredLocation: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be a GPU device id if all pages in the memory range have that GPU as their preferred location, or it will be cudaCpuDeviceId if all pages in the memory range have the CPU as their preferred location, or it will be cudaInvalidDeviceId if either all the pages don't have the same preferred location or some of the pages don't have a preferred location at all. Note that the actual location of the pages in the memory range at the time of the query may be different from the preferred location.

• cudaMemRangeAttributeAccessedBy: If this attribute is specified, data will be interpreted as an array of 32-bit integers, and dataSize must be a non-zero multiple of 4. The result returned will be a list of device ids that had cudaMemAdviseSetAccessedBy set for that entire memory range. If any device does not have that advice set for the entire memory range, that device will not be included. If data is larger than the number of devices that have that advice set for that memory range, cudaInvalidDeviceId will be returned in all the extra space provided. For example, if dataSize is 12 (i.e. data has 3 elements) and only device 0 has the advice set, then the result returned will be { 0, cudaInvalidDeviceId, cudaInvalidDeviceId }. If data is smaller than the number of devices that have that advice set, then only as many devices will be returned as can fit in the array. There is no guarantee on which specific devices will be returned, however.

• cudaMemRangeAttributeLastPrefetchLocation: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. The result returned will be the last location to which all pages in the memory range were prefetched explicitly via cudaMemPrefetchAsync. This will either be a GPU id or cudaCpuDeviceId depending on whether the last location for prefetch was a GPU or the CPU respectively. If any page in the memory range was never explicitly prefetched or if all pages were not prefetched to the same location, cudaInvalidDeviceId will be returned. Note that this simply returns the last location that the application requested to prefetch the memory range to. It gives no indication as to whether the prefetch operation to that location has completed or even begun.

• cudaMemRangeAttributePreferredLocationId: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. If the cudaMemRangeAttributePreferredLocationType query for the same address range returns cudaMemLocationTypeDevice, it will be a valid device ordinal; if it returns cudaMemLocationTypeHostNuma, it will be a valid host NUMA node ID; if it returns any other location type, the id should be ignored.

• cudaMemRangeAttributeLastPrefetchLocationId: If this attribute is specified, data will be interpreted as a 32-bit integer, and dataSize must be 4. If the cudaMemRangeAttributeLastPrefetchLocationType query for the same address range returns cudaMemLocationTypeDevice, it will be a valid device ordinal; if it returns cudaMemLocationTypeHostNuma, it will be a valid host NUMA node ID; if it returns any other location type, the id should be ignored.
Parameters:

• dataSize (size_t) – The size of data

• attribute (cudaMemRangeAttribute) – The attribute to query

• devPtr (Any) – Start of the range to query

• count (size_t) – Size of the range to query

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue

• data (Any) – A pointer to a memory location where the result of the attribute query will be written
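A minimal sketch of a single-attribute query, checking whether read-duplication is enabled on the managed range from the examples above:

    from cuda.bindings import runtime

    # Sketch: 4-byte result for the 32-bit ReadMostly query.
    attr = runtime.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly
    err, data = runtime.cudaMemRangeGetAttribute(4, attr, dptr, nbytes)
    # `data` holds the 32-bit result (1 if every page is read-duplicated).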
cuda.bindings.runtime.cudaMemRangeGetAttributes(dataSizes: Tuple[int] | List[int], attributes: Optional[Tuple[cudaMemRangeAttribute] | List[cudaMemRangeAttribute]], size_t numAttributes, devPtr, size_t count)

Query attributes of a given memory range.

Query attributes of the memory range starting at devPtr with a size of count bytes. The memory range must refer to managed memory allocated via cudaMallocManaged or declared via managed variables. The attributes array will be interpreted to have numAttributes entries. The dataSizes array will also be interpreted to have numAttributes entries. The results of the query will be stored in data.

The list of supported attributes is given below. Please refer to cudaMemRangeGetAttribute for attribute descriptions and restrictions.
Parameters:

• dataSizes (List[int]) – Array containing the sizes of each result

• attributes (List[cudaMemRangeAttribute]) – An array of attributes to query (numAttributes and the number of attributes in this array should match)

• numAttributes (size_t) – Number of attributes to query

• devPtr (Any) – Start of the range to query

• count (size_t) – Size of the range to query

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue

• data (List[Any]) – A two-dimensional array containing pointers to memory locations where the result of each attribute query will be written to
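A minimal sketch of batching two of the queries above into one call:

    from cuda.bindings import runtime

    # Sketch: query read-duplication and preferred location in a single call.
    attrs = [runtime.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly,
             runtime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocation]
    err, data = runtime.cudaMemRangeGetAttributes([4, 4], attrs, len(attrs), dptr, nbytes)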
cuda.bindings.runtime.make_cudaPitchedPtr(d, size_t p, size_t xsz, size_t ysz)

Returns a cudaPitchedPtr based on input parameters.

Returns a cudaPitchedPtr based on the specified input parameters d, p, xsz, and ysz.
Parameters:

• d (Any) – Pointer to allocated memory

• p (size_t) – Pitch of allocated memory in bytes

• xsz (size_t) – Logical width of allocation in elements

• ysz (size_t) – Logical height of allocation in elements

Returns:

• cudaError_t.cudaSuccess – cudaError_t.cudaSuccess

• cudaPitchedPtr – cudaPitchedPtr specified by d, p, xsz, and ysz
cuda.bindings.runtime.make_cudaPos(size_t x, size_t y, size_t z)

Returns a cudaPos based on input parameters.

Returns a cudaPos based on the specified input parameters x, y, and z.

Parameters:

• x (size_t) – X position

• y (size_t) – Y position

• z (size_t) – Z position

Returns:

• cudaError_t.cudaSuccess – cudaError_t.cudaSuccess

• cudaPos – cudaPos specified by x, y, and z
cuda.bindings.runtime.make_cudaExtent(size_t w, size_t h, size_t d)

Returns a cudaExtent based on input parameters.

Returns a cudaExtent based on the specified input parameters w, h, and d.

Parameters:

• w (size_t) – Width in elements when referring to array memory, in bytes when referring to linear memory

• h (size_t) – Height in elements

• d (size_t) – Depth in elements

Returns:

• cudaError_t.cudaSuccess – cudaError_t.cudaSuccess

• cudaExtent – cudaExtent specified by w, h, and d
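These three helpers mirror the C-side constructors used to describe 3D allocations and copies. A minimal sketch, with illustrative values and dptr from the earlier examples:

    from cuda.bindings import runtime

    # Sketch: build the geometry objects consumed by the 3D memcpy/array APIs.
    err, extent = runtime.make_cudaExtent(256, 128, 4)  # w, h, d
    err, origin = runtime.make_cudaPos(0, 0, 0)         # x, y, z
    err, pitched = runtime.make_cudaPitchedPtr(dptr, 256, 256, 128)  # d, p, xsz, ysz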

Stream Ordered Memory Allocator

Overview

The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use-before-allocation or use-after-free error will cause undefined behavior.

The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee.

Supported Platforms

Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cudaDeviceGetAttribute() with the device attribute cudaDevAttrMemoryPoolsSupported.
cuda.bindings.runtime.cudaMallocAsync(size_t size, hStream)

Allocates memory with stream ordered semantics.

Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the memory pool associated with the stream's device.
Parameters:

• size (size_t) – Number of bytes to allocate

• hStream (CUstream or cudaStream_t) – The stream establishing the stream ordering contract and the memory pool to allocate from

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorNotSupported, cudaErrorOutOfMemory

• devPtr (Any) – Returned device pointer

Notes

The default memory pool of a device contains device memory from that device.

Basic stream ordering allows future work submitted into the same stream to use the allocation. Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation operation completes before work submitted in a separate stream runs.

During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters.
cuda.bindings.runtime.cudaFreeAsync(devPtr, hStream)

Frees memory with stream ordered semantics.

Inserts a free operation into hStream. The allocation must not be accessed after stream execution reaches the free. After this API returns, accessing the memory from any subsequent work launched on the GPU or querying its pointer attributes results in undefined behavior.

Parameters:

• devPtr (Any) – Memory to free

• hStream (CUstream or cudaStream_t) – The stream establishing the stream ordering promise

Returns:

cudaSuccess, cudaErrorInvalidValue, cudaErrorNotSupported

Return type:

cudaError_t

Notes

During stream capture, this function results in the creation of a free node and must therefore be passed the address of a graph allocation.
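A minimal sketch tying the two entries above together: allocate in stream order, use, free in the same stream, and synchronize. Pool support is queried first, as the overview suggests; device 0 is an assumption.

    from cuda.bindings import runtime

    # Sketch: stream-ordered allocation lifecycle on device 0.
    err, supported = runtime.cudaDeviceGetAttribute(
        runtime.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported, 0)
    if supported:
        err, stream = runtime.cudaStreamCreate()
        err, buf = runtime.cudaMallocAsync(1 << 20, stream)
        # ... enqueue kernels or copies that use `buf` on `stream` here ...
        err, = runtime.cudaFreeAsync(buf, stream)
        err, = runtime.cudaStreamSynchronize(stream)  # free has completed after this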
cuda.bindings.runtime.cudaMemPoolTrimTo(memPool, size_t minBytesToKeep)

Tries to release memory back to the OS.

Releases memory back to the OS until the pool contains fewer than minBytesToKeep reserved bytes, or there is no more memory that the allocator can safely release. The allocator cannot release OS allocations that back outstanding asynchronous allocations. The OS allocations may happen at different granularity from the user allocations.

Parameters:

• memPool (CUmemoryPool or cudaMemPool_t) – The memory pool to trim

• minBytesToKeep (size_t) – If the pool has less than minBytesToKeep reserved, the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have at least minBytesToKeep bytes reserved after the operation.

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t

Notes

Allocations that have not been freed count as outstanding.

Allocations that have been asynchronously freed but whose completion has not been observed on the host (e.g. by a synchronize) can count as outstanding.
cuda.bindings.runtime.cudaMemPoolSetAttribute(memPool, attr: cudaMemPoolAttr, value)

Sets attributes of a memory pool.

Supported attributes are:

• cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

• cudaMemPoolReuseFollowEventDependencies: (value type = int) Allow cudaMallocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

• cudaMemPoolReuseAllowOpportunistic: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

• cudaMemPoolReuseAllowInternalDependencies: (value type = int) Allow cudaMallocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cudaFreeAsync. (default enabled)

• cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) Reset the high watermark that tracks the amount of backing memory that was allocated for the memory pool. It is illegal to set this attribute to a non-zero value.

• cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) Reset the high watermark that tracks the amount of used memory that was allocated for the memory pool. It is illegal to set this attribute to a non-zero value.

Parameters:

• memPool (CUmemoryPool or cudaMemPool_t)

• attr (cudaMemPoolAttr)

• value (Any)

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t
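A minimal sketch of raising the release threshold on a device's default pool so freed memory stays cached across synchronizes. Device 0 and the 32 MiB figure are illustrative; the cuuint64_t wrapper follows the attribute's documented value type and is assumed to be exposed by the bindings:

    from cuda.bindings import runtime

    # Sketch: keep up to 32 MiB reserved instead of trimming at every synchronize.
    err, pool = runtime.cudaDeviceGetDefaultMemPool(0)
    threshold = runtime.cuuint64_t(32 * 1024 * 1024)  # assumed wrapper type
    err, = runtime.cudaMemPoolSetAttribute(
        pool, runtime.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold, threshold)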
cuda.bindings.runtime.cudaMemPoolGetAttribute(memPool, attr: cudaMemPoolAttr)

Gets attributes of a memory pool.

Supported attributes are:

• cudaMemPoolAttrReleaseThreshold: (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

• cudaMemPoolReuseFollowEventDependencies: (value type = int) Allow cudaMallocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

• cudaMemPoolReuseAllowOpportunistic: (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

• cudaMemPoolReuseAllowInternalDependencies: (value type = int) Allow cudaMallocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cudaFreeAsync. (default enabled)

• cudaMemPoolAttrReservedMemCurrent: (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool.

• cudaMemPoolAttrReservedMemHigh: (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset.

• cudaMemPoolAttrUsedMemCurrent: (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.

• cudaMemPoolAttrUsedMemHigh: (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset.

Parameters:

• memPool (CUmemoryPool or cudaMemPool_t)

• attr (cudaMemPoolAttr)

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue

• value (Any) – The value of the requested attribute
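A minimal sketch reading back the pool metrics listed above and then trimming, using the pool from the previous example:

    from cuda.bindings import runtime

    # Sketch: inspect current and peak backing memory of the pool.
    attr = runtime.cudaMemPoolAttr
    err, reserved = runtime.cudaMemPoolGetAttribute(pool, attr.cudaMemPoolAttrReservedMemCurrent)
    err, peak = runtime.cudaMemPoolGetAttribute(pool, attr.cudaMemPoolAttrReservedMemHigh)
    err, = runtime.cudaMemPoolTrimTo(pool, 0)  # then release everything unused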
cuda.bindings.runtime.cudaMemPoolSetAccess(memPool, descList: Optional[Tuple[cudaMemAccessDesc] | List[cudaMemAccessDesc]], size_t count)

Controls visibility of pools between devices.

Parameters:

• memPool (CUmemoryPool or cudaMemPool_t) – The pool being modified

• descList (List[cudaMemAccessDesc]) – Array of access descriptors. Each descriptor instructs the access to enable for a single GPU

• count (size_t) – Number of descriptors in the descList array

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t
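A minimal sketch granting a second GPU read/write access to allocations from device 0's default pool (a two-GPU system with peer capability is assumed):

    from cuda.bindings import runtime

    # Sketch: let device 1 map allocations coming from device 0's default pool.
    err, pool = runtime.cudaDeviceGetDefaultMemPool(0)
    desc = runtime.cudaMemAccessDesc()
    desc.location.type = runtime.cudaMemLocationType.cudaMemLocationTypeDevice
    desc.location.id = 1
    desc.flags = runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite
    err, = runtime.cudaMemPoolSetAccess(pool, [desc], 1)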
cuda.bindings.runtime.cudaMemPoolGetAccess(memPool, cudaMemLocation location: Optional[cudaMemLocation])

Returns the accessibility of a pool from a device.

Returns the accessibility of the pool's memory from the specified location.

Parameters:

• memPool (CUmemoryPool or cudaMemPool_t) – The pool being queried

• location (cudaMemLocation) – The location accessing the pool

Returns:

• cudaError_t

• flags (cudaMemAccessFlags) – The accessibility of the pool from the specified location
cuda.bindings.runtime.cudaMemPoolCreate(cudaMemPoolProps poolProps: Optional[cudaMemPoolProps])

Creates a memory pool.

Creates a CUDA memory pool and returns the handle in pool. The poolProps determines the properties of the pool such as the backing device and IPC capabilities.

To create a memory pool targeting a specific host NUMA node, applications must set cudaMemPoolProps::cudaMemLocation::type to cudaMemLocationTypeHostNuma and cudaMemPoolProps::cudaMemLocation::id must specify the NUMA ID of the host memory node. Specifying cudaMemLocationTypeHostNumaCurrent or cudaMemLocationTypeHost as the cudaMemPoolProps::cudaMemLocation::type will result in cudaErrorInvalidValue. By default, the pool's memory will be accessible from the device it is allocated on. In the case of pools created with cudaMemLocationTypeHostNuma, their default accessibility will be from the host CPU. Applications can control the maximum size of the pool by specifying a non-zero value for maxSize. If set to 0, the maximum size of the pool will default to a system dependent value.

Applications can set handleTypes to cudaMemHandleTypeFabric in order to create a cudaMemPool_t suitable for sharing within an IMEX domain. An IMEX domain is either an OS instance or a group of securely connected OS instances using the NVIDIA IMEX daemon. An IMEX channel is a global resource within the IMEX domain that represents a logical entity that aims to provide fine grained accessibility control for the participating processes. When exporter and importer CUDA processes have been granted access to the same IMEX channel, they can securely share memory. If the allocating process does not have access setup for an IMEX channel, attempting to export a CUmemoryPool with cudaMemHandleTypeFabric will result in cudaErrorNotPermitted. The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels.

Parameters:

poolProps (cudaMemPoolProps) – None

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue

• memPool (cudaMemPool_t) – The created memory pool

Notes

Specifying cudaMemHandleTypeNone creates a memory pool that will not support IPC.
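A minimal sketch of creating an explicit device pool. The device ordinal and size cap are illustrative; the struct fields follow the cudaMemPoolProps layout described above:

    from cuda.bindings import runtime

    # Sketch: explicit pool on device 0 with a 1 GiB ceiling.
    props = runtime.cudaMemPoolProps()
    props.allocType = runtime.cudaMemAllocationType.cudaMemAllocationTypePinned
    props.location.type = runtime.cudaMemLocationType.cudaMemLocationTypeDevice
    props.location.id = 0
    props.maxSize = 1 << 30  # 0 would select a system-dependent default
    err, pool = runtime.cudaMemPoolCreate(props)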
cuda.bindings.runtime.cudaMemPoolDestroy(memPool)

Destroys the specified memory pool.

If any pointers obtained from this pool haven't been freed or the pool has free operations that haven't completed when cudaMemPoolDestroy is invoked, the function will return immediately and the resources associated with the pool will be released automatically once there are no more outstanding allocations.

Destroying the current mempool of a device sets the default mempool of that device as the current mempool for that device.

Parameters:

memPool (CUmemoryPool or cudaMemPool_t) – None

Returns:

cudaSuccess, cudaErrorInvalidValue

Return type:

cudaError_t

Notes

A device's default memory pool cannot be destroyed.
cuda.bindings.runtime.cudaMallocFromPoolAsync(size_t size, memPool, stream)

Allocates memory from a specified pool with stream ordered semantics.

Inserts an allocation operation into hStream. A pointer to the allocated memory is returned immediately in *dptr. The allocation must not be accessed until the allocation operation completes. The allocation comes from the specified memory pool.

Parameters:

• size (size_t) – Number of bytes to allocate

• memPool (CUmemoryPool or cudaMemPool_t) – The pool to allocate from

• stream (CUstream or cudaStream_t) – The stream establishing the stream ordering semantic

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorNotSupported, cudaErrorOutOfMemory

• ptr (Any) – Returned device pointer

Notes

During stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters.
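A minimal sketch of allocating from the explicit pool created above instead of the device's current pool, reusing the stream from earlier examples:

    from cuda.bindings import runtime

    # Sketch: pool-targeted stream-ordered allocation and free.
    err, ptr = runtime.cudaMallocFromPoolAsync(1 << 20, pool, stream)
    # ... use `ptr` in work enqueued on `stream` ...
    err, = runtime.cudaFreeAsync(ptr, stream)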
cuda.bindings.runtime.cudaMemPoolExportToShareableHandle(memPool, handleType: cudaMemAllocationHandleType, unsigned int flags)

Exports a memory pool to the requested handle type.

Given an IPC capable mempool, create an OS handle to share the pool with another process. A recipient process can convert the shareable handle into a mempool with cudaMemPoolImportFromShareableHandle. Individual pointers can then be shared with the cudaMemPoolExportPointer and cudaMemPoolImportPointer APIs. The implementation of what the shareable handle is and how it can be transferred is defined by the requested handle type.

Parameters:

• memPool (CUmemoryPool or cudaMemPool_t) – Pool to export

• handleType (cudaMemAllocationHandleType) – The type of handle requested

• flags (unsigned int) – Must be 0

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorOutOfMemory

• handle_out (Any) – Pointer to the location in which to store the requested handle

Notes

To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than cudaMemHandleTypeNone.
cuda.bindings.runtime.cudaMemPoolImportFromShareableHandle(shareableHandle, handleType: cudaMemAllocationHandleType, unsigned int flags)

Imports a memory pool from a shared handle.

Specific allocations can be imported from the imported pool with cudaMemPoolImportPointer.

Parameters:

• handle (Any) – OS handle of the pool to open

• handleType (cudaMemAllocationHandleType) – The type of handle being imported

• flags (unsigned int) – Must be 0

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorOutOfMemory

• memPool (cudaMemPool_t) – Returned memory pool

Notes

Imported memory pools do not support creating new allocations. As such, imported memory pools may not be used in cudaDeviceSetMemPool or cudaMallocFromPoolAsync calls.
cuda.bindings.runtime.cudaMemPoolExportPointer(ptr)

Export data to share a memory pool allocation between processes.

Constructs shareData_out for sharing a specific allocation from an already shared memory pool. The recipient process can import the allocation with the cudaMemPoolImportPointer API. The data is not a handle and may be shared through any IPC mechanism.

Parameters:

ptr (Any) – Pointer to memory being exported

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue, cudaErrorOutOfMemory

• shareData_out (cudaMemPoolPtrExportData) – Returned export data
cuda.bindings.runtime.cudaMemPoolImportPointer(memPool, cudaMemPoolPtrExportData exportData: Optional[cudaMemPoolPtrExportData])

Import a memory pool allocation from another process.

Returns in ptr_out a pointer to the imported memory. The imported memory must not be accessed before the allocation operation completes in the exporting process. The imported memory must be freed from all importing processes before being freed in the exporting process. The pointer may be freed with cudaFree or cudaFreeAsync. If cudaFreeAsync is used, the free must be completed on the importing process before the free operation on the exporting process.

Parameters:

• memPool (CUmemoryPool or cudaMemPool_t) – Pool from which to import

• exportData (cudaMemPoolPtrExportData) – Data specifying the memory to import

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue

• ptr_out (Any) – Pointer to imported memory

Notes

The cudaFreeAsync API may be used in the exporting process before the cudaFreeAsync operation completes in its stream, as long as the cudaFreeAsync in the exporting process specifies a stream with a stream dependency on the importing process's cudaFreeAsync.
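A minimal two-process sketch of the export/import flow described by the four entries above. The POSIX file-descriptor handle type and the transport for os_handle and export_data are assumptions; error checks are elided:

    from cuda.bindings import runtime

    # Exporting process: share the pool, then one allocation from it.
    htype = runtime.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor
    err, os_handle = runtime.cudaMemPoolExportToShareableHandle(pool, htype, 0)
    err, export_data = runtime.cudaMemPoolExportPointer(ptr)
    # ... transfer os_handle (e.g. over a Unix socket) and export_data ...

    # Importing process: rebuild the pool handle, then the pointer.
    err, peer_pool = runtime.cudaMemPoolImportFromShareableHandle(os_handle, htype, 0)
    err, peer_ptr = runtime.cudaMemPoolImportPointer(peer_pool, export_data)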

Unified Addressing

This section describes the unified addressing functions of the CUDA runtime application programming interface.

Overview

CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer: the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below).

Supported Platforms

Whether or not a device supports unified addressing may be queried by calling cudaGetDeviceProperties() with the device property cudaDeviceProp::unifiedAddressing.

Unified addressing is automatically enabled in 64-bit processes.

Looking Up Information from Pointer Values

It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cudaPointerGetAttributes().

Since pointers are unique, it is not necessary to specify information about the pointers specified to cudaMemcpy() and other copy functions. The copy direction cudaMemcpyDefault may be used to specify that the CUDA runtime should infer the location of the pointer from its value.

Automatic Mapping of Host Allocated Host Memory

All host memory allocated through all devices using cudaMallocHost() and cudaHostAlloc() is always directly accessible from all devices that support unified addressing. This is the case regardless of whether or not the flags cudaHostAllocPortable and cudaHostAllocMapped are specified.

The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host. It is not necessary to call cudaHostGetDevicePointer() to get the device pointer for these allocations.

Note that this is not the case for memory allocated using the flag cudaHostAllocWriteCombined, as discussed below.

Direct Access of Peer Memory

Upon enabling direct access from a device that supports unified addressing to another peer device that supports unified addressing using cudaDeviceEnablePeerAccess(), all memory allocated in the peer device using cudaMalloc() and cudaMallocPitch() will immediately be accessible by the current device. The device pointer value through which any peer's memory may be accessed in the current device is the same pointer value through which that memory may be accessed from the peer device.

Exceptions, Disjoint Addressing

Not all memory may be accessed on devices through the same pointer value through which it is accessed on the host. These exceptions are host memory registered using cudaHostRegister() and host memory allocated using the flag cudaHostAllocWriteCombined. For these exceptions, there exist distinct host and device addresses for the memory. The device address is guaranteed not to overlap any valid host pointer range and is guaranteed to have the same value across all devices that support unified addressing.

This device address may be queried using cudaHostGetDevicePointer() when a device using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory in cudaMemcpy() and similar functions using the cudaMemcpyDefault memory direction.
cuda.bindings.runtime.cudaPointerGetAttributes(ptr)

Returns attributes about a specified pointer.

Returns in *attributes the attributes of the pointer ptr. If the pointer was not allocated in, mapped by, or registered with a context supporting unified addressing, cudaErrorInvalidValue is returned.

The cudaPointerAttributes structure is defined as:

View CUDA Toolkit Documentation for a C++ code example

In this structure, the individual fields mean:

• type identifies the type of memory. It can be cudaMemoryTypeUnregistered for unregistered host memory, cudaMemoryTypeHost for registered host memory, cudaMemoryTypeDevice for device memory or cudaMemoryTypeManaged for managed memory.

• device is the device against which ptr was allocated. If ptr has memory type cudaMemoryTypeDevice then this identifies the device on which the memory referred to by ptr physically resides. If ptr has memory type cudaMemoryTypeHost then this identifies the device which was current when the allocation was made (and if that device is deinitialized then this allocation will vanish with that device's state).

• devicePointer is the device pointer alias through which the memory referred to by ptr may be accessed on the current device. If the memory referred to by ptr cannot be accessed directly by the current device then this is NULL.

• hostPointer is the host pointer alias through which the memory referred to by ptr may be accessed on the host. If the memory referred to by ptr cannot be accessed directly by the host then this is NULL.

Parameters:

ptr (Any) – Pointer to get attributes for

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidValue

• attributes (cudaPointerAttributes) – Attributes for the specified pointer

Notes

From CUDA 11.0 onward, passing a host pointer will return cudaMemoryTypeUnregistered in type, and the call will return cudaSuccess.
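A minimal sketch of the pointer lookup described in this section, classifying an arbitrary CUDA-visible pointer (dptr from earlier examples):

    from cuda.bindings import runtime

    # Sketch: inspect where a pointer lives.
    err, attrs = runtime.cudaPointerGetAttributes(dptr)
    if err == runtime.cudaError_t.cudaSuccess:
        print(attrs.type)           # e.g. cudaMemoryTypeManaged
        print(attrs.device)         # owning/current device ordinal
        print(attrs.devicePointer)  # device-side alias (NULL if inaccessible)
        print(attrs.hostPointer)    # host-side alias (NULL if inaccessible)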

Peer Device Memory Access

This section describes the peer device memory access functions of the CUDA runtime application programming interface.
cuda.bindings.runtime.cudaDeviceCanAccessPeer(int device, int peerDevice)

Queries if a device may directly access a peer device's memory.

Returns in *canAccessPeer a value of 1 if device device is capable of directly accessing memory from peerDevice and 0 otherwise. If direct access of peerDevice from device is possible, then access may be enabled by calling cudaDeviceEnablePeerAccess().

Parameters:

• device (int) – Device from which allocations on peerDevice are to be directly accessed.

• peerDevice (int) – Device on which the allocations to be directly accessed by device reside.

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidDevice

• canAccessPeer (int) – Returned access capability
cuda.bindings.runtime.cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags)

Enables direct access to memory allocations on a peer device.

On success, all allocations from peerDevice will immediately be accessible by the current device. They will remain accessible until access is explicitly disabled using cudaDeviceDisablePeerAccess() or either device is reset using cudaDeviceReset().

Note that access granted by this call is unidirectional and that in order to access memory on the current device from peerDevice, a separate symmetric call to cudaDeviceEnablePeerAccess() is required.

Note that there are both device-wide and system-wide limitations per system configuration, as noted in the CUDA Programming Guide under the section "Peer-to-Peer Memory Access".

Returns cudaErrorInvalidDevice if cudaDeviceCanAccessPeer() indicates that the current device cannot directly access memory from peerDevice.

Returns cudaErrorPeerAccessAlreadyEnabled if direct access of peerDevice from the current device has already been enabled.

Returns cudaErrorInvalidValue if flags is not 0.

Parameters:

• peerDevice (int) – Peer device to enable direct access to from the current device

• flags (unsigned int) – Reserved for future use and must be set to 0

Returns:

cudaSuccess, cudaErrorInvalidDevice, cudaErrorPeerAccessAlreadyEnabled, cudaErrorInvalidValue

Return type:

cudaError_t
cuda.bindings.runtime.cudaDeviceDisablePeerAccess(int peerDevice)

Disables direct access to memory allocations on a peer device.

Returns cudaErrorPeerAccessNotEnabled if direct access to memory on peerDevice has not yet been enabled from the current device.

Parameters:

peerDevice (int) – Peer device to disable direct access to

Returns:

cudaSuccess, cudaErrorPeerAccessNotEnabled, cudaErrorInvalidDevice

Return type:

cudaError_t
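A minimal sketch of the peer-access workflow across the three entries above (a two-GPU system is assumed):

    from cuda.bindings import runtime

    # Sketch: let kernels running on device 0 dereference device 1 memory.
    err, can = runtime.cudaDeviceCanAccessPeer(0, 1)
    if err == runtime.cudaError_t.cudaSuccess and can:
        err, = runtime.cudaSetDevice(0)
        err, = runtime.cudaDeviceEnablePeerAccess(1, 0)  # flags must be 0
        # ... device 0 work may now access cudaMalloc'd memory from device 1 ...
        err, = runtime.cudaDeviceDisablePeerAccess(1)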

OpenGL Interoperability

This section describes the OpenGL interoperability functions of the CUDA runtime application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability.
class cuda.bindings.runtime.cudaGLDeviceList(value)

CUDA devices corresponding to the current OpenGL context

cudaGLDeviceListAll = 1

The CUDA devices for all GPUs used by the current OpenGL context

cudaGLDeviceListCurrentFrame = 2

The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame

cudaGLDeviceListNextFrame = 3

The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame
cuda.bindings.runtime.cudaGLGetDevices(unsigned int cudaDeviceCount, deviceList: cudaGLDeviceList)

Gets the CUDA devices associated with the current OpenGL context.

Returns in *pCudaDeviceCount the number of CUDA-compatible devices corresponding to the current OpenGL context. Also returns in *pCudaDevices at most cudaDeviceCount of the CUDA-compatible devices corresponding to the current OpenGL context. If any of the GPUs being used by the current OpenGL context are not CUDA capable then the call will return cudaErrorNoDevice.

Parameters:

• cudaDeviceCount (unsigned int) – The size of the output device array pCudaDevices

• deviceList (cudaGLDeviceList) – The set of devices to return. This set may be cudaGLDeviceListAll for all devices, cudaGLDeviceListCurrentFrame for the devices used to render the current frame (in SLI), or cudaGLDeviceListNextFrame for the devices used to render the next frame (in SLI).

Returns:

• cudaError_t – cudaSuccess, cudaErrorNoDevice, cudaErrorInvalidGraphicsContext, cudaErrorUnknown

• pCudaDeviceCount (unsigned int) – Returned number of CUDA devices corresponding to the current OpenGL context

• pCudaDevices (List[int]) – Returned CUDA devices corresponding to the current OpenGL context

Notes

This function is not supported on Mac OS X.
cuda.bindings.runtime.cudaGraphicsGLRegisterImage(image, target, unsigned int flags)

Register an OpenGL texture or renderbuffer object.

Registers the texture or renderbuffer object specified by image for access by CUDA. A handle to the registered object is returned as resource.

target must match the type of the object, and must be one of GL_TEXTURE_2D, GL_TEXTURE_RECTANGLE, GL_TEXTURE_CUBE_MAP, GL_TEXTURE_3D, GL_TEXTURE_2D_ARRAY, or GL_RENDERBUFFER.

The register flags flags specify the intended usage, as follows:

• cudaGraphicsRegisterFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.

• cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA will not write to this resource.

• cudaGraphicsRegisterFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

• cudaGraphicsRegisterFlagsSurfaceLoadStore: Specifies that CUDA will bind this resource to a surface reference.

• cudaGraphicsRegisterFlagsTextureGather: Specifies that CUDA will perform texture gather operations on this resource.

The following image formats are supported. For brevity's sake, the list is abbreviated. For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats {GL_R8, GL_R16, GL_RG8, GL_RG16}:

• GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY

• {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}

• {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}

The following image classes are currently disallowed:

• Textures with borders

• Multisampled renderbuffers

Parameters:

• image (GLuint) – name of texture or renderbuffer object to be registered

• target (GLenum) – Identifies the type of object specified by image

• flags (unsigned int) – Register flags

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorUnknown

• resource (cudaGraphicsResource) – Pointer to the returned object handle
cuda.bindings.runtime.cudaGraphicsGLRegisterBuffer(buffer, unsigned int flags)

Registers an OpenGL buffer object.

Registers the buffer object specified by buffer for access by CUDA. A handle to the registered object is returned as resource. The register flags flags specify the intended usage, as follows:

• cudaGraphicsRegisterFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.

• cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA will not write to this resource.

• cudaGraphicsRegisterFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

Parameters:

• buffer (GLuint) – name of buffer object to be registered

• flags (unsigned int) – Register flags

Returns:

• cudaError_t – cudaSuccess, cudaErrorInvalidDevice, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorUnknown

• resource (cudaGraphicsResource) – Pointer to the returned object handle
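A minimal sketch of registering a GL buffer for CUDA access. The buffer name vbo must come from a live OpenGL context created through a GL binding such as PyOpenGL (an assumption outside these bindings):

    from cuda.bindings import runtime

    # Sketch: register an existing GL buffer object; CUDA will both read and write.
    flags = runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone
    err, resource = runtime.cudaGraphicsGLRegisterBuffer(vbo, flags)  # vbo: hypothetical GL buffer name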

Direct3D 9 Interoperability

Direct3D 10 Interoperability

Direct3D 11 Interoperability

VDPAU Interoperability

This section describes the VDPAU interoperability functions of the CUDA runtime application programming interface.
cuda.bindings.runtime.cudaVDPAUGetDevice(vdpDevice, vdpGetProcAddress)

Gets the CUDA device associated with a VdpDevice.

Returns the CUDA device associated with a VdpDevice, if applicable.

Parameters:

• vdpDevice (VdpDevice) – A VdpDevice handle

• vdpGetProcAddress (VdpGetProcAddress) – VDPAU's VdpGetProcAddress function pointer

Returns:

• cudaError_t – cudaSuccess

• device (int) – Returns the device associated with vdpDevice, or -1 if the device associated with vdpDevice is not a compute device.
    +cuda.bindings.runtime.cudaVDPAUSetVDPAUDevice(int device, vdpDevice, vdpGetProcAddress)#
    +

    Sets a CUDA device to use VDPAU interoperability.

    +

    Records vdpDevice as the VdpDevice for VDPAU interoperability with +the CUDA device device and sets device as the current device for +the calling host thread.

    +

    This function will immediately initialize the primary context on +device if needed.

    +

    If device has already been initialized then this call will fail with +the error cudaErrorSetOnActiveProcess. In this case it is +necessary to reset device using cudaDeviceReset() before +VDPAU interoperability on device may be enabled.

    +
    +
    Parameters:
    +
      +
    • device (int) – Device to use for VDPAU interoperability

    • +
    • vdpDevice (VdpDevice) – The VdpDevice to interoperate with

    • +
    • vdpGetProcAddress (VdpGetProcAddress) – VDPAU’s VdpGetProcAddress function pointer

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidDevice, cudaErrorSetOnActiveProcess

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    cuda.bindings.runtime.cudaGraphicsVDPAURegisterVideoSurface(vdpSurface, unsigned int flags)

    Register a VdpVideoSurface object.

    Registers the VdpVideoSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as resource. The surface’s intended usage is specified using flags, as follows:

    • cudaGraphicsMapFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.
    • cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to this resource.
    • cudaGraphicsMapFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    Parameters:
    • vdpSurface (VdpVideoSurface) – VDPAU object to be registered
    • flags (unsigned int) – Map flags

    Returns:
    cuda.bindings.runtime.cudaGraphicsVDPAURegisterOutputSurface(vdpSurface, unsigned int flags)

    Register a VdpOutputSurface object.

    Registers the VdpOutputSurface specified by vdpSurface for access by CUDA. A handle to the registered object is returned as resource. The surface’s intended usage is specified using flags, as follows:

    • cudaGraphicsMapFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.
    • cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to this resource.
    • cudaGraphicsMapFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    Parameters:
    • vdpSurface (VdpOutputSurface) – VDPAU object to be registered
    • flags (unsigned int) – Map flags

    Returns:
    EGL Interoperability

    This section describes the EGL interoperability functions of the CUDA runtime application programming interface.
    cuda.bindings.runtime.cudaGraphicsEGLRegisterImage(image, unsigned int flags)

    Registers an EGL image.

    Registers the EGLImageKHR specified by image for access by CUDA. A handle to the registered object is returned as pCudaResource. Additional mapping/unmapping is not required for the registered resource, and cudaGraphicsResourceGetMappedEglFrame can be called directly on the pCudaResource.

    The application is responsible for synchronizing access to shared objects. It must ensure that any pending operations which access the objects have completed before passing control to CUDA. This may be accomplished by issuing and waiting for a glFinish command on all GL contexts (for OpenGL, and likewise for other APIs). The application is also responsible for ensuring that any pending operation on the registered CUDA resource has completed prior to executing subsequent commands in other APIs accessing the same memory objects. This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).

    The surface’s intended usage is specified using flags, as follows:

    • cudaGraphicsRegisterFlagsNone: Specifies no hints about how this resource will be used. It is therefore assumed that this resource will be read from and written to by CUDA. This is the default value.
    • cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA will not write to this resource.
    • cudaGraphicsRegisterFlagsWriteDiscard: Specifies that CUDA will not read from this resource and will write over the entire contents of the resource, so none of the data previously stored in the resource will be preserved.

    The EGLImageKHR is an object which can be used to create an EGLImage target resource. It is defined as a void pointer: typedef void* EGLImageKHR

    Parameters:
    • image (EGLImageKHR) – An EGLImageKHR image which can be used to create target resource.
    • flags (unsigned int) – Map flags

    Returns:
    cuda.bindings.runtime.cudaEGLStreamConsumerConnect(eglStream)

    Connect CUDA to EGLStream as a consumer.

    Connect CUDA as a consumer to the EGLStreamKHR specified by eglStream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:
    eglStream (EGLStreamKHR) – EGLStreamKHR handle

    Returns:
    cuda.bindings.runtime.cudaEGLStreamConsumerConnectWithFlags(eglStream, unsigned int flags)

    Connect CUDA to EGLStream as a consumer with given flags.

    Connect CUDA as a consumer to the EGLStreamKHR specified by stream, with flags defined by cudaEglResourceLocationFlags.

    The flags specify whether the consumer wants to access frames from system memory or video memory. Default is cudaEglResourceLocationVidmem.

    Parameters:
    • eglStream (EGLStreamKHR) – EGLStreamKHR handle
    • flags (unsigned int) – Flags denote intended location - system or video.

    Returns:
    cuda.bindings.runtime.cudaEGLStreamConsumerDisconnect(conn)

    Disconnect CUDA as a consumer from EGLStream.

    Disconnect CUDA as a consumer to EGLStreamKHR.

    Parameters:
    conn (cudaEglStreamConnection) – Connection to disconnect.

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaEGLStreamConsumerAcquireFrame(conn, pCudaResource, pStream, unsigned int timeout)

    Acquire an image frame from the EGLStream with CUDA as a consumer.

    Acquire an image frame from EGLStreamKHR. cudaGraphicsResourceGetMappedEglFrame can be called on pCudaResource to get cudaEglFrame.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown, cudaErrorLaunchTimeout

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaEGLStreamConsumerReleaseFrame(conn, pCudaResource, pStream)

    Releases the last frame acquired from the EGLStream.

    Release the acquired image frame specified by pCudaResource to EGLStreamKHR.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
    cudaError_t
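    A minimal consumer-side sketch, assuming egl_stream is an EGLStreamKHR handle obtained from EGL beforehand (hypothetical; the acquire/release loop is elided):

        from cuda.bindings import runtime

        # Connect CUDA as the stream's consumer (frames land in video memory by default).
        err, conn = runtime.cudaEGLStreamConsumerConnect(egl_stream)
        assert err == runtime.cudaError_t.cudaSuccess

        # ... acquire frames with cudaEGLStreamConsumerAcquireFrame and hand them
        # back with cudaEGLStreamConsumerReleaseFrame ...

        # Tear the connection down when done.
        err, = runtime.cudaEGLStreamConsumerDisconnect(conn)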
    cuda.bindings.runtime.cudaEGLStreamProducerConnect(eglStream, width, height)

    Connect CUDA to EGLStream as a producer.

    Connect CUDA as a producer to the EGLStreamKHR specified by stream.

    The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one API to another.

    Parameters:
    • eglStream (EGLStreamKHR) – EGLStreamKHR handle
    • width (EGLint) – width of the image to be submitted to the stream
    • height (EGLint) – height of the image to be submitted to the stream

    Returns:
    cuda.bindings.runtime.cudaEGLStreamProducerDisconnect(conn)

    Disconnect CUDA as a producer from EGLStream.

    Disconnect CUDA as a producer to EGLStreamKHR.

    Parameters:
    conn (cudaEglStreamConnection) – Connection to disconnect.

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaEGLStreamProducerPresentFrame(conn, cudaEglFrame eglframe: cudaEglFrame, pStream)

    Present a CUDA eglFrame to the EGLStream with CUDA as a producer.

    The cudaEglFrame is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    For a cudaEglFrame of type cudaEglFrameTypePitch, the application may present a sub-region of a memory allocation. In that case, ptr will specify the start address of the sub-region in the allocation and cudaEglPlaneDesc will specify the dimensions of the sub-region.

    Parameters:
    • conn (cudaEglStreamConnection) – Connection on which to present the CUDA array
    • eglframe (cudaEglFrame) – CUDA EGLStream Producer Frame handle to be sent to the consumer over EGLStream.
    • pStream (cudaStream_t) – CUDA stream on which to present the frame.

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaEGLStreamProducerReturnFrame(conn, cudaEglFrame eglframe: Optional[cudaEglFrame], pStream)

    Return the CUDA eglFrame to the EGLStream last released by the consumer.

    This API can potentially return cudaErrorLaunchTimeout if the consumer has not returned a frame to the EGL stream. If timeout is returned the application can retry.

    Parameters:
    • conn (cudaEglStreamConnection) – Connection on which to return the CUDA array
    • eglframe (cudaEglFrame) – CUDA EGLStream Producer Frame handle returned from the consumer over EGLStream.
    • pStream (cudaStream_t) – CUDA stream on which to return the frame.

    Returns:
    cudaSuccess, cudaErrorLaunchTimeout, cudaErrorInvalidValue, cudaErrorUnknown

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphicsResourceGetMappedEglFrame(resource, unsigned int index, unsigned int mipLevel)

    Get an eglFrame through which to access a registered EGL graphics resource.

    Returns in *eglFrame an eglFrame pointer through which the registered graphics resource resource may be accessed. This API can only be called for EGL graphics resources.

    The cudaEglFrame is defined as

    View CUDA Toolkit Documentation for a C++ code example

    Parameters:
    • resource (cudaGraphicsResource_t) – Registered resource to access.
    • index (unsigned int) – Index for cubemap surfaces.
    • mipLevel (unsigned int) – Mipmap level for the subresource to access.

    Returns:

    Notes

    Note that in case of multiplanar *eglFrame, pitch of only first plane (unsigned int pitch) is to be considered by the application.
    cuda.bindings.runtime.cudaEventCreateFromEGLSync(eglSync, unsigned int flags)

    Creates an event from an EGLSync object.

    Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified via flags. Valid flags include:

    cudaEventRecord and TimingData are not supported for events created from EGLSync.

    The EGLSyncKHR is an opaque handle to an EGL sync object: typedef void* EGLSyncKHR

    Parameters:
    • eglSync (EGLSyncKHR) – Opaque handle to EGLSync object
    • flags (unsigned int) – Event creation flags

    Returns:
    Graphics Interoperability

    This section describes the graphics interoperability functions of the CUDA runtime application programming interface.
    cuda.bindings.runtime.cudaGraphicsUnregisterResource(resource)

    Unregisters a graphics resource for access by CUDA.

    Unregisters the graphics resource resource so it is not accessible by CUDA unless registered again.

    If resource is invalid then cudaErrorInvalidResourceHandle is returned.

    Parameters:
    resource (cudaGraphicsResource_t) – Resource to unregister

    Returns:
    cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
    cudaError_t

    See also

    cudaGraphicsD3D9RegisterResource, cudaGraphicsD3D10RegisterResource, cudaGraphicsD3D11RegisterResource, cudaGraphicsGLRegisterBuffer, cudaGraphicsGLRegisterImage, cuGraphicsUnregisterResource
    cuda.bindings.runtime.cudaGraphicsResourceSetMapFlags(resource, unsigned int flags)

    Set usage flags for mapping a graphics resource.

    Set flags for mapping the graphics resource resource.

    Changes to flags will take effect the next time resource is mapped. The flags argument may be any of the following:

    • cudaGraphicsMapFlagsNone: Specifies no hints about how resource will be used. It is therefore assumed that CUDA may read from or write to resource.
    • cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to resource.
    • cudaGraphicsMapFlagsWriteDiscard: Specifies CUDA will not read from resource and will write over the entire contents of resource, so none of the data previously stored in resource will be preserved.

    If resource is presently mapped for access by CUDA then cudaErrorUnknown is returned. If flags is not one of the above values then cudaErrorInvalidValue is returned.

    Parameters:
    • resource (cudaGraphicsResource_t) – Registered resource to set flags for
    • flags (unsigned int) – Parameters for resource mapping

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphicsMapResources(int count, resources, stream)

    Map graphics resources for access by CUDA.

    Maps the count graphics resources in resources for access by CUDA.

    The resources in resources may be accessed by CUDA until they are unmapped. The graphics API from which resources were registered should not access any resources while they are mapped by CUDA. If an application does so, the results are undefined.

    This function provides the synchronization guarantee that any graphics calls issued before cudaGraphicsMapResources() will complete before any subsequent CUDA work issued in stream begins.

    If resources contains any duplicate entries then cudaErrorInvalidResourceHandle is returned. If any of resources are presently mapped for access by CUDA then cudaErrorUnknown is returned.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphicsUnmapResources(int count, resources, stream)

    Unmap graphics resources.

    Unmaps the count graphics resources in resources.

    Once unmapped, the resources in resources may not be accessed by CUDA until they are mapped again.

    This function provides the synchronization guarantee that any CUDA work issued in stream before cudaGraphicsUnmapResources() will complete before any subsequently issued graphics work begins.

    If resources contains any duplicate entries then cudaErrorInvalidResourceHandle is returned. If any of resources are not presently mapped for access by CUDA then cudaErrorUnknown is returned.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidResourceHandle, cudaErrorUnknown

    Return type:
    cudaError_t
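    A minimal sketch of the map → access → unmap pattern, assuming resource was registered earlier (for example with cudaGraphicsGLRegisterBuffer) and stream is an existing cudaStream_t (both hypothetical):

        from cuda.bindings import runtime

        # Map the resource; the graphics API must not touch it while mapped.
        err, = runtime.cudaGraphicsMapResources(1, resource, stream)
        assert err == runtime.cudaError_t.cudaSuccess

        # For buffer resources, fetch the device pointer and its size in bytes.
        err, dev_ptr, size = runtime.cudaGraphicsResourceGetMappedPointer(resource)

        # ... launch CUDA work on dev_ptr in stream ...

        # Unmap before handing the resource back to the graphics API.
        err, = runtime.cudaGraphicsUnmapResources(1, resource, stream)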
    cuda.bindings.runtime.cudaGraphicsResourceGetMappedPointer(resource)

    Get a device pointer through which to access a mapped graphics resource.

    Returns in *devPtr a pointer through which the mapped graphics resource resource may be accessed. Returns in *size the size of the memory in bytes which may be accessed from that pointer. The value set in devPtr may change every time that resource is mapped.

    If resource is not a buffer then it cannot be accessed via a pointer and cudaErrorUnknown is returned. If resource is not mapped then cudaErrorUnknown is returned.

    Parameters:
    resource (cudaGraphicsResource_t) – Mapped resource to access

    Returns:
    • cudaError_t
    • devPtr (Any) – Pointer through which the mapped resource may be accessed
    • size (int) – Size of the accessible memory in bytes
    cuda.bindings.runtime.cudaGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsigned int mipLevel)

    Get an array through which to access a subresource of a mapped graphics resource.

    Returns in *array an array through which the subresource of the mapped graphics resource resource which corresponds to array index arrayIndex and mipmap level mipLevel may be accessed. The value set in array may change every time that resource is mapped.

    If resource is not a texture then it cannot be accessed via an array and cudaErrorUnknown is returned. If arrayIndex is not a valid array index for resource then cudaErrorInvalidValue is returned. If mipLevel is not a valid mipmap level for resource then cudaErrorInvalidValue is returned. If resource is not mapped then cudaErrorUnknown is returned.

    Parameters:
    • resource (cudaGraphicsResource_t) – Mapped resource to access
    • arrayIndex (unsigned int) – Array index for array textures or cubemap face index as defined by cudaGraphicsCubeFace for cubemap textures for the subresource to access
    • mipLevel (unsigned int) – Mipmap level for the subresource to access

    Returns:
    cuda.bindings.runtime.cudaGraphicsResourceGetMappedMipmappedArray(resource)

    Get a mipmapped array through which to access a mapped graphics resource.

    Returns in *mipmappedArray a mipmapped array through which the mapped graphics resource resource may be accessed. The value set in mipmappedArray may change every time that resource is mapped.

    If resource is not a texture then it cannot be accessed via an array and cudaErrorUnknown is returned. If resource is not mapped then cudaErrorUnknown is returned.

    Parameters:
    resource (cudaGraphicsResource_t) – Mapped resource to access

    Returns:
    Texture Object Management

    This section describes the low level texture object management functions of the CUDA runtime application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher.
    cuda.bindings.runtime.cudaGetChannelDesc(array)

    Get the channel descriptor of an array.

    Returns in *desc the channel descriptor of the CUDA array array.

    Parameters:
    array (cudaArray_const_t) – Memory array on device

    Returns:

    See also

    cudaCreateChannelDesc (C API), cudaCreateTextureObject, cudaCreateSurfaceObject
    cuda.bindings.runtime.cudaCreateChannelDesc(int x, int y, int z, int w, f: cudaChannelFormatKind)

    Returns a channel descriptor using the specified format.

    Returns a channel descriptor with format f and number of bits of each component x, y, z, and w. The cudaChannelFormatDesc is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where cudaChannelFormatKind is one of cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned, or cudaChannelFormatKindFloat.

    Parameters:
    • x (int) – X component
    • y (int) – Y component
    • z (int) – Z component
    • w (int) – W component
    • f (cudaChannelFormatKind) – Channel format

    Returns:
    • cudaError_t – cudaSuccess
    • cudaChannelFormatDesc – Channel descriptor with format f
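    For illustration, a minimal sketch building a float4 descriptor (32 bits in each of the x, y, z and w components):

        from cuda.bindings import runtime

        err, desc = runtime.cudaCreateChannelDesc(
            32, 32, 32, 32,
            runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat)
        assert err == runtime.cudaError_t.cudaSuccess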
    cuda.bindings.runtime.cudaCreateTextureObject(cudaResourceDesc pResDesc: Optional[cudaResourceDesc], cudaTextureDesc pTexDesc: Optional[cudaTextureDesc], cudaResourceViewDesc pResViewDesc: Optional[cudaResourceViewDesc])

    Creates a texture object.

    Creates a texture object and returns it in pTexObject. pResDesc describes the data to texture from. pTexDesc describes how the data should be sampled. pResViewDesc is an optional argument that specifies an alternate format for the data described by pResDesc, and also describes the subresource region to restrict access to when texturing. pResViewDesc can only be specified if the type of resource is a CUDA array or a CUDA mipmapped array not in a block compressed format.

    Texture objects are only supported on devices of compute capability 3.0 or higher. Additionally, a texture object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    The cudaResourceDesc structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    where:

    • resType specifies the type of resource to texture from. CUresourceType is defined as:
    • View CUDA Toolkit Documentation for a C++ code example

    If resType is set to cudaResourceTypeArray, cudaResourceDesc::res::array::array must be set to a valid CUDA array handle.

    If resType is set to cudaResourceTypeMipmappedArray, cudaResourceDesc::res::mipmap::mipmap must be set to a valid CUDA mipmapped array handle and normalizedCoords must be set to true.

    If resType is set to cudaResourceTypeLinear, cudaResourceDesc::res::linear::devPtr must be set to a valid device pointer, that is aligned to textureAlignment. cudaResourceDesc::res::linear::desc describes the format and the number of components per array element. cudaResourceDesc::res::linear::sizeInBytes specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed maxTexture1DLinear. The number of elements is computed as (sizeInBytes / sizeof(desc)).

    If resType is set to cudaResourceTypePitch2D, cudaResourceDesc::res::pitch2D::devPtr must be set to a valid device pointer, that is aligned to textureAlignment. cudaResourceDesc::res::pitch2D::desc describes the format and the number of components per array element. cudaResourceDesc::res::pitch2D::width and cudaResourceDesc::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed maxTexture2DLinear. cudaResourceDesc::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to texturePitchAlignment. Pitch cannot exceed maxTexture2DLinear[2].

    The cudaTextureDesc struct is defined as

    View CUDA Toolkit Documentation for a C++ code example

    The cudaResourceViewDesc struct is defined as

    View CUDA Toolkit Documentation for a C++ code example

    where:

    • format specifies how the data contained in the CUDA array or CUDA mipmapped array should be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a 32-bit unsigned integer format with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have a 32-bit unsigned int with 2 channels. The other BC formats require the underlying resource to have the same 32-bit unsigned int format but with 4 channels.
    • width specifies the new width of the texture data. If the resource view format is a block compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, this value has to be equal to that of the original resource.
    • height specifies the new height of the texture data. If the resource view format is a block compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats, this value has to be equal to that of the original resource.
    • depth specifies the new depth of the texture data. This value has to be equal to that of the original resource.
    • firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. For non-mipmapped resources, this value has to be zero. minMipmapLevelClamp and maxMipmapLevelClamp will be relative to this value. For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, then the actual minimum mipmap level clamp will be 3.2.
    • lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value has to be zero.
    • firstLayer specifies the first layer index for layered textures. This will be the new layer zero. For non-layered resources, this value has to be zero.
    • lastLayer specifies the last layer index for layered textures. For non-layered resources, this value has to be zero.

    Parameters:

    Returns:
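    A minimal sketch creating a texture object over linear device memory, assuming dev_ptr was returned by cudaMalloc and holds n float32 values (names hypothetical; error checks elided):

        import numpy as np
        from cuda.bindings import runtime

        res_desc = runtime.cudaResourceDesc()
        res_desc.resType = runtime.cudaResourceType.cudaResourceTypeLinear
        res_desc.res.linear.devPtr = dev_ptr
        res_desc.res.linear.sizeInBytes = n * np.float32().itemsize
        err, res_desc.res.linear.desc = runtime.cudaCreateChannelDesc(
            32, 0, 0, 0, runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat)

        tex_desc = runtime.cudaTextureDesc()
        tex_desc.readMode = runtime.cudaTextureReadMode.cudaReadModeElementType

        # No resource view is needed (or allowed) for linear memory.
        err, tex = runtime.cudaCreateTextureObject(res_desc, tex_desc, None)

        # ... pass tex to kernels, then clean up ...
        err, = runtime.cudaDestroyTextureObject(tex)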
    cuda.bindings.runtime.cudaDestroyTextureObject(texObject)

    Destroys a texture object.

    Destroys the texture object specified by texObject.

    Parameters:
    texObject (cudaTextureObject_t) – Texture object to destroy

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaGetTextureObjectResourceDesc(texObject)

    Returns a texture object’s resource descriptor.

    Returns the resource descriptor for the texture object specified by texObject.

    Parameters:
    texObject (cudaTextureObject_t) – Texture object

    Returns:

    cuda.bindings.runtime.cudaGetTextureObjectTextureDesc(texObject)

    Returns a texture object’s texture descriptor.

    Returns the texture descriptor for the texture object specified by texObject.

    Parameters:
    texObject (cudaTextureObject_t) – Texture object

    Returns:

    cuda.bindings.runtime.cudaGetTextureObjectResourceViewDesc(texObject)

    Returns a texture object’s resource view descriptor.

    Returns the resource view descriptor for the texture object specified by texObject. If no resource view was specified, cudaErrorInvalidValue is returned.

    Parameters:
    texObject (cudaTextureObject_t) – Texture object

    Returns:
    Surface Object Management

    This section describes the low level surface object management functions of the CUDA runtime application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher.
    cuda.bindings.runtime.cudaCreateSurfaceObject(cudaResourceDesc pResDesc: Optional[cudaResourceDesc])

    Creates a surface object.

    Creates a surface object and returns it in pSurfObject. pResDesc describes the data to perform surface load/stores on. resType must be cudaResourceTypeArray and cudaResourceDesc::res::array::array must be set to a valid CUDA array handle.

    Surface objects are only supported on devices of compute capability 3.0 or higher. Additionally, a surface object is an opaque value, and, as such, should only be accessed through CUDA API calls.

    Parameters:
    pResDesc (cudaResourceDesc) – Resource descriptor

    Returns:
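    A minimal sketch, assuming cuda_array is an existing cudaArray_t (for example from cudaMallocArray; hypothetical here):

        from cuda.bindings import runtime

        res_desc = runtime.cudaResourceDesc()
        res_desc.resType = runtime.cudaResourceType.cudaResourceTypeArray
        res_desc.res.array.array = cuda_array

        err, surf = runtime.cudaCreateSurfaceObject(res_desc)
        assert err == runtime.cudaError_t.cudaSuccess

        # ... perform surface loads/stores in kernels via surf ...
        err, = runtime.cudaDestroySurfaceObject(surf)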
    cuda.bindings.runtime.cudaDestroySurfaceObject(surfObject)

    Destroys a surface object.

    Destroys the surface object specified by surfObject.

    Parameters:
    surfObject (cudaSurfaceObject_t) – Surface object to destroy

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaGetSurfaceObjectResourceDesc(surfObject)

    Returns a surface object’s resource descriptor.

    Returns the resource descriptor for the surface object specified by surfObject.

    Parameters:
    surfObject (cudaSurfaceObject_t) – Surface object

    Returns:
    Version Management

    cuda.bindings.runtime.cudaDriverGetVersion()

    Returns the latest version of CUDA supported by the driver.

    Returns in *driverVersion the latest version of CUDA supported by the driver. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020. If no driver is installed, then 0 is returned as the driver version.

    This function automatically returns cudaErrorInvalidValue if driverVersion is NULL.

    Returns:

    cuda.bindings.runtime.cudaRuntimeGetVersion()

    Returns the CUDA Runtime version.

    Returns in *runtimeVersion the version number of the current CUDA Runtime instance. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020.

    As of CUDA 12.0, this function no longer initializes CUDA. The purpose of this API is solely to return a compile-time constant stating the CUDA Toolkit version in the above format.

    This function automatically returns cudaErrorInvalidValue if the runtimeVersion argument is NULL.

    Returns:

    cuda.bindings.runtime.getLocalRuntimeVersion()

    Returns the CUDA Runtime version of the local shared library.

    Returns in *runtimeVersion the version number of the current CUDA Runtime instance. The version is returned as (1000 * major + 10 * minor). For example, CUDA 9.2 would be represented by 9020.

    As of CUDA 12.0, this function no longer initializes CUDA. The purpose of this API is solely to return a compile-time constant stating the CUDA Toolkit version in the above format.

    This function automatically returns cudaErrorInvalidValue if the runtimeVersion argument is NULL.

    Returns:
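    A quick sketch decoding the (1000 * major + 10 * minor) encoding:

        from cuda.bindings import runtime

        err, drv = runtime.cudaDriverGetVersion()
        err, rt = runtime.cudaRuntimeGetVersion()
        print(f"driver:  {drv // 1000}.{(drv % 1000) // 10}")   # e.g. 9020 -> 9.2
        print(f"runtime: {rt // 1000}.{(rt % 1000) // 10}")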
    Graph Management

    This section describes the graph management functions of the CUDA runtime application programming interface.
    cuda.bindings.runtime.cudaGraphCreate(unsigned int flags)

    Creates a graph.

    Creates an empty graph, which is returned via pGraph.

    Parameters:
    flags (unsigned int) – Graph creation flags, must be 0

    Returns:
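    A minimal sketch of graph construction (cudaGraphDestroy belongs to the same graph API, though it is not shown in this excerpt):

        from cuda.bindings import runtime

        err, graph = runtime.cudaGraphCreate(0)   # flags must be 0
        assert err == runtime.cudaError_t.cudaSuccess

        # ... add kernel/memcpy/memset/host nodes, instantiate, launch ...

        err, = runtime.cudaGraphDestroy(graph)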
    cuda.bindings.runtime.cudaGraphAddKernelNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])

    Creates a kernel execution node and adds it to a graph.

    Creates a new kernel execution node and adds it to graph with numDependencies dependencies specified via pDependencies and arguments specified in pNodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    The cudaKernelNodeParams structure is defined as:

    View CUDA Toolkit Documentation for a C++ code example

    When the graph is launched, the node will invoke kernel func on a (gridDim.x x gridDim.y x gridDim.z) grid of blocks. Each block contains (blockDim.x x blockDim.y x blockDim.z) threads.

    sharedMem sets the amount of dynamic shared memory that will be available to each thread block.

    Kernel parameters to func can be specified in one of two ways:

    1) Kernel parameters can be specified via kernelParams. If the kernel has N parameters, then kernelParams needs to be an array of N pointers. Each pointer, from kernelParams[0] to kernelParams[N-1], points to the region of memory from which the actual parameter will be copied. The number of kernel parameters and their offsets and sizes do not need to be specified as that information is retrieved directly from the kernel’s image.

    2) Kernel parameters can also be packaged by the application into a single buffer that is passed in via extra. This places the burden on the application of knowing each kernel parameter’s size and alignment/padding within the buffer. The extra parameter exists to allow this function to take additional less commonly used arguments. extra specifies a list of names of extra settings and their corresponding values. Each extra setting name is immediately followed by the corresponding value. The list must be terminated with either NULL or CU_LAUNCH_PARAM_END.

    The error cudaErrorInvalidValue will be returned if kernel parameters are specified with both kernelParams and extra (i.e. both kernelParams and extra are non-NULL).

    The kernelParams or extra array, as well as the argument values it points to, are copied during this call.

    Parameters:
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • pNodeParams (cudaKernelNodeParams) – Parameters for the GPU execution node

    Returns:

    Notes

    Kernels launched using graphs must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects.
    cuda.bindings.runtime.cudaGraphKernelNodeGetParams(node)

    Returns a kernel node’s parameters.

    Returns the parameters of kernel node node in pNodeParams. The kernelParams or extra array returned in pNodeParams, as well as the argument values it points to, are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cudaGraphKernelNodeSetParams to update the parameters of this node.

    The params will contain either kernelParams or extra, according to which of these was most recently set on the node.

    Parameters:
    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    See also

    cudaLaunchKernel, cudaGraphAddKernelNode, cudaGraphKernelNodeSetParams

    cuda.bindings.runtime.cudaGraphKernelNodeSetParams(node, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])

    Sets a kernel node’s parameters.

    Sets the parameters of kernel node node to pNodeParams.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle, cudaErrorMemoryAllocation

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphKernelNodeCopyAttributes(hSrc, hDst)

    Copies attributes from source node to destination node.

    Copies attributes from source node src to destination node dst. Both nodes must have the same context.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidContext

    Return type:
    cudaError_t

    cuda.bindings.runtime.cudaGraphKernelNodeGetAttribute(hNode, attr: cudaKernelNodeAttrID)

    Queries node attribute.

    Queries attribute attr from node hNode and stores it in the corresponding member of value_out.

    Parameters:

    Returns:

    cuda.bindings.runtime.cudaGraphKernelNodeSetAttribute(hNode, attr: cudaKernelNodeAttrID, cudaKernelNodeAttrValue value: Optional[cudaKernelNodeAttrValue])

    Sets node attribute.

    Sets attribute attr on node hNode from the corresponding attribute of value.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidResourceHandle

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphAddMemcpyNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaMemcpy3DParms pCopyParams: Optional[cudaMemcpy3DParms])

    Creates a memcpy node and adds it to a graph.

    Creates a new memcpy node and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    When the graph is launched, the node will perform the memcpy described by pCopyParams. See cudaMemcpy3D() for a description of the structure and its restrictions.

    Memcpy nodes have some additional restrictions with regards to managed memory, if the system contains at least one device which has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess.

    Parameters:
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • pCopyParams (cudaMemcpy3DParms) – Parameters for the memory copy

    Returns:
    cuda.bindings.runtime.cudaGraphAddMemcpyNode1D(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, dst, src, size_t count, kind: cudaMemcpyKind)

    Creates a 1D memcpy node and adds it to a graph.

    Creates a new 1D memcpy node and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    When the graph is launched, the node will copy count bytes from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. Launching a memcpy node with dst and src pointers that do not match the direction of the copy results in undefined behavior.

    Memcpy nodes have some additional restrictions with regards to managed memory, if the system contains at least one device which has a zero value for the device attribute cudaDevAttrConcurrentManagedAccess.

    Parameters:
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • dst (Any) – Destination memory address
    • src (Any) – Source memory address
    • count (size_t) – Size in bytes to copy
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:
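    A minimal sketch adding a host-to-device 1D memcpy node at the root of a graph, assuming graph came from cudaGraphCreate and dst is a device allocation of at least src.nbytes bytes (both hypothetical):

        import numpy as np
        from cuda.bindings import runtime

        src = np.arange(1024, dtype=np.float32)

        err, node = runtime.cudaGraphAddMemcpyNode1D(
            graph, [], 0,                 # no dependencies: a root node
            dst, src.ctypes.data, src.nbytes,
            runtime.cudaMemcpyKind.cudaMemcpyHostToDevice)
        assert err == runtime.cudaError_t.cudaSuccess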
    cuda.bindings.runtime.cudaGraphMemcpyNodeGetParams(node)

    Returns a memcpy node’s parameters.

    Returns the parameters of memcpy node node in pNodeParams.

    Parameters:
    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams(node, cudaMemcpy3DParms pNodeParams: Optional[cudaMemcpy3DParms])

    Sets a memcpy node’s parameters.

    Sets the parameters of memcpy node node to pNodeParams.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t

    See also

    cudaGraphNodeSetParams, cudaMemcpy3D, cudaGraphMemcpyNodeSetParamsToSymbol, cudaGraphMemcpyNodeSetParamsFromSymbol, cudaGraphMemcpyNodeSetParams1D, cudaGraphAddMemcpyNode, cudaGraphMemcpyNodeGetParams
    cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams1D(node, dst, src, size_t count, kind: cudaMemcpyKind)

    Sets a memcpy node’s parameters to perform a 1-dimensional copy.

    Sets the parameters of memcpy node node to the copy described by the provided parameters.

    When the graph is launched, the node will copy count bytes from the memory area pointed to by src to the memory area pointed to by dst, where kind specifies the direction of the copy, and must be one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, cudaMemcpyDeviceToDevice, or cudaMemcpyDefault. Passing cudaMemcpyDefault is recommended, in which case the type of transfer is inferred from the pointer values. However, cudaMemcpyDefault is only allowed on systems that support unified virtual addressing. Launching a memcpy node with dst and src pointers that do not match the direction of the copy results in undefined behavior.

    Parameters:
    • node (CUgraphNode or cudaGraphNode_t) – Node to set the parameters for
    • dst (Any) – Destination memory address
    • src (Any) – Source memory address
    • count (size_t) – Size in bytes to copy
    • kind (cudaMemcpyKind) – Type of transfer

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphAddMemsetNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaMemsetParams pMemsetParams: Optional[cudaMemsetParams])

    Creates a memset node and adds it to a graph.

    Creates a new memset node and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    The element size must be 1, 2, or 4 bytes. When the graph is launched, the node will perform the memset described by pMemsetParams.

    Parameters:
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • pMemsetParams (cudaMemsetParams) – Parameters for the memory set

    Returns:
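    A sketch of zero-filling a linear device buffer of 1024 float32 elements through a memset node (graph and dev_ptr as in the earlier sketches):

        from cuda.bindings import runtime

        params = runtime.cudaMemsetParams()
        params.dst = dev_ptr
        params.value = 0
        params.elementSize = 4      # element size must be 1, 2, or 4 bytes
        params.width = 1024         # elements per row
        params.height = 1           # a single row, i.e. a linear fill
        params.pitch = 0            # row pitch; irrelevant when height == 1

        err, node = runtime.cudaGraphAddMemsetNode(graph, [], 0, params)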
    cuda.bindings.runtime.cudaGraphMemsetNodeGetParams(node)

    Returns a memset node’s parameters.

    Returns the parameters of memset node node in pNodeParams.

    Parameters:
    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.bindings.runtime.cudaGraphMemsetNodeSetParams(node, cudaMemsetParams pNodeParams: Optional[cudaMemsetParams])

    Sets a memset node’s parameters.

    Sets the parameters of memset node node to pNodeParams.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphAddHostNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])

    Creates a host execution node and adds it to a graph.

    Creates a new CPU execution node and adds it to graph with numDependencies dependencies specified via pDependencies and arguments specified in pNodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    When the graph is launched, the node will invoke the specified CPU function. Host nodes are not supported under MPS with pre-Volta GPUs.

    Parameters:
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • pNodeParams (cudaHostNodeParams) – Parameters for the host node

    Returns:

    cuda.bindings.runtime.cudaGraphHostNodeGetParams(node)

    Returns a host node’s parameters.

    Returns the parameters of host node node in pNodeParams.

    Parameters:
    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.bindings.runtime.cudaGraphHostNodeSetParams(node, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])

    Sets a host node’s parameters.

    Sets the parameters of host node node to nodeParams.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphAddChildGraphNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, childGraph)

    Creates a child graph node and adds it to a graph.

    Creates a new node which executes an embedded graph, and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    If hGraph contains allocation or free nodes, this call will return an error.

    The node executes an embedded child graph. The child graph is cloned in this call.

    Parameters:
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies
    • childGraph (CUgraph or cudaGraph_t) – The graph to clone into this node

    Returns:
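    A minimal sketch embedding a small child graph into a parent graph (parent is hypothetical; remember the child is cloned, so later edits to it do not affect the node):

        from cuda.bindings import runtime

        err, child = runtime.cudaGraphCreate(0)
        err, placeholder = runtime.cudaGraphAddEmptyNode(child, [], 0)

        err, child_node = runtime.cudaGraphAddChildGraphNode(parent, [], 0, child)
        assert err == runtime.cudaError_t.cudaSuccess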
    cuda.bindings.runtime.cudaGraphChildGraphNodeGetGraph(node)

    Gets a handle to the embedded graph of a child graph node.

    Gets a handle to the embedded graph in a child graph node. This call does not clone the graph. Changes to the graph will be reflected in the node, and the node retains ownership of the graph.

    Allocation and free nodes cannot be added to the returned graph. Attempting to do so will return an error.

    Parameters:
    node (CUgraphNode or cudaGraphNode_t) – Node to get the embedded graph for

    Returns:
    cuda.bindings.runtime.cudaGraphAddEmptyNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies)

    Creates an empty node and adds it to a graph.

    Creates a new node which performs no operation, and adds it to graph with numDependencies dependencies specified via pDependencies. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. pDependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    An empty node performs no operation during execution, but can be used for transitive ordering. For example, a phased execution graph with 2 groups of n nodes with a barrier between them can be represented using an empty node and 2*n dependency edges, rather than no empty node and n^2 dependency edges.

    Parameters:
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node
    • numDependencies (size_t) – Number of dependencies

    Returns:
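    A sketch of the barrier pattern described above, assuming group_a is a list of previously created cudaGraphNode_t handles for the first phase (hypothetical):

        from cuda.bindings import runtime

        # The barrier depends on every first-phase node: n edges in.
        err, barrier = runtime.cudaGraphAddEmptyNode(graph, group_a, len(group_a))

        # Each second-phase node then depends on the barrier alone: n edges out.
        # (An empty node stands in for a real kernel/memcpy node here.)
        err, b0 = runtime.cudaGraphAddEmptyNode(graph, [barrier], 1)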
    cuda.bindings.runtime.cudaGraphAddEventRecordNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, event)

    Creates an event record node and adds it to a graph.

    Creates a new event record node and adds it to hGraph with numDependencies dependencies specified via dependencies and event specified in event. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    Each launch of the graph will record event to capture execution of the node’s dependencies.

    These nodes may not be used in loops or conditionals.

    Parameters:

    Returns:
    cuda.bindings.runtime.cudaGraphEventRecordNodeGetEvent(node)

    Returns the event associated with an event record node.

    Returns the event of event record node hNode in event_out.

    Parameters:
    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    Returns:

    cuda.bindings.runtime.cudaGraphEventRecordNodeSetEvent(node, event)

    Sets an event record node’s event.

    Sets the event of event record node hNode to event.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphAddEventWaitNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, event)

    Creates an event wait node and adds it to a graph.

    Creates a new event wait node and adds it to hGraph with numDependencies dependencies specified via dependencies and event specified in event. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in phGraphNode.

    The graph node will wait for all work captured in event. See cuEventRecord() for details on what is captured by an event. The synchronization will be performed efficiently on the device when applicable. event may be from a different context or device than the launch stream.

    These nodes may not be used in loops or conditionals.

    Parameters:

    Returns:
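    A sketch of cross-graph ordering through an event, assuming graph, other_graph and the dependency list deps already exist (all hypothetical):

        from cuda.bindings import runtime

        err, event = runtime.cudaEventCreate()

        # Record the event once deps have executed within graph ...
        err, rec = runtime.cudaGraphAddEventRecordNode(graph, deps, len(deps), event)

        # ... and make other_graph wait on that work before proceeding.
        err, wait = runtime.cudaGraphAddEventWaitNode(other_graph, [], 0, event)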
    cuda.bindings.runtime.cudaGraphEventWaitNodeGetEvent(node)

    Returns the event associated with an event wait node.

    Returns the event of event wait node hNode in event_out.

    Parameters:
    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the event for

    Returns:

    cuda.bindings.runtime.cudaGraphEventWaitNodeSetEvent(node, event)

    Sets an event wait node’s event.

    Sets the event of event wait node hNode to event.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t
    cuda.bindings.runtime.cudaGraphAddExternalSemaphoresSignalNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaExternalSemaphoreSignalNodeParams nodeParams: Optional[cudaExternalSemaphoreSignalNodeParams])

    Creates an external semaphore signal node and adds it to a graph.

    Creates a new external semaphore signal node and adds it to graph with numDependencies dependencies specified via dependencies and arguments specified in nodeParams. It is possible for numDependencies to be 0, in which case the node will be placed at the root of the graph. dependencies may not have any duplicate entries. A handle to the new node will be returned in pGraphNode.

    Performs a signal operation on a set of externally allocated semaphore objects when the node is launched. The operation(s) will occur after all of the node’s dependencies have completed.

    Parameters:

    Returns:

    cuda.bindings.runtime.cudaGraphExternalSemaphoresSignalNodeGetParams(hNode)

    Returns an external semaphore signal node’s parameters.

    Returns the parameters of an external semaphore signal node hNode in params_out. The extSemArray and paramsArray returned in params_out are owned by the node. This memory remains valid until the node is destroyed or its parameters are modified, and should not be modified directly. Use cudaGraphExternalSemaphoresSignalNodeSetParams to update the parameters of this node.

    Parameters:
    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    Returns:

    cuda.bindings.runtime.cudaGraphExternalSemaphoresSignalNodeSetParams(hNode, cudaExternalSemaphoreSignalNodeParams nodeParams: Optional[cudaExternalSemaphoreSignalNodeParams])

    Sets an external semaphore signal node’s parameters.

    Sets the parameters of an external semaphore signal node hNode to nodeParams.

    Parameters:

    Returns:
    cudaSuccess, cudaErrorInvalidValue

    Return type:
    cudaError_t
    +cuda.bindings.runtime.cudaGraphAddExternalSemaphoresWaitNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaExternalSemaphoreWaitNodeParams nodeParams: Optional[cudaExternalSemaphoreWaitNodeParams])#
    +

    Creates an external semaphore wait node and adds it to a graph.

    +

    Creates a new external semaphore wait node and adds it to graph with +numDependencies dependencies specified via dependencies and +arguments specified in nodeParams. It is possible for +numDependencies to be 0, in which case the node will be placed at the +root of the graph. dependencies may not have any duplicate entries. A +handle to the new node will be returned in pGraphNode.

    +

    Performs a wait operation on a set of externally allocated semaphore +objects when the node is launched. The node’s dependencies will not be +launched until the wait operation has completed.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExternalSemaphoresWaitNodeGetParams(hNode)#
    +

    Returns an external semaphore wait node’s parameters.

    +

    Returns the parameters of an external semaphore wait node hNode in +params_out. The extSemArray and paramsArray returned in +params_out, are owned by the node. This memory remains valid until +the node is destroyed or its parameters are modified, and should not be +modified directly. Use +cudaGraphExternalSemaphoresSignalNodeSetParams to update +the parameters of this node.

    +
    +
    Parameters:
    +

    hNode (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExternalSemaphoresWaitNodeSetParams(hNode, cudaExternalSemaphoreWaitNodeParams nodeParams: Optional[cudaExternalSemaphoreWaitNodeParams])#
    +

    Sets an external semaphore wait node’s parameters.

    +

    Sets the parameters of an external semaphore wait node hNode to +nodeParams.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphAddMemAllocNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaMemAllocNodeParams nodeParams: Optional[cudaMemAllocNodeParams])#
    +

    Creates an allocation node and adds it to a graph.

    +

    Creates a new allocation node and adds it to graph with +numDependencies dependencies specified via pDependencies and +arguments specified in nodeParams. It is possible for +numDependencies to be 0, in which case the node will be placed at the +root of the graph. pDependencies may not have any duplicate entries. +A handle to the new node will be returned in pGraphNode.

    +

    When cudaGraphAddMemAllocNode creates an allocation node, +it returns the address of the allocation in nodeParams.dptr. The +allocation’s address remains fixed across instantiations and launches.

    +

    If the allocation is freed in the same graph, by creating a free node +using cudaGraphAddMemFreeNode, the allocation can be +accessed by nodes ordered after the allocation node but before the free +node. These allocations cannot be freed outside the owning graph, and +they can only be freed once in the owning graph.

    +

    If the allocation is not freed in the same graph, then it can be +accessed not only by nodes in the graph which are ordered after the +allocation node, but also by stream operations ordered after the +graph’s execution but before the allocation is freed.

    +

    Allocations which are not freed in the same graph can be freed by:

    +
      +
    • passing the allocation to cudaMemFreeAsync or +cudaMemFree;

    • +
    • launching a graph with a free node for that allocation; or

    • +
    • specifying cudaGraphInstantiateFlagAutoFreeOnLaunch +during instantiation, which makes each launch behave as though it +called cudaMemFreeAsync for every unfreed allocation.

    • +
    +

    It is not possible to free an allocation in both the owning graph and +another graph. If the allocation is freed in the same graph, a free +node cannot be added to another graph. If the allocation is freed in +another graph, a free node can no longer be added to the owning graph.

    +

    The following restrictions apply to graphs which contain allocation +and/or memory free nodes:

    +
      +
    • Nodes and edges of the graph cannot be deleted.

    • +
    • The graph cannot be used in a child node.

    • +
    • Only one instantiation of the graph may exist at any point in time.

    • +
    • The graph cannot be cloned.

    • +
    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphMemAllocNodeGetParams(node)#
    +

    Returns a memory alloc node’s parameters.

    +

    Returns the parameters of a memory alloc node hNode in params_out. +The poolProps and accessDescs returned in params_out, are owned +by the node. This memory remains valid until the node is destroyed. The +returned parameters must not be modified.

    +
    +
    Parameters:
    +

    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphAddMemFreeNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, dptr)#
    +

    Creates a memory free node and adds it to a graph.

    +

    Creates a new memory free node and adds it to graph with +numDependencies dependencies specified via pDependencies and +address specified in dptr. It is possible for numDependencies to be +0, in which case the node will be placed at the root of the graph. +pDependencies may not have any duplicate entries. A handle to the new +node will be returned in pGraphNode.

    +

    cudaGraphAddMemFreeNode will return +cudaErrorInvalidValue if the user attempts to free:

    +
      +
    • an allocation twice in the same graph.

    • +
    • an address that was not returned by an allocation node.

    • +
    • an invalid address.

    • +
    +

    The following restrictions apply to graphs which contain allocation +and/or memory free nodes:

    +
      +
    • Nodes and edges of the graph cannot be deleted.

    • +
    • The graph cannot be used in a child node.

    • +
    • Only one instantiation of the graph may exist at any point in time.

    • +
    • The graph cannot be cloned.

    • +
    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • dptr (Any) – Address of memory to free

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphMemFreeNodeGetParams(node)#
    +

    Returns a memory free node’s parameters.

    +

    Returns the address of a memory free node hNode in dptr_out.

    +
    +
    Parameters:
    +

    node (CUgraphNode or cudaGraphNode_t) – Node to get the parameters for

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaDeviceGraphMemTrim(int device)#
    +

    Free unused memory that was cached on the specified device for use with graphs back to the OS.

    +

    Blocks which are not in use by a graph that is either currently +executing or scheduled to execute are freed back to the operating +system.

    +
    +
    Parameters:
    +

    device (int) – The device for which cached memory should be freed.

    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaDeviceGetGraphMemAttribute(int device, attr: cudaGraphMemAttributeType)#
    +

    Query asynchronous allocation attributes related to graphs.

    +

    Valid attributes are:

    + +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaDeviceSetGraphMemAttribute(int device, attr: cudaGraphMemAttributeType, value)#
    +

    Set asynchronous allocation attributes related to graphs.

    +

    Valid attributes are:

    +
      +
    • cudaGraphMemAttrUsedMemHigh: High watermark of memory, in +bytes, associated with graphs since the last time it was reset. High +watermark can only be reset to zero.

    • +
    • cudaGraphMemAttrReservedMemHigh: High watermark of +memory, in bytes, currently allocated for use by the CUDA graphs +asynchronous allocator.

    • +
    +
    +
    Parameters:
    +
      +
    • device (int) – Specifies the scope of the query

    • +
    • attr (cudaGraphMemAttributeType) – attribute to get

    • +
    • value (Any) – pointer to value to set

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidDevice

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphClone(originalGraph)#
    +

    Clones a graph.

    +

    This function creates a copy of originalGraph and returns it in +pGraphClone. All parameters are copied into the cloned graph. The +original graph may be modified after this call without affecting the +clone.

    +

    Child graph nodes in the original graph are recursively copied into the +clone.

    +
    +
    Parameters:
    +

    originalGraph (CUgraph or cudaGraph_t) – Graph to clone

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeFindInClone(originalNode, clonedGraph)#
    +

    Finds a cloned version of a node.

    +

    This function returns the node in clonedGraph corresponding to +originalNode in the original graph.

    +

    clonedGraph must have been cloned from originalGraph via +cudaGraphClone. originalNode must have been in +originalGraph at the time of the call to cudaGraphClone, +and the corresponding cloned node in clonedGraph must not have been +removed. The cloned node is then returned via pClonedNode.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cudaGraphClone

    +
    +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeGetType(node)#
    +

    Returns a node’s type.

    +

    Returns the node type of node in pType.

    +
    +
    Parameters:
    +

    node (CUgraphNode or cudaGraphNode_t) – Node to query

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphGetNodes(graph, size_t numNodes=0)#
    +

    Returns a graph’s nodes.

    +

    Returns a list of graph’s nodes. nodes may be NULL, in which case +this function will return the number of nodes in numNodes. Otherwise, +numNodes entries will be filled in. If numNodes is higher than the +actual number of nodes, the remaining entries in nodes will be set to +NULL, and the number of nodes actually obtained will be returned in +numNodes.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphGetRootNodes(graph, size_t pNumRootNodes=0)#
    +

    Returns a graph’s root nodes.

    +

    Returns a list of graph’s root nodes. pRootNodes may be NULL, in +which case this function will return the number of root nodes in +pNumRootNodes. Otherwise, pNumRootNodes entries will be filled in. +If pNumRootNodes is higher than the actual number of root nodes, the +remaining entries in pRootNodes will be set to NULL, and the number +of nodes actually obtained will be returned in pNumRootNodes.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph to query

    • +
    • pNumRootNodes (int) – See description

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphGetEdges(graph, size_t numEdges=0)#
    +

    Returns a graph’s dependency edges.

    +

    Returns a list of graph’s dependency edges. Edges are returned via +corresponding indices in from and to; that is, the node in to`[i] +has a dependency on the node in `from`[i]. `from and to may both be +NULL, in which case this function only returns the number of edges in +numEdges. Otherwise, numEdges entries will be filled in. If +numEdges is higher than the actual number of edges, the remaining +entries in from and to will be set to NULL, and the number of edges +actually returned will be written to numEdges.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph to get the edges from

    • +
    • numEdges (int) – See description

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphGetEdges_v2(graph, size_t numEdges=0)#
    +

    Returns a graph’s dependency edges (12.3+)

    +

    Returns a list of graph’s dependency edges. Edges are returned via +corresponding indices in from, to and edgeData; that is, the node +in to`[i] has a dependency on the node in `from`[i] with data +`edgeData`[i]. `from and to may both be NULL, in which case this +function only returns the number of edges in numEdges. Otherwise, +numEdges entries will be filled in. If numEdges is higher than the +actual number of edges, the remaining entries in from and to will +be set to NULL, and the number of edges actually returned will be +written to numEdges. edgeData may alone be NULL, in which case the +edges must all have default (zeroed) edge data. Attempting a losst +query via NULL edgeData will result in +cudaErrorLossyQuery. If edgeData is non-NULL then from +and to must be as well.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph to get the edges from

    • +
    • numEdges (int) – See description

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeGetDependencies(node, size_t pNumDependencies=0)#
    +

    Returns a node’s dependencies.

    +

    Returns a list of node’s dependencies. pDependencies may be NULL, +in which case this function will return the number of dependencies in +pNumDependencies. Otherwise, pNumDependencies entries will be +filled in. If pNumDependencies is higher than the actual number of +dependencies, the remaining entries in pDependencies will be set to +NULL, and the number of nodes actually obtained will be returned in +pNumDependencies.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeGetDependencies_v2(node, size_t pNumDependencies=0)#
    +

    Returns a node’s dependencies (12.3+)

    +

    Returns a list of node’s dependencies. pDependencies may be NULL, +in which case this function will return the number of dependencies in +pNumDependencies. Otherwise, pNumDependencies entries will be +filled in. If pNumDependencies is higher than the actual number of +dependencies, the remaining entries in pDependencies will be set to +NULL, and the number of nodes actually obtained will be returned in +pNumDependencies.

    +

    Note that if an edge has non-zero (non-default) edge data and +edgeData is NULL, this API will return +cudaErrorLossyQuery. If edgeData is non-NULL, then +pDependencies must be as well.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeGetDependentNodes(node, size_t pNumDependentNodes=0)#
    +

    Returns a node’s dependent nodes.

    +

    Returns a list of node’s dependent nodes. pDependentNodes may be +NULL, in which case this function will return the number of dependent +nodes in pNumDependentNodes. Otherwise, pNumDependentNodes entries +will be filled in. If pNumDependentNodes is higher than the actual +number of dependent nodes, the remaining entries in pDependentNodes +will be set to NULL, and the number of nodes actually obtained will be +returned in pNumDependentNodes.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeGetDependentNodes_v2(node, size_t pNumDependentNodes=0)#
    +

    Returns a node’s dependent nodes (12.3+)

    +

    Returns a list of node’s dependent nodes. pDependentNodes may be +NULL, in which case this function will return the number of dependent +nodes in pNumDependentNodes. Otherwise, pNumDependentNodes entries +will be filled in. If pNumDependentNodes is higher than the actual +number of dependent nodes, the remaining entries in pDependentNodes +will be set to NULL, and the number of nodes actually obtained will be +returned in pNumDependentNodes.

    +

    Note that if an edge has non-zero (non-default) edge data and +edgeData is NULL, this API will return +cudaErrorLossyQuery. If edgeData is non-NULL, then +pDependentNodes must be as well.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphAddDependencies(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies)#
    +

    Adds dependency edges to a graph.

    +

    The number of dependencies to be added is defined by numDependencies +Elements in pFrom and pTo at corresponding indices define a +dependency. Each node in pFrom and pTo must belong to graph.

    +

    If numDependencies is 0, elements in pFrom and pTo will be +ignored. Specifying an existing dependency will return an error.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph to which dependencies are added

    • +
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies

    • +
    • to (List[cudaGraphNode_t]) – Array of dependent nodes

    • +
    • numDependencies (size_t) – Number of dependencies to be added

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphAddDependencies_v2(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], edgeData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies)#
    +

    Adds dependency edges to a graph. (12.3+)

    +

    The number of dependencies to be added is defined by numDependencies +Elements in pFrom and pTo at corresponding indices define a +dependency. Each node in pFrom and pTo must belong to graph.

    +

    If numDependencies is 0, elements in pFrom and pTo will be +ignored. Specifying an existing dependency will return an error.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph to which dependencies are added

    • +
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies

    • +
    • to (List[cudaGraphNode_t]) – Array of dependent nodes

    • +
    • edgeData (List[cudaGraphEdgeData]) – Optional array of edge data. If NULL, default (zeroed) edge data is +assumed.

    • +
    • numDependencies (size_t) – Number of dependencies to be added

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphRemoveDependencies(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies)#
    +

    Removes dependency edges from a graph.

    +

    The number of pDependencies to be removed is defined by +numDependencies. Elements in pFrom and pTo at corresponding +indices define a dependency. Each node in pFrom and pTo must belong +to graph.

    +

    If numDependencies is 0, elements in pFrom and pTo will be +ignored. Specifying a non-existing dependency will return an error.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies

    • +
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies

    • +
    • to (List[cudaGraphNode_t]) – Array of dependent nodes

    • +
    • numDependencies (size_t) – Number of dependencies to be removed

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphRemoveDependencies_v2(graph, from_: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], to: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], edgeData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies)#
    +

    Removes dependency edges from a graph. (12.3+)

    +

    The number of pDependencies to be removed is defined by +numDependencies. Elements in pFrom and pTo at corresponding +indices define a dependency. Each node in pFrom and pTo must belong +to graph.

    +

    If numDependencies is 0, elements in pFrom and pTo will be +ignored. Specifying an edge that does not exist in the graph, with data +matching edgeData, results in an error. edgeData is nullable, which +is equivalent to passing default (zeroed) data for each edge.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph from which to remove dependencies

    • +
    • from (List[cudaGraphNode_t]) – Array of nodes that provide the dependencies

    • +
    • to (List[cudaGraphNode_t]) – Array of dependent nodes

    • +
    • edgeData (List[cudaGraphEdgeData]) – Optional array of edge data. If NULL, edge data is assumed to be +default (zeroed).

    • +
    • numDependencies (size_t) – Number of dependencies to be removed

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphDestroyNode(node)#
    +

    Remove a node from the graph.

    +

    Removes node from its graph. This operation also severs any +dependencies of other nodes on node and vice versa.

    +

    Dependencies cannot be removed from graphs which contain allocation or +free nodes. Any attempt to do so will return an error.

    +
    +
    Parameters:
    +

    node (CUgraphNode or cudaGraphNode_t) – Node to remove

    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphInstantiate(graph, unsigned long long flags)#
    +

    Creates an executable graph from a graph.

    +

    Instantiates graph as an executable graph. The graph is validated for +any structural constraints or intra-node constraints which were not +previously validated. If instantiation is successful, a handle to the +instantiated graph is returned in pGraphExec.

    +

    The flags parameter controls the behavior of instantiation and +subsequent graph launches. Valid flags are:

    +
      +
    • cudaGraphInstantiateFlagAutoFreeOnLaunch, which +configures a graph containing memory allocation nodes to +automatically free any unfreed memory allocations before the graph is +relaunched.

    • +
    • cudaGraphInstantiateFlagDeviceLaunch, which configures +the graph for launch from the device. If this flag is passed, the +executable graph handle returned can be used to launch the graph from +both the host and device. This flag cannot be used in conjunction +with cudaGraphInstantiateFlagAutoFreeOnLaunch.

    • +
    • cudaGraphInstantiateFlagUseNodePriority, which causes the +graph to use the priorities from the per-node attributes rather than +the priority of the launch stream during execution. Note that +priorities are only available on kernel nodes, and are copied from +stream priority during stream capture.

    • +
    +

    If graph contains any allocation or free nodes, there can be at most +one executable graph in existence for that graph at a time. An attempt +to instantiate a second executable graph before destroying the first +with cudaGraphExecDestroy will result in an error. The same +also applies if graph contains any device-updatable kernel nodes.

    +

    Graphs instantiated for launch on the device have additional +restrictions which do not apply to host graphs:

    +
      +
    • The graph’s nodes must reside on a single device.

    • +
    • The graph can only contain kernel nodes, memcpy nodes, memset nodes, +and child graph nodes.

    • +
    • The graph cannot be empty and must contain at least one kernel, +memcpy, or memset node. Operation-specific restrictions are outlined +below.

    • +
    • Kernel nodes:

      +
        +
      • Use of CUDA Dynamic Parallelism is not permitted.

      • +
      • Cooperative launches are permitted as long as MPS is not in use.

      • +
      +
    • +
    • Memcpy nodes:

      +
        +
      • Only copies involving device memory and/or pinned device-mapped +host memory are permitted.

      • +
      • Copies involving CUDA arrays are not permitted.

      • +
      • Both operands must be accessible from the current device, and the +current device must match the device of other nodes in the graph.

      • +
      +
    • +
    +

    If graph is not instantiated for launch on the device but contains +kernels which call device-side cudaGraphLaunch() from +multiple devices, this will result in an error.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphInstantiateWithFlags(graph, unsigned long long flags)#
    +

    Creates an executable graph from a graph.

    +

    Instantiates graph as an executable graph. The graph is validated for +any structural constraints or intra-node constraints which were not +previously validated. If instantiation is successful, a handle to the +instantiated graph is returned in pGraphExec.

    +

    The flags parameter controls the behavior of instantiation and +subsequent graph launches. Valid flags are:

    +
      +
    • cudaGraphInstantiateFlagAutoFreeOnLaunch, which +configures a graph containing memory allocation nodes to +automatically free any unfreed memory allocations before the graph is +relaunched.

    • +
    • cudaGraphInstantiateFlagDeviceLaunch, which configures +the graph for launch from the device. If this flag is passed, the +executable graph handle returned can be used to launch the graph from +both the host and device. This flag can only be used on platforms +which support unified addressing. This flag cannot be used in +conjunction with +cudaGraphInstantiateFlagAutoFreeOnLaunch.

    • +
    • cudaGraphInstantiateFlagUseNodePriority, which causes the +graph to use the priorities from the per-node attributes rather than +the priority of the launch stream during execution. Note that +priorities are only available on kernel nodes, and are copied from +stream priority during stream capture.

    • +
    +

    If graph contains any allocation or free nodes, there can be at most +one executable graph in existence for that graph at a time. An attempt +to instantiate a second executable graph before destroying the first +with cudaGraphExecDestroy will result in an error. The same +also applies if graph contains any device-updatable kernel nodes.

    +

    If graph contains kernels which call device-side +cudaGraphLaunch() from multiple devices, this will result +in an error.

    +

    Graphs instantiated for launch on the device have additional +restrictions which do not apply to host graphs:

    +
      +
    • The graph’s nodes must reside on a single device.

    • +
    • The graph can only contain kernel nodes, memcpy nodes, memset nodes, +and child graph nodes.

    • +
    • The graph cannot be empty and must contain at least one kernel, +memcpy, or memset node. Operation-specific restrictions are outlined +below.

    • +
    • Kernel nodes:

      +
        +
      • Use of CUDA Dynamic Parallelism is not permitted.

      • +
      • Cooperative launches are permitted as long as MPS is not in use.

      • +
      +
    • +
    • Memcpy nodes:

      +
        +
      • Only copies involving device memory and/or pinned device-mapped +host memory are permitted.

      • +
      • Copies involving CUDA arrays are not permitted.

      • +
      • Both operands must be accessible from the current device, and the +current device must match the device of other nodes in the graph.

      • +
      +
    • +
    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphInstantiateWithParams(graph, cudaGraphInstantiateParams instantiateParams: Optional[cudaGraphInstantiateParams])#
    +

    Creates an executable graph from a graph.

    +

    Instantiates graph as an executable graph according to the +instantiateParams structure. The graph is validated for any +structural constraints or intra-node constraints which were not +previously validated. If instantiation is successful, a handle to the +instantiated graph is returned in pGraphExec.

    +

    instantiateParams controls the behavior of instantiation and +subsequent graph launches, as well as returning more detailed +information in the event of an error. +cudaGraphInstantiateParams is defined as:

    +

    View CUDA Toolkit Documentation for a C++ code example

    +

    The flags field controls the behavior of instantiation and subsequent +graph launches. Valid flags are:

    +
      +
    • cudaGraphInstantiateFlagAutoFreeOnLaunch, which +configures a graph containing memory allocation nodes to +automatically free any unfreed memory allocations before the graph is +relaunched.

    • +
    • cudaGraphInstantiateFlagUpload, which will perform an +upload of the graph into uploadStream once the graph has been +instantiated.

    • +
    • cudaGraphInstantiateFlagDeviceLaunch, which configures +the graph for launch from the device. If this flag is passed, the +executable graph handle returned can be used to launch the graph from +both the host and device. This flag can only be used on platforms +which support unified addressing. This flag cannot be used in +conjunction with +cudaGraphInstantiateFlagAutoFreeOnLaunch.

    • +
    • cudaGraphInstantiateFlagUseNodePriority, which causes the +graph to use the priorities from the per-node attributes rather than +the priority of the launch stream during execution. Note that +priorities are only available on kernel nodes, and are copied from +stream priority during stream capture.

    • +
    +

    If graph contains any allocation or free nodes, there can be at most +one executable graph in existence for that graph at a time. An attempt +to instantiate a second executable graph before destroying the first +with cudaGraphExecDestroy will result in an error. The same +also applies if graph contains any device-updatable kernel nodes.

    +

    If graph contains kernels which call device-side +cudaGraphLaunch() from multiple devices, this will result +in an error.

    +

    Graphs instantiated for launch on the device have additional +restrictions which do not apply to host graphs:

    +
      +
    • The graph’s nodes must reside on a single device.

    • +
    • The graph can only contain kernel nodes, memcpy nodes, memset nodes, +and child graph nodes.

    • +
    • The graph cannot be empty and must contain at least one kernel, +memcpy, or memset node. Operation-specific restrictions are outlined +below.

    • +
    • Kernel nodes:

      +
        +
      • Use of CUDA Dynamic Parallelism is not permitted.

      • +
      • Cooperative launches are permitted as long as MPS is not in use.

      • +
      +
    • +
    • Memcpy nodes:

      +
        +
      • Only copies involving device memory and/or pinned device-mapped +host memory are permitted.

      • +
      • Copies involving CUDA arrays are not permitted.

      • +
      • Both operands must be accessible from the current device, and the +current device must match the device of other nodes in the graph.

      • +
      +
    • +
    +

    In the event of an error, the result_out and errNode_out fields +will contain more information about the nature of the error. Possible +error reporting includes:

    +
      +
    • cudaGraphInstantiateError, if passed an invalid value or +if an unexpected error occurred which is described by the return +value of the function. errNode_out will be set to NULL.

    • +
    • cudaGraphInstantiateInvalidStructure, if the graph +structure is invalid. errNode_out will be set to one of the +offending nodes.

    • +
    • cudaGraphInstantiateNodeOperationNotSupported, if the +graph is instantiated for device launch but contains a node of an +unsupported node type, or a node which performs unsupported +operations, such as use of CUDA dynamic parallelism within a kernel +node. errNode_out will be set to this node.

    • +
    • cudaGraphInstantiateMultipleDevicesNotSupported, if the +graph is instantiated for device launch but a node’s device differs +from that of another node. This error can also be returned if a graph +is not instantiated for device launch and it contains kernels which +call device-side cudaGraphLaunch() from multiple devices. +errNode_out will be set to this node.

    • +
    +

    If instantiation is successful, result_out will be set to +cudaGraphInstantiateSuccess, and hErrNode_out will be set +to NULL.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecGetFlags(graphExec)#
    +

    Query the instantiation flags of an executable graph.

    +

    Returns the flags that were passed to instantiation for the given +executable graph. cudaGraphInstantiateFlagUpload will not +be returned by this API as it does not affect the resulting executable +graph.

    +
    +
    Parameters:
    +

    graphExec (CUgraphExec or cudaGraphExec_t) – The executable graph to query

    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecKernelNodeSetParams(hGraphExec, node, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])#
    +

    Sets the parameters for a kernel node in the given graphExec.

    +

    Sets the parameters of a kernel node in an executable graph +hGraphExec. The node is identified by the corresponding node node +in the non-executable graph, from which the executable graph was +instantiated.

    +

    node must not have been removed from the original graph. All +nodeParams fields may change, but the following restrictions apply to +func updates:

    +
      +
    • The owning device of the function cannot change.

    • +
    • A node whose function originally did not use CUDA dynamic parallelism +cannot be updated to a function which uses CDP

    • +
    • A node whose function originally did not make device-side update +calls cannot be updated to a function which makes device-side update +calls.

    • +
    • If hGraphExec was not instantiated for device launch, a node whose +function originally did not use device-side +cudaGraphLaunch() cannot be updated to a function which +uses device-side cudaGraphLaunch() unless the node +resides on the same device as nodes which contained such calls at +instantiate-time. If no such calls were present at instantiation, +these updates cannot be performed at all.

    • +
    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. node is also not modified by this call.

    +

    If node is a device-updatable kernel node, the next upload/launch of +hGraphExec will overwrite any previous device-side updates. +Additionally, applying host updates to a device-updatable kernel node +while it is being updated from the device will result in undefined +behavior.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecMemcpyNodeSetParams(hGraphExec, node, cudaMemcpy3DParms pNodeParams: Optional[cudaMemcpy3DParms])#
    +

    Sets the parameters for a memcpy node in the given graphExec.

    +

    Updates the work represented by node in hGraphExec as though node +had contained pNodeParams at instantiation. node must remain in the +graph which was used to instantiate hGraphExec. Changed edges to and +from node are ignored.

    +

    The source and destination memory in pNodeParams must be allocated +from the same contexts as the original source and destination memory. +Both the instantiation-time memory operands and the memory operands in +pNodeParams must be 1-dimensional. Zero-length operations are not +supported.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. node is also not modified by this call.

    +

    Returns cudaErrorInvalidValue if the memory operands’ +mappings changed or either the original or new memory operands are +multidimensional.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecMemcpyNodeSetParams1D(hGraphExec, node, dst, src, size_t count, kind: cudaMemcpyKind)#
    +

    Sets the parameters for a memcpy node in the given graphExec to perform a 1-dimensional copy.

    +

    Updates the work represented by node in hGraphExec as though node +had contained the given params at instantiation. node must remain in +the graph which was used to instantiate hGraphExec. Changed edges to +and from node are ignored.

    +

    src and dst must be allocated from the same contexts as the +original source and destination memory. The instantiation-time memory +operands must be 1-dimensional. Zero-length operations are not +supported.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. node is also not modified by this call.

    +

    Returns cudaErrorInvalidValue if the memory operands’ +mappings changed or the original memory operands are multidimensional.

    +
    +
    Parameters:
    +
      +
    • hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph in which to set the specified node

    • +
    • node (CUgraphNode or cudaGraphNode_t) – Memcpy node from the graph which was used to instantiate graphExec

    • +
    • dst (Any) – Destination memory address

    • +
    • src (Any) – Source memory address

    • +
    • count (size_t) – Size in bytes to copy

    • +
    • kind (cudaMemcpyKind) – Type of transfer

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecMemsetNodeSetParams(hGraphExec, node, cudaMemsetParams pNodeParams: Optional[cudaMemsetParams])#
    +

    Sets the parameters for a memset node in the given graphExec.

    +

    Updates the work represented by node in hGraphExec as though node +had contained pNodeParams at instantiation. node must remain in the +graph which was used to instantiate hGraphExec. Changed edges to and +from node are ignored.

    +

    Zero sized operations are not supported.

    +

    The new destination pointer in pNodeParams must be to the same kind +of allocation as the original destination pointer and have the same +context association and device mapping as the original destination +pointer.

    +

    Both the value and pointer address may be updated. Changing other +aspects of the memset (width, height, element size or pitch) may cause +the update to be rejected. Specifically, for 2d memsets, all dimension +changes are rejected. For 1d memsets, changes in height are explicitly +rejected and other changes are oportunistically allowed if the +resulting work maps onto the work resources already allocated for the +node.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. node is also not modified by this call.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecHostNodeSetParams(hGraphExec, node, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])#
    +

    Sets the parameters for a host node in the given graphExec.

    +

    Updates the work represented by node in hGraphExec as though node +had contained pNodeParams at instantiation. node must remain in the +graph which was used to instantiate hGraphExec. Changed edges to and +from node are ignored.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. node is also not modified by this call.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecChildGraphNodeSetParams(hGraphExec, node, childGraph)#
    +

    Updates node parameters in the child graph node in the given graphExec.

    +

    Updates the work represented by node in hGraphExec as though the +nodes contained in node’s graph had the parameters contained in +childGraph’s nodes at instantiation. node must remain in the graph +which was used to instantiate hGraphExec. Changed edges to and from +node are ignored.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. node is also not modified by this call.

    +

    The topology of childGraph, as well as the node insertion order, must +match that of the graph contained in node. See +cudaGraphExecUpdate() for a list of restrictions on what +can be updated in an instantiated graph. The update is recursive, so +child graph nodes contained within the top level child graph will also +be updated.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecEventRecordNodeSetEvent(hGraphExec, hNode, event)#
    +

    Sets the event for an event record node in the given graphExec.

    +

    Sets the event of an event record node in an executable graph +hGraphExec. The node is identified by the corresponding node hNode +in the non-executable graph, from which the executable graph was +instantiated.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. hNode is also not modified by this call.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecEventWaitNodeSetEvent(hGraphExec, hNode, event)#
    +

    Sets the event for an event wait node in the given graphExec.

    +

    Sets the event of an event wait node in an executable graph +hGraphExec. The node is identified by the corresponding node hNode +in the non-executable graph, from which the executable graph was +instantiated.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. hNode is also not modified by this call.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecExternalSemaphoresSignalNodeSetParams(hGraphExec, hNode, cudaExternalSemaphoreSignalNodeParams nodeParams: Optional[cudaExternalSemaphoreSignalNodeParams])#
    +

    Sets the parameters for an external semaphore signal node in the given graphExec.

    +

    Sets the parameters of an external semaphore signal node in an +executable graph hGraphExec. The node is identified by the +corresponding node hNode in the non-executable graph, from which the +executable graph was instantiated.

    +

    hNode must not have been removed from the original graph.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. hNode is also not modified by this call.

    +

    Changing nodeParams->numExtSems is not supported.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecExternalSemaphoresWaitNodeSetParams(hGraphExec, hNode, cudaExternalSemaphoreWaitNodeParams nodeParams: Optional[cudaExternalSemaphoreWaitNodeParams])#
    +

    Sets the parameters for an external semaphore wait node in the given graphExec.

    +

    Sets the parameters of an external semaphore wait node in an executable +graph hGraphExec. The node is identified by the corresponding node +hNode in the non-executable graph, from which the executable graph +was instantiated.

    +

    hNode must not have been removed from the original graph.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. hNode is also not modified by this call.

    +

    Changing nodeParams->numExtSems is not supported.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeSetEnabled(hGraphExec, hNode, unsigned int isEnabled)#
    +

    Enables or disables the specified node in the given graphExec.

    +

    Sets hNode to be either enabled or disabled. Disabled nodes are +functionally equivalent to empty nodes until they are reenabled. +Existing node parameters are not affected by disabling/enabling the +node.

    +

    The node is identified by the corresponding node hNode in the non- +executable graph, from which the executable graph was instantiated.

    +

    hNode must not have been removed from the original graph.

    +

    The modifications only affect future launches of hGraphExec. Already +enqueued or running launches of hGraphExec are not affected by this +call. hNode is also not modified by this call.

    +
    +
    Parameters:
    +
      +
    • hGraphExec (CUgraphExec or cudaGraphExec_t) – The executable graph in which to set the specified node

    • +
    • hNode (CUgraphNode or cudaGraphNode_t) – Node from the graph from which graphExec was instantiated

    • +
    • isEnabled (unsigned int) – Node is enabled if != 0, otherwise the node is disabled

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue,

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +

    Notes

    +

    Currently only kernel, memset and memcpy nodes are supported.

    +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeGetEnabled(hGraphExec, hNode)#
    +

    Query whether a node in the given graphExec is enabled.

    +

    Sets isEnabled to 1 if hNode is enabled, or 0 if hNode is disabled.

    +

    The node is identified by the corresponding node hNode in the non- +executable graph, from which the executable graph was instantiated.

    +

    hNode must not have been removed from the original graph.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +

    Notes

    +

    Currently only kernel, memset and memcpy nodes are supported.

    +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecUpdate(hGraphExec, hGraph)#
    +

    Check whether an executable graph can be updated with a graph and perform the update if possible.

    +

    Updates the node parameters in the instantiated graph specified by +hGraphExec with the node parameters in a topologically identical +graph specified by hGraph.

    +

    Limitations:

    +
      +
    • Kernel nodes:

      +
        +
      • The owning context of the function cannot change.

      • +
      • A node whose function originally did not use CUDA dynamic +parallelism cannot be updated to a function which uses CDP.

      • +
      • A node whose function originally did not make device-side update +calls cannot be updated to a function which makes device-side +update calls.

      • +
      • A cooperative node cannot be updated to a non-cooperative node, and +vice-versa.

      • +
      • If the graph was instantiated with +cudaGraphInstantiateFlagUseNodePriority, the priority attribute +cannot change. Equality is checked on the originally requested +priority values, before they are clamped to the device’s supported +range.

      • +
      • If hGraphExec was not instantiated for device launch, a node +whose function originally did not use device-side +cudaGraphLaunch() cannot be updated to a function which +uses device-side cudaGraphLaunch() unless the node +resides on the same device as nodes which contained such calls at +instantiate-time. If no such calls were present at instantiation, +these updates cannot be performed at all.

      • +
      • Neither hGraph nor hGraphExec may contain device-updatable +kernel nodes.

      • +
      +
    • +
    • Memset and memcpy nodes:

      +
        +
      • The CUDA device(s) to which the operand(s) was allocated/mapped +cannot change.

      • +
      • The source/destination memory must be allocated from the same +contexts as the original source/destination memory.

      • +
      • For 2d memsets, only address and assinged value may be updated.

      • +
      • For 1d memsets, updating dimensions is also allowed, but may fail +if the resulting operation doesn’t map onto the work resources +already allocated for the node.

      • +
      +
    • +
    • Additional memcpy node restrictions:

      +
        +
      • Changing either the source or destination memory type(i.e. +CU_MEMORYTYPE_DEVICE, CU_MEMORYTYPE_ARRAY, etc.) is not supported.

      • +
      +
    • +
    • Conditional nodes:

      +
        +
      • Changing node parameters is not supported.

      • +
      • Changeing parameters of nodes within the conditional body graph is +subject to the rules above.

      • +
      • Conditional handle flags and default values are updated as part of +the graph update.

      • +
      +
    • +
    +

    Note: The API may add further restrictions in future releases. The +return code should always be checked.

    +

    cudaGraphExecUpdate sets the result member of resultInfo to +cudaGraphExecUpdateErrorTopologyChanged under the following conditions:

    +
      +
    • The count of nodes directly in hGraphExec and hGraph differ, in +which case resultInfo->errorNode is set to NULL.

    • +
    • hGraph has more exit nodes than hGraph, in which case +resultInfo->errorNode is set to one of the exit nodes in hGraph.

    • +
    • A node in hGraph has a different number of dependencies than the +node from hGraphExec it is paired with, in which case +resultInfo->errorNode is set to the node from hGraph.

    • +
    • A node in hGraph has a dependency that does not match with the +corresponding dependency of the paired node from hGraphExec. +resultInfo->errorNode will be set to the node from hGraph. +resultInfo->errorFromNode will be set to the mismatched dependency. +The dependencies are paired based on edge order and a dependency does +not match when the nodes are already paired based on other edges +examined in the graph.

    • +
    +

    cudaGraphExecUpdate sets the result member of resultInfo to:

    +
      +
    • cudaGraphExecUpdateError if passed an invalid value.

    • +
    • cudaGraphExecUpdateErrorTopologyChanged if the graph topology changed

    • +
    • cudaGraphExecUpdateErrorNodeTypeChanged if the type of a node +changed, in which case hErrorNode_out is set to the node from +hGraph.

    • +
    • cudaGraphExecUpdateErrorFunctionChanged if the function of a kernel +node changed (CUDA driver < 11.2)

    • +
    • cudaGraphExecUpdateErrorUnsupportedFunctionChange if the func field +of a kernel changed in an unsupported way(see note above), in which +case hErrorNode_out is set to the node from hGraph

    • +
    • cudaGraphExecUpdateErrorParametersChanged if any parameters to a node +changed in a way that is not supported, in which case +hErrorNode_out is set to the node from hGraph

    • +
    • cudaGraphExecUpdateErrorAttributesChanged if any attributes of a node +changed in a way that is not supported, in which case +hErrorNode_out is set to the node from hGraph

    • +
    • cudaGraphExecUpdateErrorNotSupported if something about a node is +unsupported, like the node’s type or configuration, in which case +hErrorNode_out is set to the node from hGraph

    • +
    +

    If the update fails for a reason not listed above, the result member of +resultInfo will be set to cudaGraphExecUpdateError. If the update +succeeds, the result member will be set to cudaGraphExecUpdateSuccess.

    +

    cudaGraphExecUpdate returns cudaSuccess when the updated was performed +successfully. It returns cudaErrorGraphExecUpdateFailure if the graph +update was not performed because it included changes which violated +constraints specific to instantiated graph update.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cudaGraphInstantiate

    +
    +
    + +
    +
    +cuda.bindings.runtime.cudaGraphUpload(graphExec, stream)#
    +

    Uploads an executable graph in a stream.

    +

    Uploads hGraphExec to the device in hStream without executing it. +Uploads of the same hGraphExec will be serialized. Each upload is +ordered behind both any previous work in hStream and any previous +launches of hGraphExec. Uses memory cached by stream to back the +allocations owned by graphExec.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphLaunch(graphExec, stream)#
    +

    Launches an executable graph in a stream.

    +

    Executes graphExec in stream. Only one instance of graphExec may +be executing at a time. Each launch is ordered behind both any previous +work in stream and any previous launches of graphExec. To execute a +graph concurrently, it must be instantiated multiple times into +multiple executable graphs.

    +

    If any allocations created by graphExec remain unfreed (from a +previous launch) and graphExec was not instantiated with +cudaGraphInstantiateFlagAutoFreeOnLaunch, the launch will +fail with cudaErrorInvalidValue.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
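    As a usage sketch tying the two calls above together (assuming graphExec
    came from an earlier cudaGraphInstantiate call, and relying on the
    bindings' convention of returning a tuple whose first element is the
    cudaError_t):

        from cuda.bindings import runtime

        err, stream = runtime.cudaStreamCreate()
        assert err == runtime.cudaError_t.cudaSuccess

        # Stage graphExec's resources up front; the launch in the same
        # stream is ordered behind the upload.
        err, = runtime.cudaGraphUpload(graphExec, stream)
        err, = runtime.cudaGraphLaunch(graphExec, stream)
        err, = runtime.cudaStreamSynchronize(stream)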
    +
    +cuda.bindings.runtime.cudaGraphExecDestroy(graphExec)#
    +

    Destroys an executable graph.

    +

    Destroys the executable graph specified by graphExec.

    +
    +
    Parameters:
    +

    graphExec (CUgraphExec or cudaGraphExec_t) – Executable graph to destroy

    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphDestroy(graph)#
    +

    Destroys a graph.

    +

    Destroys the graph specified by graph, as well as all of its nodes.

    +
    +
    Parameters:
    +

    graph (CUgraph or cudaGraph_t) – Graph to destroy

    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    +
    +

    See also

    +

    cudaGraphCreate

    +
    +
    + +
    +
    +cuda.bindings.runtime.cudaGraphDebugDotPrint(graph, char *path, unsigned int flags)#
    +

    Write a DOT file describing graph structure.

    +

    Using the provided graph, write to path a DOT formatted description +of the graph. By default this includes the graph topology, node types, +node id, kernel names and memcpy direction. flags can be specified to +write more detailed information about each node type such as parameter +values, kernel attributes, node and function handles.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – The graph to create a DOT file from

    • +
    • path (bytes) – The path to write the DOT file to

    • +
    • flags (unsigned int) – Flags from cudaGraphDebugDotFlags for specifying which additional +node information to write

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue, cudaErrorOperatingSystem

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    +
    + +
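    A minimal sketch of dumping a graph to disk (assuming graph is a valid
    cudaGraph_t and that the cudaGraphDebugDotFlags enum is exposed by this
    module, as the flags description above suggests):

        from cuda.bindings import runtime

        # Verbose output adds the per-node detail described above.
        flags = runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose
        err, = runtime.cudaGraphDebugDotPrint(graph, b"graph.dot", flags)
        assert err == runtime.cudaError_t.cudaSuccess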
    +
    +cuda.bindings.runtime.cudaUserObjectCreate(ptr, destroy, unsigned int initialRefcount, unsigned int flags)#
    +

    Create a user object.

    +

    Create a user object with the specified destructor callback and initial +reference count. The initial references are owned by the caller.

    +

    Destructor callbacks cannot make CUDA API calls and should avoid +blocking behavior, as they are executed by a shared internal thread. +Another thread may be signaled to perform such actions, if it does not +block forward progress of tasks scheduled through CUDA.

    +

    See CUDA User Objects in the CUDA C++ Programming Guide for more +information on user objects.

    +
    +
    Parameters:
    +
      +
    • ptr (Any) – The pointer to pass to the destroy function

    • +
    • destroy (cudaHostFn_t) – Callback to free the user object when it is no longer in use

    • +
    • initialRefcount (unsigned int) – The initial refcount to create the object with, typically 1. The +initial references are owned by the calling thread.

    • +
    • flags (unsigned int) – Currently it is required to pass +cudaUserObjectNoDestructorSync, which is the only +defined flag. This indicates that the destroy callback cannot be +waited on by any CUDA API. Users requiring synchronization of the +callback should signal its completion manually.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaUserObjectRetain(object, unsigned int count)#
    +

    Retain a reference to a user object.

    +

    Retains new references to a user object. The new references are owned +by the caller.

    +

    See CUDA User Objects in the CUDA C++ Programming Guide for more +information on user objects.

    +
    +
    Parameters:
    +
      +
    • object (cudaUserObject_t) – The object to retain

    • +
    • count (unsigned int) – The number of references to retain, typically 1. Must be nonzero +and not larger than INT_MAX.

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaUserObjectRelease(object, unsigned int count)#
    +

    Release a reference to a user object.

    +

    Releases user object references owned by the caller. The object’s +destructor is invoked if the reference count reaches zero.

    +

    It is undefined behavior to release references not owned by the caller, +or to use a user object handle after all references are released.

    +

    See CUDA User Objects in the CUDA C++ Programming Guide for more +information on user objects.

    +
    +
    Parameters:
    +
      +
    • object (cudaUserObject_t) – The object to release

    • +
    • count (unsigned int) – The number of references to release, typically 1. Must be nonzero +and not larger than INT_MAX.

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphRetainUserObject(graph, object, unsigned int count, unsigned int flags)#
    +

    Retain a reference to a user object from a graph.

    +

    Creates or moves user object references that will be owned by a CUDA +graph.

    +

    See CUDA User Objects in the CUDA C++ Programming Guide for more +information on user objects.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – The graph to associate the reference with

    • +
    • object (cudaUserObject_t) – The user object to retain a reference for

    • +
    • count (unsigned int) – The number of references to add to the graph, typically 1. Must be +nonzero and not larger than INT_MAX.

    • +
    • flags (unsigned int) – The optional flag cudaGraphUserObjectMove transfers +references from the calling thread, rather than creating new +references. Pass 0 to create new references.

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    +
    +

    See also

    +
    +
    cudaUserObjectCreate

    cudaUserObjectRetain, cudaUserObjectRelease, cudaGraphReleaseUserObject, cudaGraphCreate

    +
    +
    +
    +
    + +
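    A sketch of the ownership transfer described by the flags parameter
    above. graph and userObj are assumed to already exist (userObj from
    cudaUserObjectCreate with one caller-owned reference); the enum name
    cudaUserObjectRetainFlags follows the C API and is an assumption here:

        from cuda.bindings import runtime

        # Hand the caller-owned reference to the graph instead of minting
        # a new one; afterwards the calling thread owns no references.
        move = runtime.cudaUserObjectRetainFlags.cudaGraphUserObjectMove
        err, = runtime.cudaGraphRetainUserObject(graph, userObj, 1, move)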
    +
    +cuda.bindings.runtime.cudaGraphReleaseUserObject(graph, object, unsigned int count)#
    +

    Release a user object reference from a graph.

    +

    Releases user object references owned by a graph.

    +

    See CUDA User Objects in the CUDA C++ Programming Guide for more +information on user objects.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – The graph that will release the reference

    • +
    • object (cudaUserObject_t) – The user object to release a reference for

    • +
    • count (unsigned int) – The number of references to release, typically 1. Must be nonzero +and not larger than INT_MAX.

    • +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    +
    +

    See also

    +
    +
    cudaUserObjectCreate

    cudaUserObjectRetain, cudaUserObjectRelease, cudaGraphRetainUserObject, cudaGraphCreate

    +
    +
    +
    +
    + +
    +
    +cuda.bindings.runtime.cudaGraphAddNode(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], size_t numDependencies, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    +

    Adds a node of arbitrary type to a graph.

    +

    Creates a new node in graph described by nodeParams with +numDependencies dependencies specified via pDependencies. +numDependencies may be 0. pDependencies may be null if +numDependencies is 0. pDependencies may not have any duplicate +entries.

    +

    nodeParams is a tagged union. The node type should be specified in +the typename field, and type-specific parameters in the corresponding +union member. All unused bytes - that is, reserved0 and all bytes +past the utilized union member - must be set to zero. It is recommended +to use brace initialization or memset to ensure all bytes are +initialized.

    +

    Note that for some node types, nodeParams may contain “out +parameters” which are modified during the call, such as +nodeParams->alloc.dptr.

    +

    A handle to the new node will be returned in phGraphNode.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
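    A minimal sketch of the tagged-union pattern described above, adding a
    dependency-free empty node. The typename field follows the description
    above, and default construction is assumed to zero-initialize the
    struct, satisfying the "all unused bytes must be zero" requirement:

        from cuda.bindings import runtime

        err, graph = runtime.cudaGraphCreate(0)

        params = runtime.cudaGraphNodeParams()
        params.typename = runtime.cudaGraphNodeType.cudaGraphNodeTypeEmpty

        # pDependencies may be None when numDependencies is 0.
        err, node = runtime.cudaGraphAddNode(graph, None, 0, params)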
    +
    +cuda.bindings.runtime.cudaGraphAddNode_v2(graph, pDependencies: Optional[Tuple[cudaGraphNode_t] | List[cudaGraphNode_t]], dependencyData: Optional[Tuple[cudaGraphEdgeData] | List[cudaGraphEdgeData]], size_t numDependencies, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    +

    Adds a node of arbitrary type to a graph (12.3+)

    +

    Creates a new node in graph described by nodeParams with +numDependencies dependencies specified via pDependencies. +numDependencies may be 0. pDependencies may be null if +numDependencies is 0. pDependencies may not have any duplicate +entries.

    +

    nodeParams is a tagged union. The node type should be specified in +the typename field, and type-specific parameters in the corresponding +union member. All unused bytes - that is, reserved0 and all bytes +past the utilized union member - must be set to zero. It is recommended +to use brace initialization or memset to ensure all bytes are +initialized.

    +

    Note that for some node types, nodeParams may contain “out +parameters” which are modified during the call, such as +nodeParams->alloc.dptr.

    +

    A handle to the new node will be returned in phGraphNode.

    +
    +
    Parameters:
    +
      +
    • graph (CUgraph or cudaGraph_t) – Graph to which to add the node

    • +
    • pDependencies (List[cudaGraphNode_t]) – Dependencies of the node

    • +
    • dependencyData (List[cudaGraphEdgeData]) – Optional edge data for the dependencies. If NULL, the data is +assumed to be default (zeroed) for all dependencies.

    • +
    • numDependencies (size_t) – Number of dependencies

    • +
    • nodeParams (cudaGraphNodeParams) – Specification of the node

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphNodeSetParams(node, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    +

    Updates a graph node’s parameters.

    +

    Sets the parameters of graph node node to nodeParams. The node type +specified by nodeParams->type must match the type of node. +nodeParams must be fully initialized and all unused bytes (reserved, +padding) zeroed.

    +

    Modifying parameters is not supported for node types +cudaGraphNodeTypeMemAlloc and cudaGraphNodeTypeMemFree.

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDeviceFunction, cudaErrorNotSupported

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphExecNodeSetParams(graphExec, node, cudaGraphNodeParams nodeParams: Optional[cudaGraphNodeParams])#
    +

    Updates a graph node’s parameters in an instantiated graph.

    +

    Sets the parameters of a node in an executable graph graphExec. The +node is identified by the corresponding node node in the non- +executable graph from which the executable graph was instantiated. +node must not have been removed from the original graph.

    +

    The modifications only affect future launches of graphExec. Already +enqueued or running launches of graphExec are not affected by this +call. node is also not modified by this call.

    +

    Allowed changes to parameters on executable graphs are as follows:

    +

    View CUDA Toolkit Documentation for a table example

    +
    +
    Parameters:
    +
    +
    +
    Returns:
    +

    cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDeviceFunction, cudaErrorNotSupported

    +
    +
    Return type:
    +

    cudaError_t

    +
    +
    + +
    + +
    +
    +cuda.bindings.runtime.cudaGraphConditionalHandleCreate(graph, unsigned int defaultLaunchValue, unsigned int flags)#
    +

    Create a conditional handle.

    +

    Creates a conditional handle associated with hGraph.

    +

    The conditional handle must be associated with a conditional node in +this graph or one of its children.

    +

    Handles not associated with a conditional node may cause graph +instantiation to fail.

    +
    +
    Parameters:
    +
      +
    • hGraph (CUgraph or cudaGraph_t) – Graph which will contain the conditional node using this handle.

    • +
    • defaultLaunchValue (unsigned int) – Optional initial value for the conditional variable.

    • +
    • flags (unsigned int) – Currently must be cudaGraphCondAssignDefault or 0.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cuGraphAddNode

    +
    +
    + +
    +
    +

    Driver Entry Point Access#

    +

    This section describes the driver entry point access functions of CUDA runtime application programming interface.

    +
    +
    +cuda.bindings.runtime.cudaGetDriverEntryPoint(char *symbol, unsigned long long flags)#
    +

    Returns the requested driver API function pointer.

    +

    Returns in **funcPtr the address of the CUDA driver function for the +requested flags.

    +

    For a requested driver symbol, if the CUDA version in which the driver +symbol was introduced is less than or equal to the CUDA runtime +version, the API will return the function pointer to the corresponding +versioned driver function.

    +

    The pointer returned by the API should be cast to a function pointer +matching the requested driver function’s definition in the API header +file. The function pointer typedef can be picked up from the +corresponding typedefs header file. For example, cudaTypedefs.h +consists of function pointer typedefs for driver APIs defined in +cuda.h.

    +

    The API will return cudaSuccess and set the returned +funcPtr if the requested driver function is valid and supported on +the platform.

    +

    The API will return cudaSuccess and set the returned +funcPtr to NULL if the requested driver function is not supported on +the platform, no ABI compatible driver function exists for the CUDA +runtime version or if the driver symbol is invalid.

    +

    It will also set the optional driverStatus to one of the values in +cudaDriverEntryPointQueryResult with the following +meanings:

    View CUDA Toolkit Documentation for a table example

    The requested flags can be:

    +
      +
    • cudaEnableDefault: This is the default mode. This is +equivalent to cudaEnablePerThreadDefaultStream if the +code is compiled with the --default-stream per-thread compilation flag or +the macro CUDA_API_PER_THREAD_DEFAULT_STREAM is defined; +cudaEnableLegacyStream otherwise.

    • +
    • cudaEnableLegacyStream: This will enable the search for +all driver symbols that match the requested driver symbol name except +the corresponding per-thread versions.

    • +
    • cudaEnablePerThreadDefaultStream: This will enable the +search for all driver symbols that match the requested driver symbol +name including the per-thread versions. If a per-thread version is +not found, the API will return the legacy version of the driver +function.

    • +
    +
    +
    Parameters:
    +
      +
    • symbol (bytes) – The base name of the driver API function to look for. As an +example, for the driver API cuMemAlloc_v2, symbol +would be cuMemAlloc. Note that the API will use the CUDA runtime +version to return the address to the most recent ABI compatible +driver symbol, cuMemAlloc or cuMemAlloc_v2.

    • +
    • flags (unsigned long long) – Flags to specify search options.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cuGetProcAddress

    +
    +
    + +
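    A sketch of looking up a driver symbol. The three-element return shape,
    mirroring the funcPtr and driverStatus outputs above, is an assumption
    about the binding; passing 0 selects the cudaEnableDefault mode:

        from cuda.bindings import runtime

        err, funcPtr, status = runtime.cudaGetDriverEntryPoint(b"cuMemAlloc", 0)
        # funcPtr is NULL if the symbol is unsupported; status explains why.
        print(err, funcPtr, status)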
    +
    +cuda.bindings.runtime.cudaGetDriverEntryPointByVersion(char *symbol, unsigned int cudaVersion, unsigned long long flags)#
    +

    Returns the requested driver API function pointer by CUDA version.

    +

    Returns in **funcPtr the address of the CUDA driver function for the +requested flags and CUDA driver version.

    +

    The CUDA version is specified as (1000 * major + 10 * minor), so CUDA +11.2 should be specified as 11020. For a requested driver symbol, if +the specified CUDA version is greater than or equal to the CUDA version +in which the driver symbol was introduced, this API will return the +function pointer to the corresponding versioned function.

    +

    The pointer returned by the API should be cast to a function pointer +matching the requested driver function’s definition in the API header +file. The function pointer typedef can be picked up from the +corresponding typedefs header file. For example, cudaTypedefs.h +consists of function pointer typedefs for driver APIs defined in +cuda.h.

    +

    For the case where the requested CUDA version is greater than the CUDA +Toolkit installed, there may not be an appropriate function pointer +typedef in the corresponding header file, and a custom typedef may be +needed to match the driver function signature returned. This can be done by +getting the typedefs from a later toolkit or creating appropriately +matching custom function typedefs.

    +

    The API will return cudaSuccess and set the returned +funcPtr if the requested driver function is valid and supported on +the platform.

    +

    The API will return cudaSuccess and set the returned +funcPtr to NULL if the requested driver function is not supported on +the platform, no ABI compatible driver function exists for the +requested version or if the driver symbol is invalid.

    +

    It will also set the optional driverStatus to one of the values in +cudaDriverEntryPointQueryResult with the following +meanings:

    View CUDA Toolkit Documentation for a table example

    The requested flags can be:

    +
      +
    • cudaEnableDefault: This is the default mode. This is +equivalent to cudaEnablePerThreadDefaultStream if the +code is compiled with the --default-stream per-thread compilation flag or +the macro CUDA_API_PER_THREAD_DEFAULT_STREAM is defined; +cudaEnableLegacyStream otherwise.

    • +
    • cudaEnableLegacyStream: This will enable the search for +all driver symbols that match the requested driver symbol name except +the corresponding per-thread versions.

    • +
    • cudaEnablePerThreadDefaultStream: This will enable the +search for all driver symbols that match the requested driver symbol +name including the per-thread versions. If a per-thread version is +not found, the API will return the legacy version of the driver +function.

    • +
    +
    +
    Parameters:
    +
      +
    • symbol (bytes) – The base name of the driver API function to look for. As an +example, for the driver API cuMemAlloc_v2, symbol +would be cuMemAlloc.

    • +
    • cudaVersion (unsigned int) – The CUDA version to look for the requested driver symbol

    • +
    • flags (unsigned long long) – Flags to specify search options.

    • +
    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cuGetProcAddress

    +
    +
    + +
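    The version encoding above as a worked example. The cuda_version helper
    below is hypothetical, and the return shape is assumed to match
    cudaGetDriverEntryPoint:

        from cuda.bindings import runtime

        def cuda_version(major: int, minor: int) -> int:
            # 1000 * major + 10 * minor, so CUDA 11.2 -> 11020
            return 1000 * major + 10 * minor

        err, funcPtr, status = runtime.cudaGetDriverEntryPointByVersion(
            b"cuMemAlloc", cuda_version(12, 0), 0)  # 0 == cudaEnableDefault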
    +
    +

    C++ API Routines#

    +

    C++-style interface built on top of the CUDA runtime API.

    +

    This section describes the C++ high level API functions of the CUDA runtime application programming interface. To use these functions, your application needs to be compiled with the nvcc compiler.

    +
    +
    +

    Interactions with the CUDA Driver API#

    +

    This section describes the interactions between the CUDA Driver API and the CUDA Runtime API.

    +

    Primary Contexts

    +

    There exists a one-to-one relationship between CUDA devices in the CUDA Runtime API and ::CUcontext handles in the CUDA Driver API within a process. The specific context which the CUDA Runtime API uses for a device is called the device’s primary context. From the perspective of the CUDA Runtime API, a device and its primary context are synonymous.

    +

    Initialization and Tear-Down

    +

    CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current to the calling host thread.

    +

    The function cudaInitDevice() ensures that the primary context is initialized for the requested device but does not make it current to the calling thread.

    +

    The function cudaSetDevice() initializes the primary context for the specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent().

    +

    The CUDA Runtime API will automatically initialize the primary context for a device at the first CUDA Runtime API call which requires an active context. If no ::CUcontext is current to the calling thread when a CUDA Runtime API call which requires an active context is made, then the primary context for a device will be selected, made current to the calling thread, and initialized.

    +

    The context which the CUDA Runtime API initializes will be initialized using the parameters specified by the CUDA Runtime API functions cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), ::cudaD3D10SetDirect3DDevice(), ::cudaD3D11SetDirect3DDevice(), cudaGLSetGLDevice(), and cudaVDPAUSetVDPAUDevice(). Note that these functions will fail with cudaErrorSetOnActiveProcess if they are called when the primary context for the specified device has already been initialized. (or if the current device has already been initialized, in the case of cudaSetDeviceFlags()).

    +

    Primary contexts will remain active until they are explicitly deinitialized using cudaDeviceReset(). The function cudaDeviceReset() will deinitialize the primary context for the calling thread’s current device immediately. The context will remain current to all of the threads that it was current to. The next CUDA Runtime API call on any thread which requires an active context will trigger the reinitialization of that device’s primary context.

    +

    Note that primary contexts are shared resources. It is recommended that the primary context not be reset except just before exit or to recover from an unspecified launch failure.

    +

    Context Interoperability

    +

    Note that the use of multiple ::CUcontext handles per device within a single process will substantially degrade performance and is strongly discouraged. Instead, it is highly recommended that the implicit one-to-one device-to-context mapping for the process provided by the CUDA Runtime API be used.

    +

    If a non-primary ::CUcontext created by the CUDA Driver API is current to a thread then the CUDA Runtime API calls to that thread will operate on that ::CUcontext, with some exceptions listed below. Interoperability between data types is discussed in the following sections.

    +

    The function cudaPointerGetAttributes() will return the error cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a non-primary context. The function cudaDeviceEnablePeerAccess() and the rest of the peer access API may not be called when a non-primary ::CUcontext is current.

    +
    +

    To use the pointer query and peer access APIs with a context created using the CUDA Driver API, it is necessary that the CUDA Driver API be used to access these features.

    +
    +

    All CUDA Runtime API state (e.g., global variables’ addresses and values) travels with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one thread to another then all CUDA Runtime API state will move to that thread as well.

    +

    Please note that attaching to legacy contexts (those with a version of 3010 as returned by ::cuCtxGetApiVersion()) is not possible. The CUDA Runtime will return cudaErrorIncompatibleDriverContext in such cases.

    +

    Interactions between CUstream and cudaStream_t

    +

    The types ::CUstream and cudaStream_t are identical and may be used interchangeably.

    +
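    In these bindings the interchangeability is direct: a handle created
    through one API can be passed to calls in the other. A minimal sketch
    (the runtime call below also initializes the primary context, so the
    driver call that follows has a context current):

        from cuda.bindings import driver, runtime

        err, stream = runtime.cudaStreamCreate()   # cudaStream_t
        err, = driver.cuStreamSynchronize(stream)  # accepted as CUstream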

    Interactions between CUevent and cudaEvent_t

    +

    The types ::CUevent and cudaEvent_t are identical and may be used interchangeably.

    +

    Interactions between CUarray and cudaArray_t

    +

    The types ::CUarray and struct ::cudaArray * represent the same data type and may be used interchangeably by casting the two types between each other.

    +

    In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *.

    +

    In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray .

    +

    Interactions between CUgraphicsResource and cudaGraphicsResource_t

    +

    The types ::CUgraphicsResource and cudaGraphicsResource_t represent the same data type and may be used interchangeably by casting the two types between each other.

    +

    In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource to a cudaGraphicsResource_t.

    +

    In order to use a cudaGraphicsResource_t in a CUDA Driver API function which takes a ::CUgraphicsResource, it is necessary to explicitly cast the cudaGraphicsResource_t to a ::CUgraphicsResource.

    +

    Interactions between CUtexObject and cudaTextureObject_t

    +

    The types ::CUtexObject and cudaTextureObject_t represent the same data type and may be used interchangeably by casting the two types between each other.

    +

    In order to use a ::CUtexObject in a CUDA Runtime API function which takes a cudaTextureObject_t, it is necessary to explicitly cast the ::CUtexObject to a cudaTextureObject_t.

    +

    In order to use a cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, it is necessary to explicitly cast the cudaTextureObject_t to a ::CUtexObject.

    +

    Interactions between CUsurfObject and cudaSurfaceObject_t

    +

    The types ::CUsurfObject and cudaSurfaceObject_t represent the same data type and may be used interchangeably by casting the two types between each other.

    +

    In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a cudaSurfaceObject_t, it is necessary to explicitly cast the ::CUsurfObject to a cudaSurfaceObject_t.

    +

    In order to use a cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, it is necessary to explicitly cast the cudaSurfaceObject_t to a ::CUsurfObject.

    +

    Interactions between CUfunction and cudaFunction_t

    +

    The types ::CUfunction and cudaFunction_t represent the same data type and may be used interchangeably by casting the two types between each other.

    +

    In order to use a cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, it is necessary to explicitly cast the cudaFunction_t to a ::CUfunction.

    +
    +
    +cuda.bindings.runtime.cudaGetKernel(entryFuncAddr)#
    +

    Get pointer to device kernel that matches entry function entryFuncAddr.

    +

    Returns in kernelPtr the device kernel corresponding to the entry +function entryFuncAddr.

    +
    +
    Parameters:
    +

    entryFuncAddr (Any) – Address of device entry function to search kernel for

    +
    +
    Returns:
    +

    +

    +
    +
    +
    +

    See also

    +

    cudaGetKernel

    +
    +
    + +
    +
    +

    Data types used by CUDA Runtime#

    +
    +
    +class cuda.bindings.runtime.cudaEglPlaneDesc_st(void_ptr _ptr=0)#
    +

    CUDA EGL Plane Descriptor - structure defining each plane of a CUDA +EGLFrame

    +
    +
    +width#
    +

    Width of plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +height#
    +

    Height of plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +depth#
    +

    Depth of plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +pitch#
    +

    Pitch of plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +numChannels#
    +

    Number of channels for the plane

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +channelDesc#
    +

    Channel Format Descriptor

    +
    +
    Type:
    +

    cudaChannelFormatDesc

    +
    +
    +
    + +
    +
    +reserved#
    +

    Reserved for future use

    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaEglFrame_st(void_ptr _ptr=0)#
    +

    CUDA EGLFrame Descriptor - structure defining one frame of EGL. +Each frame may contain one or more planes depending on whether the +surface is Multiplanar or not. Each plane of EGLFrame is +represented by cudaEglPlaneDesc, which is defined as: +typedef struct cudaEglPlaneDesc_st { unsigned int width; +unsigned int height; unsigned int depth; unsigned int pitch; +unsigned int numChannels; struct cudaChannelFormatDesc channelDesc; +unsigned int reserved[4]; } cudaEglPlaneDesc;

    +
    +
    +frame#
    +
    +
    Type:
    +

    anon_union10

    +
    +
    +
    + +
    +
    +planeDesc#
    +

    CUDA EGL Plane Descriptor cudaEglPlaneDesc

    +
    +
    Type:
    +

    List[cudaEglPlaneDesc]

    +
    +
    +
    + +
    +
    +planeCount#
    +

    Number of planes

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +frameType#
    +

    Array or Pitch

    +
    +
    Type:
    +

    cudaEglFrameType

    +
    +
    +
    + +
    +
    +eglColorFormat#
    +

    CUDA EGL Color Format

    +
    +
    Type:
    +

    cudaEglColorFormat

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaChannelFormatDesc(void_ptr _ptr=0)#
    +

    CUDA Channel format descriptor

    +
    +
    +x#
    +

    x

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +y#
    +

    y

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +z#
    +

    z

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +w#
    +

    w

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +f#
    +

    Channel format kind

    +
    +
    Type:
    +

    cudaChannelFormatKind

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
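    These data-type classes wrap the underlying C structs: they can be
    default-constructed, mutated field by field, and passed to API calls,
    with getPtr() exposing the raw address for interop. A small sketch
    using cudaChannelFormatDesc:

        from cuda.bindings import runtime

        desc = runtime.cudaChannelFormatDesc()
        desc.x = desc.y = desc.z = desc.w = 8  # 8 bits per channel
        desc.f = runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsigned
        print(desc.getPtr())  # address of the wrapped struct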
    +
    +class cuda.bindings.runtime.cudaArraySparseProperties(void_ptr _ptr=0)#
    +

    Sparse CUDA array and CUDA mipmapped array properties

    +
    +
    +tileExtent#
    +
    +
    Type:
    +

    anon_struct0

    +
    +
    +
    + +
    +
    +miptailFirstLevel#
    +

    First mip level at which the mip tail begins

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +miptailSize#
    +

    Total size of the mip tail.

    +
    +
    Type:
    +

    unsigned long long

    +
    +
    +
    + +
    +
    +flags#
    +

    Flags will either be zero or cudaArraySparsePropertiesSingleMipTail

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaArrayMemoryRequirements(void_ptr _ptr=0)#
    +

    CUDA array and CUDA mipmapped array memory requirements

    +
    +
    +size#
    +

    Total size of the array.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +alignment#
    +

    Alignment necessary for mapping the array.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +reserved#
    +
    +
    Type:
    +

    List[unsigned int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaPitchedPtr(void_ptr _ptr=0)#
    +

    CUDA Pitched memory pointer ::make_cudaPitchedPtr

    +
    +
    +ptr#
    +

    Pointer to allocated memory

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +pitch#
    +

    Pitch of allocated memory in bytes

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +xsize#
    +

    Logical width of allocation in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +ysize#
    +

    Logical height of allocation in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaExtent(void_ptr _ptr=0)#
    +

    CUDA extent ::make_cudaExtent

    +
    +
    +width#
    +

    Width in elements when referring to array memory, in bytes when +referring to linear memory

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +height#
    +

    Height in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +depth#
    +

    Depth in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaPos(void_ptr _ptr=0)#
    +

    CUDA 3D position ::make_cudaPos

    +
    +
    +x#
    +

    x

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +y#
    +

    y

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +z#
    +

    z

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemcpy3DParms(void_ptr _ptr=0)#
    +

    CUDA 3D memory copying parameters

    +
    +
    +srcArray#
    +

    Source memory address

    +
    +
    Type:
    +

    cudaArray_t

    +
    +
    +
    + +
    +
    +srcPos#
    +

    Source position offset

    +
    +
    Type:
    +

    cudaPos

    +
    +
    +
    + +
    +
    +srcPtr#
    +

    Pitched source memory address

    +
    +
    Type:
    +

    cudaPitchedPtr

    +
    +
    +
    + +
    +
    +dstArray#
    +

    Destination memory address

    +
    +
    Type:
    +

    cudaArray_t

    +
    +
    +
    + +
    +
    +dstPos#
    +

    Destination position offset

    +
    +
    Type:
    +

    cudaPos

    +
    +
    +
    + +
    +
    +dstPtr#
    +

    Pitched destination memory address

    +
    +
    Type:
    +

    cudaPitchedPtr

    +
    +
    +
    + +
    +
    +extent#
    +

    Requested memory copy size

    +
    +
    Type:
    +

    cudaExtent

    +
    +
    +
    + +
    +
    +kind#
    +

    Type of transfer

    +
    +
    Type:
    +

    cudaMemcpyKind

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
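    A sketch of filling cudaMemcpy3DParms for a host-to-array 3D copy. It
    assumes hostBuf (a host pointer) and dstArray (a 3D cudaArray_t) already
    exist, and that the make_cuda* helpers from the C API are exposed by
    this module, as the class docstrings above suggest:

        from cuda.bindings import runtime

        w, h, d = 64, 32, 8  # extent in elements (4-byte elements assumed)
        p = runtime.cudaMemcpy3DParms()
        p.srcPtr = runtime.make_cudaPitchedPtr(hostBuf, w * 4, w, h)
        p.dstArray = dstArray
        p.extent = runtime.make_cudaExtent(w, h, d)
        p.kind = runtime.cudaMemcpyKind.cudaMemcpyHostToDevice
        err, = runtime.cudaMemcpy3D(p)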
    +
    +class cuda.bindings.runtime.cudaMemcpyNodeParams(void_ptr _ptr=0)#
    +

    Memcpy node parameters

    +
    +
    +flags#
    +

    Must be zero

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +reserved#
    +

    Must be zero

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +copyParams#
    +

    Parameters for the memory copy

    +
    +
    Type:
    +

    cudaMemcpy3DParms

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemcpy3DPeerParms(void_ptr _ptr=0)#
    +

    CUDA 3D cross-device memory copying parameters

    +
    +
    +srcArray#
    +

    Source memory address

    +
    +
    Type:
    +

    cudaArray_t

    +
    +
    +
    + +
    +
    +srcPos#
    +

    Source position offset

    +
    +
    Type:
    +

    cudaPos

    +
    +
    +
    + +
    +
    +srcPtr#
    +

    Pitched source memory address

    +
    +
    Type:
    +

    cudaPitchedPtr

    +
    +
    +
    + +
    +
    +srcDevice#
    +

    Source device

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +dstArray#
    +

    Destination memory address

    +
    +
    Type:
    +

    cudaArray_t

    +
    +
    +
    + +
    +
    +dstPos#
    +

    Destination position offset

    +
    +
    Type:
    +

    cudaPos

    +
    +
    +
    + +
    +
    +dstPtr#
    +

    Pitched destination memory address

    +
    +
    Type:
    +

    cudaPitchedPtr

    +
    +
    +
    + +
    +
    +dstDevice#
    +

    Destination device

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +extent#
    +

    Requested memory copy size

    +
    +
    Type:
    +

    cudaExtent

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemsetParams(void_ptr _ptr=0)#
    +

    CUDA Memset node parameters

    +
    +
    +dst#
    +

    Destination device pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +pitch#
    +

    Pitch of destination device pointer. Unused if height is 1

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +value#
    +

    Value to be set

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +elementSize#
    +

    Size of each element in bytes. Must be 1, 2, or 4.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +width#
    +

    Width of the row in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +height#
    +

    Number of rows

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemsetParamsV2(void_ptr _ptr=0)#
    +

    CUDA Memset node parameters

    +
    +
    +dst#
    +

    Destination device pointer

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +pitch#
    +

    Pitch of destination device pointer. Unused if height is 1

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +value#
    +

    Value to be set

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +elementSize#
    +

    Size of each element in bytes. Must be 1, 2, or 4.

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +width#
    +

    Width of the row in elements

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +height#
    +

    Number of rows

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaAccessPolicyWindow(void_ptr _ptr=0)#
    +

    Specifies an access policy for a window, a contiguous extent of +memory beginning at base_ptr and ending at base_ptr + num_bytes. +Partition into many segments and assign segments such that: sum of +“hit segments” / window == approx. ratio; sum of “miss segments” / +window == approx. 1 - ratio. Segments and ratio specifications are +fitted to the capabilities of the architecture. Accesses in a hit +segment apply the hitProp access policy. Accesses in a miss segment +apply the missProp access policy.

    +
    +
    +base_ptr#
    +

    Starting address of the access policy window. CUDA driver may align +it.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +num_bytes#
    +

    Size in bytes of the window policy. CUDA driver may restrict the +maximum size and alignment.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +hitRatio#
    +

    hitRatio specifies percentage of lines assigned hitProp, rest are +assigned missProp.

    +
    +
    Type:
    +

    float

    +
    +
    +
    + +
    +
    +hitProp#
    +

    ::CUaccessProperty set for hit.

    +
    +
    Type:
    +

    cudaAccessProperty

    +
    +
    +
    + +
    +
    +missProp#
    +

    ::CUaccessProperty set for miss. Must be either NORMAL or +STREAMING.

    +
    +
    Type:
    +

    cudaAccessProperty

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
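    A construction sketch for the window described above. dptr and nbytes
    are assumed to exist; attaching the window to a stream or kernel launch
    goes through the stream/launch attribute APIs and is not shown:

        from cuda.bindings import runtime

        win = runtime.cudaAccessPolicyWindow()
        win.base_ptr = dptr
        win.num_bytes = nbytes
        win.hitRatio = 0.6  # ~60% of lines get hitProp, the rest missProp
        win.hitProp = runtime.cudaAccessProperty.cudaAccessPropertyPersisting
        win.missProp = runtime.cudaAccessProperty.cudaAccessPropertyStreaming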
    +
    +class cuda.bindings.runtime.cudaHostNodeParams(void_ptr _ptr=0)#
    +

    CUDA host node parameters

    +
    +
    +fn#
    +

    The function to call when the node executes

    +
    +
    Type:
    +

    cudaHostFn_t

    +
    +
    +
    + +
    +
    +userData#
    +

    Argument to pass to the function

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaHostNodeParamsV2(void_ptr _ptr=0)#
    +

    CUDA host node parameters

    +
    +
    +fn#
    +

    The function to call when the node executes

    +
    +
    Type:
    +

    cudaHostFn_t

    +
    +
    +
    + +
    +
    +userData#
    +

    Argument to pass to the function

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaResourceDesc(void_ptr _ptr=0)#
    +

    CUDA resource descriptor

    +
    +
    +resType#
    +

    Resource type

    +
    +
    Type:
    +

    cudaResourceType

    +
    +
    +
    + +
    +
    +res#
    +
    +
    Type:
    +

    anon_union0

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaResourceViewDesc(void_ptr _ptr=0)#
    +

    CUDA resource view descriptor

    +
    +
    +format#
    +

    Resource view format

    +
    +
    Type:
    +

    cudaResourceViewFormat

    +
    +
    +
    + +
    +
    +width#
    +

    Width of the resource view

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +height#
    +

    Height of the resource view

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +depth#
    +

    Depth of the resource view

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +firstMipmapLevel#
    +

    First defined mipmap level

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +lastMipmapLevel#
    +

    Last defined mipmap level

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +firstLayer#
    +

    First layer index

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +lastLayer#
    +

    Last layer index

    +
    +
    Type:
    +

    unsigned int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaPointerAttributes(void_ptr _ptr=0)#
    +

    CUDA pointer attributes

    +
    +
    +type#
    +

    The type of memory - cudaMemoryTypeUnregistered, +cudaMemoryTypeHost, cudaMemoryTypeDevice or cudaMemoryTypeManaged.

    +
    +
    Type:
    +

    cudaMemoryType

    +
    +
    +
    + +
    +
    +device#
    +

    The device against which the memory was allocated or registered. If +the memory type is cudaMemoryTypeDevice then this identifies the +device on which the memory referred to physically resides. If the +memory type is cudaMemoryTypeHost or cudaMemoryTypeManaged then +this identifies the device which was current when the memory was +allocated or registered (and if that device is deinitialized then +this allocation will vanish with that device’s state).

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +devicePointer#
    +

    The address which may be dereferenced on the current device to +access the memory or NULL if no such address exists.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +hostPointer#
    +

    The address which may be dereferenced on the host to access the +memory or NULL if no such address exists. CUDA doesn’t check if +unregistered memory is allocated so this field may contain invalid +pointer if an invalid pointer has been passed to CUDA.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
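    A classification sketch using the attributes above (ptr is assumed to be
    an address previously returned to or registered with CUDA):

        from cuda.bindings import runtime

        err, attrs = runtime.cudaPointerGetAttributes(ptr)
        if err == runtime.cudaError_t.cudaSuccess:
            print(attrs.type, attrs.device)
            print(attrs.devicePointer, attrs.hostPointer)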
    +
    +class cuda.bindings.runtime.cudaFuncAttributes(void_ptr _ptr=0)#
    +

    CUDA function attributes

    +
    +
    +sharedSizeBytes#
    +

    The size in bytes of statically-allocated shared memory per block +required by this function. This does not include dynamically- +allocated shared memory requested by the user at runtime.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +constSizeBytes#
    +

    The size in bytes of user-allocated constant memory required by +this function.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +localSizeBytes#
    +

    The size in bytes of local memory used by each thread of this +function.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +maxThreadsPerBlock#
    +

    The maximum number of threads per block, beyond which a launch of +the function would fail. This number depends on both the function +and the device on which the function is currently loaded.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +numRegs#
    +

    The number of registers used by each thread of this function.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +ptxVersion#
    +

    The PTX virtual architecture version for which the function was +compiled. This value is the major PTX version * 10 + the minor PTX +version, so a PTX version 1.3 function would return the value 13.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +binaryVersion#
    +

    The binary architecture version for which the function was +compiled. This value is the major binary version * 10 + the minor +binary version, so a binary version 1.3 function would return the +value 13.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +cacheModeCA#
    +

    The attribute to indicate whether the function has been compiled +with user specified option “-Xptxas --dlcm=ca” set.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +maxDynamicSharedSizeBytes#
    +

    The maximum size in bytes of dynamic shared memory per block for +this function. Any launch must have a dynamic shared memory size +smaller than this value.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +preferredShmemCarveout#
    +

    On devices where the L1 cache and shared memory use the same +hardware resources, this sets the shared memory carveout +preference, in percent of the maximum shared memory. Refer to +cudaDevAttrMaxSharedMemoryPerMultiprocessor. This is only a hint, +and the driver can choose a different ratio if required to execute +the function. See cudaFuncSetAttribute

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +clusterDimMustBeSet#
    +

    If this attribute is set, the kernel must launch with a valid +cluster dimension specified.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +requiredClusterWidth#
    +

    The required cluster width/height/depth in blocks. The values must +either all be 0 or all be positive. The validity of the cluster +dimensions is otherwise checked at launch time. If the value is +set during compile time, it cannot be set at runtime. Setting it at +runtime should return cudaErrorNotPermitted. See +cudaFuncSetAttribute

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +requiredClusterHeight#
    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +requiredClusterDepth#
    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +clusterSchedulingPolicyPreference#
    +

    The block scheduling policy of a function. See cudaFuncSetAttribute

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +nonPortableClusterSizeAllowed#
    +

    Whether the function can be launched with non-portable cluster +size. 1 is allowed, 0 is disallowed. A non-portable cluster size +may only function on the specific SKUs the program is tested on. +The launch might fail if the program is run on a different hardware +platform. CUDA API provides cudaOccupancyMaxActiveClusters to +assist with checking whether the desired size can be launched on +the current device. Portable Cluster Size: A portable cluster size +is guaranteed to be functional on all compute capabilities higher +than the target compute capability. The portable cluster size for +sm_90 is 8 blocks per cluster. This value may increase for future +compute capabilities. The specific hardware unit may support +higher cluster sizes that are not guaranteed to be portable. See +cudaFuncSetAttribute

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +reserved#
    +

    Reserved for future use.

    +
    +
    Type:
    +

    List[int]

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemLocation(void_ptr _ptr=0)#
    +

    Specifies a memory location. To specify a gpu, set type = +cudaMemLocationTypeDevice and set id = the gpu’s device ordinal. To +specify a cpu NUMA node, set type = cudaMemLocationTypeHostNuma and +set id = host NUMA node id.

    +
    +
    +type#
    +

    Specifies the location type, which modifies the meaning of id.

    +
    +
    Type:
    +

    cudaMemLocationType

    +
    +
    +
    + +
    +
    +id#
    +

    Identifier for the location; its meaning is determined by this location’s ::CUmemLocationType.

    +
    +
    Type:
    +

    int

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemAccessDesc(void_ptr _ptr=0)#
    +

    Memory access descriptor

    +
    +
    +location#
    +

    Location on which the request is to change its accessibility

    +
    +
    Type:
    +

    cudaMemLocation

    +
    +
    +
    + +
    +
    +flags#
    +

    ::CUmemProt accessibility flags to set on the request

    +
    +
    Type:
    +

    cudaMemAccessFlags

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
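    A sketch pairing cudaMemLocation with cudaMemAccessDesc to grant device
    0 read/write access to a pool (the pool handle is assumed to exist):

        from cuda.bindings import runtime

        loc = runtime.cudaMemLocation()
        loc.type = runtime.cudaMemLocationType.cudaMemLocationTypeDevice
        loc.id = 0

        desc = runtime.cudaMemAccessDesc()
        desc.location = loc
        desc.flags = runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite

        err, = runtime.cudaMemPoolSetAccess(pool, [desc], 1)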
    +
    +class cuda.bindings.runtime.cudaMemPoolProps(void_ptr _ptr=0)#
    +

    Specifies the properties of allocations made from the pool.

    +
    +
    +allocType#
    +

    Allocation type. Currently must be specified as +cudaMemAllocationTypePinned

    +
    +
    Type:
    +

    cudaMemAllocationType

    +
    +
    +
    + +
    +
    +handleTypes#
    +

    Handle types that will be supported by allocations from the pool.

    +
    +
    Type:
    +

    cudaMemAllocationHandleType

    +
    +
    +
    + +
    +
    +location#
    +

    Location where allocations should reside.

    +
    +
    Type:
    +

    cudaMemLocation

    +
    +
    +
    + +
    +
    +win32SecurityAttributes#
    +

    Windows-specific LPSECURITYATTRIBUTES required when +cudaMemHandleTypeWin32 is specified. This security attribute +defines the scope of which exported allocations may be transferred +to other processes. In all other cases, this field is required to +be zero.

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +maxSize#
    +

    Maximum pool size. When set to 0, defaults to a system dependent +value.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +usage#
    +

    Bitmask indicating intended usage for the pool.

    +
    +
    Type:
    +

    unsigned short

    +
    +
    +
    + +
    +
    +reserved#
    +

    reserved for future use, must be 0

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
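    A sketch creating an explicit pool from the properties above (field
    values follow the attribute docs; cudaMemPoolCreate lives in this same
    module):

        from cuda.bindings import runtime

        props = runtime.cudaMemPoolProps()
        props.allocType = runtime.cudaMemAllocationType.cudaMemAllocationTypePinned
        props.location.type = runtime.cudaMemLocationType.cudaMemLocationTypeDevice
        props.location.id = 0
        err, pool = runtime.cudaMemPoolCreate(props)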
    +
    +class cuda.bindings.runtime.cudaMemPoolPtrExportData(void_ptr _ptr=0)#
    +

    Opaque data for exporting a pool allocation

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemAllocNodeParams(void_ptr _ptr=0)#
    +

    Memory allocation node parameters

    +
    +
    +poolProps#
    +

    in: location where the allocation should reside (specified in +::location). ::handleTypes must be cudaMemHandleTypeNone. IPC is +not supported.

    +
    +
    Type:
    +

    cudaMemPoolProps

    +
    +
    +
    + +
    +
    +accessDescs#
    +

    in: array of memory access descriptors. Used to describe peer GPU +access.

    +
    +
    Type:
    +

    cudaMemAccessDesc

    +
    +
    +
    + +
    +
    +accessDescCount#
    +

    in: number of memory access descriptors. Must not exceed the number +of GPUs.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +bytesize#
    +

    in: size in bytes of the requested allocation

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dptr#
    +

    out: address of the allocation returned by CUDA

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemAllocNodeParamsV2(void_ptr _ptr=0)#
    +

    Memory allocation node parameters

    +
    +
    +poolProps#
    +

    in: location where the allocation should reside (specified in +::location). ::handleTypes must be cudaMemHandleTypeNone. IPC is +not supported.

    +
    +
    Type:
    +

    cudaMemPoolProps

    +
    +
    +
    + +
    +
    +accessDescs#
    +

    in: array of memory access descriptors. Used to describe peer GPU +access.

    +
    +
    Type:
    +

    cudaMemAccessDesc

    +
    +
    +
    + +
    +
    +accessDescCount#
    +

    in: number of memory access descriptors. Must not exceed the number +of GPUs.

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +bytesize#
    +

    in: size in bytes of the requested allocation

    +
    +
    Type:
    +

    size_t

    +
    +
    +
    + +
    +
    +dptr#
    +

    out: address of the allocation returned by CUDA

    +
    +
    Type:
    +

    Any

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +

class cuda.bindings.runtime.cudaMemFreeNodeParams(void_ptr _ptr=0)

    Memory free node parameters

    dptr : Any
        in: the pointer to free

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.CUuuid_st(void_ptr _ptr=0)

    bytes : bytes
        CUDA definition of UUID

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaDeviceProp(void_ptr _ptr=0)

    CUDA device properties

    name : bytes
        ASCII string identifying device

    uuid : cudaUUID_t
        16-byte unique identifier

    luid : bytes
        8-byte locally unique identifier. Value is undefined on TCC and
        non-Windows platforms

    luidDeviceNodeMask : unsigned int
        LUID device node mask. Value is undefined on TCC and non-Windows
        platforms

    totalGlobalMem : size_t
        Global memory available on device in bytes

    sharedMemPerBlock : size_t
        Shared memory available per block in bytes

    regsPerBlock : int
        32-bit registers available per block

    warpSize : int
        Warp size in threads

    memPitch : size_t
        Maximum pitch in bytes allowed by memory copies

    maxThreadsPerBlock : int
        Maximum number of threads per block

    maxThreadsDim : List[int]
        Maximum size of each dimension of a block

    maxGridSize : List[int]
        Maximum size of each dimension of a grid

    clockRate : int
        Deprecated, clock frequency in kilohertz

    totalConstMem : size_t
        Constant memory available on device in bytes

    major : int
        Major compute capability

    minor : int
        Minor compute capability

    textureAlignment : size_t
        Alignment requirement for textures

    texturePitchAlignment : size_t
        Pitch alignment requirement for texture references bound to pitched
        memory

    deviceOverlap : int
        Device can concurrently copy memory and execute a kernel.
        Deprecated; use asyncEngineCount instead.

    multiProcessorCount : int
        Number of multiprocessors on device

    kernelExecTimeoutEnabled : int
        Deprecated, specifies whether there is a run time limit on kernels

    integrated : int
        Device is integrated as opposed to discrete

    canMapHostMemory : int
        Device can map host memory with
        cudaHostAlloc/cudaHostGetDevicePointer

    computeMode : int
        Deprecated, compute mode (see cudaComputeMode)

    maxTexture1D : int
        Maximum 1D texture size

    maxTexture1DMipmap : int
        Maximum 1D mipmapped texture size

    maxTexture1DLinear : int
        Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth()
        or cuDeviceGetTexture1DLinearMaxWidth() instead.

    maxTexture2D : List[int]
        Maximum 2D texture dimensions

    maxTexture2DMipmap : List[int]
        Maximum 2D mipmapped texture dimensions

    maxTexture2DLinear : List[int]
        Maximum dimensions (width, height, pitch) for 2D textures bound to
        pitched memory

    maxTexture2DGather : List[int]
        Maximum 2D texture dimensions if texture gather operations have to
        be performed

    maxTexture3D : List[int]
        Maximum 3D texture dimensions

    maxTexture3DAlt : List[int]
        Maximum alternate 3D texture dimensions

    maxTextureCubemap : int
        Maximum Cubemap texture dimensions

    maxTexture1DLayered : List[int]
        Maximum 1D layered texture dimensions

    maxTexture2DLayered : List[int]
        Maximum 2D layered texture dimensions

    maxTextureCubemapLayered : List[int]
        Maximum Cubemap layered texture dimensions

    maxSurface1D : int
        Maximum 1D surface size

    maxSurface2D : List[int]
        Maximum 2D surface dimensions

    maxSurface3D : List[int]
        Maximum 3D surface dimensions

    maxSurface1DLayered : List[int]
        Maximum 1D layered surface dimensions

    maxSurface2DLayered : List[int]
        Maximum 2D layered surface dimensions

    maxSurfaceCubemap : int
        Maximum Cubemap surface dimensions

    maxSurfaceCubemapLayered : List[int]
        Maximum Cubemap layered surface dimensions

    surfaceAlignment : size_t
        Alignment requirements for surfaces

    concurrentKernels : int
        Device can possibly execute multiple kernels concurrently

    ECCEnabled : int
        Device has ECC support enabled

    pciBusID : int
        PCI bus ID of the device

    pciDeviceID : int
        PCI device ID of the device

    pciDomainID : int
        PCI domain ID of the device

    tccDriver : int
        1 if device is a Tesla device using TCC driver, 0 otherwise

    asyncEngineCount : int
        Number of asynchronous engines

    unifiedAddressing : int
        Device shares a unified address space with the host

    memoryClockRate : int
        Deprecated, peak memory clock frequency in kilohertz

    memoryBusWidth : int
        Global memory bus width in bits

    l2CacheSize : int
        Size of L2 cache in bytes

    persistingL2CacheMaxSize : int
        Device's maximum L2 persisting lines capacity setting in bytes

    maxThreadsPerMultiProcessor : int
        Maximum resident threads per multiprocessor

    streamPrioritiesSupported : int
        Device supports stream priorities

    globalL1CacheSupported : int
        Device supports caching globals in L1

    localL1CacheSupported : int
        Device supports caching locals in L1

    sharedMemPerMultiprocessor : size_t
        Shared memory available per multiprocessor in bytes

    regsPerMultiprocessor : int
        32-bit registers available per multiprocessor

    managedMemory : int
        Device supports allocating managed memory on this system

    isMultiGpuBoard : int
        Device is on a multi-GPU board

    multiGpuBoardGroupID : int
        Unique identifier for a group of devices on the same multi-GPU
        board

    hostNativeAtomicSupported : int
        Link between the device and the host supports native atomic
        operations

    singleToDoublePrecisionPerfRatio : int
        Deprecated, ratio of single precision performance (in
        floating-point operations per second) to double precision
        performance

    pageableMemoryAccess : int
        Device supports coherently accessing pageable memory without
        calling cudaHostRegister on it

    concurrentManagedAccess : int
        Device can coherently access managed memory concurrently with the
        CPU

    computePreemptionSupported : int
        Device supports Compute Preemption

    canUseHostPointerForRegisteredMem : int
        Device can access host registered memory at the same virtual
        address as the CPU

    cooperativeLaunch : int
        Device supports launching cooperative kernels via
        cudaLaunchCooperativeKernel

    cooperativeMultiDeviceLaunch : int
        Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated.

    sharedMemPerBlockOptin : size_t
        Per-device maximum shared memory per block usable by special opt-in

    pageableMemoryAccessUsesHostPageTables : int
        Device accesses pageable memory via the host's page tables

    directManagedMemAccessFromHost : int
        Host can directly access managed memory on the device without
        migration.

    maxBlocksPerMultiProcessor : int
        Maximum number of resident blocks per multiprocessor

    accessPolicyMaxWindowSize : int
        The maximum value of cudaAccessPolicyWindow::num_bytes.

    reservedSharedMemPerBlock : size_t
        Shared memory reserved by CUDA driver per block in bytes

    hostRegisterSupported : int
        Device supports host memory registration via cudaHostRegister.

    sparseCudaArraySupported : int
        1 if the device supports sparse CUDA arrays and sparse CUDA
        mipmapped arrays, 0 otherwise

    hostRegisterReadOnlySupported : int
        Device supports using the cudaHostRegister flag
        cudaHostRegisterReadOnly to register memory that must be mapped as
        read-only to the GPU

    timelineSemaphoreInteropSupported : int
        External timeline semaphore interop is supported on the device

    memoryPoolsSupported : int
        1 if the device supports using the cudaMallocAsync and cudaMemPool
        family of APIs, 0 otherwise

    gpuDirectRDMASupported : int
        1 if the device supports GPUDirect RDMA APIs, 0 otherwise

    gpuDirectRDMAFlushWritesOptions : unsigned int
        Bitmask to be interpreted according to the
        cudaFlushGPUDirectRDMAWritesOptions enum

    gpuDirectRDMAWritesOrdering : int
        See the cudaGPUDirectRDMAWritesOrdering enum for numerical values

    memoryPoolSupportedHandleTypes : unsigned int
        Bitmask of handle types supported with mempool-based IPC

    deferredMappingCudaArraySupported : int
        1 if the device supports deferred mapping CUDA arrays and CUDA
        mipmapped arrays

    ipcEventSupported : int
        Device supports IPC Events.

    clusterLaunch : int
        Indicates device supports cluster launch

    unifiedFunctionPointers : int
        Indicates device supports unified pointers

    reserved2 : List[int]

    reserved1 : List[int]
        Reserved for future use

    reserved : List[int]
        Reserved for future use

    getPtr()
        Get memory address of class instance
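
Since this struct is read-only from Python, the usual entry point is
cudaGetDeviceProperties. A small sketch (device ordinal 0 is an assumption):

    from cuda.bindings import runtime

    err, prop = runtime.cudaGetDeviceProperties(0)
    assert err == runtime.cudaError_t.cudaSuccess
    # name is raw bytes; strip the NUL padding before decoding
    print(prop.name.split(b"\x00", 1)[0].decode())
    print("compute capability:", prop.major, prop.minor)
    print("SMs:", prop.multiProcessorCount, "warp size:", prop.warpSize)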

class cuda.bindings.runtime.cudaIpcEventHandle_st(void_ptr _ptr=0)

    CUDA IPC event handle

    reserved : bytes

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaIpcMemHandle_st(void_ptr _ptr=0)

    CUDA IPC memory handle

    reserved : bytes

    getPtr()
        Get memory address of class instance
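
These opaque handles are produced in one process and consumed in another. A
hedged sketch of the memory-handle round trip (transport of the handle bytes
between the two processes is left to the application):

    from cuda.bindings import runtime

    # exporting process: allocate and publish
    err, dptr = runtime.cudaMalloc(1 << 20)
    err, handle = runtime.cudaIpcGetMemHandle(dptr)
    # ship handle.reserved (the opaque bytes) to the peer process...

    # importing process: map the same allocation into this context
    err, peer_ptr = runtime.cudaIpcOpenMemHandle(
        handle, runtime.cudaIpcMemLazyEnablePeerAccess)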

class cuda.bindings.runtime.cudaMemFabricHandle_st(void_ptr _ptr=0)

    reserved : bytes

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalMemoryHandleDesc(void_ptr _ptr=0)

    External memory handle descriptor

    type : cudaExternalMemoryHandleType
        Type of the handle

    handle : anon_union1

    size : unsigned long long
        Size of the memory allocation

    flags : unsigned int
        Flags must either be zero or cudaExternalMemoryDedicated

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalMemoryBufferDesc(void_ptr _ptr=0)

    External memory buffer descriptor

    offset : unsigned long long
        Offset into the memory object where the buffer's base is

    size : unsigned long long
        Size of the buffer

    flags : unsigned int
        Flags reserved for future use. Must be zero.

    getPtr()
        Get memory address of class instance
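
A sketch of how the two descriptors combine when importing externally
allocated memory. `fd` and `nbytes` are hypothetical placeholders for a POSIX
file descriptor exported by another API (e.g. Vulkan) and its size; the union
member name `handle.fd` is an assumption following the struct layout above:

    from cuda.bindings import runtime

    desc = runtime.cudaExternalMemoryHandleDesc()
    desc.type = runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd
    desc.handle.fd = fd          # hypothetical file descriptor
    desc.size = nbytes           # hypothetical allocation size
    err, ext_mem = runtime.cudaImportExternalMemory(desc)

    buf = runtime.cudaExternalMemoryBufferDesc()
    buf.offset = 0
    buf.size = nbytes
    buf.flags = 0                # reserved, must be zero
    err, dptr = runtime.cudaExternalMemoryGetMappedBuffer(ext_mem, buf)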

class cuda.bindings.runtime.cudaExternalMemoryMipmappedArrayDesc(void_ptr _ptr=0)

    External memory mipmap descriptor

    offset : unsigned long long
        Offset into the memory object where the base level of the mipmap
        chain is.

    formatDesc : cudaChannelFormatDesc
        Format of base level of the mipmap chain

    extent : cudaExtent
        Dimensions of base level of the mipmap chain

    flags : unsigned int
        Flags associated with CUDA mipmapped arrays. See
        cudaMallocMipmappedArray

    numLevels : unsigned int
        Total number of levels in the mipmap chain

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalSemaphoreHandleDesc(void_ptr _ptr=0)

    External semaphore handle descriptor

    type : cudaExternalSemaphoreHandleType
        Type of the handle

    handle : anon_union2

    flags : unsigned int
        Flags reserved for the future. Must be zero.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalSemaphoreSignalParams(void_ptr _ptr=0)

    External semaphore signal parameters, compatible with driver type

    params : anon_struct15

    flags : unsigned int
        Only when cudaExternalSemaphoreSignalParams is used to signal a
        cudaExternalSemaphore_t of type
        cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is
        cudaExternalSemaphoreSignalSkipNvSciBufMemSync, which indicates
        that while signaling the cudaExternalSemaphore_t, no memory
        synchronization operations should be performed for any external
        memory object imported as cudaExternalMemoryHandleTypeNvSciBuf.
        For all other types of cudaExternalSemaphore_t, flags must be zero.

    reserved : List[unsigned int]

    getPtr()
        Get memory address of class instance
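
A hedged sketch of signaling through these parameters with
cudaSignalExternalSemaphoresAsync. `ext_sem` and `stream` are assumed to be
an imported cudaExternalSemaphore_t and an existing stream; the fence payload
value of 1 and the `params.fence.value` member access are illustrative
assumptions:

    from cuda.bindings import runtime

    sig = runtime.cudaExternalSemaphoreSignalParams()
    sig.params.fence.value = 1   # fence value to signal (assumption)
    err, = runtime.cudaSignalExternalSemaphoresAsync([ext_sem], [sig], 1, stream)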

class cuda.bindings.runtime.cudaExternalSemaphoreWaitParams(void_ptr _ptr=0)

    External semaphore wait parameters, compatible with driver type

    params : anon_struct18

    flags : unsigned int
        Only when cudaExternalSemaphoreWaitParams is used to wait on a
        cudaExternalSemaphore_t of type
        cudaExternalSemaphoreHandleTypeNvSciSync, the valid flag is
        cudaExternalSemaphoreWaitSkipNvSciBufMemSync, which indicates
        that while waiting for the cudaExternalSemaphore_t, no memory
        synchronization operations should be performed for any external
        memory object imported as cudaExternalMemoryHandleTypeNvSciBuf.
        For all other types of cudaExternalSemaphore_t, flags must be zero.

    reserved : List[unsigned int]

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaKernelNodeParams(void_ptr _ptr=0)

    CUDA GPU kernel node parameters

    func : Any
        Kernel to launch

    gridDim : dim3
        Grid dimensions

    blockDim : dim3
        Block dimensions

    sharedMemBytes : unsigned int
        Dynamic shared-memory size per thread block in bytes

    kernelParams : Any
        Array of pointers to individual kernel arguments

    extra : Any
        Pointer to kernel arguments in the "extra" format

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaKernelNodeParamsV2(void_ptr _ptr=0)

    CUDA GPU kernel node parameters

    func : Any
        Kernel to launch

    gridDim : dim3
        Grid dimensions

    blockDim : dim3
        Block dimensions

    sharedMemBytes : unsigned int
        Dynamic shared-memory size per thread block in bytes

    kernelParams : Any
        Array of pointers to individual kernel arguments

    extra : Any
        Pointer to kernel arguments in the "extra" format

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParams(void_ptr _ptr=0)

    External semaphore signal node parameters

    extSemArray : cudaExternalSemaphore_t
        Array of external semaphore handles.

    paramsArray : cudaExternalSemaphoreSignalParams
        Array of external semaphore signal parameters.

    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and
        paramsArray.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParamsV2(void_ptr _ptr=0)

    External semaphore signal node parameters

    extSemArray : cudaExternalSemaphore_t
        Array of external semaphore handles.

    paramsArray : cudaExternalSemaphoreSignalParams
        Array of external semaphore signal parameters.

    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and
        paramsArray.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParams(void_ptr _ptr=0)

    External semaphore wait node parameters

    extSemArray : cudaExternalSemaphore_t
        Array of external semaphore handles.

    paramsArray : cudaExternalSemaphoreWaitParams
        Array of external semaphore wait parameters.

    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and
        paramsArray.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParamsV2(void_ptr _ptr=0)

    External semaphore wait node parameters

    extSemArray : cudaExternalSemaphore_t
        Array of external semaphore handles.

    paramsArray : cudaExternalSemaphoreWaitParams
        Array of external semaphore wait parameters.

    numExtSems : unsigned int
        Number of handles and parameters supplied in extSemArray and
        paramsArray.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaConditionalNodeParams(void_ptr _ptr=0)

    CUDA conditional node parameters

    handle : cudaGraphConditionalHandle
        Conditional node handle. Handles must be created in advance of
        creating the node using cudaGraphConditionalHandleCreate.

    type : cudaGraphConditionalNodeType
        Type of conditional node.

    size : unsigned int
        Size of graph output array. Must be 1.

    phGraph_out : cudaGraph_t
        CUDA-owned array populated with conditional node child graphs
        during creation of the node. Valid for the lifetime of the
        conditional node. The contents of the graph(s) are subject to the
        following constraints:

        - Allowed node types are kernel nodes, empty nodes, child graphs,
          memsets, memcopies, and conditionals. This applies recursively
          to child graphs and conditional bodies.
        - All kernels, including kernels in nested conditionals or child
          graphs at any level, must belong to the same CUDA context.

        These graphs may be populated using graph node creation APIs or
        cudaStreamBeginCaptureToGraph.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaChildGraphNodeParams(void_ptr _ptr=0)

    Child graph node parameters

    graph : cudaGraph_t
        The child graph to clone into the node for node creation, or a
        handle to the graph owned by the node for node query

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaEventRecordNodeParams(void_ptr _ptr=0)

    Event record node parameters

    event : cudaEvent_t
        The event to record when the node executes

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaEventWaitNodeParams(void_ptr _ptr=0)

    Event wait node parameters

    event : cudaEvent_t
        The event to wait on from the node

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaGraphNodeParams(void_ptr _ptr=0)

    Graph node parameters. See cudaGraphAddNode.

    type : cudaGraphNodeType
        Type of the node

    reserved0 : List[int]
        Reserved. Must be zero.

    reserved1 : List[long long]
        Padding. Unused bytes must be zero.

    kernel : cudaKernelNodeParamsV2
        Kernel node parameters.

    memcpy : cudaMemcpyNodeParams
        Memcpy node parameters.

    memset : cudaMemsetParamsV2
        Memset node parameters.

    host : cudaHostNodeParamsV2
        Host node parameters.

    graph : cudaChildGraphNodeParams
        Child graph node parameters.

    eventWait : cudaEventWaitNodeParams
        Event wait node parameters.

    eventRecord : cudaEventRecordNodeParams
        Event record node parameters.

    extSemSignal : cudaExternalSemaphoreSignalNodeParamsV2
        External semaphore signal node parameters.

    extSemWait : cudaExternalSemaphoreWaitNodeParamsV2
        External semaphore wait node parameters.

    alloc : cudaMemAllocNodeParamsV2
        Memory allocation node parameters.

    free : cudaMemFreeNodeParams
        Memory free node parameters.

    conditional : cudaConditionalNodeParams
        Conditional node parameters.

    reserved2 : long long
        Reserved bytes. Must be zero.

    getPtr()
        Get memory address of class instance
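
Since this struct is a tagged union, only the member matching `type` is
read. A hedged sketch of building an event-record node with cudaGraphAddNode
(passing None for an empty dependency list is an assumption about the
binding):

    from cuda.bindings import runtime

    err, graph = runtime.cudaGraphCreate(0)
    err, event = runtime.cudaEventCreate()

    params = runtime.cudaGraphNodeParams()
    params.type = runtime.cudaGraphNodeType.cudaGraphNodeTypeEventRecord
    params.eventRecord.event = event
    err, node = runtime.cudaGraphAddNode(graph, None, 0, params)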

class cuda.bindings.runtime.cudaGraphEdgeData_st(void_ptr _ptr=0)

    Optional annotation for edges in a CUDA graph. Note, all edges
    implicitly have annotations and default to a zero-initialized value if
    not specified. A zero-initialized struct indicates a standard full
    serialization of two nodes with memory visibility.

    from_port : bytes
        This indicates when the dependency is triggered from the upstream
        node on the edge. The meaning is specific to the node type. A value
        of 0 in all cases means full completion of the upstream node, with
        memory visibility to the downstream node or portion thereof
        (indicated by to_port). Only kernel nodes define non-zero ports. A
        kernel node can use the following output port types:
        cudaGraphKernelNodePortDefault,
        cudaGraphKernelNodePortProgrammatic, or
        cudaGraphKernelNodePortLaunchCompletion.

    to_port : bytes
        This indicates what portion of the downstream node is dependent on
        the upstream node or portion thereof (indicated by from_port). The
        meaning is specific to the node type. A value of 0 in all cases
        means the entirety of the downstream node is dependent on the
        upstream work. Currently no node types define non-zero ports.
        Accordingly, this field must be set to zero.

    type : bytes
        This should be populated with a value from
        cudaGraphDependencyType. (It is typed as char due to
        compiler-specific layout of bitfields.)

    reserved : bytes
        These bytes are unused and must be zeroed. This ensures
        compatibility if additional fields are added in the future.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaGraphInstantiateParams_st(void_ptr _ptr=0)

    Graph instantiation parameters

    flags : unsigned long long
        Instantiation flags

    uploadStream : cudaStream_t
        Upload stream

    errNode_out : cudaGraphNode_t
        The node which caused instantiation to fail, if any

    result_out : cudaGraphInstantiateResult
        Whether instantiation was successful. If it failed, the reason why

    getPtr()
        Get memory address of class instance
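
A hedged sketch of driving instantiation through this struct with
cudaGraphInstantiateWithParams. `graph` and `stream` are assumed to exist
already; requesting an upload during instantiation is optional:

    from cuda.bindings import runtime

    iparams = runtime.cudaGraphInstantiateParams_st()
    iparams.flags = runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload
    iparams.uploadStream = stream
    err, graph_exec = runtime.cudaGraphInstantiateWithParams(graph, iparams)
    if iparams.result_out != runtime.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess:
        print("instantiation failed at node:", iparams.errNode_out)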

class cuda.bindings.runtime.cudaGraphExecUpdateResultInfo_st(void_ptr _ptr=0)

    Result information returned by cudaGraphExecUpdate

    result : cudaGraphExecUpdateResult
        Gives more specific detail when a cuda graph update fails.

    errorNode : cudaGraphNode_t
        The "to node" of the error edge when the topologies do not match.
        The error node when the error is associated with a specific node.
        NULL when the error is generic.

    errorFromNode : cudaGraphNode_t
        The "from node" of the error edge when the topologies do not match.
        Otherwise NULL.

    getPtr()
        Get memory address of class instance
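
This struct is returned when attempting an in-place update of an
instantiated graph. A short sketch, assuming `graph_exec` was instantiated
from an earlier revision of `graph` that has since been mutated in a
topology-preserving way:

    from cuda.bindings import runtime

    err, info = runtime.cudaGraphExecUpdate(graph_exec, graph)
    if err != runtime.cudaError_t.cudaSuccess:
        print("update failed:", info.result)
        print("offending node:", info.errorNode)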

class cuda.bindings.runtime.cudaGraphKernelNodeUpdate(void_ptr _ptr=0)

    Struct to specify a single node update to pass as part of a larger
    array to cudaGraphKernelNodeUpdatesApply

    node : cudaGraphDeviceNode_t
        Node to update

    field : cudaGraphKernelNodeField
        Which type of update to apply. Determines how updateData is
        interpreted

    updateData : anon_union8
        Update data to apply. Which field is used depends on field's value

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaLaunchMemSyncDomainMap_st(void_ptr _ptr=0)

    Memory Synchronization Domain map. See cudaLaunchMemSyncDomain. By
    default, kernels are launched in domain 0. Kernels launched with
    cudaLaunchMemSyncDomainRemote will have a different domain ID. The user
    may also alter the domain ID with cudaLaunchMemSyncDomainMap for a
    specific stream / graph node / kernel launch. See
    cudaLaunchAttributeMemSyncDomainMap. The domain ID range is available
    through cudaDevAttrMemSyncDomainCount.

    default_ : bytes
        The default domain ID to use for designated kernels

    remote : bytes
        The remote domain ID to use for designated kernels

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaLaunchAttributeValue(void_ptr _ptr=0)

    Launch attributes union; used as value field of cudaLaunchAttribute

    pad : bytes

    accessPolicyWindow : cudaAccessPolicyWindow
        Value of launch attribute cudaLaunchAttributeAccessPolicyWindow.

    cooperative : int
        Value of launch attribute cudaLaunchAttributeCooperative. Nonzero
        indicates a cooperative kernel (see cudaLaunchCooperativeKernel).

    syncPolicy : cudaSynchronizationPolicy
        Value of launch attribute
        cudaLaunchAttributeSynchronizationPolicy.
        cudaSynchronizationPolicy for work queued up in this stream.

    clusterDim : anon_struct20
        Value of launch attribute cudaLaunchAttributeClusterDimension that
        represents the desired cluster dimensions for the kernel. Opaque
        type with the following fields:

        - x: The X dimension of the cluster, in blocks. Must be a divisor
          of the grid X dimension.
        - y: The Y dimension of the cluster, in blocks. Must be a divisor
          of the grid Y dimension.
        - z: The Z dimension of the cluster, in blocks. Must be a divisor
          of the grid Z dimension.

    clusterSchedulingPolicyPreference : cudaClusterSchedulingPolicy
        Value of launch attribute
        cudaLaunchAttributeClusterSchedulingPolicyPreference. Cluster
        scheduling policy preference for the kernel.

    programmaticStreamSerializationAllowed : int
        Value of launch attribute
        cudaLaunchAttributeProgrammaticStreamSerialization.

    programmaticEvent : anon_struct21
        Value of launch attribute cudaLaunchAttributeProgrammaticEvent
        with the following fields:

        - cudaEvent_t event: Event to fire when all blocks trigger it.
        - int flags: Event record flags, see cudaEventRecordWithFlags.
          Does not accept cudaEventRecordExternal.
        - int triggerAtBlockStart: If this is set to non-0, each block
          launch will automatically trigger the event.

    priority : int
        Value of launch attribute cudaLaunchAttributePriority. Execution
        priority of the kernel.

    memSyncDomainMap : cudaLaunchMemSyncDomainMap
        Value of launch attribute cudaLaunchAttributeMemSyncDomainMap. See
        cudaLaunchMemSyncDomainMap.

    memSyncDomain : cudaLaunchMemSyncDomain
        Value of launch attribute cudaLaunchAttributeMemSyncDomain. See
        cudaLaunchMemSyncDomain.

    launchCompletionEvent : anon_struct22
        Value of launch attribute
        cudaLaunchAttributeLaunchCompletionEvent with the following
        fields:

        - cudaEvent_t event: Event to fire when the last block launches.
        - int flags: Event record flags, see cudaEventRecordWithFlags.
          Does not accept cudaEventRecordExternal.

    deviceUpdatableKernelNode : anon_struct23
        Value of launch attribute
        cudaLaunchAttributeDeviceUpdatableKernelNode with the following
        fields:

        - int deviceUpdatable: Whether or not the resulting kernel node
          should be device-updatable.
        - cudaGraphDeviceNode_t devNode: Returns a handle to pass to the
          various device-side update functions.

    sharedMemCarveout : unsigned int
        Value of launch attribute
        cudaLaunchAttributePreferredSharedMemoryCarveout.

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaLaunchAttribute_st(void_ptr _ptr=0)

    Launch attribute

    id : cudaLaunchAttributeID
        Attribute to set

    val : cudaLaunchAttributeValue
        Value of the attribute

    getPtr()
        Get memory address of class instance
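
A minimal sketch of populating one attribute entry; the priority value 1 is
an illustrative assumption, and the attribute would normally be passed on in
a launch-attribute list (e.g. through stream or kernel-node attribute APIs):

    from cuda.bindings import runtime

    attr = runtime.cudaLaunchAttribute_st()
    attr.id = runtime.cudaLaunchAttributeID.cudaLaunchAttributePriority
    attr.val.priority = 1   # relative execution priority (assumption)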

class cuda.bindings.runtime.cudaAsyncNotificationInfo(void_ptr _ptr=0)

    Information describing an async notification event

    type : cudaAsyncNotificationType

    info : anon_union9

    getPtr()
        Get memory address of class instance

class cuda.bindings.runtime.cudaTextureDesc(void_ptr _ptr=0)

    CUDA texture descriptor

    addressMode : List[cudaTextureAddressMode]
        Texture address mode for up to 3 dimensions

    filterMode : cudaTextureFilterMode
        Texture filter mode

    readMode : cudaTextureReadMode
        Texture read mode

    sRGB : int
        Perform sRGB->linear conversion during texture read

    borderColor : List[float]
        Texture Border Color

    normalizedCoords : int
        Indicates whether texture reads are normalized or not

    maxAnisotropy : unsigned int
        Limit to the anisotropy ratio

    mipmapFilterMode : cudaTextureFilterMode
        Mipmap filter mode

    mipmapLevelBias : float
        Offset applied to the supplied mipmap level

    minMipmapLevelClamp : float
        Lower end of the mipmap level range to clamp access to

    maxMipmapLevelClamp : float
        Upper end of the mipmap level range to clamp access to

    disableTrilinearOptimization : int
        Disable any trilinear filtering optimizations.

    seamlessCubemap : int
        Enable seamless cube map filtering.

    getPtr()
        Get memory address of class instance
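
A hedged sketch of pairing this descriptor with a resource descriptor in
cudaCreateTextureObject. `cu_array` is a hypothetical existing cudaArray_t
holding the image data, and whole-list assignment to addressMode is an
assumption about the binding:

    from cuda.bindings import runtime

    res = runtime.cudaResourceDesc()
    res.resType = runtime.cudaResourceType.cudaResourceTypeArray
    res.res.array.array = cu_array   # hypothetical cudaArray_t

    tex = runtime.cudaTextureDesc()
    tex.addressMode = [runtime.cudaTextureAddressMode.cudaAddressModeClamp] * 3
    tex.filterMode = runtime.cudaTextureFilterMode.cudaFilterModeLinear
    tex.readMode = runtime.cudaTextureReadMode.cudaReadModeElementType
    tex.normalizedCoords = 1

    err, tex_obj = runtime.cudaCreateTextureObject(res, tex, None)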

class cuda.bindings.runtime.cudaEglFrameType(value)

    CUDA EglFrame type - array or pointer

    cudaEglFrameTypeArray = 0
        Frame type CUDA array

    cudaEglFrameTypePitch = 1
        Frame type CUDA pointer

class cuda.bindings.runtime.cudaEglResourceLocationFlags(value)

    Resource location flags - sysmem or vidmem. For a CUDA context on an
    iGPU, since video and system memory are equivalent, these flags will
    not have an effect on the execution. For a CUDA context on a dGPU,
    applications can use the flag cudaEglResourceLocationFlags to give a
    hint about the desired location: cudaEglResourceLocationSysmem makes
    the frame data resident on system memory to be accessed by CUDA;
    cudaEglResourceLocationVidmem makes the frame data resident on the
    dedicated video memory to be accessed by CUDA. There may be additional
    latency due to new allocation and data migration if the frame is
    produced on a different memory.

    cudaEglResourceLocationSysmem = 0
        Resource location sysmem

    cudaEglResourceLocationVidmem = 1
        Resource location vidmem
    +
    +class cuda.bindings.runtime.cudaEglColorFormat(value)#
    +

    CUDA EGL Color Format - The different planar and multiplanar +formats currently supported for CUDA_EGL interops.

    +
    +
    +cudaEglColorFormatYUV420Planar = 0#
    +

    Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV420SemiPlanar = 1#
    +

    Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar.

    +
    + +
    +
    +cudaEglColorFormatYUV422Planar = 2#
    +

    Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV422SemiPlanar = 3#
    +

    Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar.

    +
    + +
    +
    +cudaEglColorFormatARGB = 6#
    +

    R/G/B/A four channels in one surface with BGRA byte ordering.

    +
    + +
    +
    +cudaEglColorFormatRGBA = 7#
    +

    R/G/B/A four channels in one surface with ABGR byte ordering.

    +
    + +
    +
    +cudaEglColorFormatL = 8#
    +

    single luminance channel in one surface.

    +
    + +
    +
    +cudaEglColorFormatR = 9#
    +

    single color channel in one surface.

    +
    + +
    +
    +cudaEglColorFormatYUV444Planar = 10#
    +

    Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV444SemiPlanar = 11#
    +

    Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar.

    +
    + +
    +
    +cudaEglColorFormatYUYV422 = 12#
    +

    Y, U, V in one surface, interleaved as UYVY in one channel.

    +
    + +
    +
    +cudaEglColorFormatUYVY422 = 13#
    +

    Y, U, V in one surface, interleaved as YUYV in one channel.

    +
    + +
    +
    +cudaEglColorFormatABGR = 14#
    +

    R/G/B/A four channels in one surface with RGBA byte ordering.

    +
    + +
    +
    +cudaEglColorFormatBGRA = 15#
    +

    R/G/B/A four channels in one surface with ARGB byte ordering.

    +
    + +
    +
    +cudaEglColorFormatA = 16#
    +

    Alpha color format - one channel in one surface.

    +
    + +
    +
    +cudaEglColorFormatRG = 17#
    +

    R/G color format - two channels in one surface with GR byte ordering

    +
    + +
    +
    +cudaEglColorFormatAYUV = 18#
    +

    Y, U, V, A four channels in one surface, interleaved as VUYA.

    +
    + +
    +
    +cudaEglColorFormatYVU444SemiPlanar = 19#
    +

    Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU422SemiPlanar = 20#
    +

    Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU420SemiPlanar = 21#
    +

    Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +cudaEglColorFormatY10V10U10_444SemiPlanar = 22#
    +

    Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatY10V10U10_420SemiPlanar = 23#
    +

    Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +cudaEglColorFormatY12V12U12_444SemiPlanar = 24#
    +

    Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatY12V12U12_420SemiPlanar = 25#
    +

    Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +cudaEglColorFormatVYUY_ER = 26#
    +

    Extended Range Y, U, V in one surface, interleaved as YVYU in one channel.

    +
    + +
    +
    +cudaEglColorFormatUYVY_ER = 27#
    +

    Extended Range Y, U, V in one surface, interleaved as YUYV in one channel.

    +
    + +
    +
    +cudaEglColorFormatYUYV_ER = 28#
    +

    Extended Range Y, U, V in one surface, interleaved as UYVY in one channel.

    +
    + +
    +
    +cudaEglColorFormatYVYU_ER = 29#
    +

    Extended Range Y, U, V in one surface, interleaved as VYUY in one channel.

    +
    + +
    +
    +cudaEglColorFormatYUVA_ER = 31#
    +

    Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY.

    +
    + +
    +
    +cudaEglColorFormatAYUV_ER = 32#
    +

    Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA.

    +
    + +
    +
    +cudaEglColorFormatYUV444Planar_ER = 33#
    +

    Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV422Planar_ER = 34#
    +

    Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV420Planar_ER = 35#
    +

    Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV444SemiPlanar_ER = 36#
    +

    Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV422SemiPlanar_ER = 37#
    +

    Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYUV420SemiPlanar_ER = 38#
    +

    Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU444Planar_ER = 39#
    +

    Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU422Planar_ER = 40#
    +

    Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU420Planar_ER = 41#
    +

    Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU444SemiPlanar_ER = 42#
    +

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU422SemiPlanar_ER = 43#
    +

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height.

    +
    + +
    +
    +cudaEglColorFormatYVU420SemiPlanar_ER = 44#
    +

    Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatBayerRGGB = 45
    Bayer format - one channel in one surface with interleaved RGGB ordering.

cudaEglColorFormatBayerBGGR = 46
    Bayer format - one channel in one surface with interleaved BGGR ordering.

cudaEglColorFormatBayerGRBG = 47
    Bayer format - one channel in one surface with interleaved GRBG ordering.

cudaEglColorFormatBayerGBRG = 48
    Bayer format - one channel in one surface with interleaved GBRG ordering.

cudaEglColorFormatBayer10RGGB = 49
    Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits are used and 6 bits are no-op.

cudaEglColorFormatBayer10BGGR = 50
    Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits are used and 6 bits are no-op.

cudaEglColorFormatBayer10GRBG = 51
    Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits are used and 6 bits are no-op.

cudaEglColorFormatBayer10GBRG = 52
    Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits are used and 6 bits are no-op.

cudaEglColorFormatBayer12RGGB = 53
    Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer12BGGR = 54
    Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer12GRBG = 55
    Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer12GBRG = 56
    Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer14RGGB = 57
    Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits are used and 2 bits are no-op.

cudaEglColorFormatBayer14BGGR = 58
    Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits are used and 2 bits are no-op.

cudaEglColorFormatBayer14GRBG = 59
    Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits are used and 2 bits are no-op.

cudaEglColorFormatBayer14GBRG = 60
    Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits are used and 2 bits are no-op.

cudaEglColorFormatBayer20RGGB = 61
    Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits are used and 12 bits are no-op.

cudaEglColorFormatBayer20BGGR = 62
    Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits are used and 12 bits are no-op.

cudaEglColorFormatBayer20GRBG = 63
    Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits are used and 12 bits are no-op.

cudaEglColorFormatBayer20GBRG = 64
    Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits are used and 12 bits are no-op.

cudaEglColorFormatYVU444Planar = 65
    Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.

cudaEglColorFormatYVU422Planar = 66
    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.

cudaEglColorFormatYVU420Planar = 67
    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatBayerIspRGGB = 68
    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype.

cudaEglColorFormatBayerIspBGGR = 69
    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype.

cudaEglColorFormatBayerIspGRBG = 70
    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype.

cudaEglColorFormatBayerIspGBRG = 71
    Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype.

cudaEglColorFormatBayerBCCR = 72
    Bayer format - one channel in one surface with interleaved BCCR ordering.

cudaEglColorFormatBayerRCCB = 73
    Bayer format - one channel in one surface with interleaved RCCB ordering.

cudaEglColorFormatBayerCRBC = 74
    Bayer format - one channel in one surface with interleaved CRBC ordering.

cudaEglColorFormatBayerCBRC = 75
    Bayer format - one channel in one surface with interleaved CBRC ordering.

cudaEglColorFormatBayer10CCCC = 76
    Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits are used and 6 bits are no-op.

cudaEglColorFormatBayer12BCCR = 77
    Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer12RCCB = 78
    Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer12CRBC = 79
    Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer12CBRC = 80
    Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatBayer12CCCC = 81
    Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits are used and 4 bits are no-op.

cudaEglColorFormatY = 82
    Color format for single Y plane.

cudaEglColorFormatYUV420SemiPlanar_2020 = 83
    Y, UV in two surfaces (UV as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatYVU420SemiPlanar_2020 = 84
    Y, VU in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatYUV420Planar_2020 = 85
    Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatYVU420Planar_2020 = 86
    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatYUV420SemiPlanar_709 = 87
    Y, UV in two surfaces (UV as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatYVU420SemiPlanar_709 = 88
    Y, VU in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatYUV420Planar_709 = 89
    Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatYVU420Planar_709 = 90
    Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91
    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92
    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93
    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height.

cudaEglColorFormatY10V10U10_422SemiPlanar = 94
    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height.

cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95
    Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = Y height.

cudaEglColorFormatY_ER = 96
    Extended Range Color format for single Y plane.

cudaEglColorFormatY_709_ER = 97
    Extended Range Color format for single Y plane.

cudaEglColorFormatY10_ER = 98
    Extended Range Color format for single Y10 plane.

cudaEglColorFormatY10_709_ER = 99
    Extended Range Color format for single Y10 plane.

cudaEglColorFormatY12_ER = 100
    Extended Range Color format for single Y12 plane.

cudaEglColorFormatY12_709_ER = 101
    Extended Range Color format for single Y12 plane.

cudaEglColorFormatYUVA = 102
    Y, U, V, A four channels in one surface, interleaved as AVUY.

cudaEglColorFormatYVYU = 104
    Y, U, V in one surface, interleaved as YVYU in one channel.

cudaEglColorFormatVYUY = 105
    Y, U, V in one surface, interleaved as VYUY in one channel.

cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106
    Extended Range Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107
    Extended Range Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108
    Extended Range Y10, V10U10 in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height.

cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109
    Extended Range Y10, V10U10 in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height.

cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110
    Extended Range Y12, V12U12 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111
    Extended Range Y12, V12U12 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height.

cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112
    Extended Range Y12, V12U12 in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height.

cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113
    Extended Range Y12, V12U12 in two surfaces (VU as one surface), U/V width = Y width, U/V height = Y height.

class cuda.bindings.runtime.cudaError_t(value)

    CUDA error types

cudaSuccess = 0
    The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see cudaEventQuery() and cudaStreamQuery()).
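
Every call in cuda.bindings.runtime returns its cudaError_t status as the first element of a result tuple rather than raising. A minimal error-checking sketch (the check_error helper is illustrative, not part of the bindings):

    from cuda.bindings import runtime

    def check_error(err):
        # Anything other than cudaSuccess is raised; cudaGetErrorString
        # follows the same tuple-return convention as every other call.
        if err != runtime.cudaError_t.cudaSuccess:
            _, msg = runtime.cudaGetErrorString(err)
            raise RuntimeError(f"CUDA error {err}: {msg.decode()}")

    err, count = runtime.cudaGetDeviceCount()
    check_error(err)
    print(f"{count} CUDA device(s) visible")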

cudaErrorInvalidValue = 1
    This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values.

cudaErrorMemoryAllocation = 2
    The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation.

cudaErrorInitializationError = 3
    The API call failed because the CUDA driver and runtime could not be initialized.

cudaErrorCudartUnloading = 4
    This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shutdown, at a point in time after the CUDA driver has been unloaded.

cudaErrorProfilerDisabled = 5
    This indicates that the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like the Visual Profiler.

cudaErrorProfilerNotInitialized = 6
    [Deprecated]

cudaErrorProfilerAlreadyStarted = 7
    [Deprecated]

cudaErrorProfilerAlreadyStopped = 8
    [Deprecated]

cudaErrorInvalidConfiguration = 9
    This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See cudaDeviceProp for more device limitations.

cudaErrorInvalidPitchValue = 12
    This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch.

cudaErrorInvalidSymbol = 13
    This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier.

cudaErrorInvalidHostPointer = 16
    This indicates that at least one host pointer passed to the API call is not a valid host pointer. [Deprecated]

cudaErrorInvalidDevicePointer = 17
    This indicates that at least one device pointer passed to the API call is not a valid device pointer. [Deprecated]

cudaErrorInvalidTexture = 18
    This indicates that the texture passed to the API call is not a valid texture.

cudaErrorInvalidTextureBinding = 19
    This indicates that the texture binding is not valid. This occurs if you call cudaGetTextureAlignmentOffset() with an unbound texture.

cudaErrorInvalidChannelDescriptor = 20
    This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by cudaChannelFormatKind, or if one of the dimensions is invalid.

cudaErrorInvalidMemcpyDirection = 21
    This indicates that the direction of the memcpy passed to the API call is not one of the types specified by cudaMemcpyKind.

cudaErrorAddressOfConstant = 22
    This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. [Deprecated]

cudaErrorTextureFetchFailed = 23
    This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. [Deprecated]

cudaErrorTextureNotBound = 24
    This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. [Deprecated]

cudaErrorSynchronizationError = 25
    This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. [Deprecated]

cudaErrorInvalidFilterSetting = 26
    This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA.

cudaErrorInvalidNormSetting = 27
    This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA.

cudaErrorMixedDeviceExecution = 28
    Mixing of device and device emulation code was not allowed. [Deprecated]

cudaErrorNotYetImplemented = 31
    This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. [Deprecated]

cudaErrorMemoryValueTooLarge = 32
    This indicated that an emulated device pointer exceeded the 32-bit address range. [Deprecated]

cudaErrorStubLibrary = 34
    This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver will see CUDA API calls return this error.

cudaErrorInsufficientDriver = 35
    This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run.

cudaErrorCallRequiresNewerDriver = 36
    This indicates that the API call requires a newer CUDA driver than the one currently installed. Users should install an updated NVIDIA CUDA driver to allow the API call to succeed.

cudaErrorInvalidSurface = 37
    This indicates that the surface passed to the API call is not a valid surface.

cudaErrorDuplicateVariableName = 43
    This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name.

cudaErrorDuplicateTextureName = 44
    This indicates that multiple textures (across separate CUDA source files in the application) share the same string name.

cudaErrorDuplicateSurfaceName = 45
    This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name.

cudaErrorDevicesUnavailable = 46
    This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of cudaComputeModeProhibited, cudaComputeModeExclusiveProcess, or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed.

cudaErrorIncompatibleDriverContext = 49
    This indicates that the current context is not compatible with the CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see "Interactions with the CUDA Driver API" for more information.

cudaErrorMissingConfiguration = 52
    The device function being invoked (usually via cudaLaunchKernel()) was not previously configured via the cudaConfigureCall() function.

cudaErrorPriorLaunchFailure = 53
    This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. [Deprecated]

cudaErrorLaunchMaxDepthExceeded = 65
    This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches.

cudaErrorLaunchFileScopedTex = 66
    This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs.

cudaErrorLaunchFileScopedSurf = 67
    This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs.

cudaErrorSyncDepthExceeded = 68
    This error indicates that a call to cudaDeviceSynchronize made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or the user-specified device limit cudaLimitDevRuntimeSyncDepth. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which cudaDeviceSynchronize will be called must be specified with the cudaLimitDevRuntimeSyncDepth limit to the cudaDeviceSetLimit API before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. Note that cudaDeviceSynchronize made from the device runtime is only supported on devices of compute capability < 9.0.

cudaErrorLaunchPendingCountExceeded = 69
    This error indicates that a device runtime grid launch failed because the launch would exceed the limit cudaLimitDevRuntimePendingLaunchCount. For this launch to proceed successfully, cudaDeviceSetLimit must be called to set the cudaLimitDevRuntimePendingLaunchCount to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations.

cudaErrorInvalidDeviceFunction = 98
    The requested device function does not exist or is not compiled for the proper device architecture.

cudaErrorNoDevice = 100
    This indicates that no CUDA-capable devices were detected by the installed CUDA driver.

cudaErrorInvalidDevice = 101
    This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device.

cudaErrorDeviceNotLicensed = 102
    This indicates that the device doesn't have a valid Grid License.

cudaErrorSoftwareValidityNotEstablished = 103
    By default, the CUDA runtime may perform a minimal set of self-tests, as well as CUDA driver tests, to establish the validity of both. Introduced in CUDA 11.2, this error return indicates that at least one of these tests has failed and the validity of either the runtime or the driver could not be established.

cudaErrorStartupFailure = 127
    This indicates an internal startup failure in the CUDA runtime.

cudaErrorInvalidKernelImage = 200
    This indicates that the device kernel image is invalid.

cudaErrorDeviceUninitialized = 201
    This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had cuCtxDestroy() invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See cuCtxGetApiVersion() for more details.

cudaErrorMapBufferObjectFailed = 205
    This indicates that the buffer object could not be mapped.

cudaErrorUnmapBufferObjectFailed = 206
    This indicates that the buffer object could not be unmapped.

cudaErrorArrayIsMapped = 207
    This indicates that the specified array is currently mapped and thus cannot be destroyed.

cudaErrorAlreadyMapped = 208
    This indicates that the resource is already mapped.

cudaErrorNoKernelImageForDevice = 209
    This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration.

cudaErrorAlreadyAcquired = 210
    This indicates that a resource has already been acquired.

cudaErrorNotMapped = 211
    This indicates that a resource is not mapped.

cudaErrorNotMappedAsArray = 212
    This indicates that a mapped resource is not available for access as an array.

cudaErrorNotMappedAsPointer = 213
    This indicates that a mapped resource is not available for access as a pointer.

cudaErrorECCUncorrectable = 214
    This indicates that an uncorrectable ECC error was detected during execution.

cudaErrorUnsupportedLimit = 215
    This indicates that the cudaLimit passed to the API call is not supported by the active device.

cudaErrorDeviceAlreadyInUse = 216
    This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread.

cudaErrorPeerAccessUnsupported = 217
    This error indicates that P2P access is not supported across the given devices.

cudaErrorInvalidPtx = 218
    A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.

cudaErrorInvalidGraphicsContext = 219
    This indicates an error with the OpenGL or DirectX context.

cudaErrorNvlinkUncorrectable = 220
    This indicates that an uncorrectable NVLink error was detected during the execution.

cudaErrorJitCompilerNotFound = 221
    This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.

cudaErrorUnsupportedPtxVersion = 222
    This indicates that the provided PTX was compiled with an unsupported toolchain. The most common reason for this is that the PTX was generated by a compiler newer than what is supported by the CUDA driver and PTX JIT compiler.

cudaErrorJitCompilationDisabled = 223
    This indicates that the JIT compilation was disabled. The JIT compilation compiles PTX. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.

cudaErrorUnsupportedExecAffinity = 224
    This indicates that the provided execution affinity is not supported by the device.

cudaErrorUnsupportedDevSideSync = 225
    This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize.

cudaErrorInvalidSource = 300
    This indicates that the device kernel source is invalid.

cudaErrorFileNotFound = 301
    This indicates that the file specified was not found.

cudaErrorSharedObjectSymbolNotFound = 302
    This indicates that a link to a shared object failed to resolve.

cudaErrorSharedObjectInitFailed = 303
    This indicates that initialization of a shared object failed.

cudaErrorOperatingSystem = 304
    This error indicates that an OS call failed.

cudaErrorInvalidResourceHandle = 400
    This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like cudaStream_t and cudaEvent_t.

cudaErrorIllegalState = 401
    This indicates that a resource required by the API call is not in a valid state to perform the requested operation.

cudaErrorLossyQuery = 402
    This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments.

cudaErrorSymbolNotFound = 500
    This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names.

cudaErrorNotReady = 600
    This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than cudaSuccess (which indicates completion). Calls that may return this value include cudaEventQuery() and cudaStreamQuery().
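
cudaErrorNotReady is the one status-like value in this enum: it reports work still in flight, not a failure. A polling sketch under the same tuple-return convention:

    from cuda.bindings import runtime

    err, stream = runtime.cudaStreamCreate()
    err, event = runtime.cudaEventCreate()
    err, = runtime.cudaEventRecord(event, stream)

    # cudaEventQuery returns cudaErrorNotReady while prior work in the
    # stream is still running; treat it as "poll again", not as an error.
    while True:
        err, = runtime.cudaEventQuery(event)
        if err == runtime.cudaError_t.cudaSuccess:
            break
        assert err == runtime.cudaError_t.cudaErrorNotReady

    runtime.cudaEventDestroy(event)
    runtime.cudaStreamDestroy(stream)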

cudaErrorIllegalAddress = 700
    The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorLaunchOutOfResources = 701
    This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to cudaErrorInvalidConfiguration, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count.

cudaErrorLaunchTimeout = 702
    This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property kernelExecTimeoutEnabled for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorLaunchIncompatibleTexturing = 703
    This error indicates a kernel launch that uses an incompatible texturing mode.

cudaErrorPeerAccessAlreadyEnabled = 704
    This error indicates that a call to cudaDeviceEnablePeerAccess() is trying to re-enable peer addressing from a context which has already had peer addressing enabled.

cudaErrorPeerAccessNotEnabled = 705
    This error indicates that cudaDeviceDisablePeerAccess() is trying to disable peer addressing which has not been enabled yet via cudaDeviceEnablePeerAccess().

cudaErrorSetOnActiveProcess = 708
    This indicates that the user has called cudaSetValidDevices(), cudaSetDeviceFlags(), cudaD3D9SetDirect3DDevice(), cudaD3D10SetDirect3DDevice, cudaD3D11SetDirect3DDevice(), or cudaVDPAUSetVDPAUDevice() after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing CUcontext active on the host thread.

cudaErrorContextIsDestroyed = 709
    This error indicates that the context current to the calling thread has been destroyed using cuCtxDestroy, or is a primary context which has not yet been initialized.

cudaErrorAssert = 710
    An assert triggered in device code during kernel execution. The device cannot be used again. All existing allocations are invalid. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorTooManyPeers = 711
    This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to cudaEnablePeerAccess().

cudaErrorHostMemoryAlreadyRegistered = 712
    This error indicates that the memory range passed to cudaHostRegister() has already been registered.

cudaErrorHostMemoryNotRegistered = 713
    This error indicates that the pointer passed to cudaHostUnregister() does not correspond to any currently registered memory region.

cudaErrorHardwareStackError = 714
    Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorIllegalInstruction = 715
    The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorMisalignedAddress = 716
    The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorInvalidAddressSpace = 717
    While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorInvalidPc = 718
    The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorLaunchFailure = 719
    An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorCooperativeLaunchTooLarge = 720
    This error indicates that the number of blocks launched per grid for a kernel that was launched via either cudaLaunchCooperativeKernel or cudaLaunchCooperativeKernelMultiDevice exceeds the maximum number of blocks as allowed by cudaOccupancyMaxActiveBlocksPerMultiprocessor or cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors as specified by the device attribute cudaDevAttrMultiProcessorCount.

cudaErrorNotPermitted = 800
    This error indicates the attempted operation is not permitted.

cudaErrorNotSupported = 801
    This error indicates the attempted operation is not supported on the current system or device.

cudaErrorSystemNotReady = 802
    This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide.

cudaErrorSystemDriverMismatch = 803
    This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions.

cudaErrorCompatNotSupportedOnDevice = 804
    This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable.

cudaErrorMpsConnectionFailed = 805
    This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server.

cudaErrorMpsRpcFailure = 806
    This error indicates that the remote procedure call between the MPS server and the MPS client failed.

cudaErrorMpsServerNotReady = 807
    This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure.

cudaErrorMpsMaxClientsReached = 808
    This error indicates that the hardware resources required to create an MPS client have been exhausted.

cudaErrorMpsMaxConnectionsReached = 809
    This error indicates that the hardware resources required to support device connections have been exhausted.

cudaErrorMpsClientTerminated = 810
    This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorCdpNotSupported = 811
    This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it.

cudaErrorCdpVersionMismatch = 812
    This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism.

cudaErrorStreamCaptureUnsupported = 900
    The operation is not permitted when the stream is capturing.

cudaErrorStreamCaptureInvalidated = 901
    The current capture sequence on the stream has been invalidated due to a previous error.

cudaErrorStreamCaptureMerge = 902
    The operation would have resulted in a merge of two independent capture sequences.

cudaErrorStreamCaptureUnmatched = 903
    The capture was not initiated in this stream.

cudaErrorStreamCaptureUnjoined = 904
    The capture sequence contains a fork that was not joined to the primary stream.

cudaErrorStreamCaptureIsolation = 905
    A dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary.

cudaErrorStreamCaptureImplicit = 906
    The operation would have resulted in a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy.

cudaErrorCapturedEvent = 907
    The operation is not permitted on an event which was last recorded in a capturing stream.

cudaErrorStreamCaptureWrongThread = 908
    A stream capture sequence not initiated with the cudaStreamCaptureModeRelaxed argument to cudaStreamBeginCapture was passed to cudaStreamEndCapture in a different thread.

cudaErrorTimeout = 909
    This indicates that the wait operation has timed out.

cudaErrorGraphExecUpdateFailure = 910
    This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.

cudaErrorExternalDevice = 911
    This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.

cudaErrorInvalidClusterSize = 912
    This indicates that a kernel launch error has occurred due to cluster misconfiguration.

cudaErrorFunctionNotLoaded = 913
    Indicates that a function handle is not loaded when calling an API that requires a loaded function.

cudaErrorInvalidResourceType = 914
    This error indicates one or more resources passed in are not valid resource types for the operation.

cudaErrorInvalidResourceConfiguration = 915
    This error indicates one or more resources are insufficient or non-applicable for the operation.

cudaErrorUnknown = 999
    This indicates that an unknown internal error has occurred.

cudaErrorApiFailureBase = 10000

class cuda.bindings.runtime.cudaChannelFormatKind(value)

    Channel format kind

cudaChannelFormatKindSigned = 0
    Signed channel format

cudaChannelFormatKindUnsigned = 1
    Unsigned channel format

cudaChannelFormatKindFloat = 2
    Float channel format

cudaChannelFormatKindNone = 3
    No channel format

cudaChannelFormatKindNV12 = 4
    Unsigned 8-bit integers, planar 4:2:0 YUV format

cudaChannelFormatKindUnsignedNormalized8X1 = 5
    1 channel unsigned 8-bit normalized integer

cudaChannelFormatKindUnsignedNormalized8X2 = 6
    2 channel unsigned 8-bit normalized integer

cudaChannelFormatKindUnsignedNormalized8X4 = 7
    4 channel unsigned 8-bit normalized integer

cudaChannelFormatKindUnsignedNormalized16X1 = 8
    1 channel unsigned 16-bit normalized integer

cudaChannelFormatKindUnsignedNormalized16X2 = 9
    2 channel unsigned 16-bit normalized integer

cudaChannelFormatKindUnsignedNormalized16X4 = 10
    4 channel unsigned 16-bit normalized integer

cudaChannelFormatKindSignedNormalized8X1 = 11
    1 channel signed 8-bit normalized integer

cudaChannelFormatKindSignedNormalized8X2 = 12
    2 channel signed 8-bit normalized integer

cudaChannelFormatKindSignedNormalized8X4 = 13
    4 channel signed 8-bit normalized integer

cudaChannelFormatKindSignedNormalized16X1 = 14
    1 channel signed 16-bit normalized integer

cudaChannelFormatKindSignedNormalized16X2 = 15
    2 channel signed 16-bit normalized integer

cudaChannelFormatKindSignedNormalized16X4 = 16
    4 channel signed 16-bit normalized integer

cudaChannelFormatKindUnsignedBlockCompressed1 = 17
    4 channel unsigned normalized block-compressed (BC1 compression) format

cudaChannelFormatKindUnsignedBlockCompressed1SRGB = 18
    4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding

cudaChannelFormatKindUnsignedBlockCompressed2 = 19
    4 channel unsigned normalized block-compressed (BC2 compression) format

cudaChannelFormatKindUnsignedBlockCompressed2SRGB = 20
    4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding

cudaChannelFormatKindUnsignedBlockCompressed3 = 21
    4 channel unsigned normalized block-compressed (BC3 compression) format

cudaChannelFormatKindUnsignedBlockCompressed3SRGB = 22
    4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding

cudaChannelFormatKindUnsignedBlockCompressed4 = 23
    1 channel unsigned normalized block-compressed (BC4 compression) format

cudaChannelFormatKindSignedBlockCompressed4 = 24
    1 channel signed normalized block-compressed (BC4 compression) format

cudaChannelFormatKindUnsignedBlockCompressed5 = 25
    2 channel unsigned normalized block-compressed (BC5 compression) format

cudaChannelFormatKindSignedBlockCompressed5 = 26
    2 channel signed normalized block-compressed (BC5 compression) format

cudaChannelFormatKindUnsignedBlockCompressed6H = 27
    3 channel unsigned half-float block-compressed (BC6H compression) format

cudaChannelFormatKindSignedBlockCompressed6H = 28
    3 channel signed half-float block-compressed (BC6H compression) format

cudaChannelFormatKindUnsignedBlockCompressed7 = 29
    4 channel unsigned normalized block-compressed (BC7 compression) format

cudaChannelFormatKindUnsignedBlockCompressed7SRGB = 30
    4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding

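A channel format kind is normally consumed through a channel descriptor. A sketch of building a single-channel 32-bit float descriptor, assuming cudaCreateChannelDesc is exposed with the usual (err, result) tuple return:

    from cuda.bindings import runtime

    # x/y/z/w are bits per component; unused components are 0.
    err, desc = runtime.cudaCreateChannelDesc(
        32, 0, 0, 0, runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat)
    assert err == runtime.cudaError_t.cudaSuccess
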
class cuda.bindings.runtime.cudaMemoryType(value)

    CUDA memory types

cudaMemoryTypeUnregistered = 0
    Unregistered memory

cudaMemoryTypeHost = 1
    Host memory

cudaMemoryTypeDevice = 2
    Device memory

cudaMemoryTypeManaged = 3
    Managed memory

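The memory type of an arbitrary pointer can be recovered at runtime. A sketch using cudaPointerGetAttributes, assuming the returned attributes object mirrors the C struct's type and device fields:

    from cuda.bindings import runtime

    err, dptr = runtime.cudaMalloc(1 << 20)
    err, attrs = runtime.cudaPointerGetAttributes(dptr)
    # attrs.type is a cudaMemoryType describing what the pointer refers to.
    if attrs.type == runtime.cudaMemoryType.cudaMemoryTypeDevice:
        print("device allocation on device", attrs.device)
    err, = runtime.cudaFree(dptr)
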
class cuda.bindings.runtime.cudaMemcpyKind(value)

    CUDA memory copy types

cudaMemcpyHostToHost = 0
    Host -> Host

cudaMemcpyHostToDevice = 1
    Host -> Device

cudaMemcpyDeviceToHost = 2
    Device -> Host

cudaMemcpyDeviceToDevice = 3
    Device -> Device

cudaMemcpyDefault = 4
    Direction of the transfer is inferred from the pointer values. Requires unified virtual addressing

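The copy kind selects the direction of a cudaMemcpy. A host-to-device round-trip sketch, assuming the bindings accept NumPy arrays wherever the C API takes a host pointer:

    import numpy as np
    from cuda.bindings import runtime

    src = np.arange(1024, dtype=np.float32)
    dst = np.zeros_like(src)

    err, dptr = runtime.cudaMalloc(src.nbytes)
    err, = runtime.cudaMemcpy(dptr, src, src.nbytes,
                              runtime.cudaMemcpyKind.cudaMemcpyHostToDevice)
    err, = runtime.cudaMemcpy(dst, dptr, src.nbytes,
                              runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    err, = runtime.cudaFree(dptr)
    assert (src == dst).all()
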
class cuda.bindings.runtime.cudaAccessProperty(value)

    Specifies performance hint with cudaAccessPolicyWindow for hitProp and missProp members.

cudaAccessPropertyNormal = 0
    Normal cache persistence.

cudaAccessPropertyStreaming = 1
    Streaming access is less likely to persist in cache.

cudaAccessPropertyPersisting = 2
    Persisting access is more likely to persist in cache.

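hitProp and missProp are consumed through an access policy window attached to a stream. A sketch, assuming cudaStreamAttrValue and cudaStreamAttrID are exposed mirroring the C structs (field and enumerator names below follow the C API):

    from cuda.bindings import runtime

    err, stream = runtime.cudaStreamCreate()
    err, dptr = runtime.cudaMalloc(1 << 20)

    # Hits inside the window prefer to persist in L2; misses stream through.
    attr = runtime.cudaStreamAttrValue()
    attr.accessPolicyWindow.base_ptr = dptr
    attr.accessPolicyWindow.num_bytes = 1 << 20
    attr.accessPolicyWindow.hitRatio = 0.6
    attr.accessPolicyWindow.hitProp = runtime.cudaAccessProperty.cudaAccessPropertyPersisting
    attr.accessPolicyWindow.missProp = runtime.cudaAccessProperty.cudaAccessPropertyStreaming
    err, = runtime.cudaStreamSetAttribute(
        stream, runtime.cudaStreamAttrID.cudaStreamAttributeAccessPolicyWindow, attr)
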
class cuda.bindings.runtime.cudaStreamCaptureStatus(value)

    Possible stream capture statuses returned by cudaStreamIsCapturing

cudaStreamCaptureStatusNone = 0
    Stream is not capturing

cudaStreamCaptureStatusActive = 1
    Stream is actively capturing

cudaStreamCaptureStatusInvalidated = 2
    Stream is part of a capture sequence that has been invalidated, but not terminated

class cuda.bindings.runtime.cudaStreamCaptureMode(value)

    Possible modes for stream capture thread interactions. For more details see cudaStreamBeginCapture and cudaThreadExchangeStreamCaptureMode

cudaStreamCaptureModeGlobal = 0

cudaStreamCaptureModeThreadLocal = 1

cudaStreamCaptureModeRelaxed = 2

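The capture mode is passed to cudaStreamBeginCapture; the status enum above is what cudaStreamIsCapturing reports while recording. A capture round-trip sketch:

    from cuda.bindings import runtime

    err, stream = runtime.cudaStreamCreate()
    err, = runtime.cudaStreamBeginCapture(
        stream, runtime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)

    # Work issued to `stream` here is recorded into a graph, not executed.
    err, status = runtime.cudaStreamIsCapturing(stream)
    assert status == runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive

    err, graph = runtime.cudaStreamEndCapture(stream)
    err, = runtime.cudaGraphDestroy(graph)
    err, = runtime.cudaStreamDestroy(stream)
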
class cuda.bindings.runtime.cudaSynchronizationPolicy(value)

cudaSyncPolicyAuto = 1

cudaSyncPolicySpin = 2

cudaSyncPolicyYield = 3

cudaSyncPolicyBlockingSync = 4

class cuda.bindings.runtime.cudaClusterSchedulingPolicy(value)

    Cluster scheduling policies. These may be passed to cudaFuncSetAttribute

cudaClusterSchedulingPolicyDefault = 0
    The default policy

cudaClusterSchedulingPolicySpread = 1
    Spread the blocks within a cluster to the SMs

cudaClusterSchedulingPolicyLoadBalancing = 2
    Allow the hardware to load-balance the blocks in a cluster to the SMs

class cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags(value)

    Flags for cudaStreamUpdateCaptureDependencies

cudaStreamAddCaptureDependencies = 0
    Add new nodes to the dependency set

cudaStreamSetCaptureDependencies = 1
    Replace the dependency set with the new nodes

class cuda.bindings.runtime.cudaUserObjectFlags(value)

    Flags for user objects for graphs

cudaUserObjectNoDestructorSync = 1
    Indicates the destructor execution is not synchronized by any CUDA handle.

class cuda.bindings.runtime.cudaUserObjectRetainFlags(value)

    Flags for retaining user object references for graphs

cudaGraphUserObjectMove = 1
    Transfer references from the caller rather than creating new references.

class cuda.bindings.runtime.cudaGraphicsRegisterFlags(value)

    CUDA graphics interop register flags

cudaGraphicsRegisterFlagsNone = 0
    Default

cudaGraphicsRegisterFlagsReadOnly = 1
    CUDA will not write to this resource

cudaGraphicsRegisterFlagsWriteDiscard = 2
    CUDA will only write to and will not read from this resource

cudaGraphicsRegisterFlagsSurfaceLoadStore = 4
    CUDA will bind this resource to a surface reference

cudaGraphicsRegisterFlagsTextureGather = 8
    CUDA will perform texture gather operations on this resource

class cuda.bindings.runtime.cudaGraphicsMapFlags(value)

    CUDA graphics interop map flags

cudaGraphicsMapFlagsNone = 0
    Default; Assume resource can be read/written

cudaGraphicsMapFlagsReadOnly = 1
    CUDA will not write to this resource

cudaGraphicsMapFlagsWriteDiscard = 2
    CUDA will only write to and will not read from this resource

class cuda.bindings.runtime.cudaGraphicsCubeFace(value)

    CUDA graphics interop array indices for cube maps

cudaGraphicsCubeFacePositiveX = 0
    Positive X face of cubemap

cudaGraphicsCubeFaceNegativeX = 1
    Negative X face of cubemap

cudaGraphicsCubeFacePositiveY = 2
    Positive Y face of cubemap

cudaGraphicsCubeFaceNegativeY = 3
    Negative Y face of cubemap

cudaGraphicsCubeFacePositiveZ = 4
    Positive Z face of cubemap

cudaGraphicsCubeFaceNegativeZ = 5
    Negative Z face of cubemap

class cuda.bindings.runtime.cudaResourceType(value)

    CUDA resource types

cudaResourceTypeArray = 0
    Array resource

cudaResourceTypeMipmappedArray = 1
    Mipmapped array resource

cudaResourceTypeLinear = 2
    Linear resource

cudaResourceTypePitch2D = 3
    Pitch 2D resource

class cuda.bindings.runtime.cudaResourceViewFormat(value)

    CUDA texture resource view formats

cudaResViewFormatNone = 0
    No resource view format (use underlying resource format)

cudaResViewFormatUnsignedChar1 = 1
    1 channel unsigned 8-bit integers

cudaResViewFormatUnsignedChar2 = 2
    2 channel unsigned 8-bit integers

cudaResViewFormatUnsignedChar4 = 3
    4 channel unsigned 8-bit integers

cudaResViewFormatSignedChar1 = 4
    1 channel signed 8-bit integers

cudaResViewFormatSignedChar2 = 5
    2 channel signed 8-bit integers

cudaResViewFormatSignedChar4 = 6
    4 channel signed 8-bit integers

cudaResViewFormatUnsignedShort1 = 7
    1 channel unsigned 16-bit integers

cudaResViewFormatUnsignedShort2 = 8
    2 channel unsigned 16-bit integers

cudaResViewFormatUnsignedShort4 = 9
    4 channel unsigned 16-bit integers

cudaResViewFormatSignedShort1 = 10
    1 channel signed 16-bit integers

cudaResViewFormatSignedShort2 = 11
    2 channel signed 16-bit integers

cudaResViewFormatSignedShort4 = 12
    4 channel signed 16-bit integers

cudaResViewFormatUnsignedInt1 = 13
    1 channel unsigned 32-bit integers

cudaResViewFormatUnsignedInt2 = 14
    2 channel unsigned 32-bit integers

cudaResViewFormatUnsignedInt4 = 15
    4 channel unsigned 32-bit integers

cudaResViewFormatSignedInt1 = 16
    1 channel signed 32-bit integers

cudaResViewFormatSignedInt2 = 17
    2 channel signed 32-bit integers

cudaResViewFormatSignedInt4 = 18
    4 channel signed 32-bit integers

cudaResViewFormatHalf1 = 19
    1 channel 16-bit floating point

cudaResViewFormatHalf2 = 20
    2 channel 16-bit floating point

cudaResViewFormatHalf4 = 21
    4 channel 16-bit floating point

cudaResViewFormatFloat1 = 22
    1 channel 32-bit floating point

cudaResViewFormatFloat2 = 23
    2 channel 32-bit floating point

cudaResViewFormatFloat4 = 24
    4 channel 32-bit floating point

cudaResViewFormatUnsignedBlockCompressed1 = 25
    Block compressed 1

cudaResViewFormatUnsignedBlockCompressed2 = 26
    Block compressed 2

cudaResViewFormatUnsignedBlockCompressed3 = 27
    Block compressed 3

cudaResViewFormatUnsignedBlockCompressed4 = 28
    Block compressed 4 unsigned

cudaResViewFormatSignedBlockCompressed4 = 29
    Block compressed 4 signed

cudaResViewFormatUnsignedBlockCompressed5 = 30
    Block compressed 5 unsigned

cudaResViewFormatSignedBlockCompressed5 = 31
    Block compressed 5 signed

cudaResViewFormatUnsignedBlockCompressed6H = 32
    Block compressed 6 unsigned half-float

cudaResViewFormatSignedBlockCompressed6H = 33
    Block compressed 6 signed half-float

cudaResViewFormatUnsignedBlockCompressed7 = 34
    Block compressed 7

class cuda.bindings.runtime.cudaFuncAttribute(value)

    CUDA function attributes that can be set using cudaFuncSetAttribute

cudaFuncAttributeMaxDynamicSharedMemorySize = 8
    Maximum dynamic shared memory size

cudaFuncAttributePreferredSharedMemoryCarveout = 9
    Preferred shared memory-L1 cache split

cudaFuncAttributeClusterDimMustBeSet = 10
    Indicator to enforce valid cluster dimension specification on kernel launch

cudaFuncAttributeRequiredClusterWidth = 11
    Required cluster width

cudaFuncAttributeRequiredClusterHeight = 12
    Required cluster height

cudaFuncAttributeRequiredClusterDepth = 13
    Required cluster depth

cudaFuncAttributeNonPortableClusterSizeAllowed = 14
    Whether non-portable cluster scheduling policy is supported

cudaFuncAttributeClusterSchedulingPolicyPreference = 15
    Required cluster scheduling policy preference

cudaFuncAttributeMax = 16

class cuda.bindings.runtime.cudaFuncCache(value)

    CUDA function cache configurations

cudaFuncCachePreferNone = 0
    Default function cache configuration, no preference

cudaFuncCachePreferShared = 1
    Prefer larger shared memory and smaller L1 cache

cudaFuncCachePreferL1 = 2
    Prefer larger L1 cache and smaller shared memory

cudaFuncCachePreferEqual = 3
    Prefer equal size L1 cache and shared memory

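A process can state its cache preference device-wide. A sketch with cudaDeviceSetCacheConfig; the preference is a hint the runtime may ignore on devices with a fixed L1/shared split:

    from cuda.bindings import runtime

    err, = runtime.cudaDeviceSetCacheConfig(
        runtime.cudaFuncCache.cudaFuncCachePreferShared)
    err, config = runtime.cudaDeviceGetCacheConfig()
    print("current preference:", config)
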
class cuda.bindings.runtime.cudaSharedMemConfig(value)

    CUDA shared memory configuration [Deprecated]

cudaSharedMemBankSizeDefault = 0

cudaSharedMemBankSizeFourByte = 1

cudaSharedMemBankSizeEightByte = 2

class cuda.bindings.runtime.cudaSharedCarveout(value)

    Shared memory carveout configurations. These may be passed to cudaFuncSetAttribute

cudaSharedmemCarveoutDefault = -1
    No preference for shared memory or L1 (default)

cudaSharedmemCarveoutMaxShared = 100
    Prefer maximum available shared memory, minimum L1 cache

cudaSharedmemCarveoutMaxL1 = 0
    Prefer maximum available L1 cache, minimum shared memory

class cuda.bindings.runtime.cudaComputeMode(value)
    CUDA device compute modes

    cudaComputeModeDefault = 0
        Default compute mode (Multiple threads can use cudaSetDevice() with this device)

    cudaComputeModeExclusive = 1
        Compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice() with this device)

    cudaComputeModeProhibited = 2
        Compute-prohibited mode (No threads can use cudaSetDevice() with this device)

    cudaComputeModeExclusiveProcess = 3
        Compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice() with this device)
class cuda.bindings.runtime.cudaLimit(value)
    CUDA Limits

    cudaLimitStackSize = 0
        GPU thread stack size

    cudaLimitPrintfFifoSize = 1
        GPU printf FIFO size

    cudaLimitMallocHeapSize = 2
        GPU malloc heap size

    cudaLimitDevRuntimeSyncDepth = 3
        GPU device runtime synchronize depth

    cudaLimitDevRuntimePendingLaunchCount = 4
        GPU device runtime pending launch count

    cudaLimitMaxL2FetchGranularity = 5
        A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint

    cudaLimitPersistingL2CacheSize = 6
        A size in bytes for L2 persisting lines cache size
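For illustration (not part of the generated reference): limits are set and queried per device with cudaDeviceSetLimit and cudaDeviceGetLimit. A minimal sketch with error handling reduced to asserts::

    from cuda.bindings import runtime

    # Grow the device-side malloc heap to 64 MiB.
    err, = runtime.cudaDeviceSetLimit(
        runtime.cudaLimit.cudaLimitMallocHeapSize, 64 * 1024 * 1024)
    assert err == runtime.cudaError_t.cudaSuccess

    # The driver may round the requested value, so read it back.
    err, heap_bytes = runtime.cudaDeviceGetLimit(
        runtime.cudaLimit.cudaLimitMallocHeapSize)
    assert err == runtime.cudaError_t.cudaSuccess
    print(heap_bytes)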
class cuda.bindings.runtime.cudaMemoryAdvise(value)
    CUDA Memory Advise values

    cudaMemAdviseSetReadMostly = 1
        Data will mostly be read and only occasionally be written to

    cudaMemAdviseUnsetReadMostly = 2
        Undo the effect of cudaMemAdviseSetReadMostly

    cudaMemAdviseSetPreferredLocation = 3
        Set the preferred location for the data as the specified device

    cudaMemAdviseUnsetPreferredLocation = 4
        Clear the preferred location for the data

    cudaMemAdviseSetAccessedBy = 5
        Data will be accessed by the specified device, so prevent page faults as much as possible

    cudaMemAdviseUnsetAccessedBy = 6
        Let the Unified Memory subsystem decide on the page faulting policy for the specified device
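For illustration (not part of the generated reference): advice applies to managed allocations via cudaMemAdvise. A minimal sketch, assuming device 0 supports managed memory::

    from cuda.bindings import runtime

    nbytes = 1 << 20
    err, ptr = runtime.cudaMallocManaged(nbytes, runtime.cudaMemAttachGlobal)
    assert err == runtime.cudaError_t.cudaSuccess

    # Mark the range read-mostly so the driver may replicate read-only
    # copies near each accessing processor; device 0 is assumed here.
    err, = runtime.cudaMemAdvise(
        ptr, nbytes, runtime.cudaMemoryAdvise.cudaMemAdviseSetReadMostly, 0)
    assert err == runtime.cudaError_t.cudaSuccess

    err, = runtime.cudaFree(ptr)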
class cuda.bindings.runtime.cudaMemRangeAttribute(value)
    CUDA range attributes

    cudaMemRangeAttributeReadMostly = 1
        Whether the range will mostly be read and only occasionally be written to

    cudaMemRangeAttributePreferredLocation = 2
        The preferred location of the range

    cudaMemRangeAttributeAccessedBy = 3
        Memory range has cudaMemAdviseSetAccessedBy set for specified device

    cudaMemRangeAttributeLastPrefetchLocation = 4
        The last location to which the range was prefetched

    cudaMemRangeAttributePreferredLocationType = 5
        The preferred location type of the range

    cudaMemRangeAttributePreferredLocationId = 6
        The preferred location id of the range

    cudaMemRangeAttributeLastPrefetchLocationType = 7
        The last location type to which the range was prefetched

    cudaMemRangeAttributeLastPrefetchLocationId = 8
        The last location id to which the range was prefetched
class cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions(value)
    CUDA GPUDirect RDMA flush writes APIs supported on the device

    cudaFlushGPUDirectRDMAWritesOptionHost = 1
        cudaDeviceFlushGPUDirectRDMAWrites() and its CUDA Driver API counterpart are supported on the device.

    cudaFlushGPUDirectRDMAWritesOptionMemOps = 2
        The CU_STREAM_WAIT_VALUE_FLUSH flag and the CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the CUDA device.

class cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering(value)
    CUDA GPUDirect RDMA flush writes ordering features of the device

    cudaGPUDirectRDMAWritesOrderingNone = 0
        The device does not natively support ordering of GPUDirect RDMA writes. cudaFlushGPUDirectRDMAWrites() can be leveraged if supported.

    cudaGPUDirectRDMAWritesOrderingOwner = 100
        Natively, the device can consistently consume GPUDirect RDMA writes, although other CUDA devices may not.

    cudaGPUDirectRDMAWritesOrderingAllDevices = 200
        Any CUDA device in the system can consistently consume GPUDirect RDMA writes to this device.

class cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope(value)
    CUDA GPUDirect RDMA flush writes scopes

    cudaFlushGPUDirectRDMAWritesToOwner = 100
        Blocks until remote writes are visible to the CUDA device context owning the data.

    cudaFlushGPUDirectRDMAWritesToAllDevices = 200
        Blocks until remote writes are visible to all CUDA device contexts.

class cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesTarget(value)
    CUDA GPUDirect RDMA flush writes targets

    cudaFlushGPUDirectRDMAWritesTargetCurrentDevice = 0
        Sets the target for cudaDeviceFlushGPUDirectRDMAWrites() to the currently active CUDA device context.
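For illustration (not part of the generated reference): a hedged sketch of flushing remote writes, gated on the device actually reporting host-scope flush support (the attribute is a bitmask of cudaFlushGPUDirectRDMAWritesOptions)::

    from cuda.bindings import runtime

    err, opts = runtime.cudaDeviceGetAttribute(
        runtime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions, 0)
    assert err == runtime.cudaError_t.cudaSuccess

    host_bit = runtime.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost
    if opts & host_bit:
        # Block until remote writes are visible to the owning device context.
        err, = runtime.cudaDeviceFlushGPUDirectRDMAWrites(
            runtime.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice,
            runtime.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner)
        assert err == runtime.cudaError_t.cudaSuccess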
class cuda.bindings.runtime.cudaDeviceAttr(value)
    CUDA device attributes

    cudaDevAttrMaxThreadsPerBlock = 1
        Maximum number of threads per block

    cudaDevAttrMaxBlockDimX = 2
        Maximum block dimension X

    cudaDevAttrMaxBlockDimY = 3
        Maximum block dimension Y

    cudaDevAttrMaxBlockDimZ = 4
        Maximum block dimension Z

    cudaDevAttrMaxGridDimX = 5
        Maximum grid dimension X

    cudaDevAttrMaxGridDimY = 6
        Maximum grid dimension Y

    cudaDevAttrMaxGridDimZ = 7
        Maximum grid dimension Z

    cudaDevAttrMaxSharedMemoryPerBlock = 8
        Maximum shared memory available per block in bytes

    cudaDevAttrTotalConstantMemory = 9
        Memory available on device for constant variables in a CUDA C kernel in bytes

    cudaDevAttrWarpSize = 10
        Warp size in threads

    cudaDevAttrMaxPitch = 11
        Maximum pitch in bytes allowed by memory copies

    cudaDevAttrMaxRegistersPerBlock = 12
        Maximum number of 32-bit registers available per block

    cudaDevAttrClockRate = 13
        Peak clock frequency in kilohertz

    cudaDevAttrTextureAlignment = 14
        Alignment requirement for textures

    cudaDevAttrGpuOverlap = 15
        Device can possibly copy memory and execute a kernel concurrently

    cudaDevAttrMultiProcessorCount = 16
        Number of multiprocessors on device

    cudaDevAttrKernelExecTimeout = 17
        Specifies whether there is a run time limit on kernels

    cudaDevAttrIntegrated = 18
        Device is integrated with host memory

    cudaDevAttrCanMapHostMemory = 19
        Device can map host memory into CUDA address space

    cudaDevAttrComputeMode = 20
        Compute mode (See cudaComputeMode for details)

    cudaDevAttrMaxTexture1DWidth = 21
        Maximum 1D texture width

    cudaDevAttrMaxTexture2DWidth = 22
        Maximum 2D texture width

    cudaDevAttrMaxTexture2DHeight = 23
        Maximum 2D texture height

    cudaDevAttrMaxTexture3DWidth = 24
        Maximum 3D texture width

    cudaDevAttrMaxTexture3DHeight = 25
        Maximum 3D texture height

    cudaDevAttrMaxTexture3DDepth = 26
        Maximum 3D texture depth

    cudaDevAttrMaxTexture2DLayeredWidth = 27
        Maximum 2D layered texture width

    cudaDevAttrMaxTexture2DLayeredHeight = 28
        Maximum 2D layered texture height

    cudaDevAttrMaxTexture2DLayeredLayers = 29
        Maximum layers in a 2D layered texture

    cudaDevAttrSurfaceAlignment = 30
        Alignment requirement for surfaces

    cudaDevAttrConcurrentKernels = 31
        Device can possibly execute multiple kernels concurrently

    cudaDevAttrEccEnabled = 32
        Device has ECC support enabled

    cudaDevAttrPciBusId = 33
        PCI bus ID of the device

    cudaDevAttrPciDeviceId = 34
        PCI device ID of the device

    cudaDevAttrTccDriver = 35
        Device is using TCC driver model

    cudaDevAttrMemoryClockRate = 36
        Peak memory clock frequency in kilohertz

    cudaDevAttrGlobalMemoryBusWidth = 37
        Global memory bus width in bits

    cudaDevAttrL2CacheSize = 38
        Size of L2 cache in bytes

    cudaDevAttrMaxThreadsPerMultiProcessor = 39
        Maximum resident threads per multiprocessor

    cudaDevAttrAsyncEngineCount = 40
        Number of asynchronous engines

    cudaDevAttrUnifiedAddressing = 41
        Device shares a unified address space with the host

    cudaDevAttrMaxTexture1DLayeredWidth = 42
        Maximum 1D layered texture width

    cudaDevAttrMaxTexture1DLayeredLayers = 43
        Maximum layers in a 1D layered texture

    cudaDevAttrMaxTexture2DGatherWidth = 45
        Maximum 2D texture width if cudaArrayTextureGather is set

    cudaDevAttrMaxTexture2DGatherHeight = 46
        Maximum 2D texture height if cudaArrayTextureGather is set

    cudaDevAttrMaxTexture3DWidthAlt = 47
        Alternate maximum 3D texture width

    cudaDevAttrMaxTexture3DHeightAlt = 48
        Alternate maximum 3D texture height

    cudaDevAttrMaxTexture3DDepthAlt = 49
        Alternate maximum 3D texture depth

    cudaDevAttrPciDomainId = 50
        PCI domain ID of the device

    cudaDevAttrTexturePitchAlignment = 51
        Pitch alignment requirement for textures

    cudaDevAttrMaxTextureCubemapWidth = 52
        Maximum cubemap texture width/height

    cudaDevAttrMaxTextureCubemapLayeredWidth = 53
        Maximum cubemap layered texture width/height

    cudaDevAttrMaxTextureCubemapLayeredLayers = 54
        Maximum layers in a cubemap layered texture

    cudaDevAttrMaxSurface1DWidth = 55
        Maximum 1D surface width

    cudaDevAttrMaxSurface2DWidth = 56
        Maximum 2D surface width

    cudaDevAttrMaxSurface2DHeight = 57
        Maximum 2D surface height

    cudaDevAttrMaxSurface3DWidth = 58
        Maximum 3D surface width

    cudaDevAttrMaxSurface3DHeight = 59
        Maximum 3D surface height

    cudaDevAttrMaxSurface3DDepth = 60
        Maximum 3D surface depth

    cudaDevAttrMaxSurface1DLayeredWidth = 61
        Maximum 1D layered surface width

    cudaDevAttrMaxSurface1DLayeredLayers = 62
        Maximum layers in a 1D layered surface

    cudaDevAttrMaxSurface2DLayeredWidth = 63
        Maximum 2D layered surface width

    cudaDevAttrMaxSurface2DLayeredHeight = 64
        Maximum 2D layered surface height

    cudaDevAttrMaxSurface2DLayeredLayers = 65
        Maximum layers in a 2D layered surface

    cudaDevAttrMaxSurfaceCubemapWidth = 66
        Maximum cubemap surface width

    cudaDevAttrMaxSurfaceCubemapLayeredWidth = 67
        Maximum cubemap layered surface width

    cudaDevAttrMaxSurfaceCubemapLayeredLayers = 68
        Maximum layers in a cubemap layered surface

    cudaDevAttrMaxTexture1DLinearWidth = 69
        Maximum 1D linear texture width

    cudaDevAttrMaxTexture2DLinearWidth = 70
        Maximum 2D linear texture width

    cudaDevAttrMaxTexture2DLinearHeight = 71
        Maximum 2D linear texture height

    cudaDevAttrMaxTexture2DLinearPitch = 72
        Maximum 2D linear texture pitch in bytes

    cudaDevAttrMaxTexture2DMipmappedWidth = 73
        Maximum mipmapped 2D texture width

    cudaDevAttrMaxTexture2DMipmappedHeight = 74
        Maximum mipmapped 2D texture height

    cudaDevAttrComputeCapabilityMajor = 75
        Major compute capability version number

    cudaDevAttrComputeCapabilityMinor = 76
        Minor compute capability version number

    cudaDevAttrMaxTexture1DMipmappedWidth = 77
        Maximum mipmapped 1D texture width

    cudaDevAttrStreamPrioritiesSupported = 78
        Device supports stream priorities

    cudaDevAttrGlobalL1CacheSupported = 79
        Device supports caching globals in L1

    cudaDevAttrLocalL1CacheSupported = 80
        Device supports caching locals in L1

    cudaDevAttrMaxSharedMemoryPerMultiprocessor = 81
        Maximum shared memory available per multiprocessor in bytes

    cudaDevAttrMaxRegistersPerMultiprocessor = 82
        Maximum number of 32-bit registers available per multiprocessor

    cudaDevAttrManagedMemory = 83
        Device can allocate managed memory on this system

    cudaDevAttrIsMultiGpuBoard = 84
        Device is on a multi-GPU board

    cudaDevAttrMultiGpuBoardGroupID = 85
        Unique identifier for a group of devices on the same multi-GPU board

    cudaDevAttrHostNativeAtomicSupported = 86
        Link between the device and the host supports native atomic operations

    cudaDevAttrSingleToDoublePrecisionPerfRatio = 87
        Ratio of single precision performance (in floating-point operations per second) to double precision performance

    cudaDevAttrPageableMemoryAccess = 88
        Device supports coherently accessing pageable memory without calling cudaHostRegister on it

    cudaDevAttrConcurrentManagedAccess = 89
        Device can coherently access managed memory concurrently with the CPU

    cudaDevAttrComputePreemptionSupported = 90
        Device supports Compute Preemption

    cudaDevAttrCanUseHostPointerForRegisteredMem = 91
        Device can access host registered memory at the same virtual address as the CPU

    cudaDevAttrReserved92 = 92

    cudaDevAttrReserved93 = 93

    cudaDevAttrReserved94 = 94

    cudaDevAttrCooperativeLaunch = 95
        Device supports launching cooperative kernels via cudaLaunchCooperativeKernel

    cudaDevAttrCooperativeMultiDeviceLaunch = 96
        Deprecated; cudaLaunchCooperativeKernelMultiDevice is deprecated.

    cudaDevAttrMaxSharedMemoryPerBlockOptin = 97
        The maximum optin shared memory per block. This value may vary by chip. See cudaFuncSetAttribute

    cudaDevAttrCanFlushRemoteWrites = 98
        Device supports flushing of outstanding remote writes.

    cudaDevAttrHostRegisterSupported = 99
        Device supports host memory registration via cudaHostRegister.

    cudaDevAttrPageableMemoryAccessUsesHostPageTables = 100
        Device accesses pageable memory via the host’s page tables.

    cudaDevAttrDirectManagedMemAccessFromHost = 101
        Host can directly access managed memory on the device without migration.

    cudaDevAttrMaxBlocksPerMultiprocessor = 106
        Maximum number of blocks per multiprocessor

    cudaDevAttrMaxPersistingL2CacheSize = 108
        Maximum L2 persisting lines capacity setting in bytes.

    cudaDevAttrMaxAccessPolicyWindowSize = 109
        Maximum value of num_bytes.

    cudaDevAttrReservedSharedMemoryPerBlock = 111
        Shared memory reserved by CUDA driver per block in bytes

    cudaDevAttrSparseCudaArraySupported = 112
        Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays

    cudaDevAttrHostRegisterReadOnlySupported = 113
        Device supports using the cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU

    cudaDevAttrTimelineSemaphoreInteropSupported = 114
        External timeline semaphore interop is supported on the device

    cudaDevAttrMaxTimelineSemaphoreInteropSupported = 114
        Deprecated; external timeline semaphore interop is supported on the device

    cudaDevAttrMemoryPoolsSupported = 115
        Device supports using the cudaMallocAsync and cudaMemPool family of APIs

    cudaDevAttrGPUDirectRDMASupported = 116
        Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)

    cudaDevAttrGPUDirectRDMAFlushWritesOptions = 117
        The returned attribute shall be interpreted as a bitmask, where the individual bits are listed in the cudaFlushGPUDirectRDMAWritesOptions enum

    cudaDevAttrGPUDirectRDMAWritesOrdering = 118
        GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See cudaGPUDirectRDMAWritesOrdering for the numerical values returned here.

    cudaDevAttrMemoryPoolSupportedHandleTypes = 119
        Handle types supported with mempool based IPC

    cudaDevAttrClusterLaunch = 120
        Indicates device supports cluster launch

    cudaDevAttrDeferredMappingCudaArraySupported = 121
        Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays

    cudaDevAttrReserved122 = 122

    cudaDevAttrReserved123 = 123

    cudaDevAttrReserved124 = 124

    cudaDevAttrIpcEventSupport = 125
        Device supports IPC Events.

    cudaDevAttrMemSyncDomainCount = 126
        Number of memory synchronization domains the device supports.

    cudaDevAttrReserved127 = 127

    cudaDevAttrReserved128 = 128

    cudaDevAttrReserved129 = 129

    cudaDevAttrNumaConfig = 130
        NUMA configuration of a device: value is of type cudaDeviceNumaConfig enum

    cudaDevAttrNumaId = 131
        NUMA node ID of the GPU memory

    cudaDevAttrReserved132 = 132

    cudaDevAttrMpsEnabled = 133
        Contexts created on this device will be shared via MPS

    cudaDevAttrHostNumaId = 134
        NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA.

    cudaDevAttrD3D12CigSupported = 135
        Device supports CIG with D3D12.

    cudaDevAttrMax = 136
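For illustration (not part of the generated reference): each attribute is queried with cudaDeviceGetAttribute, which in these bindings returns an (error, int) pair. A minimal sketch, assuming device 0 is present::

    from cuda.bindings import runtime

    device = 0
    for attr in (runtime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock,
                 runtime.cudaDeviceAttr.cudaDevAttrMultiProcessorCount,
                 runtime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor):
        err, value = runtime.cudaDeviceGetAttribute(attr, device)
        assert err == runtime.cudaError_t.cudaSuccess
        print(attr, value)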
class cuda.bindings.runtime.cudaMemPoolAttr(value)
    CUDA memory pool attributes

    cudaMemPoolReuseFollowEventDependencies = 1
        (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in other streams, as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled)

    cudaMemPoolReuseAllowOpportunistic = 2
        (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled)

    cudaMemPoolReuseAllowInternalDependencies = 3
        (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync. (default enabled)

    cudaMemPoolAttrReleaseThreshold = 4
        (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0)

    cudaMemPoolAttrReservedMemCurrent = 5
        (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool.

    cudaMemPoolAttrReservedMemHigh = 6
        (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. The high watermark can only be reset to zero.

    cudaMemPoolAttrUsedMemCurrent = 7
        (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application.

    cudaMemPoolAttrUsedMemHigh = 8
        (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. The high watermark can only be reset to zero.
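For illustration (not part of the generated reference): a sketch of tuning the default pool's release threshold. The cuuint64_t wrapper from the driver module is assumed here to be the expected value type for the 64-bit attributes::

    from cuda.bindings import driver, runtime

    err, pool = runtime.cudaDeviceGetDefaultMemPool(0)
    assert err == runtime.cudaError_t.cudaSuccess

    # Keep up to 32 MiB cached in the pool before returning memory to the OS.
    err, = runtime.cudaMemPoolSetAttribute(
        pool, runtime.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold,
        driver.cuuint64_t(32 * 1024 * 1024))
    assert err == runtime.cudaError_t.cudaSuccess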
class cuda.bindings.runtime.cudaMemLocationType(value)
    Specifies the type of location

    cudaMemLocationTypeInvalid = 0

    cudaMemLocationTypeDevice = 1
        Location is a device location, thus id is a device ordinal

    cudaMemLocationTypeHost = 2
        Location is host, id is ignored

    cudaMemLocationTypeHostNuma = 3
        Location is a host NUMA node, thus id is a host NUMA node id

    cudaMemLocationTypeHostNumaCurrent = 4
        Location is the host NUMA node closest to the current thread’s CPU, id is ignored

class cuda.bindings.runtime.cudaMemAccessFlags(value)
    Specifies the memory protection flags for mapping.

    cudaMemAccessFlagsProtNone = 0
        Default, make the address range not accessible

    cudaMemAccessFlagsProtRead = 1
        Make the address range read accessible

    cudaMemAccessFlagsProtReadWrite = 3
        Make the address range read-write accessible
class cuda.bindings.runtime.cudaMemAllocationType(value)
    Defines the allocation types available

    cudaMemAllocationTypeInvalid = 0

    cudaMemAllocationTypePinned = 1
        This allocation type is ‘pinned’, i.e. cannot migrate from its current location while the application is actively using it

    cudaMemAllocationTypeMax = 2147483647

class cuda.bindings.runtime.cudaMemAllocationHandleType(value)
    Flags for specifying particular handle types

    cudaMemHandleTypeNone = 0
        Does not allow any export mechanism.

    cudaMemHandleTypePosixFileDescriptor = 1
        Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int)

    cudaMemHandleTypeWin32 = 2
        Allows a Win32 NT handle to be used for exporting. (HANDLE)

    cudaMemHandleTypeWin32Kmt = 4
        Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE)

    cudaMemHandleTypeFabric = 8
        Allows a fabric handle to be used for exporting. (cudaMemFabricHandle_t)
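For illustration (not part of the generated reference): the location, allocation, and handle types above combine in cudaMemPoolProps when creating an explicit pool. A POSIX-only sketch, assuming device 0::

    from cuda.bindings import runtime

    props = runtime.cudaMemPoolProps()
    props.allocType = runtime.cudaMemAllocationType.cudaMemAllocationTypePinned
    props.handleTypes = runtime.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor
    props.location.type = runtime.cudaMemLocationType.cudaMemLocationTypeDevice
    props.location.id = 0

    err, pool = runtime.cudaMemPoolCreate(props)
    assert err == runtime.cudaError_t.cudaSuccess
    err, = runtime.cudaMemPoolDestroy(pool)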
class cuda.bindings.runtime.cudaGraphMemAttributeType(value)
    Graph memory attributes

    cudaGraphMemAttrUsedMemCurrent = 0
        (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs.

    cudaGraphMemAttrUsedMemHigh = 1
        (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. The high watermark can only be reset to zero.

    cudaGraphMemAttrReservedMemCurrent = 2
        (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator.

    cudaGraphMemAttrReservedMemHigh = 3
        (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator.
class cuda.bindings.runtime.cudaDeviceP2PAttr(value)
    CUDA device P2P attributes

    cudaDevP2PAttrPerformanceRank = 1
        A relative value indicating the performance of the link between two devices

    cudaDevP2PAttrAccessSupported = 2
        Peer access is enabled

    cudaDevP2PAttrNativeAtomicSupported = 3
        Native atomic operation over the link supported

    cudaDevP2PAttrCudaArrayAccessSupported = 4
        Accessing CUDA arrays over the link supported
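For illustration (not part of the generated reference): a sketch that checks whether device 0 can access memory on device 1, skipping single-GPU systems::

    from cuda.bindings import runtime

    err, count = runtime.cudaGetDeviceCount()
    assert err == runtime.cudaError_t.cudaSuccess
    if count >= 2:
        err, supported = runtime.cudaDeviceGetP2PAttribute(
            runtime.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported, 0, 1)
        assert err == runtime.cudaError_t.cudaSuccess
        print(bool(supported))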
class cuda.bindings.runtime.cudaExternalMemoryHandleType(value)
    External memory handle types

    cudaExternalMemoryHandleTypeOpaqueFd = 1
        Handle is an opaque file descriptor

    cudaExternalMemoryHandleTypeOpaqueWin32 = 2
        Handle is an opaque shared NT handle

    cudaExternalMemoryHandleTypeOpaqueWin32Kmt = 3
        Handle is an opaque, globally shared handle

    cudaExternalMemoryHandleTypeD3D12Heap = 4
        Handle is a D3D12 heap object

    cudaExternalMemoryHandleTypeD3D12Resource = 5
        Handle is a D3D12 committed resource

    cudaExternalMemoryHandleTypeD3D11Resource = 6
        Handle is a shared NT handle to a D3D11 resource

    cudaExternalMemoryHandleTypeD3D11ResourceKmt = 7
        Handle is a globally shared handle to a D3D11 resource

    cudaExternalMemoryHandleTypeNvSciBuf = 8
        Handle is an NvSciBuf object
class cuda.bindings.runtime.cudaExternalSemaphoreHandleType(value)
    External semaphore handle types

    cudaExternalSemaphoreHandleTypeOpaqueFd = 1
        Handle is an opaque file descriptor

    cudaExternalSemaphoreHandleTypeOpaqueWin32 = 2
        Handle is an opaque shared NT handle

    cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = 3
        Handle is an opaque, globally shared handle

    cudaExternalSemaphoreHandleTypeD3D12Fence = 4
        Handle is a shared NT handle referencing a D3D12 fence object

    cudaExternalSemaphoreHandleTypeD3D11Fence = 5
        Handle is a shared NT handle referencing a D3D11 fence object

    cudaExternalSemaphoreHandleTypeNvSciSync = 6
        Opaque handle to NvSciSync Object

    cudaExternalSemaphoreHandleTypeKeyedMutex = 7
        Handle is a shared NT handle referencing a D3D11 keyed mutex object

    cudaExternalSemaphoreHandleTypeKeyedMutexKmt = 8
        Handle is a shared KMT handle referencing a D3D11 keyed mutex object

    cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd = 9
        Handle is an opaque handle file descriptor referencing a timeline semaphore

    cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 = 10
        Handle is an opaque shared NT handle referencing a timeline semaphore

class cuda.bindings.runtime.cudaCGScope(value)
    CUDA cooperative group scope

    cudaCGScopeInvalid = 0
        Invalid cooperative group scope

    cudaCGScopeGrid = 1
        Scope represented by a grid_group

    cudaCGScopeMultiGrid = 2
        Scope represented by a multi_grid_group
class cuda.bindings.runtime.cudaGraphConditionalHandleFlags(value)
    cudaGraphCondAssignDefault = 1
        Apply default handle value when graph is launched.

class cuda.bindings.runtime.cudaGraphConditionalNodeType(value)
    CUDA conditional node types

    cudaGraphCondTypeIf = 0
        Conditional ‘if’ node. Body executed once if condition value is non-zero.

    cudaGraphCondTypeWhile = 1
        Conditional ‘while’ node. Body executed repeatedly while condition value is non-zero.
class cuda.bindings.runtime.cudaGraphNodeType(value)
    CUDA Graph node types

    cudaGraphNodeTypeKernel = 0
        GPU kernel node

    cudaGraphNodeTypeMemcpy = 1
        Memcpy node

    cudaGraphNodeTypeMemset = 2
        Memset node

    cudaGraphNodeTypeHost = 3
        Host (executable) node

    cudaGraphNodeTypeGraph = 4
        Node which executes an embedded graph

    cudaGraphNodeTypeEmpty = 5
        Empty (no-op) node

    cudaGraphNodeTypeWaitEvent = 6
        External event wait node

    cudaGraphNodeTypeEventRecord = 7
        External event record node

    cudaGraphNodeTypeExtSemaphoreSignal = 8
        External semaphore signal node

    cudaGraphNodeTypeExtSemaphoreWait = 9
        External semaphore wait node

    cudaGraphNodeTypeMemAlloc = 10
        Memory allocation node

    cudaGraphNodeTypeMemFree = 11
        Memory free node

    cudaGraphNodeTypeConditional = 13
        Conditional node. May be used to implement a conditional execution path or loop inside of a graph. The graph(s) contained within the body of the conditional node can be selectively executed or iterated upon based on the value of a conditional variable.

        Handles must be created in advance of creating the node using cudaGraphConditionalHandleCreate.

        The following restrictions apply to graphs which contain conditional nodes:

        - The graph cannot be used in a child node.
        - Only one instantiation of the graph may exist at any point in time.
        - The graph cannot be cloned.

        To set the control value, supply a default value when creating the handle and/or call cudaGraphSetConditional from device code.

    cudaGraphNodeTypeCount = 14
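For illustration (not part of the generated reference): node types are reported by cudaGraphNodeGetType. A sketch that adds a single empty node (no dependencies, hence the empty list and zero count)::

    from cuda.bindings import runtime

    err, graph = runtime.cudaGraphCreate(0)
    assert err == runtime.cudaError_t.cudaSuccess

    err, node = runtime.cudaGraphAddEmptyNode(graph, [], 0)
    assert err == runtime.cudaError_t.cudaSuccess

    err, kind = runtime.cudaGraphNodeGetType(node)
    assert kind == runtime.cudaGraphNodeType.cudaGraphNodeTypeEmpty

    err, = runtime.cudaGraphDestroy(graph)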
class cuda.bindings.runtime.cudaGraphDependencyType(value)
    Type annotations that can be applied to graph edges as part of cudaGraphEdgeData.

    cudaGraphDependencyTypeDefault = 0
        This is an ordinary dependency.

    cudaGraphDependencyTypeProgrammatic = 1
        This dependency type allows the downstream node to use cudaGridDependencySynchronize(). It may only be used between kernel nodes, and must be used with either the cudaGraphKernelNodePortProgrammatic or cudaGraphKernelNodePortLaunchCompletion outgoing port.
class cuda.bindings.runtime.cudaGraphExecUpdateResult(value)
    CUDA Graph Update error types

    cudaGraphExecUpdateSuccess = 0
        The update succeeded

    cudaGraphExecUpdateError = 1
        The update failed for an unexpected reason which is described in the return value of the function

    cudaGraphExecUpdateErrorTopologyChanged = 2
        The update failed because the topology changed

    cudaGraphExecUpdateErrorNodeTypeChanged = 3
        The update failed because a node type changed

    cudaGraphExecUpdateErrorFunctionChanged = 4
        The update failed because the function of a kernel node changed (CUDA driver < 11.2)

    cudaGraphExecUpdateErrorParametersChanged = 5
        The update failed because the parameters changed in a way that is not supported

    cudaGraphExecUpdateErrorNotSupported = 6
        The update failed because something about the node is not supported

    cudaGraphExecUpdateErrorUnsupportedFunctionChange = 7
        The update failed because the function of a kernel node changed in an unsupported way

    cudaGraphExecUpdateErrorAttributesChanged = 8
        The update failed because the node attributes changed in a way that is not supported
class cuda.bindings.runtime.cudaGraphInstantiateResult(value)
    Graph instantiation results

    cudaGraphInstantiateSuccess = 0
        Instantiation succeeded

    cudaGraphInstantiateError = 1
        Instantiation failed for an unexpected reason which is described in the return value of the function

    cudaGraphInstantiateInvalidStructure = 2
        Instantiation failed due to invalid structure, such as cycles

    cudaGraphInstantiateNodeOperationNotSupported = 3
        Instantiation for device launch failed because the graph contained an unsupported operation

    cudaGraphInstantiateMultipleDevicesNotSupported = 4
        Instantiation for device launch failed due to the nodes belonging to different contexts
class cuda.bindings.runtime.cudaGraphKernelNodeField(value)
    Specifies the field to update when performing multiple node updates from the device

    cudaGraphKernelNodeFieldInvalid = 0
        Invalid field

    cudaGraphKernelNodeFieldGridDim = 1
        Grid dimension update

    cudaGraphKernelNodeFieldParam = 2
        Kernel parameter update

    cudaGraphKernelNodeFieldEnabled = 3
        Node enable/disable
class cuda.bindings.runtime.cudaGetDriverEntryPointFlags(value)
    Flags to specify search options to be used with cudaGetDriverEntryPoint. For more details see cuGetProcAddress

    cudaEnableDefault = 0
        Default search mode for driver symbols.

    cudaEnableLegacyStream = 1
        Search for legacy versions of driver symbols.

    cudaEnablePerThreadDefaultStream = 2
        Search for per-thread versions of driver symbols.

class cuda.bindings.runtime.cudaDriverEntryPointQueryResult(value)
    Enum for status from obtaining driver entry points, used with cudaApiGetDriverEntryPoint

    cudaDriverEntryPointSuccess = 0
        Search for the symbol found a match

    cudaDriverEntryPointSymbolNotFound = 1
        Search for the symbol found no match

    cudaDriverEntryPointVersionNotSufficent = 2
        Search for the symbol found a match, but its version was not sufficient
class cuda.bindings.runtime.cudaGraphDebugDotFlags(value)
    CUDA Graph debug write options

    cudaGraphDebugDotFlagsVerbose = 1
        Output all debug data as if every debug flag is enabled

    cudaGraphDebugDotFlagsKernelNodeParams = 4
        Adds cudaKernelNodeParams to output

    cudaGraphDebugDotFlagsMemcpyNodeParams = 8
        Adds cudaMemcpy3DParms to output

    cudaGraphDebugDotFlagsMemsetNodeParams = 16
        Adds cudaMemsetParams to output

    cudaGraphDebugDotFlagsHostNodeParams = 32
        Adds cudaHostNodeParams to output

    cudaGraphDebugDotFlagsEventNodeParams = 64
        Adds cudaEvent_t handle from record and wait nodes to output

    cudaGraphDebugDotFlagsExtSemasSignalNodeParams = 128
        Adds cudaExternalSemaphoreSignalNodeParams values to output

    cudaGraphDebugDotFlagsExtSemasWaitNodeParams = 256
        Adds cudaExternalSemaphoreWaitNodeParams to output

    cudaGraphDebugDotFlagsKernelNodeAttributes = 512
        Adds cudaKernelNodeAttrID values to output

    cudaGraphDebugDotFlagsHandles = 1024
        Adds node handles and every kernel function handle to output

    cudaGraphDebugDotFlagsConditionalNodeParams = 32768
        Adds cudaConditionalNodeParams to output
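For illustration (not part of the generated reference): the flags are OR-ed together and passed to cudaGraphDebugDotPrint, which writes a Graphviz file; the path argument is assumed to be bytes, as for other char* parameters in these bindings::

    from cuda.bindings import runtime

    err, graph = runtime.cudaGraphCreate(0)
    assert err == runtime.cudaError_t.cudaSuccess

    flags = (runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams
             | runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHandles)
    err, = runtime.cudaGraphDebugDotPrint(graph, b"graph.dot", flags)
    assert err == runtime.cudaError_t.cudaSuccess

    err, = runtime.cudaGraphDestroy(graph)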
class cuda.bindings.runtime.cudaGraphInstantiateFlags(value)
    Flags for instantiating a graph

    cudaGraphInstantiateFlagAutoFreeOnLaunch = 1
        Automatically free memory allocated in a graph before relaunching.

    cudaGraphInstantiateFlagUpload = 2
        Automatically upload the graph after instantiation. Only supported by cudaGraphInstantiateWithParams. The upload will be performed using the stream provided in instantiateParams.

    cudaGraphInstantiateFlagDeviceLaunch = 4
        Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with cudaGraphInstantiateFlagAutoFreeOnLaunch.

    cudaGraphInstantiateFlagUseNodePriority = 8
        Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into.
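For illustration (not part of the generated reference): a sketch instantiating and launching a (trivially empty) graph with auto-free-on-launch behavior::

    from cuda.bindings import runtime

    err, graph = runtime.cudaGraphCreate(0)
    err, graph_exec = runtime.cudaGraphInstantiateWithFlags(
        graph,
        runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch)
    assert err == runtime.cudaError_t.cudaSuccess

    err, stream = runtime.cudaStreamCreate()
    err, = runtime.cudaGraphLaunch(graph_exec, stream)
    err, = runtime.cudaStreamSynchronize(stream)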
class cuda.bindings.runtime.cudaLaunchMemSyncDomain(value)
    Memory Synchronization Domain. A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating the latency increase caused by memory barriers ordering unrelated traffic.

    By default, kernels are launched in domain 0. Kernels launched with cudaLaunchMemSyncDomainRemote will have a different domain ID. Users may also alter the domain ID with cudaLaunchMemSyncDomainMap for a specific stream, graph node, or kernel launch. See cudaLaunchAttributeMemSyncDomain, cudaStreamSetAttribute, cudaLaunchKernelEx, and cudaGraphKernelNodeSetAttribute.

    Memory operations done in kernels launched in different domains are considered system-scope distanced. In other words, a GPU-scoped memory synchronization is not sufficient for memory order to be observed by kernels in another memory synchronization domain, even if they are on the same GPU.

    cudaLaunchMemSyncDomainDefault = 0
        Launch kernels in the default domain

    cudaLaunchMemSyncDomainRemote = 1
        Launch kernels in the remote domain
class cuda.bindings.runtime.cudaLaunchAttributeID(value)
    Launch attributes enum; used as the id field of cudaLaunchAttribute

    cudaLaunchAttributeIgnore = 0
        Ignored entry, for convenient composition

    cudaLaunchAttributeAccessPolicyWindow = 1
        Valid for streams, graph nodes, launches. See accessPolicyWindow.

    cudaLaunchAttributeCooperative = 2
        Valid for graph nodes, launches. See cooperative.

    cudaLaunchAttributeSynchronizationPolicy = 3
        Valid for streams. See syncPolicy.

    cudaLaunchAttributeClusterDimension = 4
        Valid for graph nodes, launches. See clusterDim.

    cudaLaunchAttributeClusterSchedulingPolicyPreference = 5
        Valid for graph nodes, launches. See clusterSchedulingPolicyPreference.

    cudaLaunchAttributeProgrammaticStreamSerialization = 6
        Valid for launches. Setting programmaticStreamSerializationAllowed to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid’s execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions).

    cudaLaunchAttributeProgrammaticEvent = 7
        Valid for launches. Set programmaticEvent to record the event. An event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event programmatically in a future CUDA release. A trigger can also be inserted at the beginning of each block’s execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling cudaEventSynchronize()) are not guaranteed to observe the release precisely when it is released. For example, cudaEventSynchronize() may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks.

        The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the cudaEventDisableTiming flag set).

    cudaLaunchAttributePriority = 8
        Valid for streams, graph nodes, launches. See priority.

    cudaLaunchAttributeMemSyncDomainMap = 9
        Valid for streams, graph nodes, launches. See memSyncDomainMap.

    cudaLaunchAttributeMemSyncDomain = 10
        Valid for streams, graph nodes, launches. See memSyncDomain.

    cudaLaunchAttributeLaunchCompletionEvent = 12
        Valid for launches. Set launchCompletionEvent to record the event.

        Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock.

        A launch completion event is nominally similar to a programmatic event with triggerAtBlockStart set, except that it is not visible to cudaGridDependencySynchronize() and can be used with compute capability less than 9.0.

        The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the cudaEventDisableTiming flag set).

    cudaLaunchAttributeDeviceUpdatableKernelNode = 13
        Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error.

        cudaLaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via cudaLaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node’s kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see cudaGraphKernelNodeUpdatesApply.

        Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via cudaGraphDestroyNode. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via cudaGraphKernelNodeCopyAttributes. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to cudaGraphExecUpdate.

        If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with cuGraphUpload before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again.

    cudaLaunchAttributePreferredSharedMemoryCarveout = 14
        Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting sharedMemCarveout to a percentage between 0 and 100 sets the shared memory carveout preference, in percent of the total shared memory for that kernel launch. This attribute takes precedence over cudaFuncAttributePreferredSharedMemoryCarveout. This is only a hint, and the driver can choose a different configuration if required for the launch.
class cuda.bindings.runtime.cudaDeviceNumaConfig(value)
    CUDA device NUMA config

    cudaDeviceNumaConfigNone = 0
        The GPU is not a NUMA node

    cudaDeviceNumaConfigNumaNode = 1
        The GPU is a NUMA node, cudaDevAttrNumaId contains its NUMA ID

class cuda.bindings.runtime.cudaAsyncNotificationType(value)
    Types of async notification that can occur

    cudaAsyncNotificationTypeOverBudget = 1
class cuda.bindings.runtime.cudaSurfaceBoundaryMode(value)
    CUDA Surface boundary modes

    cudaBoundaryModeZero = 0
        Zero boundary mode

    cudaBoundaryModeClamp = 1
        Clamp boundary mode

    cudaBoundaryModeTrap = 2
        Trap boundary mode

class cuda.bindings.runtime.cudaSurfaceFormatMode(value)
    CUDA Surface format modes

    cudaFormatModeForced = 0
        Forced format mode

    cudaFormatModeAuto = 1
        Auto format mode
class cuda.bindings.runtime.cudaTextureAddressMode(value)
    CUDA texture address modes

    cudaAddressModeWrap = 0
        Wrapping address mode

    cudaAddressModeClamp = 1
        Clamp to edge address mode

    cudaAddressModeMirror = 2
        Mirror address mode

    cudaAddressModeBorder = 3
        Border address mode

class cuda.bindings.runtime.cudaTextureFilterMode(value)
    CUDA texture filter modes

    cudaFilterModePoint = 0
        Point filter mode

    cudaFilterModeLinear = 1
        Linear filter mode

class cuda.bindings.runtime.cudaTextureReadMode(value)
    CUDA texture read modes

    cudaReadModeElementType = 0
        Read texture as specified element type

    cudaReadModeNormalizedFloat = 1
        Read texture as normalized float
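For illustration (not part of the generated reference): the three texture enums land in a cudaTextureDesc, mirroring the pattern used in the bundled examples::

    from cuda.bindings import runtime

    tex_desc = runtime.cudaTextureDesc()
    tex_desc.addressMode[0] = runtime.cudaTextureAddressMode.cudaAddressModeClamp
    tex_desc.addressMode[1] = runtime.cudaTextureAddressMode.cudaAddressModeClamp
    tex_desc.filterMode = runtime.cudaTextureFilterMode.cudaFilterModeLinear
    tex_desc.readMode = runtime.cudaTextureReadMode.cudaReadModeNormalizedFloat
    tex_desc.normalizedCoords = 1
    # tex_desc would then go to cudaCreateTextureObject together with a
    # resource descriptor for the backing CUDA array.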
class cuda.bindings.runtime.cudaEglPlaneDesc
    CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame

    width
        Width of plane
        Type: unsigned int

    height
        Height of plane
        Type: unsigned int

    depth
        Depth of plane
        Type: unsigned int

    pitch
        Pitch of plane
        Type: unsigned int

    numChannels
        Number of channels for the plane
        Type: unsigned int

    channelDesc
        Channel Format Descriptor
        Type: cudaChannelFormatDesc

    reserved
        Reserved for future use
        Type: List[unsigned int]

    getPtr()
        Get memory address of class instance
class cuda.bindings.runtime.cudaEglFrame
    CUDA EGLFrame Descriptor - structure defining one frame of EGL. Each frame may contain one or more planes depending on whether the surface is multiplanar or not. Each plane of an EGLFrame is represented by cudaEglPlaneDesc, which is defined as:

        typedef struct cudaEglPlaneDesc_st {
            unsigned int width;
            unsigned int height;
            unsigned int depth;
            unsigned int pitch;
            unsigned int numChannels;
            struct cudaChannelFormatDesc channelDesc;
            unsigned int reserved[4];
        } cudaEglPlaneDesc;

    frame
        Type: anon_union10

    planeDesc
        CUDA EGL Plane Descriptor cudaEglPlaneDesc
        Type: List[cudaEglPlaneDesc]

    planeCount
        Number of planes
        Type: unsigned int

    frameType
        Array or Pitch
        Type: cudaEglFrameType

    eglColorFormat
        CUDA EGL Color Format
        Type: cudaEglColorFormat

    getPtr()
        Get memory address of class instance
class cuda.bindings.runtime.cudaEglStreamConnection
    CUDA EGLStream Connection

    getPtr()
        Get memory address of class instance
    +class cuda.bindings.runtime.cudaArray_t(*args, **kwargs)#
    +

    CUDA array

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaArray_const_t(*args, **kwargs)#
    +

    CUDA array (as source copy argument)

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMipmappedArray_t(*args, **kwargs)#
    +

    CUDA mipmapped array

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMipmappedArray_const_t(*args, **kwargs)#
    +

    CUDA mipmapped array (as source argument)

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaHostFn_t(*args, **kwargs)#
    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.CUuuid#
    +
    +
    +bytes#
    +

    < CUDA definition of UUID

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaUUID_t#
    +
    +
    +bytes#
    +

    < CUDA definition of UUID

    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaIpcEventHandle_t#
    +

    CUDA IPC event handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaIpcMemHandle_t#
    +

    CUDA IPC memory handle

    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaMemFabricHandle_t#
    +
    +
    +reserved#
    +
    +
    Type:
    +

    bytes

    +
    +
    +
    + +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaStream_t#
    +

    CUDA stream

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
    +class cuda.bindings.runtime.cudaEvent_t#
    +

    CUDA event types

    +
    +
    +getPtr()#
    +

    Get memory address of class instance

    +
    + +
    + +
    +
class cuda.bindings.runtime.cudaGraphicsResource_t(*args, **kwargs)

    CUDA graphics resource types

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalMemory_t(*args, **kwargs)

    CUDA external memory

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaExternalSemaphore_t(*args, **kwargs)

    CUDA external semaphore

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaGraph_t

    CUDA graph

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaGraphNode_t

    CUDA graph node

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaUserObject_t

    CUDA user object for graphs

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaGraphConditionalHandle

    CUDA handle for conditional graph nodes

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaFunction_t

    CUDA function

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaKernel_t(*args, **kwargs)

    CUDA kernel

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaMemPool_t

    CUDA memory pool

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaGraphEdgeData

    Optional annotation for edges in a CUDA graph. Note, all edges implicitly
    have annotations and default to a zero-initialized value if not specified.
    A zero-initialized struct indicates a standard full serialization of two
    nodes with memory visibility.

    from_port

        This indicates when the dependency is triggered from the upstream
        node on the edge. The meaning is specific to the node type. A value
        of 0 in all cases means full completion of the upstream node, with
        memory visibility to the downstream node or portion thereof
        (indicated by to_port). Only kernel nodes define non-zero ports. A
        kernel node can use the following output port types:
        cudaGraphKernelNodePortDefault, cudaGraphKernelNodePortProgrammatic,
        or cudaGraphKernelNodePortLaunchCompletion.

        Type: bytes

    to_port

        This indicates what portion of the downstream node is dependent on
        the upstream node or portion thereof (indicated by from_port). The
        meaning is specific to the node type. A value of 0 in all cases means
        the entirety of the downstream node is dependent on the upstream
        work. Currently no node types define non-zero ports. Accordingly,
        this field must be set to zero.

        Type: bytes

    type

        This should be populated with a value from ::cudaGraphDependencyType.
        (It is typed as char due to compiler-specific layout of bitfields.)
        See ::cudaGraphDependencyType.

        Type: bytes

    reserved

        These bytes are unused and must be zeroed. This ensures compatibility
        if additional fields are added in the future.

        Type: bytes

    getPtr()

        Get memory address of class instance

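A minimal sketch of populating this annotation for a programmatic dependency; the cudaGraphDependencyType member spelling is an assumption based on the C enum, and the resulting edge data would then be passed to a graph API that accepts edge annotations.

    from cuda.bindings import runtime

    edge = runtime.cudaGraphEdgeData()   # zero-initialized: full serialization
    edge.from_port = runtime.cudaGraphKernelNodePortProgrammatic
    edge.to_port = 0                     # no node type defines non-zero to_port
    edge.type = runtime.cudaGraphDependencyType.cudaGraphDependencyTypeProgrammatic
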
class cuda.bindings.runtime.cudaGraphExec_t

    CUDA executable (launchable) graph

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaGraphInstantiateParams

    Graph instantiation parameters

    flags

        Instantiation flags

        Type: unsigned long long

    uploadStream

        Upload stream

        Type: cudaStream_t

    errNode_out

        The node which caused instantiation to fail, if any

        Type: cudaGraphNode_t

    result_out

        Whether instantiation was successful; if it failed, the reason why

        Type: cudaGraphInstantiateResult

    getPtr()

        Get memory address of class instance

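The sketch below instantiates a graph with these parameters and checks the failure outputs. It is a sketch only: cudaGraphInstantiateWithParams returning (err, graphExec) and the cudaGraphInstantiateFlagUpload flag name are assumptions based on the C API.

    from cuda.bindings import runtime

    err, graph = runtime.cudaGraphCreate(0)
    err, stream = runtime.cudaStreamCreate()

    params = runtime.cudaGraphInstantiateParams()
    params.flags = runtime.cudaGraphInstantiateFlagUpload   # assumed flag name
    params.uploadStream = stream        # upload as part of instantiation

    err, graph_exec = runtime.cudaGraphInstantiateWithParams(graph, params)
    if params.result_out != runtime.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess:
        print("instantiation failed at node:", params.errNode_out)
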
class cuda.bindings.runtime.cudaGraphExecUpdateResultInfo

    Result information returned by cudaGraphExecUpdate

    result

        Gives more specific detail when a CUDA graph update fails.

        Type: cudaGraphExecUpdateResult

    errorNode

        The "to node" of the error edge when the topologies do not match. The
        error node when the error is associated with a specific node. NULL
        when the error is generic.

        Type: cudaGraphNode_t

    errorFromNode

        The "from node" of the error edge when the topologies do not match;
        otherwise NULL.

        Type: cudaGraphNode_t

    getPtr()

        Get memory address of class instance

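A small sketch of reading this result structure after an in-place update, assuming cudaGraphExecUpdate returns (err, resultInfo) in these bindings:

    from cuda.bindings import runtime

    def try_exec_update(graph_exec, modified_graph):
        # graph_exec:     cudaGraphExec_t from an earlier instantiation
        # modified_graph: cudaGraph_t whose topology must match graph_exec
        err, info = runtime.cudaGraphExecUpdate(graph_exec, modified_graph)
        if err != runtime.cudaError_t.cudaSuccess:
            print("update failed:", info.result, "error node:", info.errorNode)
        return err
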
class cuda.bindings.runtime.cudaGraphDeviceNode_t(*args, **kwargs)

    CUDA device node handle for device-side node update

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaLaunchMemSyncDomainMap

    Memory synchronization domain map. See cudaLaunchMemSyncDomain. By
    default, kernels are launched in domain 0. Kernels launched with
    cudaLaunchMemSyncDomainRemote will have a different domain ID. Users may
    also alter the domain ID with ::cudaLaunchMemSyncDomainMap for a specific
    stream / graph node / kernel launch. See
    cudaLaunchAttributeMemSyncDomainMap. The domain ID range is available
    through cudaDevAttrMemSyncDomainCount.

    default_

        The default domain ID to use for designated kernels

        Type: bytes

    remote

        The remote domain ID to use for designated kernels

        Type: bytes

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaLaunchAttributeValue(void_ptr _ptr=0)

    Launch attributes union; used as value field of ::cudaLaunchAttribute

    pad

        Type: bytes

    accessPolicyWindow

        Value of launch attribute cudaLaunchAttributeAccessPolicyWindow.

        Type: cudaAccessPolicyWindow

    cooperative

        Value of launch attribute cudaLaunchAttributeCooperative. Nonzero
        indicates a cooperative kernel (see cudaLaunchCooperativeKernel).

        Type: int

    syncPolicy

        Value of launch attribute cudaLaunchAttributeSynchronizationPolicy.
        ::cudaSynchronizationPolicy for work queued up in this stream.

        Type: cudaSynchronizationPolicy

    clusterDim

        Value of launch attribute cudaLaunchAttributeClusterDimension that
        represents the desired cluster dimensions for the kernel. Opaque type
        with the following fields:

            - x: The X dimension of the cluster, in blocks. Must be a divisor
              of the grid X dimension.
            - y: The Y dimension of the cluster, in blocks. Must be a divisor
              of the grid Y dimension.
            - z: The Z dimension of the cluster, in blocks. Must be a divisor
              of the grid Z dimension.

        Type: anon_struct20

    clusterSchedulingPolicyPreference

        Value of launch attribute
        cudaLaunchAttributeClusterSchedulingPolicyPreference. Cluster
        scheduling policy preference for the kernel.

        Type: cudaClusterSchedulingPolicy

    programmaticStreamSerializationAllowed

        Value of launch attribute
        cudaLaunchAttributeProgrammaticStreamSerialization.

        Type: int

    programmaticEvent

        Value of launch attribute cudaLaunchAttributeProgrammaticEvent with
        the following fields:

            - cudaEvent_t event: Event to fire when all blocks trigger it.
            - int flags: Event record flags, see cudaEventRecordWithFlags.
              Does not accept cudaEventRecordExternal.
            - int triggerAtBlockStart: If this is set to non-0, each block
              launch will automatically trigger the event.

        Type: anon_struct21

    priority

        Value of launch attribute cudaLaunchAttributePriority. Execution
        priority of the kernel.

        Type: int

    memSyncDomainMap

        Value of launch attribute cudaLaunchAttributeMemSyncDomainMap. See
        ::cudaLaunchMemSyncDomainMap.

        Type: cudaLaunchMemSyncDomainMap

    memSyncDomain

        Value of launch attribute cudaLaunchAttributeMemSyncDomain. See
        cudaLaunchMemSyncDomain.

        Type: cudaLaunchMemSyncDomain

    launchCompletionEvent

        Value of launch attribute cudaLaunchAttributeLaunchCompletionEvent
        with the following fields:

            - cudaEvent_t event: Event to fire when the last block launches.
            - int flags: Event record flags, see cudaEventRecordWithFlags.
              Does not accept cudaEventRecordExternal.

        Type: anon_struct22

    deviceUpdatableKernelNode

        Value of launch attribute
        cudaLaunchAttributeDeviceUpdatableKernelNode with the following
        fields:

            - int deviceUpdatable: Whether or not the resulting kernel node
              should be device-updatable.
            - cudaGraphDeviceNode_t devNode: Returns a handle to pass to the
              various device-side update functions.

        Type: anon_struct23

    sharedMemCarveout

        Value of launch attribute
        cudaLaunchAttributePreferredSharedMemoryCarveout.

        Type: unsigned int

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaLaunchAttribute

    Launch attribute

    id

        Attribute to set

        Type: cudaLaunchAttributeID

    val

        Value of the attribute

        Type: cudaLaunchAttributeValue

    getPtr()

        Get memory address of class instance

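For illustration, the sketch below fills in one attribute; the cudaLaunchAttributeID enum spelling is an assumption based on the attribute names documented above, and the attribute would then be supplied to an attribute-aware launch, stream, or graph-node API.

    from cuda.bindings import runtime

    attr = runtime.cudaLaunchAttribute()
    attr.id = runtime.cudaLaunchAttributeID.cudaLaunchAttributePriority  # assumed enum name
    attr.val.priority = 1   # valid priority range is device-dependent
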
class cuda.bindings.runtime.cudaAsyncCallbackHandle_t(*args, **kwargs)

    CUDA async callback handle

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaAsyncNotificationInfo_t

    Information describing an async notification event

    type

        Type: cudaAsyncNotificationType

    info

        Type: anon_union9

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaAsyncCallback(*args, **kwargs)

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaSurfaceObject_t

    An opaque value that represents a CUDA surface object

    getPtr()

        Get memory address of class instance

class cuda.bindings.runtime.cudaTextureObject_t

    An opaque value that represents a CUDA texture object

    getPtr()

        Get memory address of class instance

runtime.CUDA_EGL_MAX_PLANES = 3

    Maximum number of planes per frame

runtime.cudaHostAllocDefault = 0

    Default page-locked allocation flag

runtime.cudaHostAllocPortable = 1

    Pinned memory accessible by all CUDA contexts

runtime.cudaHostAllocMapped = 2

    Map allocation into device space

runtime.cudaHostAllocWriteCombined = 4

    Write-combined memory

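These flags are bit values and are meant to be OR-ed together. A minimal sketch, assuming cudaHostAlloc(size, flags) returns (err, ptr) in these bindings:

    from cuda.bindings import runtime

    flags = runtime.cudaHostAllocPortable | runtime.cudaHostAllocMapped
    err, host_ptr = runtime.cudaHostAlloc(4096, flags)   # pinned, portable, mapped
    assert err == runtime.cudaError_t.cudaSuccess
    err, = runtime.cudaFreeHost(host_ptr)
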
runtime.cudaHostRegisterDefault = 0

    Default host memory registration flag

runtime.cudaHostRegisterPortable = 1

    Pinned memory accessible by all CUDA contexts

runtime.cudaHostRegisterMapped = 2

    Map registered memory into device space

runtime.cudaHostRegisterIoMemory = 4

    Memory-mapped I/O space

runtime.cudaHostRegisterReadOnly = 8

    Memory-mapped read-only

runtime.cudaPeerAccessDefault = 0

    Default peer addressing enable flag

runtime.cudaStreamDefault = 0

    Default stream flag

runtime.cudaStreamNonBlocking = 1

    Stream does not synchronize with stream 0 (the NULL stream)

runtime.cudaStreamLegacy = 1

    Legacy stream handle

    Stream handle that can be passed as a cudaStream_t to use an implicit
    stream with legacy synchronization behavior.

    See the CUDA documentation on stream synchronization behavior for
    details.

runtime.cudaStreamPerThread = 2

    Per-thread stream handle

    Stream handle that can be passed as a cudaStream_t to use an implicit
    stream with per-thread synchronization behavior.

    See the CUDA documentation on stream synchronization behavior for
    details.

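A short sketch of these flags and special handles in use; constructing a cudaStream_t directly from the cudaStreamLegacy/cudaStreamPerThread constants is an assumption about the binding's handle constructor.

    from cuda.bindings import runtime

    # A stream that does not implicitly synchronize with the NULL stream.
    err, stream = runtime.cudaStreamCreateWithFlags(runtime.cudaStreamNonBlocking)

    # The special constants stand in for implicit streams (assumed constructor).
    legacy_stream = runtime.cudaStream_t(runtime.cudaStreamLegacy)
    per_thread_stream = runtime.cudaStream_t(runtime.cudaStreamPerThread)
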
runtime.cudaEventDefault = 0

    Default event flag

runtime.cudaEventBlockingSync = 1

    Event uses blocking synchronization

runtime.cudaEventDisableTiming = 2

    Event will not record timing data

runtime.cudaEventInterprocess = 4

    Event is suitable for interprocess use. cudaEventDisableTiming must be
    set

runtime.cudaEventRecordDefault = 0

    Default event record flag

runtime.cudaEventRecordExternal = 1

    Event is captured in the graph as an external event node when performing
    stream capture

runtime.cudaEventWaitDefault = 0

    Default event wait flag

runtime.cudaEventWaitExternal = 1

    Event is captured in the graph as an external event node when performing
    stream capture

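For example, an event intended for interprocess use must also disable timing, per the note on cudaEventInterprocess above:

    from cuda.bindings import runtime

    flags = runtime.cudaEventDisableTiming | runtime.cudaEventInterprocess
    err, event = runtime.cudaEventCreateWithFlags(flags)
    assert err == runtime.cudaError_t.cudaSuccess
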
runtime.cudaDeviceScheduleAuto = 0

    Device flag - Automatic scheduling

runtime.cudaDeviceScheduleSpin = 1

    Device flag - Spin default scheduling

runtime.cudaDeviceScheduleYield = 2

    Device flag - Yield default scheduling

runtime.cudaDeviceScheduleBlockingSync = 4

    Device flag - Use blocking synchronization

runtime.cudaDeviceBlockingSync = 4

    Device flag - Use blocking synchronization [Deprecated]

runtime.cudaDeviceScheduleMask = 7

    Device schedule flags mask

runtime.cudaDeviceMapHost = 8

    Device flag - Support mapped pinned allocations

runtime.cudaDeviceLmemResizeToMax = 16

    Device flag - Keep local memory allocation after launch

runtime.cudaDeviceSyncMemops = 128

    Device flag - Ensure synchronous memory operations on this context will
    synchronize

runtime.cudaDeviceMask = 255

    Device flags mask

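Device flags are likewise OR-ed together and applied before the context is created, e.g.:

    from cuda.bindings import runtime

    # Block the calling CPU thread on synchronization and allow mapped pinned
    # allocations (needed for cudaHostAllocMapped memory to be usable).
    err, = runtime.cudaSetDeviceFlags(
        runtime.cudaDeviceScheduleBlockingSync | runtime.cudaDeviceMapHost)
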
runtime.cudaArrayDefault = 0

    Default CUDA array allocation flag

runtime.cudaArrayLayered = 1

    Must be set in cudaMalloc3DArray to create a layered CUDA array

runtime.cudaArraySurfaceLoadStore = 2

    Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind
    surfaces to the CUDA array

runtime.cudaArrayCubemap = 4

    Must be set in cudaMalloc3DArray to create a cubemap CUDA array

runtime.cudaArrayTextureGather = 8

    Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform
    texture gather operations on the CUDA array

runtime.cudaArrayColorAttachment = 32

    Must be set in cudaExternalMemoryGetMappedMipmappedArray if the mipmapped
    array is used as a color target in a graphics API

runtime.cudaArraySparse = 64

    Must be set in cudaMallocArray, cudaMalloc3DArray or
    cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA
    mipmapped array

runtime.cudaArrayDeferredMapping = 128

    Must be set in cudaMallocArray, cudaMalloc3DArray or
    cudaMallocMipmappedArray in order to create a deferred mapping CUDA array
    or CUDA mipmapped array

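A hedged sketch of these flags in use: allocating a 2D float array that surfaces can be bound to. The cudaCreateChannelDesc and cudaMallocArray argument orders shown are assumptions based on the shapes of the C API.

    from cuda.bindings import runtime

    err, desc = runtime.cudaCreateChannelDesc(
        32, 0, 0, 0, runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat)
    err, array = runtime.cudaMallocArray(
        desc, 256, 256, runtime.cudaArraySurfaceLoadStore)
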
runtime.cudaIpcMemLazyEnablePeerAccess = 1

    Automatically enable peer access between remote devices as needed

runtime.cudaMemAttachGlobal = 1

    Memory can be accessed by any stream on any device

runtime.cudaMemAttachHost = 2

    Memory cannot be accessed by any stream on any device

runtime.cudaMemAttachSingle = 4

    Memory can only be accessed by a single stream on the associated device

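For instance, a managed allocation can start out attached to the host; a minimal sketch, assuming cudaMallocManaged(size, flags) returns (err, ptr):

    from cuda.bindings import runtime

    # Initially host-attached: device access is deferred until the allocation
    # is re-attached or allocated with cudaMemAttachGlobal instead.
    err, managed_ptr = runtime.cudaMallocManaged(1 << 20, runtime.cudaMemAttachHost)
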
runtime.cudaOccupancyDefault = 0

    Default behavior

runtime.cudaOccupancyDisableCachingOverride = 1

    Assume global caching is enabled and cannot be automatically turned off

runtime.cudaCpuDeviceId = -1

    Device id that represents the CPU

runtime.cudaInvalidDeviceId = -2

    Device id that represents an invalid device

runtime.cudaInitDeviceFlagsAreValid = 1

    Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice
    call

runtime.cudaCooperativeLaunchMultiDeviceNoPreSync = 1

    If set, each kernel launched as part of
    cudaLaunchCooperativeKernelMultiDevice only waits for prior work in the
    stream corresponding to that GPU to complete before the kernel begins
    execution.

runtime.cudaCooperativeLaunchMultiDeviceNoPostSync = 2

    If set, any subsequent work pushed in a stream that participated in a
    call to cudaLaunchCooperativeKernelMultiDevice will only wait for the
    kernel launched on the GPU corresponding to that stream to complete
    before it begins execution.

runtime.cudaArraySparsePropertiesSingleMipTail = 1

    Indicates that the layered sparse CUDA array or CUDA mipmapped array has
    a single mip tail region for all layers

runtime.CUDA_IPC_HANDLE_SIZE = 64

    CUDA IPC Handle Size

runtime.cudaExternalMemoryDedicated = 1

    Indicates that the external memory object is a dedicated resource

runtime.cudaExternalSemaphoreSignalSkipNvSciBufMemSync = 1

    When the flags parameter of cudaExternalSemaphoreSignalParams contains
    this flag, it indicates that signaling an external semaphore object
    should skip performing appropriate memory synchronization operations over
    all the external memory objects that are imported as
    cudaExternalMemoryHandleTypeNvSciBuf, which otherwise are performed by
    default to ensure data coherency with other importers of the same
    NvSciBuf memory objects.

runtime.cudaExternalSemaphoreWaitSkipNvSciBufMemSync = 2

    When the flags parameter of cudaExternalSemaphoreWaitParams contains this
    flag, it indicates that waiting on an external semaphore object should
    skip performing appropriate memory synchronization operations over all
    the external memory objects that are imported as
    cudaExternalMemoryHandleTypeNvSciBuf, which otherwise are performed by
    default to ensure data coherency with other importers of the same
    NvSciBuf memory objects.

runtime.cudaNvSciSyncAttrSignal = 1

    When the flags parameter of cudaDeviceGetNvSciSyncAttributes is set to
    this, it indicates that the application needs a signaler-specific
    NvSciSyncAttr to be filled by cudaDeviceGetNvSciSyncAttributes.

runtime.cudaNvSciSyncAttrWait = 2

    When the flags parameter of cudaDeviceGetNvSciSyncAttributes is set to
    this, it indicates that the application needs a waiter-specific
    NvSciSyncAttr to be filled by cudaDeviceGetNvSciSyncAttributes.

runtime.cudaGraphKernelNodePortDefault = 0

    This port activates when the kernel has finished executing.

runtime.cudaGraphKernelNodePortProgrammatic = 1

    This port activates when all blocks of the kernel have performed
    cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be
    used with edge type cudaGraphDependencyTypeProgrammatic. See also
    cudaLaunchAttributeProgrammaticEvent.

runtime.cudaGraphKernelNodePortLaunchCompletion = 2

    This port activates when all blocks of the kernel have begun execution.
    See also cudaLaunchAttributeLaunchCompletionEvent.

runtime.cudaStreamAttrID = <enum 'cudaStreamAttrID'>

runtime.cudaStreamAttributeAccessPolicyWindow = 1

runtime.cudaStreamAttributeSynchronizationPolicy = 3

runtime.cudaStreamAttributeMemSyncDomainMap = 9

runtime.cudaStreamAttributeMemSyncDomain = 10

runtime.cudaStreamAttributePriority = 8

runtime.cudaStreamAttrValue = <class 'cuda.bindings.runtime.cudaStreamAttrValue'>

runtime.cudaKernelNodeAttrID = <enum 'cudaKernelNodeAttrID'>

runtime.cudaKernelNodeAttributeAccessPolicyWindow = 1

runtime.cudaKernelNodeAttributeCooperative = 2

runtime.cudaKernelNodeAttributePriority = 8

runtime.cudaKernelNodeAttributeClusterDimension = 4

runtime.cudaKernelNodeAttributeClusterSchedulingPolicyPreference = 5

runtime.cudaKernelNodeAttributeMemSyncDomainMap = 9

runtime.cudaKernelNodeAttributeMemSyncDomain = 10

runtime.cudaKernelNodeAttributePreferredSharedMemoryCarveout = 14

runtime.cudaKernelNodeAttributeDeviceUpdatableKernelNode = 13

runtime.cudaKernelNodeAttrValue = <class 'cuda.bindings.runtime.cudaKernelNodeAttrValue'>

runtime.cudaSurfaceType1D = 1

runtime.cudaSurfaceType2D = 2

runtime.cudaSurfaceType3D = 3

runtime.cudaSurfaceTypeCubemap = 12

runtime.cudaSurfaceType1DLayered = 241

runtime.cudaSurfaceType2DLayered = 242

runtime.cudaSurfaceTypeCubemapLayered = 252

runtime.cudaTextureType1D = 1

runtime.cudaTextureType2D = 2

runtime.cudaTextureType3D = 3

runtime.cudaTextureTypeCubemap = 12

runtime.cudaTextureType1DLayered = 241

runtime.cudaTextureType2DLayered = 242

runtime.cudaTextureTypeCubemapLayered = 252

\ No newline at end of file

diff --git a/docs/motivation.html b/docs/motivation.html
index 03a59845..1e2b21d4 100644
--- a/docs/motivation.html
+++ b/docs/motivation.html
@@ -7,7 +7,7 @@
-    Motivation - CUDA Python 12.6.0 documentation
+    Motivation - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
[version string updated: 12.6.0 -> 12.6.1]
@@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +169,7 @@
[navigation list (Code of Conduct, Contributing, Release Notes, CUDA Python API Reference) regenerated; one entry added]

diff --git a/docs/objects.inv b/docs/objects.inv
index 3ee75cb99a1be9e40939622ecf448630929766ca..531e6bbe86fd57673a14e8c4473fb590baa8657a 100644
GIT binary patch
literal 31172
[binary Sphinx inventory data omitted]
zX3qfw+Tf{**ZUN};)i`006;B>g(l!fca=!Ie<`_{1s&kRl!Hy1C=8#sMQLRm;rer6 zP#9Q2!x0KgyFAQalg1sX8fOeh zUjQ%Q%97%s=r9w4RT860a_=2bZn=Gjl`?E-IjFYD!cms2)0Yh7=5oND4K0VL^_>mL zkkKxS5RNHY<|eqL4K8^PFNyeA8~m|0_+v#$T$S(PB@u$}!EGf<;so39k_f@@0gM^G zJHC!kPgZ~mV#FG7!3J9Noq;Hd6a5FgBt|g!i*erz{aWC+9z|3D7gt9N=o;$?B2L}d zTmNH)GYURe=@}B6cT^oUGzU2KkpYl{IN&@3fDU0A3hW@MDJ~8#((GjhxR#|DkgMZ| zl902DFF}rmGagz%oM>Wd-O ztoF_@8#o<^9gu<9OXG9wbGqh?GU=cC@K8AKy$PePhn-)-0=ZZ!l1}8fNY?A`N&lf- z0m?Fw_h=E7I9$aAmS-<%Rz2^HCxkiZ4t$-k3v*ow6W!P@D{o&_Ih0`>pu4OJJJ|}= z8yFkW5ULtL&h@$&fW+|!O#j5`?XrOF@}@Y*^f#rUY2O<_|0JSKQJns|JJol9{6h)= z%7dng259v~F#CU(8LJPjFz>1jk?>ipv-2?+hg1_b)(z$iIfGLPG%5p?RGh zI9W91p@zNzh#Sz=y#Pq2@vwf1&&ig2jqPrXW&eDsCy+1&Tybj{KHIL7B?IOk3+tEm zU>(yG2lh)-9^EbHoQIVCz_O~;&>+k!Z_$W%;CBotf!*ZQ-7fI z;OOM7z`o_>4I}WyAXysxIEggh1nZEZys;wNjGf6ezUrkrUf?pt^{n{boZC|;4%}}J z2=l=Mh94LD^ku+Lihukx;Kzk8KW6x8Ib^jzf7I|&dGVj>QHXqdVo}YDrNcoL7uc^pzxuvTe2l^tQgpRmL`433znC&+)RM%Qab-wWq*OLm+ z@uPWOY}82sx_TWyI=t$mOM;pb5d!2{TfWWic>STv4WZA`-!;5X9xJ>CasAQZr?hGE&I;vxLtDAKf%*>iZqkhm3BxLO2Ny9v zTp$+eHVtu8-t4M91t?PK3Z8JRZIIqQ<=iw+*;3UE+pA~1O}gct{(77iyF(xWS#2wN zK+?l4E`=tZK})_y7b^(dNIb;`+`_NfEZv5@Ay8nItU(-t2W53XAIhrMypyDSWd#N# zo`K$#aS0tUb$P5_BWi$>EtZ$<)2hBTXqTs|hYI0_wMZ8STd(#zKee+0sn;oox?VSF zRXNCD=meHkYAh-syidsnEMiT9We%!o zRkHo(Ns+E_^=SXu&EB}TN(hv3Fqk^NCRxVTW3fyw>mA&_t#-<|d8VojVvL=$evsAb zCmHFTjPzd$(w|jK8AbyHqx+}hb)J+|)$IQ1{NdKnETKxRsZT=C+;Gw@Lq?Q|ycXhA zjIF->0-68#1u}oXA3~_wdgKeY)$Q+JZwHrX3@-7@uoC|mx>5fax=sJk-K0^r8^G4o zQcgzU?XQs<_><8uk!<`LslV)d?|zNcU-rFUeu>n7{1U0UXV42`|M_dA4yRH>`V5^i zjb9-1%g{3a7`|!$7`|mO-b4G@t3z)1-mAiM3~E8&g{#B*L7$oDJN8+y)Q5!X>df+$ z@qeFYFDg0$(pKH1kA86ou5nI4(S`yi1yUQIeG~k{F;7GOZbxF@N6W`_H=PD#;23_+T{A7`qZ24t2A+O zzAs6+h!}xxAmU)5m0Ve6C~hTJp66b^lMlb^ffo$J-tHC11?#sA?5$?+k0Yi($5aQ} zpFL3WIjB0#MuEgr06RXunvl4uA(MH-a8;=MwR!n_psq;Ie+7-8=_?XJ+bf+E_mcpY z@=&<`tN61#nRBw2G-FMw27tPE!>uG6^NC2l)>AThXJ$t&V3uq}8q0&?P{N~~%>G*4 zJLXP!ootiUnFZ*iiM6O-T?QbeN>}UfJ{%ocIRjR*O}?k=w0xf>|C<+@MWmS?5w+V$ z&E>NY+ib-k5B6J;gc9s8T@sTm_eH_7vX!%61HH;QeMnnKDC1-n*F|r62O0#~clNDe zr7wEA`6LY5It03Dk(L&GhN`+;BnObaF4H|%nXt)3Im>11Q9Cm1#EaBN`1Z_{Gmx5l z7TYE(k{(%lI^;-1^(DoM74X&h1vxU?()3okbDBOyicpjFXuo?*S0(L0oNV_Zu4oOu zy3mc4Z}dt#VwSl`qobvDq?G+8ap|#s^VnLlT4d{YeIHOu(j&p;j@XOHaYl^YF6E8( zs?xOVoeJLC?nil2(AK+t@`o+Mbi?3#3ElK+Wrqk#zrf9xn(fSC_M%=D%#xqfyz>ip z-F(#re$oa0rUl%cwv1AY2-non;1%~t6PLWX&$fzmsCVDb*W8&)-JB`IzK>f?@GG9>JQ-^c8b#3YFfQRnL|?BR?Gn} zw-Y>F+X-T$e$6d;6Q3lGG>UwyX(OD;gv55fNz#ly!bfev=!spaws*winRU~sL>nS` zz)q8>ou+Qs?sOb<9~~@BihRFyZF&LCP@vZzaoTC+*>n0rjwL=s^^zC)60K4d4UsTi z0-_z;$pAF|yHNa+SmZJt?RV4-wM>z*vzNKjiV<`DDnr(-VQAvsY2tp-#Qmox?jM@C z|I)*~)1wdn(uCEcW&hHnWZF-oR5H0RxAQ$Wn_j_|=`PJPF6$YSZTEsOnaNIbSf0ud zTfM9y(&g!qqIP*rWU9?>c(`2JMf%2Qed=oIp{dFWE;n{GNf>lJ5awmFwnQqyB-0Tz zA7$xt%2w3_4Ac>dk4dpb2MO=&Cs3RZ0ne)pAL& zLF>7~cZMg#rZkuJ6zlkg9to20Vx9#4eR>;@%tKleU7S%c^6PgByHANIbrPQyei{OM575gsoCN-7mdDbS{FxXX}A~1I-!P5OVd!rJJKrRUh4Y@N%`r` z))g&HOBL1b8ZA@ByHYX@o0f)p;_3oZL-!aOy2sGaJ$E)O4I7Ah0;IGWol&MVWEGQX zryEKfNx;E5lY92%)S;9Ysl@Slk`q8?4{ zAl=fT3_4U3g-6#Ty8)2+loDrLY7N@y3_ny7bi^DgvX($Yg{F)yl*E!Gq)?$K;|SYC z!r(!L)fzFVL^NT7B*%{u*W~`g^Y)}shHd1yihrq2VhQ|(J;}D{4~odgdW-+;IaW;M zkIV*+0aWimFmqqx>|zZf3-GLg=w0X<6l{?bR}Fef*&3%+0a?^zKpdzT5Ckd)B!P-T zfy8oRi!%cP^IGItHGHA9iL!!t{K|>b$x5*_=H~9NJuBY%aIT&^=^Gn#nb!IHV2@h( z^_BL%`JS(Nn~nKOcr}pF2eFLGGcGmQKl8pq%>I%*VKoxg;m%~Lf>OoIzTb!l@&D}8 zf{lx6T~C1I|INS;99*J3rseB+oxJRbuy(pzlGf8_YnJRt8j?@Gk!}(GMBgY-B8-t``MO1D*C@jp`Ic@G z+LhL@{E==E`p1Q#e_R;8W$7Ll`iI^Z{o~?`{_g&wzq`L^?+Zi!tY_$-k_`P*lA(V} z(mo++?`rLnk@k5)yQ*qeRCl-4-p^cy_J$HN252koqgwlnt@8_}y1?4>Q=eAyb-A_` 
zX!P{(^aY&K>MFd(wfVzS^rVxmvFh6D{dD`=!|iX;?QiiHUNP+GG3z?J)+|o-bbpZPm88;gt1As#b~mzey|Ic(^s9 zTVp6w-6>9c6Lw1-MB*@`tyA1^%{tgV&b=LkN(m(mPWayn(+JbaWDs4Hs`(}V~ej;apJ{oB9Kb57quZ#iu$r$KU22wj? zu-?%=`L|?f|CC{RXQ;{l;VII|)>(DkD}NNbi3RQ-$1988u(OC@L`80%3;6x_V~EIW zfCa%b1U85*9~r2;sK+Fm?UqPT-7vro7sbzhATP)g)a&dAnDiVqC*GBa@8zXwbzA@` z)V$#kXu4qGm0eKC^u2-MRNiYN`l~=w{AT%@WEoo%)#he}c-Zs^gzB~f<`wf&=@BX0 zZ!FbTI||$uX8{f%WXg#n)(=FMpEd|=n%V?n)f7Z3?L^y^BNCa83K*@LN^3==se(nQ ztOAGBSD)?jeX(TITwR83E(-Qn+)mV`Uk2~H4LF%WUttgF3MH($t|+bmRu@S7x6dgB zGkl67FJh2^Q6&$5uF^m(p6hq5pv`m!UW?pr@6|2Y6)|=u-#;gVZqsp+uJ;8SB|An& z6|S?pBd+R>n-8*1vXV6)EwYDpbM}K3sC+OT_^rs{vXaTC$)KyPycCYh*V2Wp?uXCP z9bXH;4gMK9-)xhzUFSunvJ%iq$V9pvcgJUivdG^FTjlgIgfP#iN%2Bgb@{s}Tj$9N zo{Q!TTRnlNjD;(n2%#nO-kge@1UtH>t`k+HSz4yaI{hy-dKYEM8#i+zmme8$w z8m!8GJV&g4FS4r~san_%Ns)>Mp@ch@6=lo35N{<=Hn#J#WJQ*&X^qD?T{C2Ek8@^~ zen1QpKCtODStiR7j|4>974N-j)~2H0C&lVVQm~**mVe8ikdd1wk1CsVc%z731aEbE zyt`hrmt*cxG8?SPmP%hXeOGb>g4%;9_x-E0+s$1$=E<}#Fi)5K0Y+od#s;$_x)4&n5 zw%=OdReC^THlM~s7qYS>bR#RaoOROWj(oIBdh78zVzPQ{&D|s~)M~;fn&tLgJqJav zC-c16=;=vsNevWRgAd7iPu;>^`IDGfZ;R=)Nwo}>fnF0Fb1ZfN9Y`=WRIQ1qsWKZM zF|=A@>7kC&Y(+nmNeA7((^72H@Wrth6xEyZD{?w3s8?EQSB7{UR6%)tgJr&DQjvAB zxsK4H+fKStKtkP9B4)`?Tb!dgKbMRVu5J~5u`d_TwXNIHvOOk;qCCdeQ~T=mJ~C*& z+r7(Np;W|KvK{f25Gxkn|Cf6&V(8GdLA}Ob=S$M*RdJ=squzm?abqX)t2ZgZ3Bfz5 zYZ}~*S^ASz%L%a?v?pb6ch*`glS5W)(kvl7WDw})0FYSowmWW?RK<5b6X~g`*-gzg z@cr$QcoO5#K%BG}ayu+pkr|MAPP_7+=dE?xxZbU&qnETNkPXn>p@IWH;1aKfIoT6K zxhzRrfmo^Nk2E>+iIwhVyJq6?3_|eRI?evpa>lVx$Bp6vf?Wv@+Bn5}OeE|UIn%5C zciij3AMkmjYhy>*j#@|LQ#D!^5nho$ixp4R;6a(t*87B>Q`)$iPQ4JUxm-zH`eWXJ zm5a<=io^Qq@j5cHeC|m(%%bA7u1lLv^47q)g#5V_2YU}<;~*`?37NAKb}QWxx-nA4 z_gms|RK<;{P#R&((`8!ffvb%bN{9EDzsuL8tAHc=j1ex1j>u8=zuY0w)F@FVgN?M& z$!Cls{$r76FX|arI{yCqH2q#AbW46sM&&(QYS{)lym>=^46NKLbpT8q=%(1zo^iE_ zJZ7bGQRv#ajw{N`5v5)b?|_AQK1;H9;RTY-=TY`|mjB4;a2uVQYAqwi*TzA*Vu;tt zKB9}QB=h-MMBu$^z&9^*5p%urXX|MrBpmm&l#KKxctcKdEIzG3pF?W_#znpn@8wjr zL9wupDG8Fzo|8TJMfwUM<9sodP%9g5s}QFn0_3cd?T)R4(L+>6zQVc1Em{PpIVk|v z!)MF9SfK#~U93QL$S0mO&sWS#iev-2-qXdoVVJP~P9ow;%z;NSM$yj>O3GuBQjB(i zp-=?D!K=YB1+N4rJfsT1Zbhk*7}>mgGLUFl5O*z8A+VK4Ucyui)OEy4#noA-awnnp zyDy+_o@={6SU+UTcOvuW=jvr(%8Kp~0j@_(o2}&Si4_TL!m5)WRaKzD?MBYh?M9s{ z)n`>$v-H1oHk)N>BR{gE~kJcu7Fm!b8gE2(+aw!@+nA7!lF&Cn(lc~pU zrIe)p>Jgc|GmnOmmMUX29d^Tmzte5KdBWE+lOk-r8VVeY~3BJ$BYXZwBuBV32$+UyHJbyZh@(dw?Jz&-KZ_G76s8w` zI`JZv6vek^=3|kTY`56TE0ciweL}qA--y&Qz$V5E=Uu$m?uf<9qydC&DS03Mi!e9W zRmwdrAM=Uu#g7bp6Y?Te+#fFqTTub8_$SuBC}jL1;)?6IV8Ea(Q4$dcAdBz7VWgsX zJs@GFDS{b9YA>^Cbbt!9%b)+GN~LC}n;HMZhy~y?kLv^l;_A$Pon|jyzD}3#UcsJO zf!ON(MC>g2sSRwuXUQgAs_Ss-oCcAS?h|?QJoEBG-ra4bzZ01&et>8FPeiJ|;osK& zLM!2J2K>6jCnBrl)^&zI8xdyLPtrXmn696wM?}os5qF0*2fLf^X_gf4xZ4OP$fZa% zG2_b#%OnEoF;NXzuSw!!M5cZ*SV!+A+$A|?BHZ3%kVt&9Ca&D(#5B_5)eXj-#|n9O zg@&8ywy<(j#%C`??XW$V=MCFX+GWnhUQs&X6`Jh&8JW(GpP}jOHg>gsLfeszGE3NQ z;H)Cc9{){QQs+N=@I8nR!P8qERPJao9b9D#5#$LgkEedvVj^f^P!#!XDq9$wBT9fa zgARoeli_5lR<_d=^!!)5_}LHm!~yK=53$vXe2jRv@I(Hb{Y1jKp0>7M`m06P234_Q zby~>2eRP@j+3(T&NC?>oD}<1W*}UT2ZXG{=2k3g%+G;`+Z=StGFc4P~8-`bi=XlDL zAwV+qgOioTyG*l21{$@OeI!KaV|15LYSUjP=fd5}yh6Z~y(G(bl`BP`6uw|lOPA_< zu<}%Pw`2n!FLjET%D93UDjg!Y_%zv-P0kb$;dq}_#)X4fRT6NO7o)yug0g5;+7jKw zhm`%GqgH=G0L2>&=4xfVrfc;egE(TwvG}r)Wh<5~;RdyX6u%ibn6uf66lK`2JGo*{ z0ILu7k*L#S*WDrVi<(SRV{vMOrzo?A=ginfCYUB6iDM-a&~p1u$+@-Bq#YyZG#UcV zBpjPOZdv7@F~D`T8i1D1vsEg}CF{dOAAOy)P7r48-3i3x5b7p^z0%9HVwU`?_cb^M5)0M^*O5-W+!C=txH}0=DEl>u}G?k>7|B7^i|l?-D6mp(|sV~UiuLz-j&#{;|SW` z6F{O46e!9*6g;jAkfB~5CbA&?eR^Bc@*{g$(F}UeGWZ100=66 z-VhWGJyKJNTk)eEMyfWwyw^x2fvYIy%&^AP%K@rt9h`3};=#SvNJMQYR?NyNr>Z3- 
zE84Db`x*y`&vunp44RNnI9sX>8W!8IU!^%WITGmgX z94d?(-a)%F%(wYEe|aBLW-Gn_u{PsZQ~D!LXj zc0JL>EE;P~Syjx6tQ)7d(;5M~;2OHFS{4%YUX1E2g8i2d{Ot*egQMG@zvRnLw9+Z+ zE*7$UAy0w8ol;MfbBwHrpXX>yz2c$hJj&IRo9)UZD^r}K-lCw+AWu0Le`D^;T!e`% zGWAX<##&u1vT)`Nk%wqT`Xtt$D1A*3{3Al}$Gf?NR&|5dx ziK(8V<;}BSmhi`Pdq|w|U!+@cGeF+Zq1xKGPPiCq)ywC3HmqoZ^?r+ne0!@4hjzOT z8`f67eaBvTJVB0Awq8Mp+zIC5FK`da_YyTJ(v_WVh7eVa@gYP>9ikC5L0@-02NJ5d z$T&a{*K!P0Zn%|ixP3&t-`zo`ii|lrl%nmQfV6)O=QKEjp*T0swfVs}Tt!5>QD?ji zxgmE3+{^#e;XY98VPy&I()KM3iXkj`^|xR`-%dGazwxU`6#m!h?l-0#`-rMo@5Db*-G)iI{;f_NT>K9`2hE5!vyJ+EhewdP2pM_dwKP z5-F0fj?Y!PQvwvyw`-H?8;At8dW9z`K1KcAWDX#JZRCVS2ZKNVb_%P5iGL&J-4gQ* zA|5t6xehHQnF6T%1B{b@enGMc|Dv<>UmJZ)Fw0+r6HH?l*#xs%H(O|Ta~S}Ghet=z+)ne^ znk73H@i|fD!a+;azOk>E!EvpSry+!V(<#Kucd7Q~s)alaAq3l8&SdJdmsI2-b~nijW=Dh5 z4zSVw`I!||G=i$lk}WDsu7&9QAttfrHGH2Xt3|ec*C|6L?X=U~GQqjW3Rx$R+u=H{ z)&Xqq9dR!IhM*s>ltk2{S4yk$4^CyUQCm=Q(RL6NySF?w19B`>a{fj6xs~dAOcJ1H`5}@r532e91H6=cOxRU>$`r3 z*2a>W-F_e}OHofC9Lh`UY8LDP5wu%{{3@UerfiqiI^T)i=R^8gz^83h@)^Z|U7$H* zAZ*b&3z;8MX!Y_QC@z~F@NT^`K>`}4c^UgU6bw}yIPMi zFL<6(LCgMg2(`{}BBj=eoe4U{;Ew2(o?QLEq)P2d| zC*4;`IK2^#2~@mFAcafaUl82MzDkx3xBE)LC`bHC8HcOw+SQ{iOy!gbSdk{{^uI}| z*@_8mc`fp+5~XYyjlgEV;e&Z4{XfN>*?QtS5`gdX6nTNpG>~oPjqQQY>(Lbw_mw@qrrJ%B$YI7}Td0)8d8rr4&xLPmY>eX(aJj>Fa3+H zwT+z@IvX-2-jdA{<5jBujoq^nLi+;egQ@SkU{HL6S~vd4@~oAv8-xgfaB)GBrcS8qhK=9J7AyKL32cqtGZP==lsHS$ zA6~K?mOGHmn*^qNfhr3)5j3UTSHJePsY-lq*;zMVn9>7;EI*xq=+|KpUHj&>>W5I< zEZ`UtZI4&Fm*irn}5=aEnB(1y8gTN zHf2HqICV-GCRlFlnhitL)7RqXWc|D0^=RZfT-DYdn^R0asLBC@ONZ$IZ$pHAXFA#v@m%p5k zue?Ea0TCzm15aMZ&AZ;KL)Jt*{34>jOkiO5Q2-^p|_{R4>6 z+w;5wd6USHX1=gZv$29KOU6aF$7UyPuxTxdDEO9OkgHbQZb4k*`v_wu|;(0 zBYy^(XrwUE+$qJZlT~wop%wh3AZ2Z=y$RY4OHuIUK+4($ktaYX=!5W9WhRa${IwLF9-d~SVhjz3c@&ZhTuL@SK?rKeVMTFu8P#;?#6X8_DEda zxq2k9{w(EZ(Id8^cA?DzWrBD_((m#k|MR+Wn_XCJVWcwiA+{jQYJ_1;oCj^Y9#xUUK7BkSAoC<^<0UQ$%YC;BwQX4QEbb>=@ z)Cmi#J*q(Dvc+lT862=|!7cQUs5{kanmn@d5q3Z543Zb%%tt85Gi2Ba-7%N*>d;qm z!P-L4-ZO|h+ut5Qo@I`RKr8Jm4B9=Nc<~UV8gx{KkI6ngq}I%rmwpmGYulXc8Nzma z&eIY1BF+oJSe>7t&%L15EE4?sGf9X+$|ilAjwo{OBecgoO@;Nhr zUi;JPTv%MdPL7BT>)jZl)rG|dD*y&SM&~3tUpIg%LG10(B^$P2fE^g7IM9HcV%Ef2 ztx?jqjc=zxL7xE6xf#%xvkWmb(!KGa0FWvXci)IwjiC1>R1jKK;FR8$&sPmT1*W|W|I-QA zfajIi%Jg?TMEwIbT{tc}C=P4ojs7rx-Y*PyyBHh_&{E5L-XMU5yBaU`!D0uU}jvVA&ho?X^=L z+#LXM|JIF5{SU__)C^gOW>zN%f2&7`AFI^#&!I*K6ki5IwJkmK7=SrYjMCiWQllRQ zK+ebt!9&L*7OjJP>`Vq*f*72dT_F0+Az#3%93$8eqVGEE2=?L7fK9FrgrX8U{iQmlY>At7;a8q#-tD{P=?c}769 zZUOf-3ix5UAB~#%&6@dj<`2NFM-@EZAt6h#a^~)#sW`^FzqY`KYYTk*QM<&>+M+JR8P>jzR2eFMYgUl^8Zkl z57!r2>p*fw=F=5K);)Tua++6?bB!8V^BAUxX;cwooK=n@eFih)XZ@)o+vCr>OV%9qXJ+5%h533 zzmw<}vU0qRwGDTaK&qa~nfb4jwJ``TZdY2?NqGcd^FBt2bd92?<22Pt7^g!)-cc*c zForWE(49OVQ$)BL&~dGe_HwNPe@3l3@&6{&hvDU3CM<_jh6qhRws#?#dO0-v%hsO> zOZepdKg8C%S(78c>pHjpYjk1hkqlwc1&dOE*m@aqkqPRQ03(YoZ&Moi!&8XuF zGx^CjbT(0gK)`=EZb&Fn%JoN7m9nw+ZO;k(kowyLf>w;D7&s%e7D;uyTBsb^$1Idh zs^yDg1qeis&*)sX&|P+XR@X6*RJe9QNpE1hzF=V3g2-i^0&Myb5wiuV*HfhoIjv4U zg09b$!NvVL7#lEU&k^7qjG@#sJzZi#WLzRY^rV*wz{Ra1`Z#HC!rUJw0kmkzFklmD z1=Z|gFwoR#A*h4WYoIM!>xB88%v;J##Hb3}=&p0g4Psz|jBVvj$$LbP?mXw-F>RME&t@ zWA4O*h5Q1)y$*Cie9z>UemO+ZAxW9eegUF_B+C{!^jB~KL=S|LR63K~NL(#q!5CZu zFh~#?{ahwk#=9H88MDr!XBZP>@iHF_XM^!#GVK}d* zL%~L44#u;dF>Vj#M?~yfd&s8pgYk4eL#Ixr$ax^KQ)BXKv`1*%aeLHT^nOi^o@w&> zFrLg7LK2>Y$znM9*)w`wAr^{(D+dpfu^xMy(U+2qFdy(L@8s*P^k-2blwYXmk>%|ym;F2l zE!WLP5J53id=Mu`9{Xt6zfi@%U@MxqcSeU#ORDntpEk@#OXnWf8_pGRw&Tlx2m zT4PScNNduq_>=uUbUx*QR_51nXq<%j^G#f$>d!N!d^?Fg-6%Oxl0|VLs5wQW{Srk^ z6eQ!s`zd2TBtiV^e>56*_jljFc|7n$)XewCwLpHGpuIf$p#=R;QHXAwM?aNl{Zy#+Q<2tBC0aifYW-BC^>c~V&xKk)7is-mqV;p3*3ZRSH%rjX 
zLUgkT-6}!13el}|=;qz`!n{_#KY - Overview - CUDA Python 12.6.0 documentation + Overview - CUDA Python 12.6.1 documentation @@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
@@ -266,7 +268,7 @@

CUDA Python workflow
NumPy is used to store data on the host.

-from cuda import cuda, nvrtc
+from cuda.bindings import driver, nvrtc
     import numpy as np
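Because this patch keeps the old layout importable through shim modules, scripts can support both layouts during migration. A minimal sketch (the try/except fallback is illustrative, not part of the patch):

# Prefer the new module layout; fall back to the legacy one so the same
# script runs against older cuda-python releases.
try:
    from cuda.bindings import driver, nvrtc   # new layout (12.6.1 and later)
except ImportError:
    from cuda import cuda as driver, nvrtc    # legacy layout, kept for compatibility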
     
    @@ -279,9 +281,9 @@

    CUDA Python workflow
    def _cudaGetErrorEnum(error):
    -    if isinstance(error, cuda.CUresult):
    -        err, name = cuda.cuGetErrorName(error)
    -        return name if err == cuda.CUresult.CUDA_SUCCESS else "<unknown>"
    +    if isinstance(error, driver.CUresult):
    +        err, name = driver.cuGetErrorName(error)
    +        return name if err == driver.CUresult.CUDA_SUCCESS else "<unknown>"
         elif isinstance(error, nvrtc.nvrtcResult):
             return nvrtc.nvrtcGetErrorString(error)[1]
         else:
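The helper is cut off by the diff context after the else branch. For readers following along, a complete error-checking wrapper in the overview's style might look like this (a sketch; the exception message is illustrative):

def checkCudaErrors(result):
    # Binding calls return a tuple: (error_code, *return_values)
    if result[0].value:
        raise RuntimeError(f"CUDA error code={result[0].value} "
                           f"({_cudaGetErrorEnum(result[0])})")
    if len(result) == 1:
        return None
    if len(result) == 2:
        return result[1]
    return result[1:]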
    @@ -334,14 +336,14 @@ 

    CUDA Python workflow
    # Initialize CUDA Driver API
    -checkCudaErrors(cuda.cuInit(0))
    +checkCudaErrors(driver.cuInit(0))
     
     # Retrieve handle for device 0
    -cuDevice = checkCudaErrors(cuda.cuDeviceGet(0))
    +cuDevice = checkCudaErrors(driver.cuDeviceGet(0))
     
     # Derive target architecture for device 0
    -major = checkCudaErrors(cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevice))
    -minor = checkCudaErrors(cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevice))
    +major = checkCudaErrors(driver.cuDeviceGetAttribute(driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevice))
    +minor = checkCudaErrors(driver.cuDeviceGetAttribute(driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevice))
     arch_arg = bytes(f'--gpu-architecture=compute_{major}{minor}', 'ascii')
     
     # Create program
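The program-creation step itself is untouched by the rename (the nvrtc module keeps its name). Roughly, and assuming saxpy holds the kernel source as a Python string, it proceeds like this:

prog = checkCudaErrors(nvrtc.nvrtcCreateProgram(str.encode(saxpy), b"saxpy.cu", 0, [], []))

# Compile to PTX for the architecture derived above
opts = [b"--fmad=false", arch_arg]
checkCudaErrors(nvrtc.nvrtcCompileProgram(prog, 2, opts))

ptxSize = checkCudaErrors(nvrtc.nvrtcGetPTXSize(prog))
ptx = b" " * ptxSize
checkCudaErrors(nvrtc.nvrtcGetPTX(prog, ptx))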
    @@ -366,7 +368,7 @@ 

    CUDA Python workflow
    # Create context
    -context = checkCudaErrors(cuda.cuCtxCreate(0, cuDevice))
    +context = checkCudaErrors(driver.cuCtxCreate(0, cuDevice))
     
    @@ -380,8 +382,8 @@

    CUDA Python workflow
    # Load PTX as module data and retrieve function
     ptx = np.char.array(ptx)
     # Note: Incompatible --gpu-architecture would be detected here
    -module = checkCudaErrors(cuda.cuModuleLoadData(ptx.ctypes.data))
    -kernel = checkCudaErrors(cuda.cuModuleGetFunction(module, b"saxpy"))
    +module = checkCudaErrors(driver.cuModuleLoadData(ptx.ctypes.data))
    +kernel = checkCudaErrors(driver.cuModuleGetFunction(module, b"saxpy"))
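Between loading the module and the allocation hunk below, the overview prepares the host-side data. That step is unchanged by this patch, but a sketch for context (the grid and block sizes are illustrative):

NUM_THREADS = 512   # threads per block (illustrative)
NUM_BLOCKS = 32768  # blocks per grid (illustrative)

a = np.array([2.0], dtype=np.float32)
n = np.array(NUM_THREADS * NUM_BLOCKS, dtype=np.uint32)
bufferSize = n * a.itemsize

hX = np.random.rand(n).astype(dtype=np.float32)
hY = np.random.rand(n).astype(dtype=np.float32)
hOut = np.zeros(n).astype(dtype=np.float32)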
     

    @@ -416,16 +418,16 @@

    CUDA Python workflow
-dXclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize))
-dYclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize))
-dOutclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize))
+dXclass = checkCudaErrors(driver.cuMemAlloc(bufferSize))
+dYclass = checkCudaErrors(driver.cuMemAlloc(bufferSize))
+dOutclass = checkCudaErrors(driver.cuMemAlloc(bufferSize))
     
    -stream = checkCudaErrors(cuda.cuStreamCreate(0))
    +stream = checkCudaErrors(driver.cuStreamCreate(0))
     
    -checkCudaErrors(cuda.cuMemcpyHtoDAsync(
    +checkCudaErrors(driver.cuMemcpyHtoDAsync(
        dXclass, hX.ctypes.data, bufferSize, stream
     ))
    -checkCudaErrors(cuda.cuMemcpyHtoDAsync(
    +checkCudaErrors(driver.cuMemcpyHtoDAsync(
        dYclass, hY.ctypes.data, bufferSize, stream
     ))
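The next unchanged step packs the kernel arguments into an array of pointer values the launch call consumes; roughly:

# Each argument is passed by address; CUdeviceptr objects are first
# converted to raw integer addresses.
dX = np.array([int(dXclass)], dtype=np.uint64)
dY = np.array([int(dYclass)], dtype=np.uint64)
dOut = np.array([int(dOutclass)], dtype=np.uint64)

args = [a, dX, dY, dOut, n]
args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64)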
     
    @@ -457,7 +459,7 @@

CUDA Python workflow
Now the kernel can be launched:

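The launch call itself only changes module prefix in this patch; for reference, it follows the standard driver-API signature, roughly:

checkCudaErrors(driver.cuLaunchKernel(
    kernel,
    NUM_BLOCKS, 1, 1,   # grid dimensions
    NUM_THREADS, 1, 1,  # block dimensions
    0,                  # dynamic shared memory bytes
    stream,             # stream to launch on
    args.ctypes.data,   # kernel parameters
    0,                  # extra (ignored)
))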
    @@ -500,12 +502,12 @@

    CUDA Python workflow
-checkCudaErrors(cuda.cuStreamDestroy(stream))
    -checkCudaErrors(cuda.cuMemFree(dXclass))
    -checkCudaErrors(cuda.cuMemFree(dYclass))
    -checkCudaErrors(cuda.cuMemFree(dOutclass))
    -checkCudaErrors(cuda.cuModuleUnload(module))
    -checkCudaErrors(cuda.cuCtxDestroy(context))
+checkCudaErrors(driver.cuStreamDestroy(stream))
    +checkCudaErrors(driver.cuMemFree(dXclass))
    +checkCudaErrors(driver.cuMemFree(dYclass))
    +checkCudaErrors(driver.cuMemFree(dOutclass))
    +checkCudaErrors(driver.cuModuleUnload(module))
    +checkCudaErrors(driver.cuCtxDestroy(context))
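For completeness, the device-to-host copy and synchronization that precede this cleanup are unchanged apart from the module rename; roughly:

checkCudaErrors(driver.cuMemcpyDtoHAsync(
    hOut.ctypes.data, dOutclass, bufferSize, stream
))
checkCudaErrors(driver.cuStreamSynchronize(stream))

# Verify saxpy on the host: out = a * x + y
hZ = a * hX + hY
if not np.allclose(hOut, hZ):
    raise ValueError("Error outside tolerance for host-device vectors")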
     
diff --git a/docs/release.html b/docs/release.html
index 08b59581..28cee66f 100644
--- a/docs/release.html
+++ b/docs/release.html
@@ -3,11 +3,11 @@
-    Release Notes - CUDA Python 12.6.0 documentation
+    Release Notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
@@ -232,6 +234,14 @@

    Release Notes#

diff --git a/docs/release/11.4.0-notes.html b/docs/release/11.4.0-notes.html
@@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.5.0-notes.html b/docs/release/11.5.0-notes.html
index 2234b5d7..f2771025 100644
--- a/docs/release/11.5.0-notes.html
+++ b/docs/release/11.5.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.5.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.5.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.6.0-notes.html b/docs/release/11.6.0-notes.html
index 4c0dd1b6..3f982858 100644
--- a/docs/release/11.6.0-notes.html
+++ b/docs/release/11.6.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.6.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.6.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.6.1-notes.html b/docs/release/11.6.1-notes.html
index f29f0827..6d370900 100644
--- a/docs/release/11.6.1-notes.html
+++ b/docs/release/11.6.1-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.6.1 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.6.1 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.7.0-notes.html b/docs/release/11.7.0-notes.html
index 11990cb2..970c6092 100644
--- a/docs/release/11.7.0-notes.html
+++ b/docs/release/11.7.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.7.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.7.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.7.1-notes.html b/docs/release/11.7.1-notes.html
index 128a323c..2d567baf 100644
--- a/docs/release/11.7.1-notes.html
+++ b/docs/release/11.7.1-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.7.1 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.7.1 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.8.0-notes.html b/docs/release/11.8.0-notes.html
index 5875a623..4d2bb529 100644
--- a/docs/release/11.8.0-notes.html
+++ b/docs/release/11.8.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.8.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.8.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.8.1-notes.html b/docs/release/11.8.1-notes.html
index 35705113..e78937d0 100644
--- a/docs/release/11.8.1-notes.html
+++ b/docs/release/11.8.1-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.8.1 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.8.1 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.8.2-notes.html b/docs/release/11.8.2-notes.html
index cb081305..0000a86a 100644
--- a/docs/release/11.8.2-notes.html
+++ b/docs/release/11.8.2-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 11.8.2 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.8.2 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/11.8.3-notes.html b/docs/release/11.8.3-notes.html
index 24a6c77b..7083e1c1 100644
--- a/docs/release/11.8.3-notes.html
+++ b/docs/release/11.8.3-notes.html
@@ -3,11 +3,11 @@
-    CUDA Python 11.8.3 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 11.8.3 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
@@ -289,14 +291,14 @@

CUDA Functions Not Supported in this Release
Previous
-CUDA Python 12.0.0 Release notes
+CUDA Python 11.8.4 Release notes
diff --git a/docs/release/11.8.4-notes.html b/docs/release/11.8.4-notes.html
new file mode 100644
index 00000000..8e2d33bc
--- /dev/null
+++ b/docs/release/11.8.4-notes.html
@@ -0,0 +1,358 @@
+    CUDA Python 11.8.4 Release notes - CUDA Python 12.6.1 documentation

CUDA Python 11.8.4 Release notes#

Released on October 7, 2024

Highlights#

• Resolve Issue #89: Fix getLocalRuntimeVersion searching for wrong libcudart version
• Resolve Issue #90: Use new layout in preparation for cuda-python becoming a metapackage

Limitations#

CUDA Functions Not Supported in this Release#

• Symbol APIs
  • cudaGraphExecMemcpyNodeSetParamsFromSymbol
  • cudaGraphExecMemcpyNodeSetParamsToSymbol
  • cudaGraphAddMemcpyNodeToSymbol
  • cudaGraphAddMemcpyNodeFromSymbol
  • cudaGraphMemcpyNodeSetParamsToSymbol
  • cudaGraphMemcpyNodeSetParamsFromSymbol
  • cudaMemcpyToSymbol
  • cudaMemcpyFromSymbol
  • cudaMemcpyToSymbolAsync
  • cudaMemcpyFromSymbolAsync
  • cudaGetSymbolAddress
  • cudaGetSymbolSize
  • cudaGetFuncBySymbol
• Launch Options
  • cudaLaunchKernel
  • cudaLaunchCooperativeKernel
  • cudaLaunchCooperativeKernelMultiDevice
• cudaSetValidDevices
• cudaVDPAUSetVDPAUDevice
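Issue #89 concerns the helper that locates the local CUDA runtime library. A quick smoke test of the fix might look like the following sketch; the tuple-return convention is an assumption based on the bindings' usual style, and only the helper's name comes from the issue title:

from cuda import cudart

# Issue #89: the search previously looked up the wrong libcudart version
err, version = cudart.getLocalRuntimeVersion()
print(version)  # e.g. 11080 for an 11.8 runtime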
\ No newline at end of file
diff --git a/docs/release/12.0.0-notes.html b/docs/release/12.0.0-notes.html
index ad35a5c3..c1136146 100644
--- a/docs/release/12.0.0-notes.html
+++ b/docs/release/12.0.0-notes.html
@@ -3,11 +3,11 @@
-    CUDA Python 12.0.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.0.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@

    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
@@ -280,12 +282,12 @@

CUDA Functions Not Supported in this Release
Next
-CUDA Python 11.8.3 Release notes
+CUDA Python 11.8.4 Release notes
diff --git a/docs/release/12.1.0-notes.html b/docs/release/12.1.0-notes.html
index 0e2113f0..2f07a8ba 100644
--- a/docs/release/12.1.0-notes.html
+++ b/docs/release/12.1.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 12.1.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.1.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@

    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/12.2.0-notes.html b/docs/release/12.2.0-notes.html
index a745e64e..0ca899bd 100644
--- a/docs/release/12.2.0-notes.html
+++ b/docs/release/12.2.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 12.2.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.2.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/12.2.1-notes.html b/docs/release/12.2.1-notes.html
index 8753d028..6906e2a9 100644
--- a/docs/release/12.2.1-notes.html
+++ b/docs/release/12.2.1-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 12.2.1 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.2.1 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/12.3.0-notes.html b/docs/release/12.3.0-notes.html
index ca6327a7..304a56d9 100644
--- a/docs/release/12.3.0-notes.html
+++ b/docs/release/12.3.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 12.3.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.3.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/12.4.0-notes.html b/docs/release/12.4.0-notes.html
index 64bcfee6..e2deb824 100644
--- a/docs/release/12.4.0-notes.html
+++ b/docs/release/12.4.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 12.4.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.4.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/12.5.0-notes.html b/docs/release/12.5.0-notes.html
index 63d60b27..4b50b53d 100644
--- a/docs/release/12.5.0-notes.html
+++ b/docs/release/12.5.0-notes.html
@@ -7,7 +7,7 @@
-    CUDA Python 12.5.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.5.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
diff --git a/docs/release/12.6.0-notes.html b/docs/release/12.6.0-notes.html
index c715202b..d9c287fa 100644
--- a/docs/release/12.6.0-notes.html
+++ b/docs/release/12.6.0-notes.html
@@ -3,11 +3,11 @@
-    CUDA Python 12.6.0 Release notes - CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.0 Release notes - CUDA Python 12.6.1 documentation
@@ -125,7 +125,7 @@
    @@ -152,7 +152,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -168,6 +168,7 @@
@@ -292,14 +294,14 @@

CUDA Functions Not Supported in this Release
Previous
-Release Notes
+CUDA Python 12.6.1 Release notes
diff --git a/docs/release/12.6.1-notes.html b/docs/release/12.6.1-notes.html
new file mode 100644
index 00000000..c0656f47
--- /dev/null
+++ b/docs/release/12.6.1-notes.html
@@ -0,0 +1,359 @@
+    CUDA Python 12.6.1 Release notes - CUDA Python 12.6.1 documentation

CUDA Python 12.6.1 Release notes#

Released on October 7, 2024

Highlights#

• Resolve Issue #90: Use new layout in preparation for cuda-python becoming a metapackage

Limitations#

CUDA Functions Not Supported in this Release#

• Symbol APIs
  • cudaGraphExecMemcpyNodeSetParamsFromSymbol
  • cudaGraphExecMemcpyNodeSetParamsToSymbol
  • cudaGraphAddMemcpyNodeToSymbol
  • cudaGraphAddMemcpyNodeFromSymbol
  • cudaGraphMemcpyNodeSetParamsToSymbol
  • cudaGraphMemcpyNodeSetParamsFromSymbol
  • cudaMemcpyToSymbol
  • cudaMemcpyFromSymbol
  • cudaMemcpyToSymbolAsync
  • cudaMemcpyFromSymbolAsync
  • cudaGetSymbolAddress
  • cudaGetSymbolSize
  • cudaGetFuncBySymbol
• Launch Options
  • cudaLaunchKernel
  • cudaLaunchCooperativeKernel
  • cudaLaunchCooperativeKernelMultiDevice
• cudaSetValidDevices
• cudaVDPAUSetVDPAUDevice
• cudaFuncGetName
• cudaFuncGetParamInfo
\ No newline at end of file
diff --git a/docs/search.html b/docs/search.html
index 0af4fe57..ecfa3063 100644
--- a/docs/search.html
+++ b/docs/search.html
@@ -5,7 +5,7 @@
-    Search - CUDA Python 12.6.0 documentation
+    Search - CUDA Python 12.6.1 documentation
@@ -122,7 +122,7 @@

    @@ -149,7 +149,7 @@
-    CUDA Python 12.6.0 documentation
+    CUDA Python 12.6.1 documentation
@@ -165,6 +165,7 @@
diff --git a/docs/searchindex.js b/docs/searchindex.js
index 3af573eb..73799934 100644
--- a/docs/searchindex.js
+++ b/docs/searchindex.js
@@ -1 +1 @@
-Search.setIndex({...regenerated minified Sphinx search index; payload omitted...})
9], "inform": [0, 1, 5, 6, 9], "queri": [0, 5, 6, 9, 25], "compil": [0, 4, 5, 6, 8, 9], "support": [0, 4, 5, 6, 8, 10], "option": [0, 5, 6, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "defin": [1, 5, 6, 7, 16, 17], "follow": [1, 4, 5, 6, 7, 9, 16], "cuda": [1, 2, 4, 7, 10], "python": [1, 2, 4], "project": 1, "In": [1, 5, 6, 7, 8, 9, 16], "interest": [1, 2], "foster": 1, "an": [1, 2, 5, 6, 7, 8, 9, 13], "open": [1, 4, 5, 6, 19], "welcom": 1, "environ": [1, 4, 5, 6, 13], "we": [1, 2, 8, 9], "contributor": 1, "maintain": [1, 4, 5, 6], "make": [1, 4, 5, 6, 7], "particip": [1, 5, 6], "commun": 1, "harass": 1, "free": [1, 5, 6], "experi": [1, 9], "everyon": 1, "regardless": [1, 5, 6], "ag": 1, "bodi": [1, 5, 6], "size": [1, 5, 6, 7, 9], "disabl": [1, 5, 6, 7, 17], "ethnic": 1, "sex": 1, "characterist": [1, 5], "gender": 1, "ident": [1, 5, 6, 7, 9], "express": [1, 5, 6, 7], "level": [1, 5, 6, 9, 11, 12], "educ": 1, "socio": 1, "econom": 1, "statu": [1, 5, 6], "nation": 1, "person": 1, "appear": [1, 5, 7], "race": [1, 5, 6], "religion": 1, "sexual": 1, "orient": [1, 9], "exampl": [1, 5, 6, 9, 13, 21], "behavior": [1, 5, 6, 13], "contribut": [1, 3], "creat": [1, 4, 5, 6, 7, 9], "posit": [1, 5, 6], "includ": [1, 4, 5, 6, 7, 16, 17], "inclus": [1, 7, 8], "languag": [1, 7, 8, 9], "Being": 1, "respect": [1, 5, 6], "differ": [1, 5, 6, 8, 9], "viewpoint": 1, "gracefulli": 1, "accept": [1, 2, 5, 6, 7, 13], "construct": [1, 5, 6, 9], "critic": 1, "focus": 1, "what": [1, 2, 3, 5, 6], "best": [1, 5, 6, 8, 9], "show": [1, 7, 9], "empathi": 1, "toward": [1, 9], "other": [1, 4, 5, 6, 7], "member": [1, 5, 6, 7, 13], "unaccept": 1, "The": [1, 2, 4, 5, 6, 7, 8, 9], "imageri": 1, "unwelcom": 1, "attent": [1, 2], "advanc": [1, 5, 6], "troll": 1, "insult": 1, "derogatori": 1, "comment": [1, 2], "polit": 1, "attack": 1, "public": [1, 5, 6], "privat": 1, "publish": 1, "physic": [1, 5, 6], "electron": 1, "without": [1, 5, 6, 7], "explicit": [1, 5, 6], "permiss": 1, "which": [1, 5, 6, 7, 9], "could": [1, 5, 6], "reason": [1, 5, 6], "consid": [1, 5, 6, 7], "inappropri": 1, "profession": 1, "set": [1, 4, 5, 6, 7, 8, 9, 13], "ar": [1, 4, 5, 6, 7, 8, 9, 13, 16], "clarifi": 1, "expect": [1, 5, 6, 9], "take": [1, 5, 6, 7, 9], "appropri": [1, 5, 6, 7], "fair": 1, "correct": [1, 5, 6, 9], "action": [1, 5, 6], "ani": [1, 5, 6, 7, 9], "instanc": [1, 5, 6, 7], "have": [1, 5, 6, 7, 8, 9, 16, 17], "right": [1, 5], "remov": [1, 5, 6, 7, 13, 17], "edit": [1, 4], "reject": [1, 5, 6], "commit": [1, 5, 6, 9], "wiki": 1, "issu": [1, 2, 5, 6, 16, 17, 18, 22, 23, 25, 27, 28], "align": [1, 5, 6], "thi": [1, 2, 5, 6, 7, 9, 10], "ban": 1, "temporarili": 1, "perman": [1, 5], "thei": [1, 5, 6, 7], "deem": [1, 7], "threaten": 1, "offens": 1, "harm": 1, "appli": [1, 5, 6, 21], "both": [1, 5, 6, 8, 28], "within": [1, 5, 6, 9], "space": [1, 5, 6, 7], "when": [1, 5, 6, 7, 8, 9, 13, 23], "individu": [1, 5, 6, 7, 9], "repres": [1, 5, 6], "its": [1, 5, 6, 8, 9], "offici": 1, "e": [1, 4, 5, 6, 7, 17], "mail": 1, "post": [1, 5], "via": [1, 4, 5, 6, 7], "social": 1, "media": 1, "account": [1, 5], "act": 1, "appoint": 1, "onlin": [1, 5], "offlin": [1, 5], "represent": [1, 5, 6], "mai": [1, 5, 6, 7, 9, 15, 16, 19, 27], "further": [1, 5, 6], "abus": 1, "otherwis": [1, 5, 6], "report": [1, 2, 5, 6, 7], "contact": 1, "team": [1, 2, 5], "nvidia": [1, 2, 4, 5, 6, 8, 9, 16, 25], "com": [1, 5, 6, 16], "all": [1, 4, 5, 6, 7, 9, 11, 12, 16, 17], "complaint": 1, "review": 1, "investig": 1, "result": [1, 5, 6, 7, 9], "necessari": [1, 5, 6], 
"circumst": [1, 5, 6], "oblig": 1, "confidenti": 1, "regard": [1, 5, 6], "incid": 1, "detail": [1, 5, 6, 9], "specif": [1, 4, 5, 6, 7, 9], "polici": [1, 5, 6], "separ": [1, 5, 6, 7], "who": 1, "do": [1, 2, 5, 6, 7, 9], "good": [1, 5, 6, 7], "faith": 1, "face": [1, 5, 6, 16], "temporari": [1, 16], "repercuss": 1, "determin": [1, 5, 6, 7], "s": [1, 5, 6, 7, 8, 9, 17], "leadership": 1, "adapt": 1, "from": [1, 3, 5, 6, 7, 8, 9, 13, 16, 21], "coven": 1, "1": [1, 3, 5, 6, 7, 9, 10, 13, 17, 19, 28], "4": [1, 3, 5, 6, 7, 9, 10, 18], "avail": [1, 5, 6, 7], "http": [1, 5, 6, 16], "www": 1, "org": 1, "html": [1, 4, 5], "For": [1, 5, 6, 7, 9], "answer": 1, "common": [1, 5, 6, 9], "question": 1, "about": [1, 5, 6], "see": [1, 2, 5, 6, 7, 9, 13], "faq": 1, "thank": 2, "you": [2, 4, 5, 6, 8, 9], "your": [2, 4, 6, 9], "base": [2, 5, 6, 8], "fall": [2, 5, 6], "two": [2, 5, 6, 7], "categori": 2, "want": [2, 5, 6, 8, 9], "bug": [2, 7], "featur": [2, 5, 6, 7], "request": [2, 5, 6], "document": [2, 4, 5, 6], "file": [2, 5, 6, 7], "describ": [2, 5, 6, 7], "encount": [2, 5, 6], "chang": [2, 5, 6, 7, 9, 12, 13], "evalu": 2, "triag": 2, "them": [2, 5, 6, 7, 9], "schedul": [2, 5, 6], "releas": [2, 3, 5, 6, 7, 8, 9], "If": [2, 5, 6, 7], "believ": 2, "need": [2, 5, 6, 9, 16, 17], "prioriti": [2, 5, 6], "notifi": 2, "implement": [2, 5, 6, 7], "fix": [2, 5, 6, 14, 16, 21, 28], "At": [2, 5, 6], "time": [2, 5, 6, 7], "code": [2, 3, 5, 6, 7, 8, 9, 11, 25], "instal": [3, 5, 6, 18], "requir": [3, 5, 6, 7, 9, 17, 28], "pypi": [3, 12, 25, 26], "conda": [3, 12, 22, 25, 26], "sourc": [3, 5, 6, 7, 10, 11], "build": [3, 8, 9, 10, 20], "doc": [3, 5, 6], "overview": [3, 5, 6], "workflow": 3, "perform": [3, 5, 6, 7], "futur": [3, 5, 6, 7, 8, 11, 12, 16], "motiv": 3, "why": [3, 5, 6], "conduct": 3, "our": [3, 8, 9], "pledg": 3, "standard": [3, 5, 6, 8, 9], "respons": [3, 5, 6], "scope": [3, 5, 6], "enforc": [3, 6], "note": [3, 4, 5, 6, 7, 9], "12": [3, 4, 5, 6, 7, 9, 10, 13], "6": [3, 4, 5, 6, 7, 10], "0": [3, 4, 5, 6, 7, 9, 10], "5": [3, 5, 6, 7, 9, 10, 26], "3": [3, 5, 6, 7, 10, 17, 22, 24], "2": [3, 5, 6, 7, 9, 10, 13, 16], "8": [3, 5, 6, 7, 10, 21, 24, 25], "7": [3, 5, 6, 7, 10, 17, 22], "api": [3, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "refer": [3, 5, 6], "index": [3, 4, 5, 6], "search": [3, 5, 6, 7], "page": [3, 4, 5, 6], "platform": [4, 5, 6, 11, 12, 16, 17], "depend": [4, 5, 6, 16, 22], "linux": [4, 5, 6, 7], "450": 4, "80": [4, 5, 6], "02": 4, "later": [4, 5, 6, 7, 9], "window": [4, 5, 6, 28], "456": 4, "38": [4, 5, 6], "toolkit": [4, 5, 6, 8, 13, 15, 17, 21, 22, 23, 25, 26, 27, 28], "onli": [4, 5, 6, 7, 9, 11], "redistribut": 4, "compon": [4, 5, 6], "guid": [4, 5, 6], "can": [4, 5, 6, 7, 8, 9], "guidanc": 4, "obtain": [4, 5, 6], "local": [4, 5, 6, 7, 9], "pip": 4, "packag": [4, 5, 6, 9, 11, 22, 25], "assign": [4, 5, 6, 7, 9], "provid": [4, 5, 6, 7, 8, 9], "header": [4, 5, 6, 7, 16, 17], "enabl": [4, 5, 6, 7, 9, 17], "writ": 4, "kernel": [4, 5, 6, 7], "share": [4, 5, 6, 9], "cython": [4, 8, 11, 12, 20, 24], "pyclibrari": 4, "remain": [4, 5, 6], "test": [4, 5, 6], "outlin": [4, 5, 6], "txt": 4, "must": [4, 5, 6, 7, 9, 23], "match": [4, 5, 6, 9], "major": [4, 5, 6, 7, 9], "minor": [4, 5, 6, 7, 9, 17], "compat": [4, 5, 6, 8, 9, 20, 24], "still": [4, 5, 6, 17], "dure": [4, 5, 6, 7, 16], "process": [4, 5, 6, 7, 8, 9, 28], "variabl": [4, 5, 6, 7, 13], "cuda_hom": [4, 16, 17], "cuda_path": 4, "find": [4, 5, 6], "locat": [4, 5, 6, 9, 17], "particular": [4, 5, 6, 7, 16], "path": [4, 5, 6, 7, 8], 
"usr": 4, "should": [4, 5, 6, 7, 9], "export": [4, 5, 6, 13], "To": [4, 5, 6, 7, 9], "extens": [4, 5, 6, 7], "run": [4, 5, 6, 7, 9], "setup": [4, 5, 6], "py": [4, 5, 6], "build_ext": 4, "inplac": 4, "debug": [4, 5, 6, 7], "gdb": [4, 5], "pass": [4, 5, 6, 7, 9], "argument": [4, 5, 6, 7, 9], "current": [4, 5, 6, 7, 9], "g": [4, 5, 6, 7], "port": [4, 5, 6], "bind": [4, 5, 6, 8, 9, 11, 12, 16], "env": [4, 5], "f": [4, 5, 6, 9], "docs_src": 4, "yml": 4, "activ": [4, 5, 6], "Then": 4, "step": [4, 5, 9], "abov": [4, 5, 6, 9], "cd": 4, "git": 4, "checkout": 4, "gh": 4, "cp": 4, "class": [5, 6, 7, 13], "cuuuid_st": [5, 6], "void_ptr": [5, 6], "_ptr": [5, 6], "byte": [5, 6, 7, 9], "definit": [5, 6, 7], "uuid": [5, 6], "getptr": [5, 6, 7], "get": [5, 6, 7, 8, 9], "cumemfabrichandle_st": 5, "fabric": [5, 6], "opaqu": [5, 6, 7], "same": [5, 6, 7, 9], "node": [5, 6], "ipc": [5, 6], "between": [5, 6, 7, 9, 27], "connect": [5, 6], "nvswitch": 5, "cuipceventhandle_st": 5, "reserv": [5, 6], "cuipcmemhandle_st": 5, "mem": 5, "custreambatchmemopparams_union": 5, "per": [5, 6, 9, 13], "paramet": [5, 6, 7, 9, 28], "custreambatchmemop": 5, "custreambatchmemoptyp": 5, "waitvalu": 5, "custreammemopwaitvalueparams_st": [5, 13], "writevalu": 5, "custreammemopwritevalueparams_st": 5, "flushremotewrit": 5, "custreammemopflushremotewritesparams_st": 5, "memorybarri": 5, "custreammemopmemorybarrierparams_st": 5, "pad": [5, 6], "list": [5, 6, 7, 9, 28], "cuuint64_t": [5, 6, 13], "cuda_batch_mem_op_node_params_v1_st": 5, "ctx": 5, "cucontext": [5, 6], "count": [5, 6], "unsign": [5, 6], "int": [5, 6, 7, 9], "paramarrai": 5, "custreambatchmemopparam": 5, "flag": [5, 6], "cuda_batch_mem_op_node_params_v2_st": 5, "batch": 5, "number": [5, 6, 7], "arrai": [5, 6, 7, 8, 9], "cuasyncnotificationinfo_st": 5, "user": [5, 6, 8], "async": [5, 6], "notif": [5, 6], "callback": [5, 6], "cuasyncnotificationtyp": 5, "info": [5, 6, 7], "anon_union2": [5, 6], "cudevprop_st": 5, "legaci": [5, 6, 13], "properti": [5, 6], "maxthreadsperblock": [5, 6], "maximum": [5, 6, 7], "thread": [5, 6, 7, 9, 13], "block": [5, 6, 7, 9], "maxthreadsdim": [5, 6], "each": [5, 6, 7, 9], "dimens": [5, 6], "maxgrids": [5, 6], "grid": [5, 6, 9], "sharedmemperblock": [5, 6], "totalconstantmemori": 5, "constant": [5, 6, 7], "simdwidth": 5, "warp": [5, 6], "mempitch": [5, 6], "pitch": [5, 6], "allow": [5, 6, 7, 8, 9, 17, 28], "copi": [5, 6, 9], "regsperblock": [5, 6], "32": [5, 6, 28], "bit": [5, 6], "regist": [5, 6, 7], "clockrat": [5, 6], "clock": [5, 6], "frequenc": [5, 6], "kilohertz": [5, 6], "texturealign": [5, 6], "cuaccesspolicywindow_st": 5, "specifi": [5, 6, 7], "contigu": [5, 6, 9], "extent": [5, 6], "begin": [5, 6], "base_ptr": [5, 6], "end": [5, 6, 7, 23], "num_byt": [5, 6], "limit": [5, 6, 7, 10], "cu_device_attribute_max_access_policy_window_s": 5, "partit": [5, 6], "mani": [5, 6], "segment": [5, 6], "sum": [5, 6], "hit": [5, 6], "approx": [5, 6], "ratio": [5, 6], "miss": [5, 6, 17], "fit": [5, 6], "capabl": [5, 6, 8, 9], "architectur": [5, 6, 7, 9], "hitprop": [5, 6], "missprop": [5, 6], "start": [5, 6], "restrict": [5, 6, 7], "size_t": [5, 6, 9], "hitratio": [5, 6], "percentag": [5, 6, 7], "line": [5, 6, 7, 9, 23], "rest": [5, 6], "float": [5, 6, 7, 9], "cuaccessproperti": [5, 6], "either": [5, 6, 7], "normal": [5, 6], "cuda_kernel_node_params_st": 5, "gpu": [5, 6, 7, 8, 9], "func": [5, 6], "launch": [5, 6, 7, 9, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cufunct": [5, 6], "griddimx": 5, "width": [5, 6], "griddimi": 5, 
"height": [5, 6], "griddimz": 5, "depth": [5, 6], "blockdimx": 5, "x": [5, 6, 9, 11, 12], "blockdimi": 5, "y": [5, 6, 9], "blockdimz": 5, "z": [5, 6, 9], "sharedmembyt": [5, 6], "dynam": [5, 6, 9], "kernelparam": [5, 6], "pointer": [5, 6, 7, 9], "extra": [5, 6, 7, 9], "cuda_kernel_node_params_v2_st": 5, "kern": 5, "referenc": [5, 6], "null": [5, 6, 7], "cukernel": 5, "task": [5, 6], "valu": [5, 6, 7, 9, 13], "indic": [5, 6, 7], "field": [5, 6], "ignor": [5, 6, 7, 9], "cuda_kernel_node_params_v3_st": 5, "cuda_memset_node_params_st": 5, "memset": [5, 6], "dst": [5, 6], "destin": [5, 6], "cudeviceptr": [5, 9, 27], "unus": [5, 6], "elements": [5, 6], "element": [5, 6], "row": [5, 6, 7], "cuda_memset_node_params_v2_st": 5, "cuda_host_node_params_st": 5, "host": [5, 6, 8, 9], "fn": [5, 6], "function": [5, 6, 7, 9, 10], "call": [5, 6, 7, 9], "cuhostfn": 5, "userdata": [5, 6], "cuda_host_node_params_v2_st": 5, "cuda_conditional_node_param": 5, "condit": [5, 6], "cugraphconditionalhandlecr": 5, "cugraphconditionalhandl": 5, "cugraphconditionalnodetyp": 5, "output": [5, 6, 7, 9], "phgraph_out": [5, 6], "own": [5, 6, 8, 9], "popul": [5, 6, 7], "child": [5, 6], "creation": [5, 6, 9], "valid": [5, 6, 7], "lifetim": [5, 6], "content": [5, 6, 7], "subject": [5, 6, 9], "constraint": [5, 6], "empti": [5, 6], "memcopi": [5, 6], "recurs": [5, 6], "nest": [5, 6], "belong": [5, 6], "These": [5, 6, 7], "custreambegincapturetograph": 5, "cugraph": [5, 6], "cugraphedgedata_st": 5, "annot": [5, 6, 7], "edg": [5, 6], "implicitli": [5, 6], "default": [5, 6, 7, 10, 12], "zero": [5, 6, 7, 9], "A": [5, 6, 9, 16], "struct": [5, 6], "full": [5, 6, 8, 9], "serial": [5, 6, 9], "visibl": [5, 6], "from_port": [5, 6], "trigger": [5, 6], "upstream": [5, 6], "mean": [5, 6], "specfic": [5, 6], "case": [5, 6, 7], "complet": [5, 6, 9], "downstream": [5, 6], "portion": [5, 6], "thereof": [5, 6], "to_port": [5, 6], "non": [5, 6, 7], "cu_graph_kernel_node_port_default": 5, "cu_graph_kernel_node_port_programmat": 5, "cu_graph_kernel_node_port_launch_ord": 5, "entireti": [5, 6], "work": [5, 6, 9], "accordingli": [5, 6], "cugraphdependencytyp": 5, "It": [5, 6, 7, 9, 17], "char": [5, 6, 7, 9], "due": [5, 6], "layout": [5, 6], "bitfield": [5, 6], "ensur": [5, 6, 9], "addit": [5, 6], "ad": [5, 6], "cuda_graph_instantiate_params_st": 5, "instanti": [5, 6], "huploadstream": 5, "upload": [5, 6], "custream": [5, 6], "herrnode_out": [5, 6], "caus": [5, 6, 7], "fail": [5, 6], "cugraphnod": [5, 6], "result_out": [5, 6], "whether": [5, 6, 7], "wa": [5, 6, 7, 9], "success": [5, 6], "cugraphinstantiateresult": 5, "culaunchmemsyncdomainmap_st": 5, "synchron": [5, 6, 13], "domain": [5, 6], "cudalaunchmemsyncdomain": [5, 6], "By": [5, 6, 17], "cu_launch_mem_sync_domain_remot": 5, "id": [5, 6], "also": [5, 6, 7, 8, 9], "alter": [5, 6], "culaunchmemsyncdomainmap": 5, "cu_launch_attribute_mem_sync_domain_map": 5, "rang": [5, 6, 7], "through": [5, 6, 7, 13], "cu_device_attribute_mem_sync_domain_count": 5, "default_": [5, 6], "design": [5, 6, 9], "remot": [5, 6], "culaunchattributevalue_union": 5, "union": [5, 6], "culaunchattribut": 5, "accesspolicywindow": [5, 6], "cu_launch_attribute_access_policy_window": 5, "cuaccesspolicywindow": 5, "cooper": [5, 6], "cu_launch_attribute_coop": 5, "nonzero": [5, 6], "culaunchcooperativekernel": 5, "syncpolici": [5, 6], "cu_launch_attribute_synchronization_polici": 5, "cusynchronizationpolici": 5, "queu": [5, 6], "up": [5, 6, 7, 9], "clusterdim": [5, 6], "cu_launch_attribute_cluster_dimens": 5, "desir": [5, 6], 
"cluster": [5, 6], "divisor": [5, 6], "anon_struct1": 5, "clusterschedulingpolicyprefer": [5, 6], "cu_launch_attribute_cluster_scheduling_policy_prefer": 5, "prefer": [5, 6, 8], "cuclusterschedulingpolici": 5, "programmaticstreamserializationallow": [5, 6], "cu_launch_attribute_programmatic_stream_seri": 5, "programmaticev": [5, 6], "cu_launch_attribute_programmatic_ev": 5, "cuevent": [5, 6], "fire": [5, 6], "record": [5, 6], "cueventrecordwithflag": 5, "doe": [5, 6, 7, 9], "cu_event_record_extern": 5, "triggeratblockstart": [5, 6], "automat": [5, 6, 9], "anon_struct2": 5, "launchcompletionev": [5, 6], "cu_launch_attribute_launch_completion_ev": 5, "last": [5, 6, 9], "anon_struct3": 5, "cu_launch_attribute_prior": 5, "memsyncdomainmap": [5, 6], "memsyncdomain": [5, 6], "cu_launch_attribute_mem_sync_domain": 5, "culaunchmemsyncdomain": 5, "deviceupdatablekernelnod": [5, 6], "cu_launch_attribute_device_updatable_kernel_nod": 5, "deviceupdat": [5, 6], "updat": [5, 6, 7, 18], "cugraphdevicenod": 5, "devnod": [5, 6], "return": [5, 6, 7, 9], "variou": [5, 6], "side": [5, 6], "anon_struct4": 5, "sharedmemcarveout": [5, 6], "cu_launch_attribute_preferred_shared_memory_carveout": 5, "culaunchattribute_st": 5, "culaunchattributeid": 5, "culaunchattributevalu": 5, "culaunchconfig_st": 5, "configur": [5, 6, 9], "hstream": [5, 6], "identifi": [5, 6], "attr": [5, 6], "nullabl": [5, 6], "culaunchconfig": 5, "numattr": 5, "cuexecaffinitysmcount_st": 5, "cu_exec_affinity_type_sm_count": 5, "val": [5, 6], "sm": [5, 6], "cuexecaffinityparam_st": 5, "affin": [5, 6], "cuexecaffinitytyp": 5, "param": [5, 6], "anon_union3": 5, "cuctxcigparam_st": 5, "cig": [5, 6], "shareddatatyp": 5, "cucigdatatyp": 5, "shareddata": 5, "cuctxcreateparams_st": 5, "exactli": [5, 6], "one": [5, 6, 7, 8], "execaffinityparam": 5, "cigparam": 5, "cuexecaffinityparam": 5, "numexecaffinityparam": 5, "cuctxcigparam": 5, "culibraryhostuniversalfunctionanddatatable_st": 5, "functiont": 5, "functionwindows": 5, "datat": 5, "datawindows": 5, "cuda_memcpy2d_st": 5, "2d": [5, 6], "srcxinbyt": 5, "srcy": 5, "srcmemorytyp": 5, "cumemorytyp": 5, "srchost": 5, "srcdevic": [5, 6], "srcarrai": [5, 6], "cuarrai": [5, 6], "srcpitch": 5, "src": [5, 6, 7], "dstxinbyt": 5, "dsty": 5, "dstmemorytyp": 5, "dsthost": 5, "dstdevic": [5, 6], "dstarrai": [5, 6], "dstpitch": 5, "widthinbyt": 5, "cuda_memcpy3d_st": 5, "3d": [5, 6], "srcz": 5, "srclod": 5, "lod": 5, "reserved0": [5, 6], "srcheight": 5, "dstz": 5, "dstlod": 5, "reserved1": [5, 6], "dstheight": 5, "cuda_memcpy3d_peer_st": 5, "cross": [5, 6], "srccontext": 5, "cu_memorytype_arrai": [5, 6], "dstcontext": 5, "cuda_memcpy_node_params_st": 5, "memcpi": [5, 6], "copyctx": 5, "copyparam": [5, 6], "cuda_memcpy3d": 5, "cuda_array_descriptor_st": 5, "descriptor": [5, 6], "format": [5, 6, 9], "cuarray_format": 5, "numchannel": [5, 6], "channel": [5, 6, 25], "cuda_array3d_descriptor_st": 5, "cuda_array_sparse_properties_st": 5, "spars": [5, 6], "tileext": [5, 6], "anon_struct5": 5, "miptailfirstlevel": [5, 6], "first": [5, 6, 7, 9], "mip": [5, 6], "tail": [5, 6], "miptails": [5, 6], "total": [5, 6], "long": [5, 6, 7, 9], "cu_array_sparse_properties_single_miptail": 5, "cuda_array_memory_requirements_st": 5, "cuda_resource_desc_st": 5, "restyp": [5, 6], "curesourcetyp": [5, 6], "re": [5, 6, 9, 16], "anon_union4": 5, "cuda_texture_desc_st": 5, "addressmod": [5, 6], "mode": [5, 6, 7], "cuaddress_mod": 5, "filtermod": [5, 6], "filter": [5, 6], "cufilter_mod": 5, "maxanisotropi": [5, 6], "anisotropi": [5, 6], 
"mipmapfiltermod": [5, 6], "mipmap": [5, 6], "mipmaplevelbia": [5, 6], "bia": 5, "minmipmaplevelclamp": [5, 6], "minimum": [5, 6, 7, 8], "clamp": [5, 6], "maxmipmaplevelclamp": [5, 6], "bordercolor": [5, 6], "border": [5, 6], "color": [5, 6], "cuda_resource_view_desc_st": 5, "view": [5, 6], "curesourceviewformat": 5, "firstmipmaplevel": [5, 6], "lastmipmaplevel": [5, 6], "firstlay": [5, 6], "layer": [5, 6], "lastlay": [5, 6], "cutensormap_st": 5, "64": [5, 6], "cuda_pointer_attribute_p2p_tokens_st": 5, "direct": [5, 6, 7], "v3": 5, "token": [5, 7], "p2ptoken": 5, "vaspacetoken": 5, "cuda_launch_params_st": 5, "cuda_external_memory_handle_desc_st": 5, "cuexternalmemoryhandletyp": 5, "anon_union5": 5, "cuda_external_memory_ded": 5, "cuda_external_memory_buffer_desc_st": 5, "buffer": [5, 6], "offset": [5, 6], "where": [5, 6], "cuda_external_memory_mipmapped_array_desc_st": 5, "chain": [5, 6], "arraydesc": 5, "cuda_array3d_descriptor": 5, "numlevel": [5, 6], "cuda_external_semaphore_handle_desc_st": 5, "semaphor": [5, 6], "cuexternalsemaphorehandletyp": 5, "anon_union6": 5, "cuda_external_semaphore_signal_params_st": 5, "signal": [5, 6], "anon_struct15": [5, 6], "cuda_external_semaphore_signal_param": 5, "cuexternalsemaphor": 5, "cu_external_semaphore_handle_type_nvscisync": 5, "cuda_external_semaphore_signal_skip_nvscibuf_memsync": 5, "while": [5, 6], "import": [5, 6, 8, 9], "cu_external_memory_handle_type_nvscibuf": 5, "cuda_external_semaphore_wait_params_st": 5, "wait": [5, 6], "anon_struct18": [5, 6], "cuda_external_semaphore_wait_param": 5, "cuda_external_semaphore_wait_skip_nvscibuf_memsync": 5, "cuda_ext_sem_signal_node_params_st": 5, "extsemarrai": [5, 6], "paramsarrai": [5, 6], "numextsem": [5, 6], "suppli": [5, 6], "cuda_ext_sem_signal_node_params_v2_st": 5, "cuda_ext_sem_wait_node_params_st": 5, "cuda_ext_sem_wait_node_params_v2_st": 5, "cuarraymapinfo_st": 5, "resourcetyp": 5, "anon_union9": [5, 6], "subresourcetyp": 5, "subresourc": [5, 6], "cuarraysparsesubresourcetyp": 5, "anon_union10": [5, 6], "memoperationtyp": 5, "cumemoperationtyp": 5, "memhandletyp": 5, "cumemhandletyp": 5, "memhandl": 5, "anon_union11": 5, "devicebitmask": 5, "ordin": [5, 6], "mask": [5, 6], "now": [5, 6, 8, 9, 13, 16, 17], "cumemlocation_st": 5, "modifi": [5, 6, 7], "cumemlocationtyp": [5, 6], "given": [5, 6, 7], "cumemallocationprop_st": 5, "cumemallocationtyp": 5, "requestedhandletyp": 5, "cumemallocationhandletyp": [5, 6], "cumemloc": 5, "win32handlemetadata": 5, "pobject_attribut": 5, "cu_mem_handle_type_win32": 5, "structur": [5, 6, 13], "secur": [5, 6], "transfer": [5, 6, 9], "allocflag": 5, "anon_struct21": [5, 6], "cumulticastobjectprop_st": 5, "numdevic": 5, "amount": [5, 6, 7], "bound": [5, 6], "handletyp": [5, 6], "bitmask": [5, 6], "cumemaccessdesc_st": 5, "cumemprot": [5, 6], "cumemaccess_flag": 5, "cugraphexecupdateresultinfo_st": 5, "cugraphexecupd": 5, "give": [5, 6], "more": [5, 6, 7, 8, 9], "cugraphexecupdateresult": 5, "errornod": [5, 6], "topolog": [5, 6], "associ": [5, 6, 7, 9], "errorfromnod": [5, 6], "cumempoolprops_st": 5, "made": [5, 6], "pool": [5, 6, 7], "alloctyp": [5, 6], "cu_mem_allocation_type_pin": 5, "resid": [5, 6, 9], "win32securityattribut": [5, 6], "lpsecurityattribut": [5, 6], "maxsiz": [5, 6], "system": [5, 6, 7, 9], "usag": [5, 6], "intend": [5, 6, 7], "short": [5, 6, 7, 16], "cumempoolptrexportdata_st": 5, "cuda_mem_alloc_node_params_v1_st": 5, "poolprop": [5, 6], "cu_mem_handle_type_non": 5, "cumempoolprop": 5, "accessdesc": [5, 6], "cumemaccessdesc": 5, 
"accessdesccount": [5, 6], "exce": [5, 6], "bytes": [5, 6], "dptr": [5, 6], "out": [5, 6, 9], "cuda_mem_alloc_node_params_v2_st": 5, "cuda_mem_free_node_params_st": 5, "cuda_child_graph_node_params_st": 5, "clone": [5, 6], "cuda_event_record_node_params_st": 5, "cuda_event_wait_node_params_st": 5, "cugraphnodeparams_st": 5, "cugraphaddnod": [5, 6], "cugraphnodetyp": 5, "cuda_kernel_node_params_v3": 5, "cuda_memcpy_node_param": 5, "cuda_memset_node_params_v2": 5, "cuda_host_node_params_v2": 5, "cuda_child_graph_node_param": 5, "eventwait": [5, 6], "cuda_event_wait_node_param": 5, "eventrecord": [5, 6], "cuda_event_record_node_param": 5, "extsemsign": [5, 6], "cuda_ext_sem_signal_node_params_v2": 5, "extsemwait": [5, 6], "cuda_ext_sem_wait_node_params_v2": 5, "cuda_mem_alloc_node_params_v2": 5, "cuda_mem_free_node_param": 5, "memop": [5, 6], "cuda_batch_mem_op_node_params_v2": 5, "reserved2": [5, 6], "cueglframe_st": 5, "eglfram": [5, 6], "frame": [5, 6], "contain": [5, 6, 7], "plane": [5, 6], "multiplanar": [5, 6], "anon_union14": 5, "planecount": [5, 6], "frametyp": [5, 6], "cueglframetyp": 5, "eglcolorformat": [5, 6], "cueglcolorformat": 5, "cuformat": 5, "cuipcmem_flag": 5, "cu_ipc_mem_lazy_enable_peer_access": 5, "cumemattach_flag": 5, "attach": [5, 6], "cu_mem_attach_glob": 5, "cu_mem_attach_host": 5, "cannot": [5, 6], "cu_mem_attach_singl": 5, "singl": [5, 6, 7, 8, 9], "cuctx_flag": 5, "cu_ctx_sched_auto": 5, "cu_ctx_sched_spin": 5, "spin": [5, 6], "cu_ctx_sched_yield": 5, "yield": [5, 6], "cu_ctx_sched_blocking_sync": 5, "cu_ctx_blocking_sync": 5, "deprec": [5, 6, 7, 13], "cu_ctx_sched_mask": 5, "cu_ctx_map_host": 5, "cu_ctx_lmem_resize_to_max": 5, "16": [5, 6, 11, 25], "keep": [5, 6], "after": [5, 6, 7, 9], "cu_ctx_coredump_en": 5, "except": [5, 6, 9], "cu_ctx_user_coredump_en": 5, "pipe": 5, "cu_ctx_sync_memop": 5, "128": [5, 6], "cu_ctx_flags_mask": 5, "255": [5, 6], "cuevent_sched_flag": 5, "sched": 5, "cu_event_sched_auto": 5, "cu_event_sched_spin": 5, "cu_event_sched_yield": 5, "cu_event_sched_blocking_sync": 5, "cl_event_flag": 5, "nvcl": 5, "nvcl_event_sched_auto": 5, "nvcl_event_sched_spin": 5, "nvcl_event_sched_yield": 5, "nvcl_event_sched_blocking_sync": 5, "cl_context_flag": 5, "nvcl_ctx_sched_auto": 5, "nvcl_ctx_sched_spin": 5, "nvcl_ctx_sched_yield": 5, "nvcl_ctx_sched_blocking_sync": 5, "custream_flag": 5, "cu_stream_default": 5, "cu_stream_non_block": 5, "cuevent_flag": 5, "cu_event_default": 5, "cu_event_blocking_sync": 5, "cu_event_disable_tim": 5, "cu_event_interprocess": 5, "suitabl": [5, 6], "interprocess": [5, 6], "cuevent_record_flag": 5, "cu_event_record_default": 5, "captur": [5, 6], "instead": [5, 6, 7, 16, 19], "invalid": [5, 6], "outsid": [5, 6, 9], "cuevent_wait_flag": 5, "cu_event_wait_default": 5, "cu_event_wait_extern": 5, "custreamwaitvalue_flag": 5, "custreamwaitvalue32": 5, "custreamwaitvalue64": 5, "cu_stream_wait_value_geq": 5, "until": [5, 6, 7, 9], "int32_t": 5, "addr": 5, "int64_t": 5, "cyclic": 5, "comparison": 5, "wraparound": 5, "cu_stream_wait_value_eq": 5, "cu_stream_wait_value_and": 5, "cu_stream_wait_value_nor": 5, "cudevicegetattribut": [5, 6, 9], "cu_device_attribute_can_use_stream_wait_value_nor": 5, "cu_stream_wait_value_flush": [5, 6], "1073741824": 5, "flush": [5, 6, 7], "outstand": [5, 6], "write": [5, 6, 9], "guarante": [5, 6], "reach": [5, 6], "befor": [5, 6, 7, 9, 13], "satisfi": [5, 6], "permit": [5, 6], "reorder": 5, "intern": [5, 6], "would": [5, 6, 9], "arriv": 5, "second": [5, 6], "observ": [5, 6], "select": [5, 6, 7], 
"cu_device_attribute_can_flush_remote_writ": 5, "custreamwritevalue_flag": 5, "custreamwritevalue32": 5, "cu_stream_write_value_default": 5, "cu_stream_write_value_no_memory_barri": 5, "were": [5, 6], "optim": [5, 6, 7], "fenc": [5, 6], "ha": [5, 6, 8, 9, 16, 17], "similar": [5, 6], "semant": [5, 6], "__threadfence_system": 5, "rather": [5, 6, 25], "than": [5, 6, 7, 25], "v2": 5, "cu_stream_mem_op_wait_value_32": 5, "cu_stream_mem_op_write_value_32": 5, "cu_stream_mem_op_wait_value_64": 5, "cu_stream_mem_op_write_value_64": 5, "custreamwritevalue64": 5, "cu_stream_mem_op_barri": 5, "insert": [5, 6], "barrier": [5, 6, 8], "cu_stream_mem_op_flush_remote_writ": [5, 6], "effect": [5, 6], "standalon": 5, "custreammemorybarrier_flag": 5, "custreammemorybarri": 5, "cu_stream_memory_barrier_type_si": 5, "wide": [5, 6], "cu_stream_memory_barrier_type_gpu": 5, "cuoccupancy_flag": 5, "calcul": [5, 6], "cu_occupancy_default": 5, "cu_occupancy_disable_caching_overrid": 5, "assum": [5, 6, 7], "global": [5, 6, 7], "cach": [5, 6], "turn": [5, 6, 7], "off": [5, 6, 7], "custreamupdatecapturedependencies_flag": 5, "custreamupdatecapturedepend": 5, "cu_stream_add_capture_depend": 5, "add": [5, 6, 7, 22, 26, 28], "new": [5, 6, 7, 8, 20, 25], "cu_stream_set_capture_depend": 5, "replac": [5, 6, 8], "sent": [5, 6], "cu_async_notification_type_over_budget": 5, "cu_ad_format_unsigned_int8": 5, "integ": [5, 6], "cu_ad_format_unsigned_int16": 5, "cu_ad_format_unsigned_int32": 5, "cu_ad_format_signed_int8": 5, "sign": [5, 6], "cu_ad_format_signed_int16": 5, "cu_ad_format_signed_int32": 5, "cu_ad_format_half": 5, "cu_ad_format_float": 5, "cu_ad_format_nv12": 5, "176": 5, "yuv": [5, 6], "planar": [5, 6], "sampl": [5, 6], "cu_ad_format_unorm_int8x1": 5, "192": 5, "cu_ad_format_unorm_int8x2": 5, "193": 5, "cu_ad_format_unorm_int8x4": 5, "194": 5, "cu_ad_format_unorm_int16x1": 5, "195": 5, "cu_ad_format_unorm_int16x2": 5, "196": 5, "cu_ad_format_unorm_int16x4": 5, "197": 5, "cu_ad_format_snorm_int8x1": 5, "198": 5, "cu_ad_format_snorm_int8x2": 5, "199": 5, "cu_ad_format_snorm_int8x4": 5, "200": [5, 6], "cu_ad_format_snorm_int16x1": 5, "201": [5, 6], "cu_ad_format_snorm_int16x2": 5, "202": 5, "cu_ad_format_snorm_int16x4": 5, "203": 5, "cu_ad_format_bc1_unorm": 5, "145": 5, "compress": [5, 6], "bc1": [5, 6], "cu_ad_format_bc1_unorm_srgb": 5, "146": 5, "srgb": [5, 6], "encod": [5, 6, 9], "cu_ad_format_bc2_unorm": 5, "147": 5, "bc2": [5, 6], "cu_ad_format_bc2_unorm_srgb": 5, "148": 5, "cu_ad_format_bc3_unorm": 5, "149": 5, "bc3": [5, 6], "cu_ad_format_bc3_unorm_srgb": 5, "150": 5, "cu_ad_format_bc4_unorm": 5, "151": 5, "bc4": [5, 6], "cu_ad_format_bc4_snorm": 5, "152": 5, "cu_ad_format_bc5_unorm": 5, "153": 5, "bc5": [5, 6], "cu_ad_format_bc5_snorm": 5, "154": 5, "cu_ad_format_bc6h_uf16": 5, "155": 5, "half": [5, 6], "bc6h": [5, 6], "cu_ad_format_bc6h_sf16": 5, "156": 5, "cu_ad_format_bc7_unorm": 5, "157": 5, "bc7": [5, 6], "cu_ad_format_bc7_unorm_srgb": 5, "158": 5, "cu_ad_format_p010": 5, "159": 5, "cu_ad_format_p016": 5, "161": 5, "cu_ad_format_nv16": 5, "162": 5, "cu_ad_format_p210": 5, "163": 5, "cu_ad_format_p216": 5, "164": 5, "cu_ad_format_yuy2": 5, "165": 5, "pack": 5, "cu_ad_format_y210": 5, "166": 5, "cu_ad_format_y216": 5, "167": 5, "cu_ad_format_ayuv": 5, "168": 5, "cu_ad_format_y410": 5, "169": 5, "cu_ad_format_y416": 5, "177": 5, "cu_ad_format_y444_planar8": 5, "178": 5, "cu_ad_format_y444_planar10": 5, "179": 5, "cu_ad_format_max": 5, "2147483647": [5, 6], "cu_tr_address_mode_wrap": 5, "wrap": [5, 6, 9, 13], 
"cu_tr_address_mode_clamp": 5, "cu_tr_address_mode_mirror": 5, "mirror": [5, 6], "cu_tr_address_mode_bord": 5, "cu_tr_filter_mode_point": 5, "cu_tr_filter_mode_linear": 5, "linear": [5, 6], "cudevice_attribut": [5, 9], "cu_device_attribute_max_threads_per_block": 5, "cu_device_attribute_max_block_dim_x": 5, "cu_device_attribute_max_block_dim_i": 5, "cu_device_attribute_max_block_dim_z": 5, "cu_device_attribute_max_grid_dim_x": 5, "cu_device_attribute_max_grid_dim_i": 5, "cu_device_attribute_max_grid_dim_z": 5, "cu_device_attribute_max_shared_memory_per_block": 5, "cu_device_attribute_shared_memory_per_block": 5, "cu_device_attribute_total_constant_memori": 5, "cu_device_attribute_warp_s": 5, "cu_device_attribute_max_pitch": 5, "cu_device_attribute_max_registers_per_block": 5, "cu_device_attribute_registers_per_block": 5, "cu_device_attribute_clock_r": 5, "13": [5, 6, 9], "typic": [5, 6, 7], "cu_device_attribute_texture_align": 5, "14": [5, 6, 7, 9], "cu_device_attribute_gpu_overlap": 5, "15": [5, 6], "possibl": [5, 6, 9], "concurr": [5, 6, 7], "cu_device_attribute_async_engine_count": 5, "cu_device_attribute_multiprocessor_count": 5, "multiprocessor": [5, 6], "cu_device_attribute_kernel_exec_timeout": 5, "17": [5, 6, 7], "cu_device_attribute_integr": 5, "18": [5, 6, 12, 14, 19], "integr": [5, 6], "cu_device_attribute_can_map_host_memori": 5, "19": [5, 6, 25], "cu_device_attribute_compute_mod": 5, "20": [5, 6, 7], "comput": [5, 6, 8, 9], "cucomputemod": 5, "cu_device_attribute_maximum_texture1d_width": 5, "21": [5, 6, 27], "1d": [5, 6], "cu_device_attribute_maximum_texture2d_width": 5, "22": [5, 6, 16], "cu_device_attribute_maximum_texture2d_height": 5, "23": [5, 6, 20], "cu_device_attribute_maximum_texture3d_width": 5, "24": [5, 6, 17], "cu_device_attribute_maximum_texture3d_height": 5, "25": [5, 6], "cu_device_attribute_maximum_texture3d_depth": 5, "26": [5, 6], "cu_device_attribute_maximum_texture2d_layered_width": 5, "27": [5, 6, 18], "cu_device_attribute_maximum_texture2d_layered_height": 5, "28": [5, 6, 22, 23], "cu_device_attribute_maximum_texture2d_layered_lay": 5, "29": [5, 6, 16], "cu_device_attribute_maximum_texture2d_array_width": 5, "cu_device_attribute_maximum_texture2d_array_height": 5, "cu_device_attribute_maximum_texture2d_array_numslic": 5, "cu_device_attribute_surface_align": 5, "30": [5, 6], "cu_device_attribute_concurrent_kernel": 5, "31": [5, 6], "multipl": [5, 6, 9], "cu_device_attribute_ecc_en": 5, "ecc": [5, 6], "cu_device_attribute_pci_bus_id": 5, "33": [5, 6], "pci": [5, 6], "bu": [5, 6], "cu_device_attribute_pci_device_id": 5, "34": [5, 6], "cu_device_attribute_tcc_driv": 5, "35": [5, 6], "tcc": [5, 6], "model": [5, 6, 9], "cu_device_attribute_memory_clock_r": 5, "36": [5, 6], "peak": [5, 6], "cu_device_attribute_global_memory_bus_width": 5, "37": [5, 6], "cu_device_attribute_l2_cache_s": 5, "l2": [5, 6], "cu_device_attribute_max_threads_per_multiprocessor": 5, "39": [5, 6], "40": [5, 6], "asynchron": [5, 6, 9], "engin": [5, 6, 8, 9], "cu_device_attribute_unified_address": 5, "41": [5, 6, 22], "cu_device_attribute_maximum_texture1d_layered_width": 5, "42": [5, 6, 22], "cu_device_attribute_maximum_texture1d_layered_lay": 5, "43": [5, 6, 22], "cu_device_attribute_can_tex2d_gath": 5, "44": [5, 6, 23], "cu_device_attribute_maximum_texture2d_gather_width": 5, "45": [5, 6, 23], "cuda_array3d_texture_gath": 5, "cu_device_attribute_maximum_texture2d_gather_height": 5, "46": [5, 6], "cu_device_attribute_maximum_texture3d_width_altern": 5, "47": [5, 6], "altern": [5, 6, 
7], "cu_device_attribute_maximum_texture3d_height_altern": 5, "48": [5, 6, 25], "cu_device_attribute_maximum_texture3d_depth_altern": 5, "49": [5, 6], "cu_device_attribute_pci_domain_id": 5, "50": [5, 6], "cu_device_attribute_texture_pitch_align": 5, "51": [5, 6, 25], "cu_device_attribute_maximum_texturecubemap_width": 5, "52": [5, 6], "cubemap": [5, 6], "cu_device_attribute_maximum_texturecubemap_layered_width": 5, "53": [5, 6], "cu_device_attribute_maximum_texturecubemap_layered_lay": 5, "54": [5, 6], "cu_device_attribute_maximum_surface1d_width": 5, "55": [5, 6], "cu_device_attribute_maximum_surface2d_width": 5, "56": [5, 6], "cu_device_attribute_maximum_surface2d_height": 5, "57": [5, 6], "cu_device_attribute_maximum_surface3d_width": 5, "58": [5, 6, 27], "cu_device_attribute_maximum_surface3d_height": 5, "59": [5, 6], "cu_device_attribute_maximum_surface3d_depth": 5, "60": [5, 6], "cu_device_attribute_maximum_surface1d_layered_width": 5, "61": [5, 6], "cu_device_attribute_maximum_surface1d_layered_lay": 5, "62": [5, 6], "cu_device_attribute_maximum_surface2d_layered_width": 5, "63": [5, 6], "cu_device_attribute_maximum_surface2d_layered_height": 5, "cu_device_attribute_maximum_surface2d_layered_lay": 5, "65": [5, 6], "cu_device_attribute_maximum_surfacecubemap_width": 5, "66": [5, 6], "cu_device_attribute_maximum_surfacecubemap_layered_width": 5, "67": [5, 6], "cu_device_attribute_maximum_surfacecubemap_layered_lay": 5, "68": [5, 6], "cu_device_attribute_maximum_texture1d_linear_width": 5, "69": [5, 6], "cudadevicegettexture1dlinearmaxwidth": [5, 6], "cudevicegettexture1dlinearmaxwidth": [5, 6], "cu_device_attribute_maximum_texture2d_linear_width": 5, "70": [5, 6], "cu_device_attribute_maximum_texture2d_linear_height": 5, "71": [5, 6], "cu_device_attribute_maximum_texture2d_linear_pitch": 5, "72": [5, 6, 28], "cu_device_attribute_maximum_texture2d_mipmapped_width": 5, "73": [5, 6, 28], "cu_device_attribute_maximum_texture2d_mipmapped_height": 5, "74": [5, 6], "cu_device_attribute_compute_capability_major": [5, 9], "75": [5, 6], "cu_device_attribute_compute_capability_minor": [5, 9], "76": [5, 6], "cu_device_attribute_maximum_texture1d_mipmapped_width": 5, "77": [5, 6], "cu_device_attribute_stream_priorities_support": 5, "78": [5, 6], "cu_device_attribute_global_l1_cache_support": 5, "79": [5, 6], "l1": [5, 6], "cu_device_attribute_local_l1_cache_support": 5, "cu_device_attribute_max_shared_memory_per_multiprocessor": 5, "81": [5, 6], "cu_device_attribute_max_registers_per_multiprocessor": 5, "82": [5, 6], "cu_device_attribute_managed_memori": 5, "83": [5, 6], "cu_device_attribute_multi_gpu_board": 5, "84": [5, 6], "multi": [5, 6], "board": [5, 6], "cu_device_attribute_multi_gpu_board_group_id": 5, "85": [5, 6], "uniqu": [5, 6], "group": [5, 6], "cu_device_attribute_host_native_atomic_support": 5, "86": [5, 6], "link": [5, 6, 7], "nativ": [5, 6], "atom": [5, 6], "placehold": 5, "hardwar": [5, 6], "cu_device_attribute_single_to_double_precision_perf_ratio": 5, "87": [5, 6], "precis": [5, 6, 7], "doubl": [5, 6], "cu_device_attribute_pageable_memory_access": 5, "88": [5, 6], "coher": [5, 6], "pageabl": [5, 6], "cudahostregist": [5, 6], "cu_device_attribute_concurrent_managed_access": 5, "89": [5, 6], "cpu": [5, 6, 8, 9], "cu_device_attribute_compute_preemption_support": 5, "90": [5, 6], "preemption": [5, 6], "cu_device_attribute_can_use_host_pointer_for_registered_mem": 5, "91": [5, 6], "cu_device_attribute_can_use_stream_mem_ops_v1": 5, "92": [5, 6], "along": [5, 6, 7], "v1": 5, "relat": 
[5, 6], "cu_device_attribute_can_use_64_bit_stream_mem_ops_v1": 5, "93": [5, 6], "cu_device_attribute_can_use_stream_wait_value_nor_v1": 5, "94": [5, 6], "cu_device_attribute_cooperative_launch": 5, "95": [5, 6], "cu_device_attribute_cooperative_multi_device_launch": 5, "96": [5, 6], "culaunchcooperativekernelmultidevic": 5, "cu_device_attribute_max_shared_memory_per_block_optin": 5, "97": [5, 6], "optin": [5, 6], "98": [5, 6], "cu_device_attribute_host_register_support": 5, "99": [5, 6], "registr": [5, 6], "cu_device_attribute_pageable_memory_access_uses_host_page_t": 5, "100": [5, 6], "tabl": [5, 6, 7, 9], "cu_device_attribute_direct_managed_mem_access_from_host": 5, "101": [5, 6, 7], "directli": [5, 6, 7], "migrat": [5, 6], "cu_device_attribute_virtual_address_management_support": 5, "102": [5, 6], "cu_device_attribute_virtual_memory_management_support": 5, "like": [5, 6, 9], "cumemaddressreserv": 5, "cumemcr": [5, 6], "cumemmap": 5, "cu_device_attribute_handle_type_posix_file_descriptor_support": 5, "103": [5, 6], "posix": [5, 6], "cumemexporttoshareablehandl": 5, "cu_device_attribute_handle_type_win32_handle_support": 5, "104": [5, 6], "win32": [5, 6], "nt": [5, 6], "cu_device_attribute_handle_type_win32_kmt_handle_support": 5, "105": [5, 6], "kmt": [5, 6], "cu_device_attribute_max_blocks_per_multiprocessor": 5, "106": [5, 6], "cu_device_attribute_generic_compression_support": 5, "107": [5, 6], "cu_device_attribute_max_persisting_l2_cache_s": 5, "108": [5, 6], "persist": [5, 6], "capac": [5, 6], "109": [5, 6], "cu_device_attribute_gpu_direct_rdma_with_cuda_vmm_support": 5, "110": [5, 6], "gpudirect": [5, 6], "rdma": [5, 6], "cu_device_attribute_reserved_shared_memory_per_block": 5, "111": [5, 6], "cu_device_attribute_sparse_cuda_array_support": 5, "112": [5, 6], "cu_device_attribute_read_only_host_register_support": 5, "113": [5, 6], "cumemhostregist": [5, 6], "cu_memhostergister_read_onli": 5, "read": [5, 6], "cu_device_attribute_timeline_semaphore_interop_support": 5, "114": [5, 6], "timelin": [5, 6], "interop": [5, 6, 27], "cu_device_attribute_memory_pools_support": 5, "115": [5, 6], "cumemallocasync": [5, 6], "cumempool": 5, "famili": [5, 6], "cu_device_attribute_gpu_direct_rdma_support": 5, "116": [5, 6], "nvidia_p2p_get_pag": [5, 6], "cu_device_attribute_gpu_direct_rdma_flush_writes_opt": 5, "117": [5, 6], "shall": [5, 6], "interpret": [5, 6, 9], "cuflushgpudirectrdmawritesopt": 5, "enum": [5, 6], "cu_device_attribute_gpu_direct_rdma_writes_ord": 5, "118": [5, 6], "consum": [5, 6], "cugpudirectrdmawritesord": 5, "numer": [5, 6, 7], "here": [5, 6, 7, 9], "cu_device_attribute_mempool_supported_handle_typ": 5, "119": [5, 6], "mempool": [5, 6], "cu_device_attribute_cluster_launch": 5, "120": [5, 6], "cu_device_attribute_deferred_mapping_cuda_array_support": 5, "121": [5, 6], "defer": [5, 6], "cu_device_attribute_can_use_64_bit_stream_mem_op": 5, "122": [5, 6], "123": [5, 6], "cu_device_attribute_dma_buf_support": 5, "124": [5, 6], "dma_buf": 5, "mechan": [5, 6], "cu_device_attribute_ipc_event_support": 5, "125": [5, 6], "126": [5, 6], "cu_device_attribute_tensor_map_access_support": 5, "127": [5, 6], "cu_device_attribute_handle_type_fabric_support": 5, "cu_device_attribute_unified_function_point": 5, "129": [5, 6], "cu_device_attribute_numa_config": 5, "130": [5, 6], "numa": [5, 6], "cudevicenumaconfig": 5, "cu_device_attribute_numa_id": 5, "131": [5, 6], "cu_device_attribute_multicast_support": 5, "132": [5, 6], "switch": [5, 6, 7], "reduct": 5, "cu_device_attribute_mps_en": 5, 
"133": [5, 6], "mp": [5, 6], "cu_device_attribute_host_numa_id": 5, "134": [5, 6], "closest": [5, 6], "cu_device_attribute_d3d12_cig_support": 5, "135": [5, 6], "d3d12": [5, 6], "cu_device_attribute_max": 5, "136": [5, 6], "cupointer_attribut": 5, "cu_pointer_attribute_context": 5, "cu_pointer_attribute_memory_typ": 5, "cu_pointer_attribute_device_point": 5, "cu_pointer_attribute_host_point": 5, "cu_pointer_attribute_p2p_token": 5, "pair": [5, 6], "nv": 5, "p2p": [5, 6], "h": [5, 6, 7, 16], "interfac": [5, 6, 8, 9], "cu_pointer_attribute_sync_memop": 5, "everi": [5, 6], "region": [5, 6], "cu_pointer_attribute_buffer_id": 5, "cu_pointer_attribute_is_manag": 5, "cu_pointer_attribute_device_ordin": 5, "cu_pointer_attribute_is_legacy_cuda_ipc_cap": 5, "cudaipcgetmemhandl": [5, 6], "cu_pointer_attribute_range_start_addr": 5, "cu_pointer_attribute_range_s": 5, "cu_pointer_attribute_map": 5, "back": [5, 6, 9], "cu_pointer_attribute_allowed_handle_typ": 5, "cu_pointer_attribute_is_gpu_direct_rdma_cap": 5, "cu_pointer_attribute_access_flag": 5, "correspond": [5, 6, 7], "cu_pointer_attribute_mempool_handl": 5, "cu_pointer_attribute_mapping_s": 5, "actual": [5, 6, 7], "underli": [5, 6], "cu_pointer_attribute_mapping_base_addr": 5, "cu_pointer_attribute_memory_block_id": 5, "cufunction_attribut": 5, "cu_func_attribute_max_threads_per_block": 5, "beyond": [5, 6], "load": [5, 6, 9, 14], "cu_func_attribute_shared_size_byt": 5, "static": [5, 6], "cu_func_attribute_const_size_byt": 5, "cu_func_attribute_local_size_byt": 5, "cu_func_attribute_num_reg": 5, "cu_func_attribute_ptx_vers": 5, "ptx": [5, 6, 7, 9], "so": [5, 6, 9, 19], "undefin": [5, 6, 7], "cubin": [5, 7], "prior": [5, 6], "cu_func_attribute_binary_vers": 5, "binari": [5, 6], "properli": [5, 6], "cu_func_attribute_cache_mode_ca": 5, "been": [5, 6, 7, 9, 16], "xptxa": [5, 6, 7], "dlcm": [5, 6], "ca": [5, 6], "cu_func_attribute_max_dynamic_shared_size_byt": 5, "larger": [5, 6], "cufuncsetattribut": 5, "cukernelsetattribut": 5, "cu_func_attribute_preferred_shared_memory_carveout": 5, "On": [5, 6, 7], "carveout": [5, 6], "percent": [5, 6], "hint": [5, 6], "choos": [5, 6], "cu_func_attribute_cluster_size_must_be_set": 5, "cu_func_attribute_required_cluster_width": 5, "check": [5, 6, 7, 9], "cuda_error_not_permit": 5, "cu_func_attribute_required_cluster_height": 5, "cu_func_attribute_required_cluster_depth": 5, "cu_func_attribute_non_portable_cluster_size_allow": 5, "portabl": [5, 6, 9], "disallow": [5, 6], "sku": [5, 6], "program": [5, 6, 7, 8, 9], "might": [5, 6], "cudaoccupancymaxactiveclust": [5, 6], "assist": [5, 6], "higher": [5, 6, 7, 9], "target": [5, 6, 7, 8, 9], "sm_90": [5, 6, 7], "increas": [5, 6, 7, 9], "unit": [5, 6, 7, 9], "cu_func_attribute_cluster_scheduling_policy_prefer": 5, "cudaclusterschedulingpolici": [5, 6], "cu_func_attribute_max": 5, "cufunc_cach": 5, "cu_func_cache_prefer_non": 5, "cu_func_cache_prefer_shar": 5, "smaller": [5, 6, 8], "cu_func_cache_prefer_l1": 5, "cu_func_cache_prefer_equ": 5, "equal": [5, 6, 7], "cusharedconfig": 5, "cu_shared_mem_config_default_bank_s": 5, "bank": 5, "cu_shared_mem_config_four_byte_bank_s": 5, "four": [5, 6], "cu_shared_mem_config_eight_byte_bank_s": 5, "eight": 5, "cushared_carveout": 5, "cu_sharedmem_carveout_default": 5, "No": [5, 6, 7], "cu_sharedmem_carveout_max_shar": 5, "cu_sharedmem_carveout_max_l1": 5, "cu_memorytype_host": 5, "cu_memorytype_devic": [5, 6], "cu_memorytype_unifi": 5, "cu_computemode_default": 5, "cu_computemode_prohibit": 5, "prohibit": [5, 6], 
"cu_computemode_exclusive_process": 5, "exclus": [5, 6], "present": [5, 6, 7], "cumem_advis": 5, "advis": [5, 6], "cu_mem_advise_set_read_mostli": 5, "mostli": [5, 6], "occasion": [5, 6], "written": [5, 6, 7], "cu_mem_advise_unset_read_mostli": 5, "undo": [5, 6], "cu_mem_advise_set_preferred_loc": [5, 6], "cu_mem_advise_unset_preferred_loc": 5, "clear": [5, 6], "cu_mem_advise_set_accessed_bi": 5, "prevent": [5, 6], "fault": [5, 6], "much": [5, 6], "cu_mem_advise_unset_accessed_bi": [5, 6], "let": [5, 6], "subsystem": [5, 6], "decid": [5, 6], "cumem_range_attribut": 5, "cu_mem_range_attribute_read_mostli": 5, "cu_mem_range_attribute_preferred_loc": 5, "cu_mem_range_attribute_accessed_bi": 5, "cu_mem_range_attribute_last_prefetch_loc": 5, "prefetch": [5, 6], "cu_mem_range_attribute_preferred_location_typ": 5, "cu_mem_range_attribute_preferred_location_id": 5, "cu_mem_range_attribute_last_prefetch_location_typ": 5, "cu_mem_range_attribute_last_prefetch_location_id": 5, "cujit_opt": 5, "linker": 5, "cu_jit_max_regist": 5, "max": [5, 6], "cu_jit_threads_per_block": 5, "IN": 5, "util": [5, 6, 8], "abl": [5, 6], "combin": [5, 6], "cu_jit_target": 5, "cu_jit_wall_tim": 5, "overwrit": [5, 6], "wall": 5, "millisecond": [5, 6], "spent": 5, "cu_jit_info_log_buff": 5, "print": 5, "log": [5, 7], "messag": [5, 7], "natur": [5, 6, 9], "cu_jit_info_log_buffer_size_byt": 5, "cap": 5, "termin": [5, 6], "fill": [5, 6], "cu_jit_error_log_buff": 5, "reflect": [5, 6], "cu_jit_error_log_buffer_size_byt": 5, "cu_jit_optimization_level": 5, "being": [5, 6], "highest": [5, 6], "cu_jit_target_from_cucontext": 5, "chosen": [5, 6], "cujit_target": 5, "enumer": [5, 6, 7], "cu_jit_fallback_strategi": 5, "choic": 5, "fallback": [5, 6], "strategi": 5, "found": [5, 6, 16, 23], "cujit_fallback": 5, "culink": 5, "exact": [5, 6, 7], "cu_jit_generate_debug_info": 5, "fals": [5, 7, 9], "cu_jit_log_verbos": 5, "verbos": 5, "cu_jit_generate_line_info": 5, "lineinfo": [5, 7], "cu_jit_cache_mod": 5, "explicitli": [5, 6, 7], "cujit_cachemode_enum": 5, "cu_jit_new_sm3x_opt": 5, "cu_jit_fast_compil": 5, "jit": [5, 6], "purpos": [5, 6], "cu_jit_global_symbol_nam": 5, "symbol": [5, 6, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "name": [5, 6, 7, 9], "reloc": 5, "store": [5, 6, 7, 9], "cu_jit_global_symbol_address": 5, "cu_jit_global_symbol_count": 5, "unresolv": 5, "illeg": [5, 6], "const": 5, "void": [5, 6, 9], "cu_jit_lto": 5, "lto": [5, 7], "ir": [5, 7], "cu_jit_ftz": 5, "cu_jit_prec_div": 5, "cu_jit_prec_sqrt": 5, "cu_jit_fma": 5, "cu_jit_referenced_kernel_nam": 5, "cu_jit_referenced_kernel_count": 5, "cu_jit_referenced_variable_nam": 5, "cu_jit_referenced_variable_count": 5, "cu_jit_optimize_unused_device_vari": 5, "cu_jit_position_independent_cod": 5, "independ": [5, 6], "cu_jit_min_cta_per_sm": 5, "cta": 5, "togeth": [5, 8], "cu_jit_max_threads_per_block": 5, "well": [5, 6], "alreadi": [5, 6], "minnctapersm": 5, "cu_jit_override_directive_valu": 5, "preced": [5, 6, 7], "over": [5, 6], "product": [5, 6], "exeed": 5, "invoc": 5, "exceed": [5, 6], "failur": [5, 6], "maxntid": 5, "cu_jit_num_opt": 5, "cu_target_compute_30": 5, "cu_target_compute_32": 5, "cu_target_compute_35": 5, "cu_target_compute_37": 5, "cu_target_compute_50": 5, "cu_target_compute_52": 5, "cu_target_compute_53": 5, "cu_target_compute_60": 5, "cu_target_compute_61": 5, "cu_target_compute_62": 5, "cu_target_compute_70": 5, "cu_target_compute_72": 5, "cu_target_compute_75": 5, "cu_target_compute_80": 5, "cu_target_compute_86": 5, 
"cu_target_compute_87": 5, "cu_target_compute_89": 5, "cu_target_compute_90": 5, "acceler": [5, 6, 8, 9], "cu_target_compute_90a": 5, "65626": 5, "cu_prefer_ptx": 5, "cu_prefer_binari": 5, "cujit_cachemod": 5, "cu_jit_cache_option_non": 5, "cu_jit_cache_option_cg": 5, "cu_jit_cache_option_ca": 5, "cujitinputtyp": 5, "cu_jit_input_cubin": 5, "applic": [5, 6, 8], "none": [5, 6, 7, 9], "cu_jit_input_ptx": 5, "cu_jit_input_fatbinari": 5, "bundl": 5, "some": [5, 6, 9], "cu_jit_input_object": 5, "embed": [5, 6, 7], "cu_jit_input_librari": 5, "archiv": 5, "cu_jit_input_nvvm": 5, "cu_jit_num_input_typ": 5, "cugraphicsregisterflag": 5, "cu_graphics_register_flags_non": 5, "cu_graphics_register_flags_read_onli": 5, "cu_graphics_register_flags_write_discard": 5, "cu_graphics_register_flags_surface_ldst": 5, "cu_graphics_register_flags_texture_gath": 5, "cugraphicsmapresourceflag": 5, "unmap": [5, 6], "cu_graphics_map_resource_flags_non": 5, "cu_graphics_map_resource_flags_read_onli": 5, "cu_graphics_map_resource_flags_write_discard": 5, "cuarray_cubemap_fac": 5, "cube": [5, 6], "cu_cubemap_face_positive_x": 5, "cu_cubemap_face_negative_x": 5, "neg": [5, 6], "cu_cubemap_face_positive_i": 5, "cu_cubemap_face_negative_i": 5, "cu_cubemap_face_positive_z": 5, "cu_cubemap_face_negative_z": 5, "culimit": 5, "cu_limit_stack_s": 5, "stack": [5, 6, 7], "cu_limit_printf_fifo_s": 5, "printf": [5, 6], "fifo": [5, 6], "cu_limit_malloc_heap_s": 5, "malloc": [5, 6], "heap": [5, 6], "cu_limit_dev_runtime_sync_depth": 5, "cu_limit_dev_runtime_pending_launch_count": 5, "pend": [5, 6], "cu_limit_max_l2_fetch_granular": 5, "fetch": [5, 6], "granular": [5, 6], "cu_limit_persisting_l2_cache_s": 5, "cu_limit_shmem_s": 5, "cu_limit_cig_en": 5, "cu_limit_cig_shmem_fallback_en": 5, "cu_limit_max": 5, "cu_resource_type_arrai": 5, "cu_resource_type_mipmapped_arrai": 5, "cu_resource_type_linear": 5, "cu_resource_type_pitch2d": 5, "cu_access_property_norm": 5, "cu_access_property_stream": 5, "less": [5, 6, 7], "persit": [5, 6], "cu_access_property_persist": 5, "cu_graph_cond_type_if": 5, "onc": [5, 6], "cu_graph_cond_type_whil": 5, "repeatedli": [5, 6], "cu_graph_node_type_kernel": 5, "cu_graph_node_type_memcpi": 5, "cu_graph_node_type_memset": 5, "cu_graph_node_type_host": 5, "cu_graph_node_type_graph": 5, "cu_graph_node_type_empti": 5, "op": [5, 6], "cu_graph_node_type_wait_ev": 5, "cu_graph_node_type_event_record": 5, "cu_graph_node_type_ext_semas_sign": 5, "cu_graph_node_type_ext_semas_wait": 5, "cu_graph_node_type_mem_alloc": 5, "cu_graph_node_type_mem_fre": 5, "cu_graph_node_type_batch_mem_op": 5, "cu_graph_node_type_condit": 5, "loop": [5, 6], "insid": [5, 6], "iter": [5, 6, 8], "upon": [5, 6], "exist": [5, 6, 7, 8], "cudagraphsetcondit": [5, 6], "part": [5, 6, 9], "cugraphedgedata": 5, "cu_graph_dependency_type_default": 5, "ordinari": [5, 6], "cu_graph_dependency_type_programmat": 5, "cudagriddependencysynchron": [5, 6], "outgo": [5, 6], "cuda_graph_instantiate_success": 5, "succeed": [5, 6], "cuda_graph_instantiate_error": 5, "unexpect": [5, 6], "cuda_graph_instantiate_invalid_structur": 5, "cycl": [5, 6], "cuda_graph_instantiate_node_operation_not_support": 5, "becaus": [5, 6, 7, 9], "unsupport": [5, 6], "cuda_graph_instantiate_multiple_ctxs_not_support": 5, "cu_sync_policy_auto": 5, "cu_sync_policy_spin": 5, "cu_sync_policy_yield": 5, "cu_sync_policy_blocking_sync": 5, "cu_cluster_scheduling_policy_default": 5, "cu_cluster_scheduling_policy_spread": 5, "spread": [5, 6], "cu_cluster_scheduling_policy_load_balanc": 5, 
"balanc": [5, 6], "affect": [5, 6], "thu": [5, 6], "elimin": [5, 6, 9], "latenc": [5, 6], "unrel": [5, 6], "traffic": [5, 6], "custreamsetattribut": 5, "culaunchkernelex": 5, "cugraphkernelnodesetattribut": 5, "done": [5, 6, 13], "distanc": [5, 6], "word": [5, 6], "suffici": [5, 6], "anoth": [5, 6, 9], "even": [5, 6, 7, 9], "cu_launch_mem_sync_domain_default": 5, "cu_launch_attribute_ignor": 5, "conveni": [5, 6], "composit": [5, 6], "programmat": [5, 6], "resolv": [5, 6, 16, 17, 18, 22, 23, 25, 27, 28], "opportunist": [5, 6], "overlap": [5, 6, 9], "previou": [5, 6, 7, 17], "sync": [5, 6], "equival": [5, 6, 7], "instruct": [5, 6, 7, 9, 18], "launchdep": 5, "builtin": [5, 7], "cudatriggerprogrammaticlaunchcomplet": [5, 6], "cueventsynchron": [5, 6], "primarili": [5, 6], "meant": [5, 6], "establish": [5, 6], "i": [5, 6, 7, 17], "nomin": [5, 6], "begun": [5, 6], "effort": [5, 6], "b": [5, 6, 9], "claim": [5, 6], "unavail": [5, 6], "exercis": [5, 6], "caution": [5, 6], "invers": [5, 6], "lead": [5, 6], "deadlock": [5, 6], "relev": [5, 6], "cudagraphkernelnodeupdatesappli": [5, 6], "compar": [5, 6, 9], "regular": [5, 6], "firstli": [5, 6], "cugraphdestroynod": 5, "addition": [5, 6], "opt": [5, 6, 7, 9], "attempt": [5, 6, 7], "cugraphkernelnodecopyattribut": 5, "neither": [5, 6], "nor": [5, 6], "those": [5, 6, 7], "cugraphupload": [5, 6], "again": [5, 6], "custreamcapturestatu": 5, "status": [5, 6], "custreamiscaptur": 5, "cu_stream_capture_status_non": 5, "cu_stream_capture_status_act": 5, "cu_stream_capture_status_invalid": 5, "sequenc": [5, 6], "custreamcapturemod": 5, "custreambegincaptur": [5, 6], "cuthreadexchangestreamcapturemod": 5, "cu_stream_capture_mode_glob": 5, "cu_stream_capture_mode_thread_loc": 5, "cu_stream_capture_mode_relax": 5, "cudriverprocaddress_flag": 5, "cugetprocaddress": [5, 6], "cu_get_proc_address_default": 5, "cu_get_proc_address_legacy_stream": 5, "cu_get_proc_address_per_thread_default_stream": 5, "cudriverprocaddressqueryresult": 5, "cu_get_proc_address_success": 5, "succesfulli": [5, 6], "cu_get_proc_address_symbol_not_found": 5, "cu_get_proc_address_version_not_suffici": 5, "cu_exec_affinity_type_max": 5, "cig_data_type_d3d12_command_queu": 5, "culibraryopt": 5, "culibraryloaddata": [5, 28], "culibraryloadfromfil": 5, "cu_library_host_universal_function_and_data_t": 5, "cu_library_binary_is_preserv": 5, "preserv": [5, 6, 7], "know": [5, 6], "culibraryunload": 5, "cuda_error_invalid_valu": [5, 6], "cu_library_num_opt": 5, "curesult": [5, 9], "cuda_success": [5, 6, 9], "cueventqueri": [5, 6], "custreamqueri": [5, 6], "cuda_error_out_of_memori": [5, 6], "unabl": [5, 6], "enough": [5, 6], "cuda_error_not_initi": [5, 6], "cuinit": [5, 9], "cuda_error_deiniti": 5, "shut": [5, 6], "down": [5, 6], "cuda_error_profiler_dis": 5, "happen": [5, 6], "tool": [5, 6, 7, 9], "visual": [5, 6], "cuda_error_profiler_not_initi": 5, "cuda_error_profiler_already_start": 5, "cuda_error_profiler_already_stop": 5, "cuda_error_stub_librari": 5, "stub": [5, 6], "real": [5, 6], "cuda_error_device_unavail": 5, "often": [5, 6], "cuda_error_no_devic": 5, "detect": [5, 6, 9], "cuda_error_invalid_devic": 5, "cuda_error_device_not_licens": 5, "licens": [5, 6], "cuda_error_invalid_imag": 5, "imag": [5, 6], "cuda_error_invalid_context": 5, "most": [5, 6, 8, 9], "frequent": [5, 6], "had": [5, 6], "cuctxdestroi": [5, 6, 9], "invok": [5, 6], "mix": [5, 6], "3010": [5, 6], "3020": [5, 6], "cuctxgetapivers": [5, 6], "convert": [5, 6], "cuctxfromgreenctx": 5, "cuda_error_context_already_curr": 5, 
"cuda_error_map_fail": 5, "205": [5, 6], "cuda_error_unmap_fail": 5, "206": [5, 6], "unregist": [5, 6], "cuda_error_array_is_map": 5, "207": [5, 6], "destroi": [5, 6, 7], "cuda_error_already_map": 5, "208": [5, 6], "cuda_error_no_binary_for_gpu": 5, "209": [5, 6], "occur": [5, 6], "cuda_error_already_acquir": 5, "210": [5, 6], "acquir": [5, 6], "cuda_error_not_map": 5, "211": [5, 6], "cuda_error_not_mapped_as_arrai": 5, "212": [5, 6], "cuda_error_not_mapped_as_point": 5, "213": [5, 6], "cuda_error_ecc_uncorrect": 5, "214": [5, 6], "uncorrect": [5, 6], "cuda_error_unsupported_limit": 5, "215": [5, 6], "cuda_error_context_already_in_us": 5, "216": [5, 6], "cuda_error_peer_access_unsupport": 5, "217": [5, 6], "across": [5, 6], "cuda_error_invalid_ptx": 5, "218": [5, 6], "cuda_error_invalid_graphics_context": 5, "219": [5, 6], "directx": [5, 6], "cuda_error_nvlink_uncorrect": 5, "220": [5, 6], "nvlink": [5, 6], "cuda_error_jit_compiler_not_found": 5, "221": [5, 6], "cuda_error_unsupported_ptx_vers": 5, "222": [5, 6], "toolchain": [5, 6], "cuda_error_jit_compilation_dis": 5, "223": [5, 6], "cuda_error_unsupported_exec_affin": 5, "224": [5, 6], "cuda_error_unsupported_devside_sync": 5, "225": [5, 6], "cudadevicesynchron": [5, 6], "cuda_error_invalid_sourc": 5, "300": [5, 6], "cuda_error_file_not_found": 5, "301": [5, 6], "cuda_error_shared_object_symbol_not_found": 5, "302": [5, 6], "cuda_error_shared_object_init_fail": 5, "303": [5, 6], "cuda_error_operating_system": 5, "304": [5, 6], "os": [5, 6], "cuda_error_invalid_handl": 5, "400": [5, 6], "cuda_error_illegal_st": 5, "401": [5, 6], "state": [5, 6], "cuda_error_lossy_queri": 5, "402": [5, 6], "introspect": [5, 6], "wai": [5, 6, 9], "discard": [5, 6], "funtion": [5, 6], "newer": [5, 6], "omiss": [5, 6], "cuda_error_not_found": 5, "500": [5, 6], "cuda_error_not_readi": 5, "600": [5, 6], "previous": [5, 6, 7], "yet": [5, 6, 9], "cuda_error_illegal_address": 5, "700": [5, 6], "leav": [5, 6], "inconsist": [5, 6], "continu": [5, 6], "relaunch": [5, 6], "cuda_error_launch_out_of_resourc": 5, "701": [5, 6], "did": [5, 6], "usual": [5, 6], "too": [5, 6], "wrong": 5, "cuda_error_launch_timeout": 5, "702": [5, 6], "took": [5, 6], "timeout": [5, 6], "cuda_error_launch_incompatible_textur": 5, "703": [5, 6], "incompat": [5, 6, 7, 9], "cuda_error_peer_access_already_en": 5, "704": [5, 6], "cuctxenablepeeraccess": [5, 6], "try": [5, 6, 9], "cuda_error_peer_access_not_en": 5, "705": [5, 6], "cuctxdisablepeeraccess": [5, 6], "cuda_error_primary_context_act": 5, "708": [5, 6], "cuda_error_context_is_destroi": 5, "709": [5, 6], "cuda_error_assert": 5, "710": [5, 6], "assert": [5, 6, 7, 9], "anymor": 5, "reconstruct": 5, "cuda_error_too_many_p": 5, "711": [5, 6], "exhaust": [5, 6], "cuda_error_host_memory_already_regist": 5, "712": [5, 6], "cuda_error_host_memory_not_regist": 5, "713": [5, 6], "cumemhostunregist": [5, 6], "cuda_error_hardware_stack_error": 5, "714": [5, 6], "corrupt": [5, 6], "cuda_error_illegal_instruct": 5, "715": [5, 6], "cuda_error_misaligned_address": 5, "716": [5, 6], "cuda_error_invalid_address_spac": 5, "717": [5, 6], "certain": [5, 6, 7, 16], "cuda_error_invalid_pc": 5, "718": [5, 6], "counter": [5, 6], "cuda_error_launch_fail": 5, "719": [5, 6], "dereferenc": [5, 6], "cuda_error_cooperative_launch_too_larg": 5, "720": [5, 6], "cuoccupancymaxactiveblockspermultiprocessor": [5, 6], "cuoccupancymaxactiveblockspermultiprocessorwithflag": [5, 6], "800": [5, 6], "cuda_error_not_support": [5, 6], "801": [5, 6], "cuda_error_system_not_readi": 
5, "802": [5, 6], "readi": [5, 6, 9], "verifi": [5, 6], "daemon": [5, 6], "cuda_error_system_driver_mismatch": [5, 9], "803": [5, 6, 9], "mismatch": [5, 6], "displai": [5, 6, 7], "cuda_error_compat_not_supported_on_devic": 5, "804": [5, 6], "upgrad": [5, 6], "forward": [5, 6, 7], "matrix": [5, 6], "cuda_visible_devic": [5, 6], "cuda_error_mps_connection_fail": 5, "805": [5, 6], "client": [5, 6], "server": [5, 6], "cuda_error_mps_rpc_failur": 5, "806": [5, 6], "procedur": [5, 6], "cuda_error_mps_server_not_readi": 5, "807": [5, 6], "recov": [5, 6], "fatal": [5, 6], "cuda_error_mps_max_clients_reach": 5, "808": [5, 6], "cuda_error_mps_max_connections_reach": 5, "809": [5, 6], "cuda_error_mps_client_termin": 5, "810": [5, 6], "cuda_error_cdp_not_support": 5, "811": [5, 6], "parallel": [5, 6, 7, 8, 9], "cuda_error_cdp_version_mismatch": 5, "812": [5, 6], "cuda_error_stream_capture_unsupport": 5, "900": [5, 6], "cuda_error_stream_capture_invalid": 5, "901": [5, 6], "cuda_error_stream_capture_merg": 5, "902": [5, 6], "merg": [5, 6], "cuda_error_stream_capture_unmatch": 5, "903": [5, 6], "cuda_error_stream_capture_unjoin": 5, "904": [5, 6], "fork": [5, 6], "join": [5, 6], "cuda_error_stream_capture_isol": 5, "905": [5, 6], "boundari": [5, 6], "implicit": [5, 6, 7], "cuda_error_stream_capture_implicit": 5, "906": [5, 6], "cudastreamlegaci": [5, 6], "cuda_error_captured_ev": 5, "907": [5, 6], "cuda_error_stream_capture_wrong_thread": 5, "908": [5, 6], "custreamendcaptur": 5, "cuda_error_timeout": 5, "909": [5, 6], "laps": [5, 6], "cuda_error_graph_exec_update_failur": 5, "910": [5, 6], "violat": [5, 6], "cuda_error_external_devic": 5, "911": [5, 6], "consumpt": [5, 6, 7], "cuda_error_invalid_cluster_s": 5, "912": [5, 6], "misconfigur": [5, 6], "cuda_error_function_not_load": 5, "913": [5, 6], "indici": [5, 6], "cuda_error_invalid_resource_typ": 5, "914": [5, 6], "cuda_error_invalid_resource_configur": 5, "915": [5, 6], "insuffici": [5, 6], "cuda_error_unknown": 5, "999": [5, 6], "unknown": [5, 6, 7, 9], "cudevice_p2pattribut": 5, "cu_device_p2p_attribute_performance_rank": 5, "rel": [5, 6], "cu_device_p2p_attribute_access_support": 5, "cu_device_p2p_attribute_native_atomic_support": 5, "cu_device_p2p_attribute_access_access_support": 5, "cu_device_p2p_attribute_cuda_array_access_support": 5, "cu_res_view_format_non": 5, "cu_res_view_format_uint_1x8": 5, "cu_res_view_format_uint_2x8": 5, "cu_res_view_format_uint_4x8": 5, "cu_res_view_format_sint_1x8": 5, "cu_res_view_format_sint_2x8": 5, "cu_res_view_format_sint_4x8": 5, "cu_res_view_format_uint_1x16": 5, "cu_res_view_format_uint_2x16": 5, "cu_res_view_format_uint_4x16": 5, "cu_res_view_format_sint_1x16": 5, "cu_res_view_format_sint_2x16": 5, "cu_res_view_format_sint_4x16": 5, "cu_res_view_format_uint_1x32": 5, "cu_res_view_format_uint_2x32": 5, "cu_res_view_format_uint_4x32": 5, "cu_res_view_format_sint_1x32": 5, "cu_res_view_format_sint_2x32": 5, "cu_res_view_format_sint_4x32": 5, "cu_res_view_format_float_1x16": 5, "cu_res_view_format_float_2x16": 5, "cu_res_view_format_float_4x16": 5, "cu_res_view_format_float_1x32": 5, "cu_res_view_format_float_2x32": 5, "cu_res_view_format_float_4x32": 5, "cu_res_view_format_unsigned_bc1": 5, "cu_res_view_format_unsigned_bc2": 5, "cu_res_view_format_unsigned_bc3": 5, "cu_res_view_format_unsigned_bc4": 5, "cu_res_view_format_signed_bc4": 5, "cu_res_view_format_unsigned_bc5": 5, "cu_res_view_format_signed_bc5": 5, "cu_res_view_format_unsigned_bc6h": 5, "cu_res_view_format_signed_bc6h": 5, 
"cu_res_view_format_unsigned_bc7": 5, "cutensormapdatatyp": 5, "cu_tensor_map_data_type_uint8": 5, "cu_tensor_map_data_type_uint16": 5, "cu_tensor_map_data_type_uint32": 5, "cu_tensor_map_data_type_int32": 5, "cu_tensor_map_data_type_uint64": 5, "cu_tensor_map_data_type_int64": 5, "cu_tensor_map_data_type_float16": 5, "cu_tensor_map_data_type_float32": 5, "cu_tensor_map_data_type_float64": 5, "cu_tensor_map_data_type_bfloat16": 5, "cu_tensor_map_data_type_float32_ftz": 5, "cu_tensor_map_data_type_tfloat32": 5, "cu_tensor_map_data_type_tfloat32_ftz": 5, "cutensormapinterleav": 5, "interleav": [5, 6], "cu_tensor_map_interleave_non": 5, "cu_tensor_map_interleave_16b": 5, "cu_tensor_map_interleave_32b": 5, "cutensormapswizzl": 5, "swizzl": 5, "cu_tensor_map_swizzle_non": 5, "cu_tensor_map_swizzle_32b": 5, "cu_tensor_map_swizzle_64b": 5, "cu_tensor_map_swizzle_128b": 5, "cutensormapl2promot": 5, "promot": [5, 6], "cu_tensor_map_l2_promotion_non": 5, "cu_tensor_map_l2_promotion_l2_64b": 5, "cu_tensor_map_l2_promotion_l2_128b": 5, "cu_tensor_map_l2_promotion_l2_256b": 5, "cutensormapfloatoobfil": 5, "cu_tensor_map_float_oob_fill_non": 5, "cu_tensor_map_float_oob_fill_nan_request_zero_fma": 5, "cuda_pointer_attribute_access_flag": 5, "cu_pointer_attribute_access_flag_non": 5, "stage": [5, 6], "cu_pointer_attribute_access_flag_read": 5, "cu_pointer_attribute_access_flag_readwrit": 5, "cu_external_memory_handle_type_opaque_fd": 5, "cu_external_memory_handle_type_opaque_win32": 5, "cu_external_memory_handle_type_opaque_win32_kmt": 5, "cu_external_memory_handle_type_d3d12_heap": 5, "cu_external_memory_handle_type_d3d12_resourc": 5, "cu_external_memory_handle_type_d3d11_resourc": 5, "d3d11": [5, 6], "cu_external_memory_handle_type_d3d11_resource_kmt": 5, "nvscibuf": [5, 6], "cu_external_semaphore_handle_type_opaque_fd": 5, "cu_external_semaphore_handle_type_opaque_win32": 5, "cu_external_semaphore_handle_type_opaque_win32_kmt": 5, "cu_external_semaphore_handle_type_d3d12_f": 5, "cu_external_semaphore_handle_type_d3d11_f": 5, "nvscisync": [5, 6], "cu_external_semaphore_handle_type_d3d11_keyed_mutex": 5, "kei": [5, 6, 9], "mutex": [5, 6], "cu_external_semaphore_handle_type_d3d11_keyed_mutex_kmt": 5, "cu_external_semaphore_handle_type_timeline_semaphore_fd": 5, "cu_external_semaphore_handle_type_timeline_semaphore_win32": 5, "cu_mem_handle_type_posix_file_descriptor": 5, "cu_mem_handle_type_win32_kmt": 5, "d3dkmt_handl": [5, 6], "cu_mem_handle_type_fabr": 5, "cumemfabrichandl": 5, "cu_mem_handle_type_max": 5, "protect": [5, 6], "cu_mem_access_flags_prot_non": 5, "cu_mem_access_flags_prot_read": 5, "cu_mem_access_flags_prot_readwrit": 5, "cu_mem_access_flags_prot_max": 5, "cu_mem_location_type_invalid": 5, "cu_mem_location_type_devic": 5, "cu_mem_location_type_host": 5, "cu_mem_location_type_host_numa": 5, "cu_mem_location_type_host_numa_curr": 5, "cu_mem_location_type_max": 5, "cu_mem_allocation_type_invalid": 5, "pin": [5, 6], "cu_mem_allocation_type_max": 5, "cumemallocationgranularity_flag": 5, "cu_mem_alloc_granularity_minimum": 5, "cu_mem_alloc_granularity_recommend": 5, "recommend": [5, 6], "cumemrangehandletyp": 5, "cu_mem_range_handle_type_dma_buf_fd": 5, "cu_mem_range_handle_type_max": 5, "cu_array_sparse_subresource_type_sparse_level": 5, "cu_array_sparse_subresource_type_miptail": 5, "cu_mem_operation_type_map": 5, "cu_mem_operation_type_unmap": 5, "cu_mem_handle_type_gener": 5, "cumemallocationcomptyp": 5, "cu_mem_allocation_comp_non": 5, "cu_mem_allocation_comp_gener": 5, 
"cumulticastgranularity_flag": 5, "cu_multicast_granularity_minimum": 5, "cu_multicast_granularity_recommend": 5, "cu_graph_exec_update_success": 5, "cu_graph_exec_update_error": 5, "cu_graph_exec_update_error_topology_chang": 5, "cu_graph_exec_update_error_node_type_chang": 5, "cu_graph_exec_update_error_function_chang": 5, "cu_graph_exec_update_error_parameters_chang": 5, "cu_graph_exec_update_error_not_support": 5, "someth": [5, 6], "cu_graph_exec_update_error_unsupported_function_chang": 5, "cu_graph_exec_update_error_attributes_chang": 5, "cumempool_attribut": 5, "cu_mempool_attr_reuse_follow_event_depend": 5, "freed": [5, 6], "cu_mempool_attr_reuse_allow_opportunist": 5, "reus": [5, 6], "cu_mempool_attr_reuse_allow_internal_depend": 5, "piec": [5, 6], "cufreeasync": [5, 6], "cu_mempool_attr_release_threshold": 5, "hold": [5, 6], "onto": [5, 6], "threshold": [5, 6, 7], "held": [5, 6], "next": [5, 6, 9], "cu_mempool_attr_reserved_mem_curr": 5, "cu_mempool_attr_reserved_mem_high": 5, "high": [5, 6, 7], "watermark": [5, 6], "sinc": [5, 6], "reset": [5, 6, 7], "cu_mempool_attr_used_mem_curr": 5, "cu_mempool_attr_used_mem_high": 5, "cugraphmem_attribut": 5, "cu_graph_mem_attr_used_mem_curr": 5, "cu_graph_mem_attr_used_mem_high": 5, "cu_graph_mem_attr_reserved_mem_curr": 5, "cu_graph_mem_attr_reserved_mem_high": 5, "cu_flush_gpu_direct_rdma_writes_option_host": 5, "cuflushgpudirectrdmawrit": [5, 6], "counterpart": [5, 6], "cu_flush_gpu_direct_rdma_writes_option_memop": 5, "cu_gpu_direct_rdma_writes_ordering_non": 5, "leverag": [5, 6, 9], "cu_gpu_direct_rdma_writes_ordering_own": 5, "consist": [5, 6], "although": [5, 6], "cu_gpu_direct_rdma_writes_ordering_all_devic": 5, "cuflushgpudirectrdmawritesscop": 5, "cu_flush_gpu_direct_rdma_writes_to_own": 5, "cu_flush_gpu_direct_rdma_writes_to_all_devic": 5, "cuflushgpudirectrdmawritestarget": 5, "cu_flush_gpu_direct_rdma_writes_target_current_ctx": 5, "cugraphdebugdot_flag": 5, "cugraphdebugdotprint": 5, "cu_graph_debug_dot_flags_verbos": 5, "cu_graph_debug_dot_flags_runtime_typ": 5, "cu_graph_debug_dot_flags_kernel_node_param": 5, "cuda_kernel_node_param": 5, "cu_graph_debug_dot_flags_memcpy_node_param": 5, "cu_graph_debug_dot_flags_memset_node_param": 5, "cuda_memset_node_param": 5, "cu_graph_debug_dot_flags_host_node_param": 5, "cuda_host_node_param": 5, "cu_graph_debug_dot_flags_event_node_param": 5, "cu_graph_debug_dot_flags_ext_semas_signal_node_param": 5, "cuda_ext_sem_signal_node_param": 5, "cu_graph_debug_dot_flags_ext_semas_wait_node_param": 5, "256": [5, 6], "cuda_ext_sem_wait_node_param": 5, "cu_graph_debug_dot_flags_kernel_node_attribut": 5, "512": [5, 6, 9], "cukernelnodeattrvalu": 5, "cu_graph_debug_dot_flags_handl": 5, "1024": [5, 6], "cu_graph_debug_dot_flags_mem_alloc_node_param": 5, "2048": [5, 6], "cu_graph_debug_dot_flags_mem_free_node_param": 5, "4096": 5, "cu_graph_debug_dot_flags_batch_mem_op_node_param": 5, "8192": 5, "cu_graph_debug_dot_flags_extra_topo_info": 5, "16384": 5, "cu_graph_debug_dot_flags_conditional_node_param": 5, "32768": [5, 6, 9], "cuuserobject_flag": 5, "cu_user_object_no_destructor_sync": 5, "destructor": [5, 6], "cuuserobjectretain_flag": 5, "retain": [5, 6], "cu_graph_user_object_mov": 5, "caller": [5, 6], "cugraphinstantiate_flag": [5, 6], "cuda_graph_instantiate_flag_auto_free_on_launch": 5, "cuda_graph_instantiate_flag_upload": 5, "cugraphinstantiatewithparam": 5, "instantiateparam": [5, 6], "cuda_graph_instantiate_flag_device_launch": 5, "launchabl": [5, 6], "conjunct": [5, 6], 
"cuda_graph_instantiate_flag_use_node_prior": 5, "cu_device_numa_config_non": 5, "cu_device_numa_config_numa_nod": 5, "cu_egl_frame_type_arrai": 5, "cu_egl_frame_type_pitch": 5, "cueglresourcelocationflag": 5, "sysmem": [5, 6], "vidmem": [5, 6], "igpu": [5, 6], "video": [5, 6], "dgpu": [5, 6], "cu_egl_resource_location_sysmem": 5, "cu_egl_resource_location_vidmem": 5, "dedic": [5, 6], "There": [5, 6, 9], "produc": [5, 6], "cuda_egl": [5, 6], "three": [5, 6, 7, 9], "cu_egl_color_format_yuv420_planar": 5, "u": [5, 6, 7], "v": [5, 6], "cu_egl_color_format_yuv420_semiplanar": 5, "uv": [5, 6], "vu": [5, 6], "yuv420planar": [5, 6], "cu_egl_color_format_yuv422_planar": 5, "cu_egl_color_format_yuv422_semiplanar": 5, "yuv422planar": [5, 6], "cu_egl_color_format_rgb": 5, "r": [5, 6], "bgr": 5, "cu_egl_color_format_bgr": 5, "rgb": 5, "cu_egl_color_format_argb": 5, "bgra": [5, 6], "cu_egl_color_format_rgba": 5, "abgr": [5, 6], "cu_egl_color_format_l": 5, "lumin": [5, 6], "cu_egl_color_format_r": 5, "cu_egl_color_format_yuv444_planar": 5, "cu_egl_color_format_yuv444_semiplanar": 5, "yuv444planar": [5, 6], "cu_egl_color_format_yuyv_422": 5, "uyvi": [5, 6], "cu_egl_color_format_uyvy_422": 5, "yuyv": [5, 6], "cu_egl_color_format_abgr": 5, "rgba": [5, 6], "cu_egl_color_format_bgra": 5, "argb": [5, 6], "cu_egl_color_format_a": 5, "alpha": [5, 6], "cu_egl_color_format_rg": 5, "gr": [5, 6], "cu_egl_color_format_ayuv": 5, "vuya": [5, 6], "cu_egl_color_format_yvu444_semiplanar": 5, "cu_egl_color_format_yvu422_semiplanar": 5, "cu_egl_color_format_yvu420_semiplanar": 5, "cu_egl_color_format_y10v10u10_444_semiplanar": 5, "y10": [5, 6], "v10u10": [5, 6], "cu_egl_color_format_y10v10u10_420_semiplanar": 5, "cu_egl_color_format_y12v12u12_444_semiplanar": 5, "y12": [5, 6], "v12u12": [5, 6], "cu_egl_color_format_y12v12u12_420_semiplanar": 5, "cu_egl_color_format_vyuy_": 5, "extend": [5, 6], "yvyu": [5, 6], "cu_egl_color_format_uyvy_": 5, "cu_egl_color_format_yuyv_": 5, "cu_egl_color_format_yvyu_": 5, "vyui": [5, 6], "cu_egl_color_format_yuv_": 5, "vui": 5, "cu_egl_color_format_yuva_": 5, "avui": [5, 6], "cu_egl_color_format_ayuv_": 5, "cu_egl_color_format_yuv444_planar_": 5, "cu_egl_color_format_yuv422_planar_": 5, "cu_egl_color_format_yuv420_planar_": 5, "cu_egl_color_format_yuv444_semiplanar_": 5, "cu_egl_color_format_yuv422_semiplanar_": 5, "cu_egl_color_format_yuv420_semiplanar_": 5, "cu_egl_color_format_yvu444_planar_": 5, "cu_egl_color_format_yvu422_planar_": 5, "cu_egl_color_format_yvu420_planar_": 5, "cu_egl_color_format_yvu444_semiplanar_": 5, "cu_egl_color_format_yvu422_semiplanar_": 5, "cu_egl_color_format_yvu420_semiplanar_": 5, "cu_egl_color_format_bayer_rggb": 5, "bayer": [5, 6], "rggb": [5, 6], "cu_egl_color_format_bayer_bggr": 5, "bggr": [5, 6], "cu_egl_color_format_bayer_grbg": 5, "grbg": [5, 6], "cu_egl_color_format_bayer_gbrg": 5, "gbrg": [5, 6], "cu_egl_color_format_bayer10_rggb": 5, "bayer10": [5, 6], "cu_egl_color_format_bayer10_bggr": 5, "cu_egl_color_format_bayer10_grbg": 5, "cu_egl_color_format_bayer10_gbrg": 5, "cu_egl_color_format_bayer12_rggb": 5, "bayer12": [5, 6], "cu_egl_color_format_bayer12_bggr": 5, "cu_egl_color_format_bayer12_grbg": 5, "cu_egl_color_format_bayer12_gbrg": 5, "cu_egl_color_format_bayer14_rggb": 5, "bayer14": [5, 6], "cu_egl_color_format_bayer14_bggr": 5, "cu_egl_color_format_bayer14_grbg": 5, "cu_egl_color_format_bayer14_gbrg": 5, "cu_egl_color_format_bayer20_rggb": 5, "bayer20": [5, 6], "cu_egl_color_format_bayer20_bggr": 5, "cu_egl_color_format_bayer20_grbg": 5, 
"cu_egl_color_format_bayer20_gbrg": 5, "cu_egl_color_format_yvu444_planar": 5, "cu_egl_color_format_yvu422_planar": 5, "cu_egl_color_format_yvu420_planar": 5, "cu_egl_color_format_bayer_isp_rggb": 5, "proprietari": [5, 6], "isp": [5, 6], "datatyp": [5, 6], "cu_egl_color_format_bayer_isp_bggr": 5, "cu_egl_color_format_bayer_isp_grbg": 5, "cu_egl_color_format_bayer_isp_gbrg": 5, "cu_egl_color_format_bayer_bccr": 5, "bccr": [5, 6], "cu_egl_color_format_bayer_rccb": 5, "rccb": [5, 6], "cu_egl_color_format_bayer_crbc": 5, "crbc": [5, 6], "cu_egl_color_format_bayer_cbrc": 5, "cbrc": [5, 6], "cu_egl_color_format_bayer10_cccc": 5, "cccc": [5, 6], "cu_egl_color_format_bayer12_bccr": 5, "cu_egl_color_format_bayer12_rccb": 5, "cu_egl_color_format_bayer12_crbc": 5, "cu_egl_color_format_bayer12_cbrc": 5, "cu_egl_color_format_bayer12_cccc": 5, "cu_egl_color_format_i": 5, "cu_egl_color_format_yuv420_semiplanar_2020": 5, "cu_egl_color_format_yvu420_semiplanar_2020": 5, "cu_egl_color_format_yuv420_planar_2020": 5, "cu_egl_color_format_yvu420_planar_2020": 5, "cu_egl_color_format_yuv420_semiplanar_709": 5, "cu_egl_color_format_yvu420_semiplanar_709": 5, "cu_egl_color_format_yuv420_planar_709": 5, "cu_egl_color_format_yvu420_planar_709": 5, "cu_egl_color_format_y10v10u10_420_semiplanar_709": 5, "cu_egl_color_format_y10v10u10_420_semiplanar_2020": 5, "cu_egl_color_format_y10v10u10_422_semiplanar_2020": 5, "cu_egl_color_format_y10v10u10_422_semiplanar": 5, "cu_egl_color_format_y10v10u10_422_semiplanar_709": 5, "cu_egl_color_format_y_": 5, "cu_egl_color_format_y_709_": 5, "cu_egl_color_format_y10_": 5, "cu_egl_color_format_y10_709_": 5, "cu_egl_color_format_y12_": 5, "cu_egl_color_format_y12_709_": 5, "cu_egl_color_format_yuva": 5, "cu_egl_color_format_yuv": 5, "cu_egl_color_format_yvyu": 5, "cu_egl_color_format_vyui": 5, "cu_egl_color_format_y10v10u10_420_semiplanar_": 5, "cu_egl_color_format_y10v10u10_420_semiplanar_709_": 5, "cu_egl_color_format_y10v10u10_444_semiplanar_": 5, "cu_egl_color_format_y10v10u10_444_semiplanar_709_": 5, "cu_egl_color_format_y12v12u12_420_semiplanar_": 5, "cu_egl_color_format_y12v12u12_420_semiplanar_709_": 5, "cu_egl_color_format_y12v12u12_444_semiplanar_": 5, "cu_egl_color_format_y12v12u12_444_semiplanar_709_": 5, "cu_egl_color_format_max": 5, "cudeviceptr_v2": 5, "whose": [5, 6], "cudevice_v1": 5, "cudevic": [5, 9], "arg": [5, 6, 7, 9], "kwarg": [5, 6, 7], "cumodul": 5, "culibrari": 5, "cumipmappedarrai": 5, "cutexref": 5, "cusurfref": 5, "cugraphicsresourc": [5, 6], "cutexobject_v1": 5, "cutexobject": [5, 6], "cusurfobject_v1": 5, "cusurfobject": [5, 6], "cuexternalmemori": 5, "cugraphexec": [5, 6], "cumemorypool": [5, 6], "cuuserobject": 5, "cuasynccallbackhandl": 5, "cugreenctx": 5, "safe": [5, 6], "cugreenctxcr": 5, "cuuuid": [5, 6], "cumemfabrichandle_v1": 5, "cuipceventhandle_v1": 5, "cuipceventhandl": 5, "cuipcmemhandle_v1": 5, "cuipcmemhandl": 5, "custreambatchmemopparams_v1": 5, "cuda_batch_mem_op_node_params_v1": 5, "cuda_batch_mem_op_node_param": 5, "cuasyncnotificationinfo": 5, "cuasynccallback": 5, "cudevprop_v1": 5, "cudevprop": 5, "culinkst": 5, "cuaccesspolicywindow_v1": 5, "cuda_kernel_node_params_v1": 5, "cuda_kernel_node_params_v2": 5, "cuda_memset_node_params_v1": 5, "cuda_host_node_params_v1": 5, "cuda_graph_instantiate_param": 5, "cukernelnodeattrid": 5, "cukernelnodeattrvalue_v1": 5, "custreamattrid": 5, "custreamattrvalue_v1": 5, "custreamattrvalu": 5, "cuexecaffinitysmcount_v1": 5, "cuexecaffinitysmcount": 5, "cuexecaffinityparam_v1": 5, 
"cuctxcreateparam": 5, "culibraryhostuniversalfunctionanddatat": 5, "custreamcallback": 5, "cuoccupancyb2ds": 5, "cuda_memcpy2d_v2": 5, "cuda_memcpy2d": 5, "cuda_memcpy3d_v2": 5, "cuda_memcpy3d_peer_v1": 5, "cuda_memcpy3d_p": 5, "cuda_array_descriptor_v2": 5, "cuda_array_descriptor": 5, "cuda_array3d_descriptor_v2": 5, "cuda_array_sparse_properties_v1": 5, "cuda_array_sparse_properti": 5, "cuda_array_memory_requirements_v1": 5, "cuda_array_memory_requir": 5, "cuda_resource_desc_v1": 5, "cuda_resource_desc": 5, "cuda_texture_desc_v1": 5, "cuda_texture_desc": 5, "cuda_resource_view_desc_v1": 5, "cuda_resource_view_desc": 5, "cutensormap": 5, "cuda_pointer_attribute_p2p_tokens_v1": 5, "cuda_pointer_attribute_p2p_token": 5, "cuda_launch_params_v1": 5, "cuda_launch_param": 5, "cuda_external_memory_handle_desc_v1": 5, "cuda_external_memory_handle_desc": 5, "cuda_external_memory_buffer_desc_v1": 5, "cuda_external_memory_buffer_desc": 5, "cuda_external_memory_mipmapped_array_desc_v1": 5, "cuda_external_memory_mipmapped_array_desc": 5, "cuda_external_semaphore_handle_desc_v1": 5, "cuda_external_semaphore_handle_desc": 5, "cuda_external_semaphore_signal_params_v1": 5, "cuda_external_semaphore_wait_params_v1": 5, "cuda_ext_sem_signal_node_params_v1": 5, "cuda_ext_sem_wait_node_params_v1": 5, "cumemgenericallocationhandle_v1": 5, "cumemgenericallocationhandl": 5, "cuarraymapinfo_v1": 5, "cuarraymapinfo": 5, "cumemlocation_v1": 5, "cumemallocationprop_v1": 5, "cumemallocationprop": 5, "cumulticastobjectprop_v1": 5, "cumulticastobjectprop": 5, "cumemaccessdesc_v1": 5, "cugraphexecupdateresultinfo_v1": 5, "cugraphexecupdateresultinfo": 5, "cumempoolprops_v1": 5, "cumempoolptrexportdata_v1": 5, "cumempoolptrexportdata": 5, "cuda_mem_alloc_node_params_v1": 5, "cuda_mem_alloc_node_param": 5, "cugraphnodeparam": 5, "cueglframe_v1": 5, "cueglfram": 5, "cueglstreamconnect": 5, "eglsream": [5, 6], "cuda_vers": 5, "12060": 5, "cu_ipc_handle_s": 5, "cu_stream_legaci": [5, 6], "link_sync_behavior": [5, 6], "cu_stream_per_thread": [5, 6], "cu_compute_accelerated_target_bas": 5, "65536": 5, "cu_graph_cond_assign_default": 5, "finish": [5, 6, 9], "cu_kernel_node_attribute_access_policy_window": 5, "cu_kernel_node_attribute_coop": 5, "cu_kernel_node_attribute_cluster_dimens": 5, "cu_kernel_node_attribute_cluster_scheduling_policy_prefer": 5, "cu_kernel_node_attribute_prior": 5, "cu_kernel_node_attribute_mem_sync_domain_map": 5, "cu_kernel_node_attribute_mem_sync_domain": 5, "cu_kernel_node_attribute_device_updatable_kernel_nod": 5, "cu_kernel_node_attribute_preferred_shared_memory_carveout": 5, "cu_stream_attribute_access_policy_window": 5, "cu_stream_attribute_synchronization_polici": 5, "cu_stream_attribute_prior": 5, "cu_stream_attribute_mem_sync_domain_map": 5, "cu_stream_attribute_mem_sync_domain": 5, "cu_memhostalloc_port": 5, "cumemhostalloc": [5, 6], "cu_memhostalloc_devicemap": 5, "cumemhostgetdevicepoint": [5, 6], "cu_memhostalloc_writecombin": 5, "fast": [5, 7], "faster": [5, 6, 7, 8], "dma": 5, "slow": 5, "sse4": 5, "movntdqa": 5, "cu_memhostregister_port": 5, "cu_memhostregister_devicemap": 5, "cu_memhostregister_iomemori": 5, "treat": [5, 6, 7], "o": [5, 6], "third": [5, 6], "parti": [5, 6], "pcie": [5, 6], "mark": [5, 6], "unprivileg": 5, "older": [5, 6], "cu_memhostregister_read_onli": 5, "cu_tensor_map_num_qword": 5, "skip": [5, 6], "cuda_nvscisync_attr_sign": 5, "cudevicegetnvscisyncattribut": 5, "nvscisyncattr": [5, 6], "cuda_nvscisync_attr_wait": 5, "waiter": [5, 6], 
"cu_mem_create_usage_tile_pool": 5, "tile": [5, 6], "cuda_cooperative_launch_multi_device_no_pre_launch_sync": 5, "cuda_cooperative_launch_multi_device_no_post_launch_sync": 5, "subsequ": [5, 6, 7], "push": [5, 6], "cuda_array3d_lay": 5, "collect": [5, 6], "cuda_array3d_2darrai": 5, "cuda_array3d_surface_ldst": 5, "cuda_array3d_cubemap": 5, "six": [5, 6], "gather": [5, 6], "cuda_array3d_depth_textur": 5, "depth_textur": 5, "cuda_array3d_color_attach": 5, "cuda_array3d_spars": 5, "cuda_array3d_deferred_map": 5, "cuda_array3d_video_encode_decod": 5, "decod": 5, "cu_trsa_override_format": 5, "overrid": [5, 6], "texref": 5, "infer": [5, 6], "cutexrefsetarrai": 5, "cu_trsf_read_as_integ": 5, "cutexrefsetflag": 5, "cutexobjectcr": [5, 6], "cu_trsf_normalized_coordin": 5, "coordin": [5, 6], "dim": [5, 9], "cu_trsf_srgb": 5, "convers": [5, 6], "cu_trsf_disable_trilinear_optim": 5, "trilinear": [5, 6], "cu_trsf_seamless_cubemap": 5, "seamless": [5, 6], "cu_launch_param_end_as_int": 5, "cu_launch_param_end": [5, 6], "culaunchkernel": [5, 9], "cu_launch_param_buffer_pointer_as_int": 5, "cu_launch_param_buffer_point": [5, 6], "honor": 5, "cu_launch_param_buffer_s": [5, 6], "cu_launch_param_buffer_size_as_int": 5, "cu_param_tr_default": 5, "texunit": 5, "cu_device_cpu": 5, "cu_device_invalid": 5, "max_plan": 5, "cuda_egl_infinite_timeout": 5, "cueglstreamconsumeracquirefram": [5, 6, 12], "infinit": [5, 6], "section": [5, 6], "low": [5, 6, 9, 11, 12], "cugeterrorstr": [5, 6], "string": [5, 6, 7, 9, 14], "descript": [5, 6], "pstr": 5, "recogn": [5, 6], "cudageterrorstr": [5, 6], "cugeterrornam": [5, 6, 9], "cudageterrornam": [5, 6], "cudrivergetvers": [5, 6], "latest": [5, 6, 16, 17, 18], "driververs": [5, 6], "1000": [5, 6], "9020": [5, 6], "cudadrivergetvers": [5, 6], "cudaruntimegetvers": [5, 6, 25], "cudeviceget": [5, 9], "cudevicegetcount": [5, 6], "cudevicegetnam": [5, 6], "cudevicegetuuid": 5, "cudevicegetluid": 5, "cudevicetotalmem": 5, "cudevicegetexecaffinitysupport": 5, "greater": [5, 6, 7], "cudagetdevicecount": [5, 6], "length": [5, 6], "dev": [5, 6], "ascii": [5, 6, 9], "cudagetdeviceproperti": [5, 6], "cudevicegetuuid_v2": 5, "supplant": 5, "octet": 5, "obj": [5, 6], "mig": 5, "subscrib": 5, "luid": [5, 6], "devicenodemask": 5, "numbyt": 5, "cudamemgetinfo": [5, 6], "pformat": 5, "allocat": [5, 6], "maxwidthinel": [5, 6], "attrib": [5, 6], "pi": 5, "involv": [5, 6, 7], "cumemallocpitch": [5, 6], "simultan": [5, 6], "reli": [5, 6], "known": [5, 6], "slot": [5, 6], "tesla": [5, 6], "vista": [5, 6], "doesn": [5, 6, 9], "t": [5, 6, 9], "cudadevicegetattribut": [5, 6], "nvscisyncattrlist": [5, 6], "nvscisyncattrkey_requiredperm": [5, 6], "how": [5, 6, 9], "least": [5, 6], "orthogon": [5, 6], "develop": [5, 6, 8, 9], "input": [5, 6, 7, 9], "nvscisyncaccessperm_signalonli": [5, 6], "nvscisyncaccessperm_waitonli": [5, 6], "nvscisyncaccessperm_waitsign": [5, 6], "nvscisyncattrkey_primitiveinfo": [5, 6], "nvscisyncattrvalprimitivetype_sysmemsemaphor": [5, 6], "nvscisyncattrvalprimitivetype_syncpoint": [5, 6], "tegra": [5, 6], "nvscisyncattrvalprimitivetype_sysmemsemaphorepayload64b": [5, 6], "ga10x": [5, 6], "nvscisyncattrkey_gpuid": [5, 6], "cuimportexternalsemaphor": 5, "cudestroyexternalsemaphor": 5, "cusignalexternalsemaphoresasync": 5, "cuwaitexternalsemaphoresasync": 5, "cudevicesetmempool": [5, 6], "cudamempool_t": [5, 6], "cudevicegetdefaultmempool": [5, 6], "cudevicegetmempool": [5, 6], "cumempoolcr": [5, 6], "cumempooldestroi": [5, 6], "cumemallocfrompoolasync": [5, 6], "never": [5, 6], 
"pool_out": [5, 6], "cumempooltrimto": [5, 6], "cumempoolgetattribut": [5, 6], "cumempoolsetattribut": [5, 6], "cumempoolsetaccess": [5, 6], "typenam": [5, 6], "li": [5, 6], "omit": [5, 6, 7], "cu_device_attribute_flush_flush_gpu_direct_rdma_opt": 5, "cudeviceprimaryctxretain": 5, "successfulli": [5, 6], "cudeviceprimaryctxreleas": 5, "cudeviceprimaryctxreset": 5, "unlik": [5, 6], "cuctxcreat": [5, 9], "newli": [5, 6], "smi": 5, "pleas": [5, 6, 7], "alwai": [5, 6], "cudeviceprimaryctxsetflag": [5, 6], "pctx": 5, "cuctxgetcacheconfig": [5, 6], "cuctxgetdevic": 5, "cuctxgetflag": [5, 6], "cuctxgetlimit": [5, 6], "cuctxpopcurr": 5, "cuctxpushcurr": 5, "cuctxsetcacheconfig": [5, 6], "cuctxsetlimit": [5, 6], "cuctxsynchron": [5, 6], "earlier": [5, 6, 9], "method": 5, "pop": [5, 6], "pervious": 5, "ones": 5, "lsb": [5, 6], "decreas": [5, 6], "lower": [5, 6, 7, 8], "primit": [5, 6, 10], "heurist": [5, 6, 7], "logic": [5, 6], "processor": [5, 6], "p": [5, 6], "power": [5, 6], "reduc": [5, 6, 7], "resiz": [5, 6], "thrash": [5, 6], "cost": [5, 6], "potenti": [5, 6], "cucoredumpsetattributeglob": 5, "rais": [5, 6, 9], "under": [5, 6], "core": 5, "dump": 5, "taken": [5, 6, 7], "cucoredumpsetattribut": 5, "becom": [5, 6], "impli": [5, 6, 7], "titl": [5, 6], "learn": [5, 6, 8, 9], "exhibit": [5, 6], "cudeviceprimaryctxgetst": [5, 6], "cuctxsetflag": 5, "cudasetdeviceflag": [5, 6], "inact": 5, "cudagetdeviceflag": [5, 6], "clean": [5, 6, 9], "howev": [5, 6, 7], "cudadevicereset": [5, 6], "below": [5, 6, 7], "restor": [5, 6], "cu_coredump_enable_user_trigg": 5, "true": [5, 6, 7, 9], "cuctxcreate_v3": 5, "tupl": [5, 6, 7, 28], "numparam": 5, "latter": 5, "former": 5, "round": [5, 6, 7], "henc": [5, 6, 7], "imper": 5, "cuctxgetexecaffin": 5, "volta": [5, 6], "cuctxcreate_v4": 5, "ctxcreateparam": 5, "execaffin": 5, "softwar": 5, "pcigparam": 5, "id3d12commandqueu": 5, "cumemalloc": [5, 6, 9], "cumemallochost": [5, 6], "cumemallocmanag": [5, 6], "though": [5, 6], "old": 5, "That": [5, 9], "cuctxsetcurr": [5, 6], "unbound": [5, 6], "top": [5, 6, 8, 9], "cuctxgetcurr": [5, 6], "cudasetdevic": [5, 6], "cudagetdevic": [5, 6], "cuctxgetsharedmemconfig": 5, "cuctxgetstreampriorityrang": [5, 6], "cuctxgetid": 5, "ctxid": 5, "life": [5, 6], "meet": [5, 6], "w": [5, 6, 7], "nearest": [5, 6, 7], "etc": [5, 6], "discuss": [5, 6], "isn": 5, "origin": [5, 6], "immedi": [5, 6, 9], "cudaerrorsyncdepthexceed": [5, 6], "mind": [5, 6], "larg": [5, 6], "longer": [5, 6, 16, 17], "cudaerrorlaunchpendingcountexceed": [5, 6], "cudagetlasterror": [5, 6], "sustain": [5, 6], "upfront": [5, 6], "0b": [5, 6], "128b": [5, 6], "pure": [5, 6], "cudadevicesetlimit": [5, 6], "pvalu": [5, 6], "cudadevicegetlimit": [5, 6], "pconfig": 5, "cufuncsetcacheconfig": [5, 6], "cudadevicegetcacheconfig": [5, 6], "config": [5, 6, 20], "cukernelsetcacheconfig": 5, "unless": [5, 6], "noth": [5, 6], "recent": [5, 6, 9], "cudadevicesetcacheconfig": [5, 6], "introduc": [5, 6, 9], "break": 5, "4020": 5, "greatest": [5, 6], "leastprior": [5, 6], "greatestprior": [5, 6], "convent": [5, 6], "meaning": [5, 6], "custreamcreatewithprior": [5, 6], "custreamgetprior": [5, 6], "cudadevicegetstreampriorityrang": [5, 6], "cuctxresetpersistingl2cach": 5, "pexecaffin": 5, "cuctxrecordev": 5, "hctx": 5, "hevent": 5, "cuctxwaitev": 5, "examin": [5, 6], "cudaevent_t": [5, 6], "cugreenctxrecordev": 5, "cugreenctxwaitev": 5, "cueventrecord": [5, 6], "conflict": [5, 6], "submit": [5, 6], "custreamwaitev": [5, 6], "ongo": [5, 6], "cumoduleloadingmod": 5, "lazi": 5, 
"cu_module_eager_load": 5, "cu_module_lazy_load": 5, "cumoduleload": 5, "fname": 5, "filenam": 5, "lazili": 5, "nvcc": [5, 6], "handwritten": 5, "fatbin": 5, "cumodulegetfunct": [5, 9], "cumodulegetglob": 5, "cumodulegettexref": 5, "cumoduleloaddata": [5, 9], "cumoduleloaddataex": 5, "cumoduleloadfatbinari": 5, "cumoduleunload": [5, 9], "hand": [5, 6, 8], "numopt": [5, 7], "optionvalu": 5, "fatcubin": 5, "fat": 5, "programm": [5, 6, 7], "hmod": 5, "unload": [5, 6], "culibrarygetmodul": 5, "cumodulegetloadingmod": 5, "cuda_module_load": 5, "hfunc": 5, "retriev": [5, 6, 9], "cumodulegetfunctioncount": 5, "mod": 5, "cumoduleenumeratefunct": 5, "numfunct": 5, "partial": 5, "cufunctionisload": 5, "incur": [5, 6], "cufunctionload": 5, "cufuncisload": 5, "cufuncload": 5, "One": [5, 7, 9], "cudagetsymboladdress": [5, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudagetsymbols": [5, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "culinkcr": 5, "eventu": [5, 6], "culinkdestroi": 5, "machin": [5, 9], "accumul": 5, "culinkadddata": 5, "culinkaddfil": 5, "relocat": [5, 7], "rdc": [5, 7], "final": [5, 9], "culinkcomplet": 5, "consequ": 5, "cast": [5, 6], "stateout": 5, "ownership": [5, 6], "cubinout": 5, "sizeout": 5, "receiv": [5, 6], "jitopt": 5, "jitoptionsvalu": 5, "numjitopt": 5, "libraryopt": 5, "libraryoptionvalu": 5, "numlibraryopt": 5, "eager": 5, "eagerli": 5, "culibrarygetkernel": 5, "pkernel": 5, "cukernelgetfunct": 5, "culibrarygetkernelcount": 5, "lib": 5, "culibraryenumeratekernel": 5, "numkernel": 5, "pmod": 5, "pfunc": 5, "cukernelgetlibrari": 5, "plib": 5, "culibrarygetglob": 5, "culibrarygetmanag": 5, "atleast": 5, "culibrarygetunifiedfunct": 5, "fptr": 5, "denot": [5, 6, 7], "cukernelgetattribut": 5, "cu_func_cache_mode_ca": 5, "split": [5, 6, 7], "cufuncgetattribut": [5, 6], "Not": [5, 6, 10], "irrespect": 5, "maxim": [5, 6, 9], "stricter": 5, "lock": [5, 6], "overridden": 5, "cukernelgetnam": 5, "reload": 5, "mangl": [5, 7], "declar": [5, 6, 16], "linkag": 5, "cukernelgetparaminfo": 5, "paramindex": 5, "paramoffset": 5, "params": 5, "cudagraphkernelnodesetparam": [5, 6], "cufuncgetparaminfo": 5, "cumemgetinfo": [5, 6], "accord": [5, 6], "tenet": [5, 6], "situat": [5, 6], "estim": [5, 6], "prone": [5, 6], "deviat": [5, 6], "soc": [5, 6], "exclud": [5, 6], "swap": [5, 6], "move": [5, 6, 7], "area": [5, 6], "app": [5, 6], "cuarray3dcr": [5, 6], "cuarray3dgetdescriptor": [5, 6], "cuarraycr": [5, 6], "cuarraydestroi": [5, 6], "cuarraygetdescriptor": [5, 6], "cumemcpy2d": [5, 6], "cumemcpy2dasync": [5, 6], "cumemcpy2dunalign": [5, 6], "cumemcpy3d": [5, 6], "cumemcpy3dasync": [5, 6], "cumemcpyatoa": 5, "cumemcpyatod": 5, "cumemcpyatoh": 5, "cumemcpyatohasync": 5, "cumemcpydtoa": 5, "cumemcpydtod": [5, 6], "cumemcpydtodasync": [5, 6], "cumemcpydtoh": [5, 6], "cumemcpydtohasync": [5, 6, 9], "cumemcpyhtoa": 5, "cumemcpyhtoaasync": 5, "cumemcpyhtod": [5, 6], "cumemcpyhtodasync": [5, 6, 9], "cumemfre": [5, 6, 9], "cumemfreehost": [5, 6], "cumemgetaddressrang": 5, "cumemsetd2d8": [5, 6], "cumemsetd2d16": [5, 6], "cumemsetd2d32": [5, 6], "cumemsetd8": [5, 6], "cumemsetd16": [5, 6], "cumemsetd32": [5, 6], "kind": [5, 6, 7], "cudamalloc": [5, 6, 7], "elementsizebyt": 5, "coalesc": [5, 6], "largest": 5, "transact": 5, "correctli": 5, "speed": [5, 8], "ppitch": 5, "column": [5, 6, 7], "especi": [5, 6], "cutexrefsetaddress2d": 5, "cudamallocpitch": [5, 6], "implict": 5, "cumemfreeasync": [5, 6], "cudafre": [5, 6], "pbase": 5, "psize": 5, "track": 
[5, 6, 13], "cumemcpi": [5, 6], "bandwidth": [5, 6], "excess": [5, 6], "degrad": [5, 6], "As": [5, 6, 16], "sparingli": [5, 6], "exchang": [5, 6], "pp": 5, "cudamallochost": [5, 6], "cudafreehost": [5, 6], "just": [5, 6], "wc": [5, 6], "quickli": [5, 6], "effici": [5, 6], "cudahostalloc": [5, 6], "pdptr": 5, "cudahostgetdevicepoint": [5, 6], "cumemhostgetflag": [5, 6], "pflag": [5, 6], "cudahostgetflag": [5, 6], "obei": [5, 6], "custreamattachmemasync": [5, 6], "won": [5, 6], "oversubscript": [5, 6], "evict": [5, 6], "room": [5, 6], "emploi": [5, 6], "pattern": [5, 6], "cumemadvis": [5, 6], "cumemprefetchasync": [5, 6], "storag": [5, 6], "among": [5, 6, 9], "cuda_managed_force_device_alloc": [5, 6], "forc": [5, 6], "arm": 5, "discret": [5, 6], "drive": 5, "px": 5, "cudamallocmanag": [5, 6], "cudeviceregisterasyncnotif": 5, "callbackfunc": [5, 6], "likewis": [5, 6], "distinguish": [5, 6], "10m": [5, 6], "cudeviceunregisterasyncnotif": 5, "stop": [5, 6], "cudevicegetbypcibusid": [5, 6], "pcibusid": [5, 6], "hexadecim": [5, 6], "form": [5, 6, 7, 9], "cudevicegetpcibusid": [5, 6], "cudadevicegetbypcibusid": [5, 6], "charact": [5, 6, 7], "cudadevicegetpcibusid": [5, 6], "cuipcgeteventhandl": [5, 6], "cuipcopeneventhandl": [5, 6], "cueventdestroi": [5, 6], "come": [5, 6, 11, 12], "cuapidevicegetattribut": 5, "phandl": 5, "cueventcr": [5, 6], "cuipcgetmemhandl": [5, 6], "cuipcopenmemhandl": [5, 6], "cuipcclosememhandl": [5, 6], "cudaipcgeteventhandl": [5, 6], "behav": [5, 6], "phevent": [5, 6], "cudaipcopeneventhandl": [5, 6], "lightweight": [5, 6], "advers": [5, 6], "usabl": [5, 6], "cudevicecanaccessp": [5, 6], "increment": [5, 6], "cudaipcopenmemhandl": [5, 6], "close": [5, 6, 9], "decrement": [5, 6], "unaffect": [5, 6], "cudaipcclosememhandl": [5, 6], "ptr": [5, 6], "unpopul": [5, 6], "3rd": 5, "cudahostunregist": [5, 6], "bytecount": 5, "cudamemcpi": [5, 6], "cudamemcpytosymbol": [5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudamemcpyfromsymbol": [5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cumemcpyp": [5, 6], "cumemcpy3dp": [5, 6], "cumemcpypeerasync": [5, 6], "cumemcpy3dpeerasync": [5, 6], "cudamemcpyp": [5, 6], "dstoffset": 5, "cudamemcpytoarrai": 5, "srcoffset": 5, "evenli": 5, "divis": [5, 7], "cudamemcpyfromarrai": 5, "psrc": 5, "cudamemcpyarraytoarrai": 5, "pcopi": 5, "cumemorytype_enum": 5, "intra": [5, 6], "significantli": [5, 6], "slower": 5, "cudamemcpy2d": [5, 6], "cudamemcpy2dtoarrai": [5, 6], "cudamemcpy2dfromarrai": [5, 6], "slice": [5, 6], "cudamemcpy3d": [5, 6], "cudamemcpy3dp": [5, 6], "cumemcpyasync": [5, 6], "cudastream_t": [5, 6, 13], "cumemsetd2d8async": [5, 6], "cumemsetd2d16async": [5, 6], "cumemsetd2d32async": [5, 6], "cumemsetd8async": [5, 6], "cumemsetd16async": [5, 6], "cumemsetd32async": [5, 6], "cudamemcpyasync": [5, 6], "cudamemcpytosymbolasync": [5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudamemcpyfromsymbolasync": [5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudamemcpypeerasync": [5, 6], "cudamemcpytoarrayasync": 5, "cudamemcpyfromarrayasync": 5, "cudamemcpy2dasync": [5, 6], "cudamemcpy2dtoarrayasync": [5, 6], "cudamemcpy2dfromarrayasync": [5, 6], "cudamemcpy3dasync": [5, 6], "cudamemcpy3dpeerasync": [5, 6], "uc": 5, "n": [5, 6, 7, 9], "cudamemset": [5, 6], "ui": 5, "fastest": [5, 6], "cudamemset2d": [5, 6], "cudamemsetasync": [5, 6], "cudamemset2dasync": [5, 6], "pallocatearrai": 5, "dimension": [5, 6], "4x16": 5, 
"float16": 5, "cudamallocarrai": [5, 6], "harrai": [5, 6], "parraydescriptor": 5, "subroutin": 5, "cudaarraygetinfo": [5, 6], "cuarraygetsparseproperti": 5, "sparseproperti": [5, 6], "cumipmappedarraygetlevel": [5, 6], "cumipmappedarraygetsparseproperti": 5, "entir": [5, 6, 7, 9], "cumemmaparrayasync": [5, 6], "cuarraygetmemoryrequir": 5, "memoryrequir": [5, 6], "cumipmappedarraygetmemoryrequir": 5, "cuarraygetplan": [5, 6], "planeidx": [5, 6], "pplanearrai": [5, 6], "cudaarraygetplan": [5, 6], "cudafreearrai": [5, 6], "special": [5, 6], "cusurfrefsetarrai": 5, "breviti": [5, 6], "sake": [5, 6], "ex": [5, 6], "texture1d_width": 5, "cudamalloc3darrai": [5, 6], "cumipmappedarraycr": [5, 6], "pmipmappedarraydesc": 5, "nummipmaplevel": 5, "floor": [5, 6], "log2": [5, 6], "texture1d_mipmapped_width": 5, "cumipmappedarraydestroi": [5, 6], "cudamallocmipmappedarrai": [5, 6], "hmipmappedarrai": 5, "plevelarrai": 5, "cudagetmipmappedarraylevel": [5, 6], "cudafreemipmappedarrai": [5, 6], "cumemgethandleforaddressrang": 5, "fulli": [5, 6], "va": 5, "uva": 5, "cumemaddressfre": 5, "memaddressreserv": 5, "prop": [5, 6], "transmit": 5, "recipi": [5, 6], "cumemgetallocationgranular": 5, "imex": [5, 6], "entiti": [5, 6, 7], "aim": [5, 6], "fine": [5, 6], "grain": [5, 6], "grant": [5, 6], "modprob": [5, 6], "cli": [5, 6, 9], "cumemreleas": 5, "cumemimportfromshareablehandl": 5, "shareabl": [5, 6], "cumulticastgetgranular": 5, "cu_multicast_minimum_granular": 5, "cu_multicast_recommended_granular": 5, "cumemsetaccess": 5, "cumemunmap": 5, "mapinfolist": 5, "subregion": [5, 6], "cuarraysparsesubresourcetype_enum": 5, "miplevel": [5, 6], "span": 5, "small": [5, 7], "constitut": 5, "sparselevel": 5, "offsetx": 5, "offseti": 5, "offsetz": 5, "extentwidth": 5, "extentheight": 5, "extentdepth": 5, "miptail": 5, "don": [5, 6], "sub": [5, 6], "unreleas": 5, "desc": [5, 6], "cumemgetaccess": 5, "afterward": 5, "vulkan": [5, 6], "shareablehandl": [5, 6], "oshandl": 5, "shhandletyp": 5, "sli": [5, 6], "minim": [5, 6, 7], "cumemgetallocationpropertiesfromhandl": 5, "cumemretainallocationhandl": 5, "necessarili": [5, 6], "expos": 5, "promis": [5, 6], "realloc": [5, 6], "compliant": [5, 6], "tempor": [5, 6], "inter": [5, 6], "contract": [5, 6, 7], "therefor": [5, 6, 9, 17], "basic": [5, 6, 9], "minbytestokeep": [5, 6], "tri": [5, 6], "fewer": [5, 6, 8], "trim": [5, 6, 22], "trimto": [5, 6], "eg": [5, 6], "cumempoolgetaccess": [5, 6], "cumempoolexporttoshareablehandl": [5, 6], "haven": [5, 6], "cumempoolimportfromshareablehandl": [5, 6], "cumempoolexportpoint": [5, 6], "cumempoolimportpoint": [5, 6], "handle_out": [5, 6], "sharedata_out": [5, 6], "sharedata": [5, 6], "ptr_out": [5, 6], "cumulticastcr": 5, "broadcast": 5, "cumulticastadddevic": 5, "cumulticastbindmem": 5, "cumulticastbindaddr": 5, "memmori": 5, "cumulticastunbind": 5, "mchandl": 5, "similarli": 5, "mcoffset": 5, "memoffset": 5, "beeen": 5, "memptr": 5, "cudamallocasync": [5, 6], "unbind": 5, "upto": 5, "warn": [5, 7], "distinct": [5, 6], "look": [5, 6], "cupointergetattribut": [5, 6], "unnecessari": 5, "multidimension": [5, 6], "disjoint": [5, 6], "boolean": 5, "against": [5, 6], "mappabl": 5, "cupointersetattribut": 5, "cudapointergetattribut": [5, 6], "devptr": [5, 6], "cumemprefetchasync_v2": 5, "enqueu": [5, 6], "subset": [5, 6], "serv": [5, 6], "improv": [5, 6, 9], "etih": [5, 6], "OR": [5, 6], "advic": [5, 6], "cumemadvise_v2": [5, 6], "impos": [5, 6], "go": [5, 6, 9], "duplic": [5, 6], "collaps": [5, 6], "arbitrari": [5, 6], "avoid": [5, 6], 
"resolut": [5, 6], "constantli": [5, 6], "But": [5, 6], "indefinit": [5, 6], "impact": [5, 6], "se": [5, 6], "scenario": [5, 6], "infrequ": [5, 6], "overhead": [5, 6, 8], "help": [5, 6, 8, 9], "procesor": [5, 6], "alu": [5, 6], "cumemrangegetattribut": [5, 6], "datas": [5, 6], "simpli": [5, 6], "sizeof": [5, 6], "cudamemrangegetattribut": [5, 6], "numattribut": [5, 6], "unset": [5, 6], "custreamcr": [5, 6, 9], "phstream": 5, "custreamdestroi": [5, 6, 9], "cugreenctxstreamcr": 5, "custreamgetflag": [5, 6], "custreamsynchron": [5, 6, 9], "custreamaddcallback": [5, 6], "cudastreamcr": [5, 6], "cudastreamcreatewithflag": [5, 6], "preferenti": [5, 6], "preempt": [5, 6], "lowest": [5, 6], "cudastreamcreatewithprior": [5, 6], "cudastreamgetprior": [5, 6], "cudastreamgetflag": [5, 6], "custreamgetid": [5, 6], "streamid": [5, 6], "cudastreamperthread": [5, 6], "cudastreamgetid": [5, 6], "custreamgetctx": 5, "custreamgetctx_v2": 5, "till": 5, "pgreenctx": 5, "cuevent_capture_flag": 5, "cudastreamwaitev": [5, 6], "item": [5, 6], "mandat": [5, 6], "idl": [5, 6], "durat": [5, 6], "behind": [5, 6], "consecut": [5, 6], "culaunchhostfunc": [5, 6], "cudastreamaddcallback": [5, 6], "slate": [5, 6], "custreamgetcaptureinfo": 5, "unsaf": [5, 6], "hgraph": [5, 6], "dependencydata": [5, 6], "numdepend": [5, 6], "place": [5, 6, 9], "cudagraph_t": [5, 6], "facilit": [5, 6], "determinist": [5, 6], "encourag": [5, 6], "fashion": [5, 6], "replai": [5, 6], "whenev": [5, 6], "phgraph": 5, "rule": [5, 6], "cugraphdestroi": 5, "capturestatu": 5, "unspecifi": [5, 6], "capturestatus_out": [5, 6], "id_out": [5, 6], "graph_out": [5, 6], "progress": [5, 6], "unreach": [5, 6], "dependencies_out": [5, 6], "absent": [5, 6], "numdependencies_out": [5, 6], "custreamgetcaptureinfo_v3": 5, "edgedata_out": [5, 6], "custreamupdatecapturedependencies_v2": 5, "keyword": [5, 6], "singli": [5, 6], "constrain": [5, 6], "whole": [5, 6, 7], "legal": [5, 6], "revert": [5, 6], "cudastreamattachmemasync": [5, 6], "cudastreamqueri": [5, 6, 13], "ote_null_stream": 5, "cudastreamsynchron": [5, 6], "cudastreamdestroi": [5, 6], "custreamcopyattribut": 5, "custreamgetattribut": 5, "value_out": [5, 6], "cueventelapsedtim": [5, 6], "cudaeventcr": [5, 6], "cudaeventcreatewithflag": [5, 6], "cudaeventrecord": [5, 6], "incomplet": [5, 6], "cudaeventqueri": [5, 6], "busi": [5, 6], "cudaeventsynchron": [5, 6], "cudaeventdestroi": [5, 6], "hstart": 5, "hend": 5, "elaps": [5, 6], "around": [5, 6], "microsecond": [5, 6], "measur": [5, 6], "signific": [5, 6], "pmillisecond": 5, "ms": [5, 6], "cudaeventelapsedtim": [5, 6], "cuimportexternalmemori": 5, "memhandledesc": [5, 6], "extmem_out": [5, 6], "fd": [5, 6], "utf": [5, 6], "id3d12devic": [5, 6], "createsharedhandl": [5, 6], "id3d12heap": [5, 6], "id3d12resourc": [5, 6], "idxgiresource1": [5, 6], "id3d11resourc": [5, 6], "idxgiresourc": [5, 6], "getsharedhandl": [5, 6], "nvscibufobject": [5, 6], "cudestroyexternalmemori": 5, "cuexternalmemorygetmappedbuff": 5, "cuexternalmemorygetmappedmipmappedarrai": 5, "vkinvalidatemappedmemoryrang": [5, 6], "vkflushmappedmemoryrang": [5, 6], "pipelin": [5, 6], "chapter": [5, 6], "extmem": [5, 6], "bufferdesc": [5, 6], "volatil": [5, 6], "deriv": [5, 6, 9], "mipmapdesc": [5, 6], "semhandledesc": [5, 6], "extsem_out": [5, 6], "id3d12fenc": [5, 6], "id3d11fenc": [5, 6], "nvscisyncobj": [5, 6], "idxgikeyedmutex": [5, 6], "Such": [5, 6], "nvscisyncf": 5, "nvscisyncattrkey_requiredeterministicf": [5, 6], "indeterminist": [5, 6], "unblock": [5, 6], "incorrectli": [5, 6], 
"amongst": [5, 6], "cuda_external_semaphore_param": 5, "keyedmutex": [5, 6], "timeoutm": [5, 6], "interv": [5, 6], "finit": [5, 6], "macro": [5, 6, 7], "extsem": [5, 6], "cu_device_attribute_can_use_stream_wait_value_nor_v2": 5, "improp": 5, "indirectli": 5, "versu": 5, "expans": 5, "cufunctionloadingst": 5, "cu_function_loading_state_unload": 5, "cu_function_loading_state_load": 5, "cu_function_loading_state_max": 5, "With": [5, 8, 9], "few": 5, "execept": 5, "cudafuncgetattribut": [5, 6], "cudafuncsetattribut": [5, 6], "cudafuncsetcacheconfig": [5, 6], "cufuncgetmodul": 5, "cufuncgetnam": 5, "burden": [5, 6], "manner": 5, "commonli": [5, 6, 9], "cufuncsetblockshap": 5, "cufuncsetshareds": 5, "cuparamsets": 5, "cuparamseti": 5, "cuparamsetf": 5, "cuparamsetv": 5, "met": [5, 6], "cudalaunchkernel": [5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "contingu": 5, "unavai": 5, "cudalaunchkernelex": [5, 6], "shape": [5, 6], "overwritten": [5, 6], "cudalaunchcooperativekernel": [5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "launchparamslist": 5, "themselv": 5, "cudalaunchcooperativekernelmultidevic": [5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "contrast": 5, "cugraphcr": 5, "cugraphaddchildgraphnod": 5, "cugraphaddemptynod": 5, "cugraphaddkernelnod": 5, "cugraphaddhostnod": 5, "cugraphaddmemcpynod": 5, "cugraphaddmemsetnod": 5, "cugraphinstanti": 5, "cugraphgetnod": 5, "cugraphgetrootnod": 5, "cugraphgetedg": 5, "cugraphclon": 5, "nodeparam": [5, 6], "root": [5, 6, 7], "phgraphnod": [5, 6], "cugraphkernelnodegetparam": 5, "cugraphkernelnodesetparam": 5, "hnode": [5, 6], "cudagraphnode_t": [5, 6], "cugraphnodesetparam": 5, "operand": [5, 6], "cugraphmemcpynodegetparam": 5, "cugraphmemcpynodesetparam": 5, "memsetparam": 5, "cugraphmemsetnodegetparam": 5, "cugraphmemsetnodesetparam": 5, "pre": [5, 6, 7], "cugraphhostnodegetparam": 5, "cugraphhostnodesetparam": 5, "childgraph": [5, 6], "cugraphchildgraphnodegetgraph": 5, "cugraphnodefindinclon": 5, "transit": [5, 6], "phase": [5, 6, 7], "cugraphaddeventrecordnod": 5, "cugraphaddeventwaitnod": 5, "cugrapheventrecordnodegetev": 5, "event_out": [5, 6], "cugrapheventrecordnodesetev": 5, "cugrapheventwaitnodegetev": 5, "cugrapheventwaitnodesetev": 5, "cugraphaddexternalsemaphoressignalnod": 5, "cugraphexternalsemaphoressignalnodegetparam": 5, "cugraphexternalsemaphoressignalnodesetparam": 5, "cugraphexecexternalsemaphoressignalnodesetparam": 5, "cugraphaddexternalsemaphoreswaitnod": 5, "params_out": [5, 6], "cugraphexternalsemaphoreswaitnodegetparam": 5, "cugraphexternalsemaphoreswaitnodesetparam": 5, "cugraphexecexternalsemaphoreswaitnodesetparam": 5, "cugraphaddbatchmemopnod": 5, "cugraphbatchmemopnodegetparam": 5, "cugraphbatchmemopnodesetparam": 5, "nodeparams_out": 5, "cugraphexecbatchmemopnodesetparam": 5, "hgraphexec": [5, 6], "graphexec": [5, 6], "membar": 5, "modif": [5, 6], "cudagraphexec_t": [5, 6], "cugraphexecnodesetparam": 5, "cugraphaddmemallocnod": 5, "cugraphaddmemfreenod": 5, "unfre": [5, 6], "delet": [5, 6], "cugraphmemallocnodegetparam": 5, "cudevicegraphmemtrim": 5, "cudevicegetgraphmemattribut": 5, "cudevicesetgraphmemattribut": 5, "cugraphmemfreenodegetparam": 5, "twice": [5, 6], "dptr_out": [5, 6], "originalgraph": [5, 6], "phgraphclon": 5, "horiginalnod": 5, "hclonedgraph": 5, "horiginalgraph": 5, "phclonednod": 5, "phnode": 5, "cugraphnodegettyp": 5, "numnod": [5, 6], "cugraphnodegetdepend": 5, "cugraphnodegetdependentnod": 5, 
"numrootnod": 5, "rootnod": 5, "numedg": [5, 6], "endpoint": [5, 6], "cugraphadddepend": 5, "cugraphremovedepend": 5, "cugraphgetedges_v2": 5, "edgedata": [5, 6], "alon": [5, 6], "lossi": 5, "cugraphnodegetdependencies_v2": 5, "numdependentnod": 5, "dependentnod": 5, "cugraphnodegetdependentnodes_v2": 5, "from_": [5, 6], "cugraphadddependencies_v2": 5, "cugraphremovedependencies_v2": 5, "sever": [5, 6], "vice": [5, 6], "versa": [5, 6], "phgraphexec": 5, "cugraphexecdestroi": 5, "cudagraphlaunch": [5, 6], "cugraphlaunch": 5, "offend": [5, 6], "cugraphexecgetflag": 5, "cugraphexeckernelnodesetparam": 5, "cdp": [5, 6], "cugraphexecmemcpynodesetparam": 5, "cugraphexecmemsetnodesetparam": 5, "cugraphexechostnodesetparam": 5, "cugraphexecchildgraphnodesetparam": 5, "cugraphexeceventrecordnodesetev": 5, "cugraphexeceventwaitnodesetev": 5, "aspect": [5, 6], "oportunist": [5, 6], "cugraphnodeseten": 5, "isen": [5, 6], "reenabl": [5, 6], "cugraphnodegeten": 5, "flight": 5, "assing": [5, 6], "resultinfo": [5, 6], "exit": [5, 6], "herrornode_out": [5, 6], "succe": [5, 6, 7], "cugraphkernelnodegetattribut": 5, "dot": [5, 6], "cuuserobjectcr": 5, "initialrefcount": [5, 6], "refcount": [5, 6], "manual": [5, 6, 9], "object_out": [5, 6], "cuuserobjectretain": 5, "cuuserobjectreleas": 5, "cugraphretainuserobject": 5, "cugraphreleaseuserobject": 5, "int_max": [5, 6], "tag": [5, 6, 7], "past": [5, 6], "brace": [5, 6], "cugraphaddnode_v2": 5, "defaultlaunchvalu": [5, 6], "children": [5, 6], "phandle_out": [5, 6], "blocksiz": [5, 6], "dynamicsmems": [5, 6], "numblock": [5, 6], "cudaoccupancymaxactiveblockspermultiprocessor": [5, 6], "suppress": [5, 6, 7], "maxwel": [5, 6], "tune": [5, 6], "cudaoccupancymaxactiveblockspermultiprocessorwithflag": [5, 6], "cuoccupancymaxpotentialblocks": 5, "blocksizetodynamicsmems": 5, "blocksizelimit": 5, "suggest": 5, "achiev": 5, "fewest": 5, "mingrids": 5, "vari": [5, 6], "unari": 5, "signatur": [5, 6, 13, 23], "cudaoccupancymaxpotentialblocks": [5, 6], "cuoccupancymaxpotentialblocksizewithflag": 5, "cudaoccupancymaxpotentialblocksizewithflag": [5, 6], "cuoccupancyavailabledynamicsmemperblock": 5, "cuoccupancymaxpotentialclusters": 5, "clusters": 5, "cuoccupancymaxactiveclust": 5, "co": 5, "numclust": 5, "els": [5, 9], "presdesc": [5, 6], "ptexdesc": [5, 6], "presviewdesc": [5, 6], "ptexobject": [5, 6], "sizeinbyt": [5, 6], "pitch2d": [5, 6], "pitchinbyt": [5, 6], "breadth": 5, "bilinear": 5, "approxim": [5, 7], "anisotrop": [5, 6], "upper": [5, 6], "bc": [5, 6], "cutexobjectdestroi": [5, 6], "cudacreatetextureobject": [5, 6], "texobject": [5, 6], "cudadestroytextureobject": [5, 6], "cutexobjectgetresourcedesc": [5, 6], "cudagettextureobjectresourcedesc": [5, 6], "cutexobjectgettexturedesc": [5, 6], "cudagettextureobjecttexturedesc": [5, 6], "cutexobjectgetresourceviewdesc": [5, 6], "cudagettextureobjectresourceviewdesc": [5, 6], "cusurfobjectcr": [5, 6], "psurfobject": [5, 6], "cusurfobjectdestroi": [5, 6], "cudacreatesurfaceobject": [5, 6], "surfobject": [5, 6], "cudadestroysurfaceobject": [5, 6], "cusurfobjectgetresourcedesc": [5, 6], "cudagetsurfaceobjectresourcedesc": [5, 6], "cutensormapencodetil": 5, "tensordatatyp": 5, "tensorrank": 5, "globaladdress": 5, "globaldim": 5, "globalstrid": 5, "boxdim": 5, "cuuint32_t": 5, "elementstrid": 5, "l2promot": 5, "oobfil": 5, "tma": 5, "tensormap": 5, "stride": 5, "travers": 5, "elementsizeinbyt": 5, "ceil": 5, "th": 5, "nc": 5, "8hwc8": 5, "c8": 5, "16hwc16": 5, "c16": 5, "box": 5, "inner": 5, "multipli": [5, 7], "organ": 5, 
"problem": [5, 8], "shuffl": 5, "granur": 5, "dram": 5, "nan": 5, "elment": 5, "cutensormapencodeim2col": 5, "cutensormapreplaceaddress": 5, "pixelboxlowercorn": 5, "pixelboxuppercorn": 5, "channelsperpixel": 5, "pixelspercolumn": 5, "im2col": 5, "d": [5, 6, 7], "left": [5, 6], "front": [5, 7], "corner": [5, 6], "32767": 5, "bottom": 5, "dhw": 5, "pixel": 5, "peerdev": 5, "canaccessp": [5, 6], "cudadevicecanaccessp": [5, 6], "peercontext": 5, "unidirect": [5, 6], "symmetr": [5, 6], "cudadeviceenablepeeraccess": [5, 6], "cudadevicedisablepeeraccess": [5, 6], "cudevicegetp2pattribut": [5, 6], "cudaarrai": [5, 6], "cudadevicegetp2pattribut": [5, 6], "cugraphicsunregisterresourc": [5, 6], "cugraphicsd3d9registerresourc": 5, "cugraphicsd3d10registerresourc": 5, "cugraphicsd3d11registerresourc": 5, "cugraphicsglregisterbuff": [5, 6, 12], "cugraphicsglregisterimag": [5, 6, 12], "cudagraphicsunregisterresourc": [5, 6], "cugraphicssubresourcegetmappedarrai": [5, 6], "arrayindex": [5, 6], "parrai": 5, "cugraphicsresourcegetmappedpoint": 5, "cudagraphicssubresourcegetmappedarrai": [5, 6], "cugraphicsresourcegetmappedmipmappedarrai": [5, 6], "pmipmappedarrai": 5, "cudagraphicsresourcegetmappedmipmappedarrai": [5, 6], "pdevptr": 5, "ppointer": 5, "cugraphicsresourcesetmapflag": [5, 6], "cu_graphics_map_resource_flags_readonli": 5, "cu_graphics_map_resource_flags_writediscard": 5, "cugraphicsmapresourc": [5, 6], "cudagraphicsresourcesetmapflag": [5, 6], "cugraphicsunmapresourc": [5, 6], "cudagraphicsmapresourc": [5, 6], "cudagraphicsunmapresourc": [5, 6], "cudavers": [5, 6], "pfn": [5, 6], "11020": [5, 6], "typedef": [5, 6], "pick": [5, 6], "cudatypedef": [5, 6], "abi": [5, 6, 7], "symbolstatu": 5, "cuda_api_per_thread_default_stream": [5, 6], "cumemalloc_v2": [5, 6], "_v2": 5, "variant": 5, "cudagetdriverentrypoint": [5, 6], "cucoredumpset": 5, "cu_coredump_enable_on_except": 5, "cu_coredump_trigger_host": 5, "cu_coredump_lightweight": 5, "cu_coredump_fil": 5, "cu_coredump_pip": 5, "cu_coredump_generation_flag": 5, "cu_coredump_max": 5, "cucoredumpgenerationflag": 5, "cu_coredump_default_flag": 5, "cu_coredump_skip_nonrelocated_elf_imag": 5, "cu_coredump_skip_global_memori": 5, "cu_coredump_skip_shared_memori": 5, "cu_coredump_skip_local_memori": 5, "cu_coredump_skip_abort": 5, "cu_coredump_skip_constbank_memori": 5, "cu_coredump_lightweight_flag": 5, "cucoredumpgetattribut": 5, "bool": 5, "abort": 5, "elf": 5, "1023": 5, "hostnam": 5, "pid": 5, "monitor": 5, "corepip": 5, "bitwis": 5, "itself": [5, 6], "scale": 5, "equiavl": 5, "goal": [5, 8, 9], "better": [5, 6], "cucoredumpgetattributeglob": 5, "decis": 5, "basi": 5, "manipul": 5, "tradit": 5, "abil": 5, "spatial": 5, "provis": 5, "main": 5, "cudevicegetdevresourc": 5, "todai": [5, 9], "cudevsmresourcesplitbycount": 5, "cudevresourcegeneratedesc": 5, "cu_dev_resource_type_sm": 5, "mincount": 5, "guidelin": 5, "tradeoff": 5, "finer": 5, "hw": 5, "cuda_device_max_connect": 5, "workload": 5, "cuda_mps_active_thread_percentag": 5, "cudevsmresource_st": 5, "smcount": 5, "cudevresource_st": 5, "dictat": 5, "cudevresourcetyp": 5, "_internal_pad": 5, "cudevsmresourc": 5, "_overs": 5, "cudevresourc": 5, "cugreenctxcreate_flag": 5, "cu_green_ctx_default_stream": 5, "cudevsmresourcesplit_flag": 5, "cu_dev_sm_resource_split_ignore_sm_coschedul": 5, "cu_dev_sm_resource_split_max_potential_cluster_s": 5, "cu_dev_resource_type_invalid": 5, "cudevresourcedesc": 5, "encapsul": 5, "phctx": 5, "heavi": 5, "deiniti": [5, 6], "cugreenctxdestroi": 5, "pcontext": 5, 
"cuctxgetdevresourc": 5, "cugreenctxgetdevresourc": 5, "nbgroup": 5, "input_": 5, "useflag": 5, "adher": 5, "simul": 5, "divid": 5, "remaind": 5, "Its": 5, "carefulli": 5, "plan": 5, "discourag": [5, 6], "hierarchi": 5, "abid": 5, "cleanli": 5, "ommit": 5, "nbresourc": 5, "phdesc": 5, "came": 5, "custreamgetgreenctx": 5, "greenctx": 5, "resource_abi_vers": 5, "resource_abi_external_byt": 5, "cugraphicseglregisterimag": [5, 6, 12], "eglimagekhr": [5, 6], "pcudaresourc": [5, 6], "cugraphicsresourcegetmappedeglfram": [5, 6, 12], "accomplish": [5, 6], "glfinish": [5, 6], "command": [5, 6, 9], "glcontext": [5, 6], "acces": [5, 6], "eglimag": [5, 6], "cudagraphicseglregisterimag": [5, 6, 12], "cueglstreamconsumerconnect": [5, 6, 12], "eglstream": [5, 6], "eglstreamkhr": [5, 6], "conn": [5, 6], "cueglstreamconsumerdisconnect": [5, 6, 12], "cueglstreamconsumerreleasefram": [5, 6, 12], "cudaeglstreamconsumerconnect": [5, 6, 12], "cueglstreamconsumerconnectwithflag": [5, 6, 12], "cudaeglstreamconsumerconnectwithflag": [5, 6, 12], "disconnect": [5, 6], "conect": [5, 6], "cudaeglstreamconsumerdisconnect": [5, 6, 12], "pstream": [5, 6], "egl_support_reuse_nv": 5, "egl_fals": 5, "egl_tru": 5, "usec": [5, 6], "cudaeglstreamconsumeracquirefram": [5, 6, 12], "cudaeglstreamconsumerreleasefram": [5, 6, 12], "cueglstreamproducerconnect": [5, 6, 12], "eglint": [5, 6], "cueglstreamproducerdisconnect": [5, 6, 12], "cueglstreamproducerpresentfram": [5, 6, 12], "cudaeglstreamproducerconnect": [5, 6, 12], "cudaeglstreamproducerdisconnect": [5, 6, 12], "proucer": [5, 6], "cueglstreamproducerreturnfram": [5, 6, 12], "cudaeglstreamproducerpresentfram": [5, 6, 12], "retri": [5, 6, 9], "cudaeglstreamproducerreturnfram": [5, 6, 12], "cueventcreatefromeglsync": [5, 12], "eglsync": [5, 6], "eglsynckhr": [5, 6], "timingdata": [5, 6], "agnost": [5, 6], "cugldevicelist": 5, "cu_gl_device_list_al": 5, "cu_gl_device_list_current_fram": 5, "render": [5, 6], "cu_gl_device_list_next_fram": 5, "gluint": [5, 6], "cudagraphicsglregisterbuff": [5, 6, 12], "renderbuff": [5, 6], "gl_texture_2d": [5, 6], "gl_texture_rectangl": [5, 6], "gl_texture_cube_map": [5, 6], "gl_texture_3d": [5, 6], "gl_texture_2d_arrai": [5, 6], "gl_renderbuff": [5, 6], "abbrevi": [5, 6], "gl_r": [5, 6], "gl_rg": [5, 6], "expand": [5, 6], "gl_r8": [5, 6], "gl_r16": [5, 6], "gl_rg8": [5, 6], "gl_rg16": [5, 6], "gl_red": [5, 6], "gl_rgba": [5, 6], "gl_lumin": [5, 6], "gl_alpha": [5, 6], "gl_luminance_alpha": [5, 6], "gl_intens": [5, 6], "16f": [5, 6], "32f": [5, 6], "8ui": [5, 6], "16ui": [5, 6], "32ui": [5, 6], "8i": [5, 6], "16i": [5, 6], "32i": [5, 6], "16f_arb": [5, 6], "32f_arb": [5, 6], "8ui_ext": [5, 6], "16ui_ext": [5, 6], "32ui_ext": [5, 6], "8i_ext": [5, 6], "16i_ext": [5, 6], "32i_ext": [5, 6], "multisampl": [5, 6], "glenum": [5, 6], "cudagraphicsglregisterimag": [5, 6, 12], "cuglgetdevic": [5, 6, 12], "cudadevicecount": [5, 6], "devicelist": [5, 6], "pcudadevicecount": [5, 6], "pcudadevic": [5, 6], "predict": 5, "cudaglgetdevic": [5, 6, 12], "mac": [5, 6], "cuprofilerstart": [5, 6, 12], "cuprofilerstop": [5, 6, 12], "cuprofileriniti": [5, 12], "cudaprofilerstart": [5, 6, 12], "cudaprofilerstop": [5, 6, 12], "cuvdpaugetdevic": [5, 6, 12], "vdpdevic": [5, 6], "vdpgetprocaddress": [5, 6], "pdevic": [5, 6], "cuvdpauctxcr": [5, 12], "cugraphicsvdpauregistervideosurfac": [5, 6, 12], "cugraphicsvdpauregisteroutputsurfac": [5, 6, 12], "cudavdpaugetdevic": [5, 6, 12], "facil": 5, "vdpsurfac": [5, 6], "vdpvideosurfac": [5, 6], "shown": 5, 
"cudagraphicsvdpauregistervideosurfac": [5, 6, 12], "vdpoutputsurfac": [5, 6], "cudagraphicsvdpauregisteroutputsurfac": [5, 6, 12], "cudasuccess": [6, 13], "cudaerror_t": [6, 7, 13], "impl_priv": 6, "cudaarray_t": 6, "cudamipmappedarray_t": 6, "cudapitchedptr": 6, "cudatextureobject_t": [6, 7], "cudasurfaceobject_t": 6, "texturerefer": 6, "surfacerefer": 6, "cudaexternalmemory_t": 6, "cudaexternalsemaphore_t": 6, "cudagraphicsresource_t": 6, "reiniti": 6, "cudamallocfrompoolasync": 6, "rt": 6, "cudadevicescheduleblockingsync": 6, "cudalimit": 6, "cudalimitstacks": 6, "cudalimitprintffifos": 6, "cudaerrorinvalidvalu": 6, "cudalimitmallocheaps": 6, "cudalimitdevruntimesyncdepth": 6, "cudaerrormemoryalloc": 6, "cudaerrorunsupportedlimit": 6, "cudalimitdevruntimependinglaunchcount": 6, "cudalimitmaxl2fetchgranular": 6, "cudalimitpersistingl2caches": 6, "isssu": 6, "cudachannelformatdesc": 6, "fmtdesc": 6, "pcacheconfig": 6, "cudafunccacheprefernon": 6, "cudafunccacheprefershar": 6, "cudafunccachepreferl1": 6, "cudafunccachepreferequ": 6, "cudafunccach": 6, "cacheconfig": 6, "cudaerrorinvaliddevic": 6, "cudaeventinterprocess": 6, "cudaeventdisabletim": 6, "cudadevattripceventsupport": 6, "cudaerrorinvalidresourcehandl": 6, "cudaerrormapbufferobjectfail": 6, "cudaerrornotsupport": 6, "cudaipceventhandle_t": 6, "cudaipceventhandl": 6, "cudaerrordeviceuniniti": 6, "cudaipcmemhandle_t": 6, "cudaipcmemhandl": 6, "cudaipcmemlazyenablepeeraccess": 6, "cudaerrortoomanyp": 6, "returnd": 6, "cudadeviceflushgpudirectrdmawrit": 6, "cudaflushgpudirectrdmawritestarget": 6, "cudaflushgpudirectrdmawritesscop": 6, "cudadevattrgpudirectrdmawritesord": 6, "cudadevattrgpudirectrdmaflushwritesopt": 6, "cudadeviceregisterasyncnotif": 6, "cudadeviceunregisterasyncnotif": 6, "cudaerrornotpermit": 6, "cudaasynccallback": 6, "cudaerrorunknown": 6, "cudaasynccallbackhandle_t": 6, "cudachoosedevic": 6, "cudainitdevic": 6, "cudadeviceprop": 6, "totalglobalmem": 6, "warpsiz": 6, "totalconstmem": 6, "revis": 6, "texturepitchalign": 6, "deviceoverlap": 6, "asyncenginecount": 6, "multiprocessorcount": 6, "kernelexectimeouten": 6, "motherboard": 6, "card": 6, "canmaphostmemori": 6, "computemod": 6, "cudacomputemodedefault": 6, "cudacomputemodeprohibit": 6, "cudacomputemodeexclusiveprocess": 6, "occupi": 6, "cudaerrordevicesunavail": 6, "maxtexture1d": 6, "maxtexture1dmipmap": 6, "maxtexture1dlinear": 6, "maxtexture2d": 6, "maxtexture2dmipmap": 6, "maxtexture2dlinear": 6, "maxtexture2dgath": 6, "maxtexture3d": 6, "maxtexture3dalt": 6, "maxtexturecubemap": 6, "maxtexture1dlay": 6, "maxtexture2dlay": 6, "maxtexturecubemaplay": 6, "maxsurface1d": 6, "maxsurface2d": 6, "maxsurface3d": 6, "maxsurface1dlay": 6, "maxsurface2dlay": 6, "maxsurfacecubemap": 6, "maxsurfacecubemaplay": 6, "surfacealign": 6, "concurrentkernel": 6, "eccen": 6, "pcideviceid": 6, "sometim": 6, "pcidomainid": 6, "tccdriver": 6, "unifiedaddress": 6, "memoryclockr": 6, "memorybuswidth": 6, "l2caches": 6, "persistingl2cachemaxs": 6, "maxthreadspermultiprocessor": 6, "streamprioritiessupport": 6, "globall1cachesupport": 6, "locall1cachesupport": 6, "sharedmempermultiprocessor": 6, "regspermultiprocessor": 6, "managedmemori": 6, "ismultigpuboard": 6, "gemini": 6, "multigpuboardgroupid": 6, "hostnativeatomicsupport": 6, "singletodoubleprecisionperfratio": 6, "pageablememoryaccess": 6, "concurrentmanagedaccess": 6, "computepreemptionsupport": 6, "canusehostpointerforregisteredmem": 6, "cooperativelaunch": 6, "cooperativemultidevicelaunch": 6, "sharedmemperblockoptin": 
6, "pageablememoryaccessuseshostpaget": 6, "directmanagedmemaccessfromhost": 6, "maxblockspermultiprocessor": 6, "accesspolicymaxwindows": 6, "reservedsharedmemperblock": 6, "hostregistersupport": 6, "sparsecudaarraysupport": 6, "hostregisterreadonlysupport": 6, "cudahostregisterreadonli": 6, "timelinesemaphoreinteropsupport": 6, "memorypoolssupport": 6, "cudamempool": 6, "gpudirectrdmasupport": 6, "gpudirectrdmaflushwritesopt": 6, "cudaflushgpudirectrdmawritesopt": 6, "gpudirectrdmawritesord": 6, "cudagpudirectrdmawritesord": 6, "memorypoolsupportedhandletyp": 6, "deferredmappingcudaarraysupport": 6, "ipceventsupport": 6, "unifiedfunctionpoint": 6, "cudadeviceattr": 6, "cudadevattrmaxthreadsperblock": 6, "cudadevattrmaxblockdimx": 6, "cudadevattrmaxblockdimi": 6, "cudadevattrmaxblockdimz": 6, "cudadevattrmaxgriddimx": 6, "cudadevattrmaxgriddimi": 6, "cudadevattrmaxgriddimz": 6, "cudadevattrmaxsharedmemoryperblock": 6, "cudadevattrtotalconstantmemori": 6, "cudadevattrwarps": 6, "cudadevattrmaxpitch": 6, "cudadevattrmaxtexture1dwidth": 6, "cudadevattrmaxtexture1dlinearwidth": 6, "cudadevattrmaxtexture1dmipmappedwidth": 6, "cudadevattrmaxtexture2dwidth": 6, "cudadevattrmaxtexture2dheight": 6, "cudadevattrmaxtexture2dlinearwidth": 6, "cudadevattrmaxtexture2dlinearheight": 6, "cudadevattrmaxtexture2dlinearpitch": 6, "cudadevattrmaxtexture2dmipmappedwidth": 6, "cudadevattrmaxtexture2dmipmappedheight": 6, "cudadevattrmaxtexture3dwidth": 6, "cudadevattrmaxtexture3dheight": 6, "cudadevattrmaxtexture3ddepth": 6, "cudadevattrmaxtexture3dwidthalt": 6, "cudadevattrmaxtexture3dheightalt": 6, "cudadevattrmaxtexture3ddepthalt": 6, "cudadevattrmaxtexturecubemapwidth": 6, "cudadevattrmaxtexture1dlayeredwidth": 6, "cudadevattrmaxtexture1dlayeredlay": 6, "cudadevattrmaxtexture2dlayeredwidth": 6, "cudadevattrmaxtexture2dlayeredheight": 6, "cudadevattrmaxtexture2dlayeredlay": 6, "cudadevattrmaxtexturecubemaplayeredwidth": 6, "cudadevattrmaxtexturecubemaplayeredlay": 6, "cudadevattrmaxsurface1dwidth": 6, "cudadevattrmaxsurface2dwidth": 6, "cudadevattrmaxsurface2dheight": 6, "cudadevattrmaxsurface3dwidth": 6, "cudadevattrmaxsurface3dheight": 6, "cudadevattrmaxsurface3ddepth": 6, "cudadevattrmaxsurface1dlayeredwidth": 6, "cudadevattrmaxsurface1dlayeredlay": 6, "cudadevattrmaxsurface2dlayeredwidth": 6, "cudadevattrmaxsurface2dlayeredheight": 6, "cudadevattrmaxsurface2dlayeredlay": 6, "cudadevattrmaxsurfacecubemapwidth": 6, "cudadevattrmaxsurfacecubemaplayeredwidth": 6, "cudadevattrmaxsurfacecubemaplayeredlay": 6, "cudadevattrmaxregistersperblock": 6, "cudadevattrclockr": 6, "cudadevattrtexturealign": 6, "cudadevattrtexturepitchalign": 6, "cudadevattrgpuoverlap": 6, "cudadevattrmultiprocessorcount": 6, "cudadevattrkernelexectimeout": 6, "cudadevattrintegr": 6, "cudadevattrcanmaphostmemori": 6, "cudadevattrcomputemod": 6, "cudadevattrconcurrentkernel": 6, "cudadevattreccen": 6, "cudadevattrpcibusid": 6, "cudadevattrpcideviceid": 6, "cudadevattrtccdriv": 6, "cudadevattrmemoryclockr": 6, "cudadevattrglobalmemorybuswidth": 6, "cudadevattrl2caches": 6, "cudadevattrmaxthreadspermultiprocessor": 6, "cudadevattrunifiedaddress": 6, "cudadevattrcomputecapabilitymajor": 6, "cudadevattrcomputecapabilityminor": 6, "cudadevattrstreamprioritiessupport": 6, "cudadevattrgloball1cachesupport": 6, "cudadevattrlocall1cachesupport": 6, "cudadevattrmaxsharedmemorypermultiprocessor": 6, "cudadevattrmaxregisterspermultiprocessor": 6, "cudadevattrmanagedmemori": 6, "cudadevattrismultigpuboard": 6, "cudadevattrmultigpuboardgroupid": 6, 
"cudadevattrhostnativeatomicsupport": 6, "cudadevattrsingletodoubleprecisionperfratio": 6, "cudadevattrpageablememoryaccess": 6, "cudadevattrconcurrentmanagedaccess": 6, "cudadevattrcomputepreemptionsupport": 6, "cudadevattrcanusehostpointerforregisteredmem": 6, "cudadevattrcooperativelaunch": 6, "cudadevattrcooperativemultidevicelaunch": 6, "cudadevattrcanflushremotewrit": 6, "cudadevattrhostregistersupport": 6, "cudadevattrpageablememoryaccessuseshostpaget": 6, "cudadevattrdirectmanagedmemaccessfromhost": 6, "cudadevattrmaxsharedmemoryperblockoptin": 6, "cudadevattrmaxblockspermultiprocessor": 6, "cudadevattrmaxpersistingl2caches": 6, "cudadevattrmaxaccesspolicywindows": 6, "cudadevattrreservedsharedmemoryperblock": 6, "cudadevattrsparsecudaarraysupport": 6, "cudadevattrhostregisterreadonlysupport": 6, "cudadevattrmemorypoolssupport": 6, "cudadevattrgpudirectrdmasupport": 6, "cudadevattrmemorypoolsupportedhandletyp": 6, "cudadevattrdeferredmappingcudaarraysupport": 6, "cudadevattrnumaconfig": 6, "cudadevicenumaconfig": 6, "cudadevattrnumaid": 6, "cudadevicegetdefaultmempool": 6, "cudamempooltrimto": 6, "cudamempoolgetattribut": 6, "cudadevicesetmempool": 6, "cudamempoolsetattribut": 6, "cudamempoolsetaccess": 6, "cudadevicegetmempool": 6, "cudamempoolcr": 6, "cudamempooldestroi": 6, "cudadevicegetnvscisyncattribut": 6, "cudaerrorinvalidhandl": 6, "cudanvscisyncattrsign": 6, "cudanvscisyncattrwait": 6, "cudadevicegetproperti": 6, "cudaimportexternalsemaphor": 6, "cudadestroyexternalsemaphor": 6, "cudasignalexternalsemaphoresasync": 6, "cudawaitexternalsemaphoresasync": 6, "cudadevicep2pattr": 6, "cudadevp2pattrperformancerank": 6, "cudadevp2pattraccesssupport": 6, "cudadevp2pattrnativeatomicsupport": 6, "cudadevp2pattrcudaarrayaccesssupport": 6, "criteria": 6, "deviceflag": 6, "cudainitdeviceflagsarevalid": 6, "cudaerrordeviceunavail": 6, "cudadevicescheduleauto": 6, "cudadeviceschedulespin": 6, "cudadevicescheduleyield": 6, "cudadeviceblockingsync": 6, "cudadevicemaphost": 6, "cudadevicelmemresizetomax": 6, "cudadevicesyncmemop": 6, "cudasetvaliddevic": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "seen": 6, "inbetween": 6, "contrari": 6, "assumpt": 6, "cudaerrormissingconfigur": 6, "cudaerrorinitializationerror": 6, "cudaerrorlaunchfailur": 6, "cudaerrorlaunchtimeout": 6, "cudaerrorlaunchoutofresourc": 6, "cudaerrorinvaliddevicefunct": 6, "cudaerrorinvalidconfigur": 6, "cudaerrorinvalidpitchvalu": 6, "cudaerrorinvalidsymbol": 6, "cudaerrorunmapbufferobjectfail": 6, "cudaerrorinvaliddevicepoint": 6, "cudaerrorinvalidtextur": 6, "cudaerrorinvalidtexturebind": 6, "cudaerrorinvalidchanneldescriptor": 6, "cudaerrorinvalidmemcpydirect": 6, "cudaerrorinvalidfilterset": 6, "cudaerrorinvalidnormset": 6, "cudaerrorinsufficientdriv": 6, "cudaerrornodevic": 6, "cudaerrorsetonactiveprocess": 6, "cudaerrorstartupfailur": 6, "cudaerrorinvalidptx": 6, "cudaerrorunsupportedptxvers": 6, "cudaerrornokernelimagefordevic": 6, "cudaerrorjitcompilernotfound": 6, "cudaerrorjitcompilationdis": 6, "cudapeekatlasterror": 6, "cudaerror": 6, "unrecogn": [6, 7], "cudastreamcallback_t": 6, "cudastreamdefault": 6, "cudastreamnonblock": [6, 13], "cudactxresetpersistingl2cach": 6, "cudaaccesspolicywindow": 6, "cudastreamcopyattribut": 6, "cudastreamattrid": 6, "cudastreamgetattribut": 6, "cudastreamattrvalu": 6, "cudastreamsetattribut": 6, "cudaeventwaitdefault": 6, "cudaeventwaitextern": 6, "cudalaunchhostfunc": 6, "cudastreambegincaptur": 6, "cudastreamendcaptur": 6, 
"cudaerrornotreadi": 6, "cudamemattachglob": 6, "cudamemattachhost": 6, "cudamemattachsingl": 6, "cudastreamcapturemod": 6, "cudastreamiscaptur": 6, "cudastreamgetcaptureinfo": 6, "cudastreamcapturemoderelax": 6, "cudathreadexchangestreamcapturemod": 6, "cudastreambegincapturetograph": 6, "cudagraphedgedata": 6, "cudastreamcapturemodeglob": 6, "cudastreamcapturemodethreadloc": 6, "pgraph": 6, "cudaerrorstreamcapturewrongthread": 6, "cudagraphdestroi": 6, "pcapturestatu": 6, "cudastreamcapturestatusnon": 6, "cudastreamcapturestatusact": 6, "cudastreamcapturestatusinvalid": 6, "cudaerrorstreamcaptureimplicit": 6, "cudastreamcapturestatu": 6, "cudaerrorstreamcaptureunjoin": 6, "cudastreamgetcaptureinfo_v3": 6, "cudastreamupdatecapturedepend": 6, "cudaerrorlossyqueri": 6, "cudastreamaddcapturedepend": 6, "cudastreamsetcapturedepend": 6, "cudaerrorillegalst": 6, "cudastreamupdatecapturedependencies_v2": 6, "cudaeventdefault": 6, "cudaeventblockingsync": 6, "cudaeventrecordwithflag": 6, "cudaeventrecorddefault": 6, "cudaeventrecordextern": 6, "cudaimportexternalmemori": 6, "cudaexternalmemoryhandledesc": 6, "cudaexternalmemoryhandletyp": 6, "cudaexternalmemoryhandletypeopaquefd": 6, "cudaexternalmemoryhandletypeopaquewin32": 6, "cudaexternalmemoryhandletypeopaquewin32kmt": 6, "cudaexternalmemoryhandletyped3d12heap": 6, "cudaexternalmemoryhandletyped3d12resourc": 6, "cudaexternalmemoryhandletyped3d11resourc": 6, "cudaexternalmemoryhandletyped3d11resourcekmt": 6, "cudaexternalmemoryhandletypenvscibuf": 6, "approprri": 6, "cudaexternalsemaphorewaitskipnvscibufmemsync": 6, "cudaexternalsemaphoresignalskipnvscibufmemsync": 6, "cudaexternalmemoryded": 6, "cudaerroroperatingsystem": 6, "cudadestroyexternalmemori": 6, "cudaexternalmemorygetmappedbuff": 6, "cudaexternalmemorygetmappedmipmappedarrai": 6, "cudaexternalmemorybufferdesc": 6, "cudaexternalmemorymipmappedarraydesc": 6, "formatdesc": 6, "cudaarraycolorattach": 6, "cudaexternalsemaphorehandledesc": 6, "cudaexternalsemaphorehandletyp": 6, "cudaexternalsemaphorehandletypeopaquefd": 6, "cudaexternalsemaphorehandletypeopaquewin32": 6, "cudaexternalsemaphorehandletypeopaquewin32kmt": 6, "cudaexternalsemaphorehandletyped3d12f": 6, "cudaexternalsemaphorehandletyped3d11f": 6, "cudaexternalsemaphorehandletypenvscisync": 6, "cudaexternalsemaphorehandletypekeyedmutex": 6, "cudaexternalsemaphorehandletypekeyedmutexkmt": 6, "cudaexternalsemaphorehandletypetimelinesemaphorefd": 6, "cudaexternalsemaphorehandletypetimelinesemaphorewin32": 6, "cudaexternalsemaphoresignalparam": 6, "cudaexternalsemaphorewaitparam": 6, "cudaerrortimeout": 6, "overload": 6, "templat": 6, "func_nam": 6, "template_arg_0": 6, "template_arg_n": 6, "cudafuncattribut": 6, "incorrect": 6, "cudafuncattributemaxdynamicsharedmemorys": 6, "sharedsizebyt": 6, "cudafuncattributepreferredsharedmemorycarveout": 6, "cudafuncattributerequiredclusterwidth": 6, "cudafuncattributerequiredclusterheight": 6, "cudafuncattributerequiredclusterdepth": 6, "cudafuncattributenonportableclustersizeallow": 6, "cudafuncattributeclusterschedulingpolicyprefer": 6, "constrast": 6, "cudahostfn_t": 6, "besid": 6, "cudaoccupancymaxpotentialblocksizevariablesmem": 6, "cudaoccupancyavailabledynamicsmemperblock": 6, "cudaoccupancymaxpotentialblocksizevariablesmemwithflag": 6, "cudaoccupancydefault": 6, "cudaoccupancydisablecachingoverrid": 6, "cudamemadvis": 6, "cudamemprefetchasync": 6, "cudamalloc3d": 6, "cudachannelformatkind": 6, "cudachannelformatkindsign": 6, "cudachannelformatkindunsign": 6, 
"cudachannelformatkindfloat": 6, "cudaarraydefault": 6, "cudaarraysurfaceloadstor": 6, "cudaarraytexturegath": 6, "cudaarrayspars": 6, "cudaarraydeferredmap": 6, "cudafreeasync": 6, "cudaerrorvalu": 6, "hostptr": 6, "mipmappedarrai": 6, "cudahostallocdefault": 6, "emul": 6, "cudahostallocport": 6, "cudahostallocmap": 6, "cudahostallocwritecombin": 6, "phost": 6, "cudahostregisterdefault": 6, "cudahostregisterport": 6, "cudahostregistermap": 6, "cudahostregisteriomemori": 6, "cudadeviceattrreadonlyhostregistersupport": 6, "cudamaphost": 6, "cudaerrorhostmemoryalreadyregist": 6, "cudaerrorhostmemorynotregist": 6, "cudaext": 6, "pitcheddevptr": 6, "xsize": 6, "ysize": 6, "highli": 6, "cudamemset3d": 6, "make_cudapitchedptr": 6, "make_cudaext": 6, "cudaarraylay": 6, "cudaarraycubemap": 6, "cudagraphicscubefac": 6, "levelarrai": 6, "cudamipmappedarray_const_t": 6, "cudamemcpy3dparm": 6, "betwen": 6, "srcptr": 6, "dstptr": 6, "srcpo": 6, "dstpo": 6, "term": [6, 16], "cudamemcpyhosttohost": 6, "cudamemcpyhosttodevic": 6, "cudamemcpydevicetohost": 6, "cudamemcpydevicetodevic": 6, "cudamemcpydefault": 6, "disregard": 6, "implic": 6, "silent": 6, "fact": 6, "cudamemcpy2darraytoarrai": 6, "make_cudapo": 6, "cudamemcpy3dpeerparm": 6, "lie": 6, "cudachannelformatkindnv12": 6, "cudaarraygetmemoryrequir": 6, "cudaarraymemoryrequir": 6, "cudamipmappedarraygetmemoryrequir": 6, "cudaarraygetsparseproperti": 6, "cudaarraysparsepropertiessinglemiptail": 6, "cudamipmappedarraygetlevel": 6, "cudamipmappedarraygetsparseproperti": 6, "cudaarraysparseproperti": 6, "cudamemcpykind": 6, "ote_sync": 6, "dpitch": 6, "spitch": 6, "woffset": 6, "hoffset": 6, "cudaarray_const_t": 6, "woffsetdst": 6, "hoffsetdst": 6, "woffsetsrc": 6, "hoffsetsrc": 6, "cudamemset3dasync": 6, "narrow": 6, "secondarili": 6, "shorter": 6, "cudacpudeviceid": 6, "cudamemadvisesetreadmostli": 6, "cudamemadvisesetpreferredloc": 6, "cudamemadvisesetaccessedbi": 6, "cudamemadvise_v2": 6, "cudamemprefetchasync_v2": 6, "cudamemloc": 6, "cudamemlocationtypedevic": 6, "cudamemlocationtypehost": 6, "cudamemlocationtypehostnuma": 6, "cudamemlocationtypehostnumacurr": 6, "cudamemoryadvis": 6, "cudamemadviceunsetreadmostli": 6, "cudamemadvicereadmostli": 6, "cudamemadviseunsetpreferredloc": 6, "cudamemadvicesetaccessedbi": 6, "cudamemadviseunsetaccessedbi": 6, "cudamemrangeattribut": 6, "cudamemrangeattributereadmostli": 6, "cudamemrangeattributepreferredloc": 6, "cudainvaliddeviceid": 6, "cudamemrangeattributeaccessedbi": 6, "cudamemrangeattributelastprefetchloc": 6, "applicaton": 6, "cudamemrangeattributepreferredlocationtyp": 6, "cudamemlocationtyp": 6, "cudamemlocationtypeinvalid": 6, "cudamemrangeattributepreferredlocationid": 6, "cudamemrangeattributelastprefetchlocationtyp": 6, "cudamemrangeattributelastprefetchlocationid": 6, "xsz": 6, "ysz": 6, "cudapo": 6, "cudaerroroutofmemori": 6, "cudamempoolattr": 6, "cudamempoolattrreleasethreshold": 6, "cudamempoolreusefolloweventdepend": 6, "cudamempoolreuseallowopportunist": 6, "cudamempoolreuseallowinternaldepend": 6, "cudamempoolattrreservedmemhigh": 6, "cudamempoolattrusedmemhigh": 6, "cudamempoolattrreservedmemcurr": 6, "cudamempoolattrusedmemcurr": 6, "desclist": 6, "cudamemaccessdesc": 6, "cudamempoolgetaccess": 6, "cudamemaccessflag": 6, "cudamempoolprop": 6, "cudamemhandletypefabr": 6, "cudamempoolexporttoshareablehandl": 6, "cudamemhandletypenon": 6, "cudamemallocationhandletyp": 6, "cudamempoolimportfromshareablehandl": 6, "cudamempoolexportpoint": 6, "cudamempoolimportpoint": 6, 
"cudamempoolptrexportdata": 6, "exportdata": 6, "cudapointerattribut": 6, "cudamemorytypeunregist": 6, "cudamemorytypehost": 6, "cudamemorytypedevic": 6, "cudamemorytypemanag": 6, "vanish": 6, "devicepoint": 6, "alia": 6, "hostpoint": 6, "peerdevic": 6, "cudaerrorpeeraccessalreadyen": 6, "cudaerrorpeeraccessnoten": 6, "cudagldevicelist": 6, "cudagldevicelistal": 6, "cudagldevicelistcurrentfram": 6, "cudagldevicelistnextfram": 6, "cudaerrorinvalidgraphicscontext": 6, "cudagraphicsresourcegetmappedpoint": 6, "cudagraphicsregisterflagsnon": 6, "cudagraphicsregisterflagsreadonli": 6, "cudagraphicsregisterflagswritediscard": 6, "cudagraphicsregisterflagssurfaceloadstor": 6, "cudagraphicsregisterflagstexturegath": 6, "cudagraphicsresourc": 6, "cudavdpausetvdpaudevic": [6, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudagraphicsmapflagsnon": 6, "cudagraphicsmapflagsreadonli": 6, "cudagraphicsmapflagswritediscard": 6, "cudagraphicsresourcegetmappedeglfram": [6, 12], "cudaeglstreamconnect": 6, "cudaeglresourcelocationflag": 6, "cudaeglresourcelocationvidmem": 6, "cudaeglfram": 6, "cudaeglframetypepitch": 6, "cudaeglplanedesc": 6, "cudaeventcreatefromeglsync": [6, 12], "flage": 6, "cudagraphicsd3d9registerresourc": 6, "cudagraphicsd3d10registerresourc": 6, "cudagraphicsd3d11registerresourc": 6, "cudagetchanneldesc": 6, "cudacreatechanneldesc": 6, "cudaresourcedesc": 6, "cudatexturedesc": 6, "cudaresourceviewdesc": 6, "cudaresourcetypearrai": 6, "cudaresourcetypemipmappedarrai": 6, "normalizedcoord": 6, "cudaresourcetypelinear": 6, "cudaresourcetypepitch2d": 6, "cudatextureaddressmod": 6, "cudaaddressmodewrap": 6, "cudaaddressmodemirror": 6, "cudaaddressmodeclamp": 6, "cudatexturefiltermod": 6, "readmod": 6, "cudatexturereadmod": 6, "cudareadmodenormalizedfloat": 6, "cudaaddressmodebord": 6, "anistropi": 6, "disabletrilinearoptim": 6, "seamlesscubemap": 6, "cudafiltermodepoint": 6, "cudafiltermodelinear": 6, "runtimevers": 6, "sole": 6, "getlocalruntimevers": [6, 20, 25], "cudagraphcr": 6, "cudagraphaddchildgraphnod": 6, "cudagraphaddemptynod": 6, "cudagraphaddkernelnod": 6, "cudagraphaddhostnod": 6, "cudagraphaddmemcpynod": 6, "cudagraphaddmemsetnod": 6, "cudagraphinstanti": 6, "cudagraphgetnod": 6, "cudagraphgetrootnod": 6, "cudagraphgetedg": 6, "cudagraphclon": 6, "pdepend": 6, "cudakernelnodeparam": 6, "pnodeparam": 6, "pgraphnod": 6, "griddim": 6, "blockdim": [6, 9], "sharedmem": 6, "cudagraphaddnod": 6, "cudagraphkernelnodegetparam": 6, "cudagraphdestroynod": 6, "cudagraphnodesetparam": 6, "cudagraphkernelnodecopyattribut": 6, "hsrc": 6, "hdst": 6, "cudakernelnodeattrid": 6, "cudaerrorinvalidcontext": 6, "cudagraphkernelnodegetattribut": 6, "cudakernelnodeattrvalu": 6, "cudagraphkernelnodesetattribut": 6, "pcopyparam": 6, "cudagraphaddmemcpynodetosymbol": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudagraphaddmemcpynodefromsymbol": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudagraphaddmemcpynode1d": 6, "cudagraphmemcpynodegetparam": 6, "cudagraphmemcpynodesetparam": 6, "cudagraphmemcpynodesetparams1d": 6, "cudagraphmemcpynodesetparamstosymbol": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudagraphmemcpynodesetparamsfromsymbol": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudamemsetparam": 6, "pmemsetparam": 6, "cudagraphmemsetnodegetparam": 6, "cudagraphmemsetnodesetparam": 6, "cudahostnodeparam": 6, "cudagraphhostnodegetparam": 6, 
"cudagraphhostnodesetparam": 6, "cudagraphchildgraphnodegetgraph": 6, "cudagraphnodefindinclon": 6, "cudagraphaddeventrecordnod": 6, "cudagraphaddeventwaitnod": 6, "cudagrapheventrecordnodegetev": 6, "cudagrapheventrecordnodesetev": 6, "cudagrapheventwaitnodegetev": 6, "cudagrapheventwaitnodesetev": 6, "cudagraphaddexternalsemaphoressignalnod": 6, "cudaexternalsemaphoresignalnodeparam": 6, "cudagraphexternalsemaphoressignalnodegetparam": 6, "cudagraphexternalsemaphoressignalnodesetparam": 6, "cudagraphexecexternalsemaphoressignalnodesetparam": 6, "cudagraphaddexternalsemaphoreswaitnod": 6, "cudaexternalsemaphorewaitnodeparam": 6, "cudagraphexternalsemaphoreswaitnodegetparam": 6, "cudagraphexternalsemaphoreswaitnodesetparam": 6, "cudagraphexecexternalsemaphoreswaitnodesetparam": 6, "cudagraphaddmemallocnod": 6, "cudamemallocnodeparam": 6, "cudagraphaddmemfreenod": 6, "cudamemfreeasync": 6, "cudamemfre": 6, "cudagraphinstantiateflagautofreeonlaunch": 6, "cudaerrorcudartunload": 6, "cudagraphmemallocnodegetparam": 6, "cudadevicegraphmemtrim": 6, "cudadevicegetgraphmemattribut": 6, "cudadevicesetgraphmemattribut": 6, "cudagraphmemfreenodegetparam": 6, "cudagraphmemattributetyp": 6, "cudagraphmemattrusedmemcurr": 6, "cudagraphmemattrusedmemhigh": 6, "cudagraphmemattrreservedmemcurr": 6, "cudagraphmemattrreservedmemhigh": 6, "pgraphclon": 6, "originalnod": 6, "clonedgraph": 6, "pclonednod": 6, "pnode": 6, "cudagraphnodegettyp": 6, "ptype": 6, "cudagraphnodetyp": 6, "cudagraphnodegetdepend": 6, "cudagraphnodegetdependentnod": 6, "pnumrootnod": 6, "prootnod": 6, "cudagraphadddepend": 6, "cudagraphremovedepend": 6, "cudagraphgetedges_v2": 6, "losst": 6, "pnumdepend": 6, "cudagraphnodegetdependencies_v2": 6, "pnumdependentnod": 6, "pdependentnod": 6, "cudagraphnodegetdependentnodes_v2": 6, "pfrom": 6, "pto": 6, "cudagraphadddependencies_v2": 6, "cudagraphremovedependencies_v2": 6, "pgraphexec": 6, "cudagraphinstantiateflagdevicelaunch": 6, "cudagraphinstantiateflagusenodeprior": 6, "cudagraphexecdestroi": 6, "cudagraphinstantiatewithflag": 6, "cudagraphupload": 6, "cudagraphinstantiatewithparam": 6, "cudagraphinstantiateparam": 6, "cudagraphinstantiateflagupload": 6, "uploadstream": 6, "errnode_out": 6, "cudagraphinstantiateerror": 6, "cudagraphinstantiateinvalidstructur": 6, "cudagraphinstantiatenodeoperationnotsupport": 6, "cudagraphinstantiatemultipledevicesnotsupport": 6, "cudagraphinstantiatesuccess": 6, "cudagraphexecgetflag": 6, "cudagraphexeckernelnodesetparam": 6, "cudagraphexecnodesetparam": 6, "cudagraphexecmemcpynodesetparam": 6, "cudagraphexecmemsetnodesetparam": 6, "cudagraphexechostnodesetparam": 6, "cudagraphexecchildgraphnodesetparam": 6, "cudagraphexeceventrecordnodesetev": 6, "cudagraphexeceventwaitnodesetev": 6, "cudagraphexecupd": 6, "cudagraphexecmemcpynodesetparamstosymbol": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudagraphexecmemcpynodesetparamsfromsymbol": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "cudagraphexecmemcpynodesetparams1d": 6, "cudagraphnodeseten": 6, "cudagraphnodegeten": 6, "cudagraphexecupdateerrortopologychang": 6, "cudagraphexecupdateerror": 6, "cudagraphexecupdateerrornodetypechang": 6, "cudagraphexecupdateerrorfunctionchang": 6, "cudagraphexecupdateerrorunsupportedfunctionchang": 6, "cudagraphexecupdateerrorparameterschang": 6, "cudagraphexecupdateerrorattributeschang": 6, "cudagraphexecupdateerrornotsupport": 6, "cudagraphexecupdatesuccess": 6, "cudaerrorgraphexecupdatefailur": 6, 
"cudagraphexecupdateresultinfo": 6, "cudagraphdebugdotprint": 6, "cudagraphdebugdotflag": 6, "cudauserobjectcr": 6, "cudauserobjectnodestructorsync": 6, "cudauserobject_t": 6, "cudauserobjectretain": 6, "cudauserobjectreleas": 6, "cudagraphretainuserobject": 6, "cudagraphreleaseuserobject": 6, "cudagraphuserobjectmov": 6, "cudagraphnodeparam": 6, "cudagraphaddnode_v2": 6, "cudagraphnodetypememalloc": 6, "cudagraphnodetypememfre": 6, "cudagraphconditionalhandlecr": 6, "cudagraphcondassigndefault": 6, "cudagraphconditionalhandl": 6, "funcptr": 6, "driverstatu": 6, "cudadriverentrypointqueryresult": 6, "cudadriverentrypointsuccess": 6, "cudadriverentrypointsymbolnotfound": 6, "cudadriverentrypointversionnotsuffic": 6, "cudart_vers": 6, "cudaenabledefault": 6, "cudaenableperthreaddefaultstream": 6, "cudaenablelegacystream": 6, "cudagetdriverentrypointbyvers": 6, "custom": 6, "style": 6, "built": [6, 9], "relationship": 6, "perspect": 6, "synonym": 6, "tear": 6, "cudad3d9setdirect3ddevic": 6, "cudad3d10setdirect3ddevic": 6, "cudad3d11setdirect3ddevic": 6, "cudaglsetgldevic": [6, 12], "substanti": 6, "strongli": 6, "cudaerrorincompatibledrivercontext": 6, "travel": 6, "interchang": 6, "cudafunction_t": 6, "cudagetkernel": 6, "entryfuncaddr": 6, "kernelptr": 6, "cudakernel_t": 6, "cudaeglplanedesc_st": 6, "channeldesc": 6, "cudaeglframe_st": 6, "typedefstructcudaeglplanedesc_st": 6, "unsignedintwidth": 6, "unsignedintheight": 6, "unsignedintdepth": 6, "unsignedintpitch": 6, "unsignedintnumchannel": 6, "structcudachannelformatdescchanneldesc": 6, "unsignedintreserv": 6, "planedesc": 6, "cudaeglframetyp": 6, "cudaeglcolorformat": 6, "anon_struct0": 6, "cudamemcpynodeparam": 6, "cudamemsetparamsv2": 6, "cudaaccessproperti": 6, "cudahostnodeparamsv2": 6, "cudaresourcetyp": 6, "anon_union0": 6, "cudaresourceviewformat": 6, "cudamemorytyp": 6, "constsizebyt": 6, "localsizebyt": 6, "numreg": 6, "ptxversion": 6, "binaryvers": 6, "cachemodeca": 6, "maxdynamicsharedsizebyt": 6, "preferredshmemcarveout": 6, "clusterdimmustbeset": 6, "requiredclusterwidth": 6, "requiredclusterheight": 6, "requiredclusterdepth": 6, "nonportableclustersizeallow": 6, "cudamemallocationtypepin": 6, "cudamemallocationtyp": 6, "cudamemhandletypewin32": 6, "tranfer": 6, "cudamemallocnodeparamsv2": 6, "cudamemfreenodeparam": 6, "cudauuid_t": 6, "luiddevicenodemask": 6, "oppos": 6, "cudacomputemod": 6, "clusterlaunch": 6, "cudaipceventhandle_st": 6, "cudaipcmemhandle_st": 6, "cudamemfabrichandle_st": 6, "anon_union1": 6, "dim3": 6, "cudakernelnodeparamsv2": 6, "cudaexternalsemaphoresignalnodeparamsv2": 6, "cudaexternalsemaphorewaitnodeparamsv2": 6, "cudaconditionalnodeparam": 6, "cudagraphconditionalnodetyp": 6, "cudachildgraphnodeparam": 6, "cudaeventrecordnodeparam": 6, "cudaeventwaitnodeparam": 6, "cudagraphedgedata_st": 6, "cudagraphkernelnodeportdefault": 6, "cudagraphkernelnodeportprogrammat": 6, "cudagraphkernelnodeportlaunchcomplet": 6, "cudagraphdependencytyp": 6, "cudagraphinstantiateparams_st": 6, "cudagraphinstantiateresult": 6, "cudagraphexecupdateresultinfo_st": 6, "cudagraphexecupdateresult": 6, "cudagraphkernelnodeupd": 6, "cudagraphdevicenode_t": 6, "updatedata": 6, "cudagraphkernelnodefield": 6, "anon_union8": 6, "cudalaunchmemsyncdomainmap_st": 6, "cudalaunchmemsyncdomainremot": 6, "cudalaunchmemsyncdomainmap": 6, "cudalaunchattributememsyncdomainmap": 6, "cudadevattrmemsyncdomaincount": 6, "cudalaunchattributevalu": 6, "cudalaunchattribut": 6, "cudalaunchattributeaccesspolicywindow": 6, "cudalaunchattributecoop": 
6, "cudalaunchattributesynchronizationpolici": 6, "cudasynchronizationpolici": 6, "cudalaunchattributeclusterdimens": 6, "anon_struct20": 6, "cudalaunchattributeclusterschedulingpolicyprefer": 6, "cudalaunchattributeprogrammaticstreamseri": 6, "cudalaunchattributeprogrammaticev": 6, "cudalaunchattributeprior": 6, "cudalaunchattributememsyncdomain": 6, "cudalaunchattributelaunchcompletionev": 6, "anon_struct22": 6, "cudalaunchattributedeviceupdatablekernelnod": 6, "anon_struct23": 6, "cudalaunchattributepreferredsharedmemorycarveout": 6, "cudalaunchattribute_st": 6, "cudalaunchattributeid": 6, "cudaasyncnotificationinfo": 6, "cudaasyncnotificationtyp": 6, "cudaeglframetypearrai": 6, "cudaeglresourcelocationsysmem": 6, "cudaeglcolorformatyuv420planar": 6, "cudaeglcolorformatyuv420semiplanar": 6, "cudaeglcolorformatyuv422planar": 6, "cudaeglcolorformatyuv422semiplanar": 6, "cudaeglcolorformatargb": 6, "cudaeglcolorformatrgba": 6, "cudaeglcolorformatl": 6, "cudaeglcolorformatr": 6, "cudaeglcolorformatyuv444planar": 6, "cudaeglcolorformatyuv444semiplanar": 6, "cudaeglcolorformatyuyv422": 6, "cudaeglcolorformatuyvy422": 6, "cudaeglcolorformatabgr": 6, "cudaeglcolorformatbgra": 6, "cudaeglcolorformata": 6, "cudaeglcolorformatrg": 6, "cudaeglcolorformatayuv": 6, "cudaeglcolorformatyvu444semiplanar": 6, "cudaeglcolorformatyvu422semiplanar": 6, "cudaeglcolorformatyvu420semiplanar": 6, "cudaeglcolorformaty10v10u10_444semiplanar": 6, "cudaeglcolorformaty10v10u10_420semiplanar": 6, "cudaeglcolorformaty12v12u12_444semiplanar": 6, "cudaeglcolorformaty12v12u12_420semiplanar": 6, "cudaeglcolorformatvyuy_": 6, "cudaeglcolorformatuyvy_": 6, "cudaeglcolorformatyuyv_": 6, "cudaeglcolorformatyvyu_": 6, "cudaeglcolorformatyuva_": 6, "cudaeglcolorformatayuv_": 6, "cudaeglcolorformatyuv444planar_": 6, "cudaeglcolorformatyuv422planar_": 6, "cudaeglcolorformatyuv420planar_": 6, "cudaeglcolorformatyuv444semiplanar_": 6, "cudaeglcolorformatyuv422semiplanar_": 6, "cudaeglcolorformatyuv420semiplanar_": 6, "cudaeglcolorformatyvu444planar_": 6, "cudaeglcolorformatyvu422planar_": 6, "cudaeglcolorformatyvu420planar_": 6, "cudaeglcolorformatyvu444semiplanar_": 6, "cudaeglcolorformatyvu422semiplanar_": 6, "cudaeglcolorformatyvu420semiplanar_": 6, "cudaeglcolorformatbayerrggb": 6, "cudaeglcolorformatbayerbggr": 6, "cudaeglcolorformatbayergrbg": 6, "cudaeglcolorformatbayergbrg": 6, "cudaeglcolorformatbayer10rggb": 6, "cudaeglcolorformatbayer10bggr": 6, "cudaeglcolorformatbayer10grbg": 6, "cudaeglcolorformatbayer10gbrg": 6, "cudaeglcolorformatbayer12rggb": 6, "cudaeglcolorformatbayer12bggr": 6, "cudaeglcolorformatbayer12grbg": 6, "cudaeglcolorformatbayer12gbrg": 6, "cudaeglcolorformatbayer14rggb": 6, "cudaeglcolorformatbayer14bggr": 6, "cudaeglcolorformatbayer14grbg": 6, "cudaeglcolorformatbayer14gbrg": 6, "cudaeglcolorformatbayer20rggb": 6, "cudaeglcolorformatbayer20bggr": 6, "cudaeglcolorformatbayer20grbg": 6, "cudaeglcolorformatbayer20gbrg": 6, "cudaeglcolorformatyvu444planar": 6, "cudaeglcolorformatyvu422planar": 6, "cudaeglcolorformatyvu420planar": 6, "cudaeglcolorformatbayerisprggb": 6, "cudaeglcolorformatbayerispbggr": 6, "cudaeglcolorformatbayerispgrbg": 6, "cudaeglcolorformatbayerispgbrg": 6, "cudaeglcolorformatbayerbccr": 6, "cudaeglcolorformatbayerrccb": 6, "cudaeglcolorformatbayercrbc": 6, "cudaeglcolorformatbayercbrc": 6, "cudaeglcolorformatbayer10cccc": 6, "cudaeglcolorformatbayer12bccr": 6, "cudaeglcolorformatbayer12rccb": 6, "cudaeglcolorformatbayer12crbc": 6, "cudaeglcolorformatbayer12cbrc": 6, 
"cudaeglcolorformatbayer12cccc": 6, "cudaeglcolorformati": 6, "cudaeglcolorformatyuv420semiplanar_2020": 6, "cudaeglcolorformatyvu420semiplanar_2020": 6, "cudaeglcolorformatyuv420planar_2020": 6, "cudaeglcolorformatyvu420planar_2020": 6, "cudaeglcolorformatyuv420semiplanar_709": 6, "cudaeglcolorformatyvu420semiplanar_709": 6, "cudaeglcolorformatyuv420planar_709": 6, "cudaeglcolorformatyvu420planar_709": 6, "cudaeglcolorformaty10v10u10_420semiplanar_709": 6, "cudaeglcolorformaty10v10u10_420semiplanar_2020": 6, "cudaeglcolorformaty10v10u10_422semiplanar_2020": 6, "cudaeglcolorformaty10v10u10_422semiplanar": 6, "cudaeglcolorformaty10v10u10_422semiplanar_709": 6, "cudaeglcolorformaty_": 6, "cudaeglcolorformaty_709_": 6, "cudaeglcolorformaty10_": 6, "cudaeglcolorformaty10_709_": 6, "cudaeglcolorformaty12_": 6, "cudaeglcolorformaty12_709_": 6, "cudaeglcolorformatyuva": 6, "cudaeglcolorformatyvyu": 6, "cudaeglcolorformatvyui": 6, "cudaeglcolorformaty10v10u10_420semiplanar_": 6, "cudaeglcolorformaty10v10u10_420semiplanar_709_": 6, "cudaeglcolorformaty10v10u10_444semiplanar_": 6, "cudaeglcolorformaty10v10u10_444semiplanar_709_": 6, "cudaeglcolorformaty12v12u12_420semiplanar_": 6, "cudaeglcolorformaty12v12u12_420semiplanar_709_": 6, "cudaeglcolorformaty12v12u12_444semiplanar_": 6, "cudaeglcolorformaty12v12u12_444semiplanar_709_": 6, "cudaerrorprofilerdis": 6, "cudaerrorprofilernotiniti": 6, "cudaerrorprofileralreadystart": 6, "cudaerrorprofileralreadystop": 6, "cudaerrorinvalidhostpoint": 6, "cudagettexturealignmentoffset": [6, 11, 12], "cudaerroraddressofconst": 6, "forbidden": 6, "cudaerrortexturefetchfail": 6, "cudaerrortexturenotbound": 6, "cudaerrorsynchronizationerror": 6, "cudaerrormixeddeviceexecut": 6, "cudaerrornotyetimpl": 6, "cudaerrormemoryvaluetoolarg": 6, "cudaerrorstublibrari": 6, "cudaerrorcallrequiresnewerdriv": 6, "cudaerrorinvalidsurfac": 6, "cudaerrorduplicatevariablenam": 6, "cudaerrorduplicatetexturenam": 6, "cudaerrorduplicatesurfacenam": 6, "cudaconfigurecal": 6, "cudaerrorpriorlaunchfailur": 6, "cudaerrorlaunchmaxdepthexceed": 6, "cudaerrorlaunchfilescopedtex": 6, "cudaerrorlaunchfilescopedsurf": 6, "proce": 6, "proper": 6, "cudaerrordevicenotlicens": 6, "cudaerrorsoftwarevaliditynotestablish": 6, "self": 6, "startup": 6, "cudaerrorinvalidkernelimag": 6, "cudaerrorarrayismap": 6, "cudaerroralreadymap": 6, "cudaerroralreadyacquir": 6, "cudaerrornotmap": 6, "cudaerrornotmappedasarrai": 6, "cudaerrornotmappedaspoint": 6, "cudaerroreccuncorrect": 6, "cudaerrordevicealreadyinus": 6, "cudaerrorpeeraccessunsupport": 6, "cudaerrornvlinkuncorrect": 6, "cudaerrorunsupportedexecaffin": 6, "cudaerrorunsupporteddevsidesync": 6, "cudaerrorinvalidsourc": 6, "cudaerrorfilenotfound": 6, "cudaerrorsharedobjectsymbolnotfound": 6, "cudaerrorsharedobjectinitfail": 6, "cudaerrorsymbolnotfound": 6, "cudaerrorillegaladdress": 6, "cudaerrorlaunchincompatibletextur": 6, "cudaerrorcontextisdestroi": 6, "cudaerrorassert": 6, "cudaenablepeeraccess": 6, "cudaerrorhardwarestackerror": 6, "cudaerrorillegalinstruct": 6, "cudaerrormisalignedaddress": 6, "cudaerrorinvalidaddressspac": 6, "cudaerrorinvalidpc": 6, "cudaerrorcooperativelaunchtoolarg": 6, "cudaerrorsystemnotreadi": 6, "cudaerrorsystemdrivermismatch": 6, "cudaerrorcompatnotsupportedondevic": 6, "cudaerrormpsconnectionfail": 6, "cudaerrormpsrpcfailur": 6, "cudaerrormpsservernotreadi": 6, "cudaerrormpsmaxclientsreach": 6, "cudaerrormpsmaxconnectionsreach": 6, "cudaerrormpsclienttermin": 6, "cudaerrorcdpnotsupport": 6, "cudaerrorcdpversionmismatch": 
6, "cudaerrorstreamcaptureunsupport": 6, "cudaerrorstreamcaptureinvalid": 6, "cudaerrorstreamcapturemerg": 6, "cudaerrorstreamcaptureunmatch": 6, "cudaerrorstreamcaptureisol": 6, "cudaerrorcapturedev": 6, "cudaerrorexternaldevic": 6, "cudaerrorinvalidclusters": 6, "cudaerrorfunctionnotload": 6, "cudaerrorinvalidresourcetyp": 6, "cudaerrorinvalidresourceconfigur": 6, "cudaerrorapifailurebas": 6, "10000": 6, "cudachannelformatkindnon": 6, "cudachannelformatkindunsignednormalized8x1": 6, "cudachannelformatkindunsignednormalized8x2": 6, "cudachannelformatkindunsignednormalized8x4": 6, "cudachannelformatkindunsignednormalized16x1": 6, "cudachannelformatkindunsignednormalized16x2": 6, "cudachannelformatkindunsignednormalized16x4": 6, "cudachannelformatkindsignednormalized8x1": 6, "cudachannelformatkindsignednormalized8x2": 6, "cudachannelformatkindsignednormalized8x4": 6, "cudachannelformatkindsignednormalized16x1": 6, "cudachannelformatkindsignednormalized16x2": 6, "cudachannelformatkindsignednormalized16x4": 6, "cudachannelformatkindunsignedblockcompressed1": 6, "cudachannelformatkindunsignedblockcompressed1srgb": 6, "cudachannelformatkindunsignedblockcompressed2": 6, "cudachannelformatkindunsignedblockcompressed2srgb": 6, "cudachannelformatkindunsignedblockcompressed3": 6, "cudachannelformatkindunsignedblockcompressed3srgb": 6, "cudachannelformatkindunsignedblockcompressed4": 6, "cudachannelformatkindsignedblockcompressed4": 6, "cudachannelformatkindunsignedblockcompressed5": 6, "cudachannelformatkindsignedblockcompressed5": 6, "cudachannelformatkindunsignedblockcompressed6h": 6, "cudachannelformatkindsignedblockcompressed6h": 6, "cudachannelformatkindunsignedblockcompressed7": 6, "cudachannelformatkindunsignedblockcompressed7srgb": 6, "cudaaccesspropertynorm": 6, "cudaaccesspropertystream": 6, "cudaaccesspropertypersist": 6, "cudasyncpolicyauto": 6, "cudasyncpolicyspin": 6, "cudasyncpolicyyield": 6, "cudasyncpolicyblockingsync": 6, "cudaclusterschedulingpolicydefault": 6, "cudaclusterschedulingpolicyspread": 6, "cudaclusterschedulingpolicyloadbalanc": 6, "cudastreamupdatecapturedependenciesflag": 6, "cudauserobjectflag": 6, "cudauserobjectretainflag": 6, "cudagraphicsregisterflag": 6, "cudagraphicsmapflag": 6, "cudagraphicscubefacepositivex": 6, "cudagraphicscubefacenegativex": 6, "cudagraphicscubefacepositivei": 6, "cudagraphicscubefacenegativei": 6, "cudagraphicscubefacepositivez": 6, "cudagraphicscubefacenegativez": 6, "cudaresviewformatnon": 6, "cudaresviewformatunsignedchar1": 6, "cudaresviewformatunsignedchar2": 6, "cudaresviewformatunsignedchar4": 6, "cudaresviewformatsignedchar1": 6, "cudaresviewformatsignedchar2": 6, "cudaresviewformatsignedchar4": 6, "cudaresviewformatunsignedshort1": 6, "cudaresviewformatunsignedshort2": 6, "cudaresviewformatunsignedshort4": 6, "cudaresviewformatsignedshort1": 6, "cudaresviewformatsignedshort2": 6, "cudaresviewformatsignedshort4": 6, "cudaresviewformatunsignedint1": 6, "cudaresviewformatunsignedint2": 6, "cudaresviewformatunsignedint4": 6, "cudaresviewformatsignedint1": 6, "cudaresviewformatsignedint2": 6, "cudaresviewformatsignedint4": 6, "cudaresviewformathalf1": 6, "cudaresviewformathalf2": 6, "cudaresviewformathalf4": 6, "cudaresviewformatfloat1": 6, "cudaresviewformatfloat2": 6, "cudaresviewformatfloat4": 6, "cudaresviewformatunsignedblockcompressed1": 6, "cudaresviewformatunsignedblockcompressed2": 6, "cudaresviewformatunsignedblockcompressed3": 6, "cudaresviewformatunsignedblockcompressed4": 6, "cudaresviewformatsignedblockcompressed4": 6, 
"cudaresviewformatunsignedblockcompressed5": 6, "cudaresviewformatsignedblockcompressed5": 6, "cudaresviewformatunsignedblockcompressed6h": 6, "cudaresviewformatsignedblockcompressed6h": 6, "cudaresviewformatunsignedblockcompressed7": 6, "cudafuncattributeclusterdimmustbeset": 6, "cudafuncattributemax": 6, "cudasharedmemconfig": 6, "cudasharedmembanksizedefault": 6, "cudasharedmembanksizefourbyt": 6, "cudasharedmembanksizeeightbyt": 6, "cudasharedcarveout": 6, "cudasharedmemcarveoutdefault": 6, "cudasharedmemcarveoutmaxshar": 6, "cudasharedmemcarveoutmaxl1": 6, "cudacomputemodeexclus": 6, "occassion": 6, "cudamemadviseunsetreadmostli": 6, "cudaflushgpudirectrdmawritesoptionhost": 6, "cudaflushgpudirectrdmawritesoptionmemop": 6, "cudagpudirectrdmawritesorderingnon": 6, "cudaflushgpudirectrdmawrit": 6, "cudagpudirectrdmawritesorderingown": 6, "cudagpudirectrdmawritesorderingalldevic": 6, "cudaflushgpudirectrdmawritestoown": 6, "cudaflushgpudirectrdmawritestoalldevic": 6, "cudaflushgpudirectrdmawritestargetcurrentdevic": 6, "cudadevattrsurfacealign": 6, "cudadevattrasyncenginecount": 6, "cudadevattrmaxtexture2dgatherwidth": 6, "cudadevattrmaxtexture2dgatherheight": 6, "cudadevattrpcidomainid": 6, "cudadevattrreserved92": 6, "cudadevattrreserved93": 6, "cudadevattrreserved94": 6, "chip": 6, "cudadevattrtimelinesemaphoreinteropsupport": 6, "cudadevattrmaxtimelinesemaphoreinteropsupport": 6, "cudadevattrclusterlaunch": 6, "cudadevattrreserved122": 6, "cudadevattrreserved123": 6, "cudadevattrreserved124": 6, "cudadevattrreserved127": 6, "cudadevattrreserved128": 6, "cudadevattrreserved129": 6, "cudadevattrreserved132": 6, "cudadevattrmpsen": 6, "cudadevattrhostnumaid": 6, "cudadevattrd3d12cigsupport": 6, "cudadevattrmax": 6, "cudamemaccessflagsprotnon": 6, "cudamemaccessflagsprotread": 6, "cudamemaccessflagsprotreadwrit": 6, "cudamemallocationtypeinvalid": 6, "cudamemallocationtypemax": 6, "cudamemhandletypeposixfiledescriptor": 6, "cudamemhandletypewin32kmt": 6, "cudamemfabrichandle_t": 6, "cudacgscop": 6, "cudacgscopeinvalid": 6, "cudacgscopegrid": 6, "grid_group": 6, "cudacgscopemultigrid": 6, "multi_grid_group": 6, "cudagraphconditionalhandleflag": 6, "cudagraphcondtypeif": 6, "cudagraphcondtypewhil": 6, "cudagraphnodetypekernel": 6, "cudagraphnodetypememcpi": 6, "cudagraphnodetypememset": 6, "cudagraphnodetypehost": 6, "cudagraphnodetypegraph": 6, "cudagraphnodetypeempti": 6, "cudagraphnodetypewaitev": 6, "cudagraphnodetypeeventrecord": 6, "cudagraphnodetypeextsemaphoresign": 6, "cudagraphnodetypeextsemaphorewait": 6, "cudagraphnodetypecondit": 6, "cudagraphnodetypecount": 6, "cudagraphdependencytypedefault": 6, "cudagraphdependencytypeprogrammat": 6, "cudagraphkernelnodefieldinvalid": 6, "cudagraphkernelnodefieldgriddim": 6, "cudagraphkernelnodefieldparam": 6, "cudagraphkernelnodefielden": 6, "cudagetdriverentrypointflag": 6, "cudaapigetdriverentrypoint": 6, "wasn": 6, "great": 6, "cudagraphdebugdotflagsverbos": 6, "cudagraphdebugdotflagskernelnodeparam": 6, "cudagraphdebugdotflagsmemcpynodeparam": 6, "cudagraphdebugdotflagsmemsetnodeparam": 6, "cudagraphdebugdotflagshostnodeparam": 6, "cudagraphdebugdotflagseventnodeparam": 6, "cudagraphdebugdotflagsextsemassignalnodeparam": 6, "cudagraphdebugdotflagsextsemaswaitnodeparam": 6, "cudagraphdebugdotflagskernelnodeattribut": 6, "cudagraphdebugdotflagshandl": 6, "cudagraphdebugdotflagsconditionalnodeparam": 6, "cudagraphinstantiateflag": 6, "cudalaunchmemsyncdomaindefault": 6, "cudalaunchattributeignor": 6, "cudadevicenumaconfignon": 6, 
"cudadevicenumaconfignumanod": 6, "cudaasyncnotificationtypeoverbudget": 6, "cudasurfaceboundarymod": 6, "cudaboundarymodezero": 6, "cudaboundarymodeclamp": 6, "cudaboundarymodetrap": 6, "trap": 6, "cudasurfaceformatmod": 6, "cudaformatmodeforc": 6, "cudaformatmodeauto": 6, "auto": 6, "cudareadmodeelementtyp": 6, "cudaasyncnotificationinfo_t": 6, "cuda_egl_max_plan": 6, "cudapeeraccessdefault": 6, "cudadeviceschedulemask": 6, "cudadevicemask": 6, "tell": 6, "cudacooperativelaunchmultidevicenopresync": 6, "cudacooperativelaunchmultidevicenopostsync": 6, "cuda_ipc_handle_s": 6, "cudastreamattributeaccesspolicywindow": 6, "cudastreamattributesynchronizationpolici": 6, "cudastreamattributememsyncdomainmap": 6, "cudastreamattributememsyncdomain": 6, "cudastreamattributeprior": 6, "cudakernelnodeattributeaccesspolicywindow": 6, "cudakernelnodeattributecoop": 6, "cudakernelnodeattributeprior": 6, "cudakernelnodeattributeclusterdimens": 6, "cudakernelnodeattributeclusterschedulingpolicyprefer": 6, "cudakernelnodeattributememsyncdomainmap": 6, "cudakernelnodeattributememsyncdomain": 6, "cudakernelnodeattributepreferredsharedmemorycarveout": 6, "cudakernelnodeattributedeviceupdatablekernelnod": 6, "cudasurfacetype1d": 6, "cudasurfacetype2d": 6, "cudasurfacetype3d": 6, "cudasurfacetypecubemap": 6, "cudasurfacetype1dlay": 6, "241": 6, "cudasurfacetype2dlay": 6, "242": 6, "cudasurfacetypecubemaplay": 6, "252": 6, "cudatexturetype1d": 6, "cudatexturetype2d": 6, "cudatexturetype3d": 6, "cudatexturetypecubemap": 6, "cudatexturetype1dlay": 6, "cudatexturetype2dlay": 6, "cudatexturetypecubemaplay": 6, "nvrtcresult": [7, 9], "nvrtc_success": 7, "nvrtc_error_out_of_memori": 7, "nvrtc_error_program_creation_failur": 7, "nvrtc_error_invalid_input": 7, "nvrtc_error_invalid_program": 7, "nvrtc_error_invalid_opt": 7, "nvrtc_error_compil": 7, "nvrtc_error_builtin_operation_failur": 7, "nvrtc_error_no_name_expressions_after_compil": 7, "nvrtc_error_no_lowered_names_before_compil": 7, "nvrtc_error_name_expression_not_valid": 7, "nvrtc_error_internal_error": 7, "nvrtc_error_time_file_write_fail": 7, "nvrtcgeterrorstr": [7, 9], "helper": 7, "nvrtc_error": 7, "nvrtcversion": 7, "nvrtcgetnumsupportedarch": 7, "numarch": 7, "nvrtcgetsupportedarch": 7, "supportedarch": 7, "sort": 7, "ascend": 7, "nvrtcprogram": 7, "nvrtccreateprogram": [7, 9], "nvrtccompileprogram": [7, 9], "numhead": 7, "includenam": 7, "prog": [7, 9], "default_program": 7, "nvrtcdestroyprogram": 7, "nvrtcgetptxsiz": [7, 9], "ptxsizeret": 7, "trail": 7, "nvrtcgetptx": [7, 9], "nvrtcgetcubins": 7, "cubinsizeret": 7, "arch": 7, "nvrtcgetcubin": 7, "assembl": 7, "nvrtcgetnvvmsiz": 7, "notic": 7, "nvrtcgetltoirs": 7, "nvrtcgetltoir": 7, "nvvmsizeret": 7, "nvrtcgetnvvm": 7, "nvvm": 7, "ltoirsizeret": 7, "dlto": 7, "ltoir": 7, "nvrtcgetoptixirs": 7, "optixirsizeret": 7, "optix": 7, "nvrtcgetoptixir": 7, "optixir": 7, "nvrtcgetprogramlogs": 7, "logsizeret": 7, "nvrtcgetprogramlog": 7, "nvrtcaddnameexpress": 7, "name_express": 7, "__constant__": 7, "nvrtcgetlowerednam": 7, "extract": [7, 9], "lowered_nam": 7, "dash": 7, "compute_60": 7, "def": [7, 9], "compute_50": 7, "compute_52": 7, "compute_53": 7, "compute_61": 7, "compute_62": 7, "compute_70": 7, "compute_72": 7, "compute_75": 7, "compute_80": 7, "compute_87": 7, "compute_89": 7, "compute_90": 7, "compute_90a": 7, "sm_50": 7, "sm_52": 7, "sm_53": 7, "sm_60": 7, "sm_61": 7, "sm_62": 7, "sm_70": 7, "sm_72": 7, "sm_75": 7, "sm_80": 7, "sm_87": 7, "sm_89": 7, "sm_90a": 7, "dc": 7, "dw": 7, "ewp": 7, "dopt": 7, 
"ptxa": 7, "maxrregcount": 7, "therebi": 7, "trade": 7, "bump": 7, "ftz": 7, "denorm": 7, "use_fast_math": 7, "prec": 7, "sqrt": 7, "squar": 7, "ieee": 7, "div": 7, "reciproc": 7, "fmad": [7, 9], "subtract": 7, "ffma": 7, "dfma": 7, "math": 7, "vector": [7, 9], "aggress": 7, "setrlimit": 7, "dlink": 7, "intermedi": 7, "gen": 7, "nvrtcgetoptix": 7, "jump": [7, 9], "densiti": 7, "jtd": 7, "statement": 7, "brx": 7, "idx": 7, "protector": 7, "canari": 7, "preprocess": 7, "predefin": 7, "translat": [7, 9], "truncat": 7, "cancel": 7, "dir": 7, "directori": [7, 17], "preinclud": 7, "preprocessor": 7, "dialect": 7, "std": 7, "03": 7, "initializer_list": 7, "misc": 7, "inhibit": 7, "int128": 7, "__int128": 7, "cudacc_rtc_int128": 7, "inlin": 7, "emit": 7, "remark": 7, "err": [7, 9], "diagnost": 7, "diag": 7, "comma": 7, "brief": 7, "append": 7, "head": 7, "fdevic": 7, "syntax": [7, 8], "cudadevrt": 7, "prefix": 7, "driver_typ": [7, 16], "difficult": 7, "exploit": 7, "safeti": 7, "assess": 7, "risk": 7, "wrapper": [8, 11, 12], "simplifi": 8, "popular": 8, "scienc": [8, 9], "analyt": [8, 9], "deep": [8, 9], "ecosystem": [8, 9], "coverag": [8, 9], "uniform": 8, "hpc": 8, "ai": 8, "numba": 8, "anaconda": 8, "easi": [8, 9], "increasingli": 8, "sophist": 8, "jargon": 8, "world": 8, "rapid": 8, "cupi": 8, "numpi": [8, 9], "scipi": 8, "network": 8, "footprint": 8, "lighter": 8, "mainten": 8, "wheel": 8, "benefit": 8, "foundat": [8, 9], "unison": 8, "compos": 8, "solv": 8, "matthew": 9, "nice": 9, "plai": 9, "role": 9, "massiv": 9, "deliv": 9, "simplif": 9, "importantli": 9, "understand": 9, "isa": 9, "saxpi": 9, "thing": 9, "np": 9, "fundament": 9, "practic": 9, "_cudageterrorenum": 9, "isinst": 9, "elif": 9, "runtimeerror": 9, "checkcudaerror": 9, "len": 9, "tripl": 9, "quot": 9, "easier": 9, "introduct": 9, "__global__": 9, "tid": 9, "blockidx": 9, "threadidx": 9, "ahead": 9, "rememb": 9, "arch_arg": 9, "compute_": 9, "str": 9, "cu": 9, "ptxsize": 9, "traceback": 9, "cell": 9, "analog": 9, "uncommon": 9, "ctype": 9, "prepar": 9, "num_thread": 9, "num_block": 9, "dtype": 9, "float32": 9, "uint32": 9, "buffers": 9, "items": 9, "hx": 9, "random": 9, "rand": 9, "astyp": 9, "hy": 9, "hout": 9, "transform": 9, "movement": 9, "concept": 9, "xx": 9, "dxclass": 9, "dyclass": 9, "doutclass": 9, "prep": 9, "grab": 9, "intuit": 9, "dx": 9, "uint64": 9, "dy": 9, "dout": 9, "halt": 9, "hz": 9, "allclos": 9, "valueerror": 9, "toler": 9, "verif": 9, "nearli": 9, "nsight": 9, "nsy": 9, "stat": 9, "352\u00b5": 9, "1076m": 9, "1080m": 9, "insight": 9, "fig": 9, "screenshot": 9, "overal": 9, "hightlight": 10, "august": [11, 28], "2021": [11, 12], "ea": 11, "cudagettexturerefer": [11, 12], "cudagetsurfacerefer": [11, 12], "cudabindtextur": [11, 12], "cudabindtexture2d": [11, 12], "cudabindtexturetoarrai": [11, 12], "cudabindtexturetomipmappedarrai": [11, 12], "cudaunbindtextur": [11, 12], "cudabindsurfacetoarrai": [11, 12], "cudagetfuncbysymbol": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "octob": [12, 17, 20, 25], "ga": 12, "cudaprofileriniti": 12, "gl": [12, 13], "cuwglgetdevic": 12, "cuglctxcreat": 12, "cuglinit": 12, "cuglregisterbufferobject": 12, "cuglmapbufferobject": 12, "cuglunmapbufferobject": 12, "cuglunregisterbufferobject": 12, "cuglsetbufferobjectmapflag": 12, "cuglmapbufferobjectasync": 12, "cuglunmapbufferobjectasync": 12, "cudawglgetdevic": 12, "cudaglregisterbufferobject": 12, "cudaglmapbufferobject": 12, "cudaglunmapbufferobject": 12, "cudaglunregisterbufferobject": 12, 
"cudaglsetbufferobjectmapflag": 12, "cudaglmapbufferobjectasync": 12, "cudaglunmapbufferobjectasync": 12, "janurai": 13, "2022": [13, 14, 15, 16, 17, 18, 21], "relax": [13, 16], "ptd": 13, "cuda_python_cuda_per_thread_default_stream": 13, "explan": 13, "underlin": 13, "waitparam": 13, "value64": 13, "march": [14, 26], "decomposit": 14, "wsl": 14, "june": [16, 23], "propag": 16, "ctk": [16, 17, 18], "mobil": [16, 17], "workaround": [16, 17], "gitlab": 16, "cudaprofil": 16, "cuda_runtim": 16, "pars": 17, "variat": 17, "novemb": 18, "2023": [19, 20, 22, 23, 25], "libcuda": 19, "modern": 20, "decemb": 21, "rebas": [21, 22, 23, 25, 26, 27, 28], "mr28": 21, "mr35": 21, "februari": 22, "drop": [22, 25], "nogil": 23, "pypars": 23, "januari": 24, "2024": [24, 26, 27, 28], "hard": 25, "ppc64": 25, "cudafuncgetnam": [25, 26, 27, 28], "cudafuncgetparaminfo": [26, 27, 28], "pywin32": 28}, "objects": {"cuda.cuda": [[5, 0, 1, "", "CUCoredumpGenerationFlags"], [5, 1, 1, "", "CUDA_ARRAY3D_2DARRAY"], [5, 1, 1, "", "CUDA_ARRAY3D_COLOR_ATTACHMENT"], [5, 1, 1, "", "CUDA_ARRAY3D_CUBEMAP"], [5, 1, 1, "", "CUDA_ARRAY3D_DEFERRED_MAPPING"], [5, 1, 1, "", "CUDA_ARRAY3D_DEPTH_TEXTURE"], [5, 0, 1, "", "CUDA_ARRAY3D_DESCRIPTOR"], [5, 0, 1, "", "CUDA_ARRAY3D_DESCRIPTOR_st"], [5, 0, 1, "", "CUDA_ARRAY3D_DESCRIPTOR_v2"], [5, 1, 1, "", "CUDA_ARRAY3D_LAYERED"], [5, 1, 1, "", "CUDA_ARRAY3D_SPARSE"], [5, 1, 1, "", "CUDA_ARRAY3D_SURFACE_LDST"], [5, 1, 1, "", "CUDA_ARRAY3D_TEXTURE_GATHER"], [5, 1, 1, "", "CUDA_ARRAY3D_VIDEO_ENCODE_DECODE"], [5, 0, 1, "", "CUDA_ARRAY_DESCRIPTOR"], [5, 0, 1, "", "CUDA_ARRAY_DESCRIPTOR_st"], [5, 0, 1, "", "CUDA_ARRAY_DESCRIPTOR_v2"], [5, 0, 1, "", "CUDA_ARRAY_MEMORY_REQUIREMENTS"], [5, 0, 1, "", "CUDA_ARRAY_MEMORY_REQUIREMENTS_st"], [5, 0, 1, "", "CUDA_ARRAY_MEMORY_REQUIREMENTS_v1"], [5, 0, 1, "", "CUDA_ARRAY_SPARSE_PROPERTIES"], [5, 0, 1, "", "CUDA_ARRAY_SPARSE_PROPERTIES_st"], [5, 0, 1, "", "CUDA_ARRAY_SPARSE_PROPERTIES_v1"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_CHILD_GRAPH_NODE_PARAMS"], [5, 0, 1, "", "CUDA_CHILD_GRAPH_NODE_PARAMS_st"], [5, 0, 1, "id0", "CUDA_CONDITIONAL_NODE_PARAMS"], [5, 1, 1, "", "CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC"], [5, 1, 1, "", "CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC"], [5, 1, 1, "", "CUDA_EGL_INFINITE_TIMEOUT"], [5, 0, 1, "", "CUDA_EVENT_RECORD_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EVENT_RECORD_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EVENT_WAIT_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EVENT_WAIT_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_BUFFER_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1"], [5, 1, 1, "", "CUDA_EXTERNAL_MEMORY_DEDICATED"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_HANDLE_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS"], [5, 0, 1, "", 
"CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1"], [5, 1, 1, "", "CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1"], [5, 1, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_GRAPH_INSTANTIATE_PARAMS"], [5, 0, 1, "", "CUDA_GRAPH_INSTANTIATE_PARAMS_st"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v3"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v3_st"], [5, 0, 1, "", "CUDA_LAUNCH_PARAMS"], [5, 0, 1, "", "CUDA_LAUNCH_PARAMS_st"], [5, 0, 1, "", "CUDA_LAUNCH_PARAMS_v1"], [5, 0, 1, "", "CUDA_MEMCPY2D"], [5, 0, 1, "", "CUDA_MEMCPY2D_st"], [5, 0, 1, "", "CUDA_MEMCPY2D_v2"], [5, 0, 1, "", "CUDA_MEMCPY3D"], [5, 0, 1, "", "CUDA_MEMCPY3D_PEER"], [5, 0, 1, "", "CUDA_MEMCPY3D_PEER_st"], [5, 0, 1, "", "CUDA_MEMCPY3D_PEER_v1"], [5, 0, 1, "", "CUDA_MEMCPY3D_st"], [5, 0, 1, "", "CUDA_MEMCPY3D_v2"], [5, 0, 1, "", "CUDA_MEMCPY_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEMCPY_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v1_st"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_MEM_FREE_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEM_FREE_NODE_PARAMS_st"], [5, 1, 1, "", "CUDA_NVSCISYNC_ATTR_SIGNAL"], [5, 1, 1, "", "CUDA_NVSCISYNC_ATTR_WAIT"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1"], [5, 0, 1, "", "CUDA_RESOURCE_DESC"], [5, 0, 1, "", "CUDA_RESOURCE_DESC_st"], [5, 0, 1, "", "CUDA_RESOURCE_DESC_v1"], [5, 0, 1, "", "CUDA_RESOURCE_VIEW_DESC"], [5, 0, 1, "", "CUDA_RESOURCE_VIEW_DESC_st"], [5, 0, 1, "", "CUDA_RESOURCE_VIEW_DESC_v1"], [5, 0, 1, "", "CUDA_TEXTURE_DESC"], [5, 0, 1, "", "CUDA_TEXTURE_DESC_st"], [5, 0, 1, "", "CUDA_TEXTURE_DESC_v1"], [5, 1, 1, "", "CUDA_VERSION"], [5, 0, 1, "", "CUGLDeviceList"], [5, 0, 1, "", "CUGPUDirectRDMAWritesOrdering"], [5, 1, 1, "", "CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL"], [5, 1, 1, "", "CU_COMPUTE_ACCELERATED_TARGET_BASE"], [5, 1, 1, "", 
"CU_DEVICE_CPU"], [5, 1, 1, "", "CU_DEVICE_INVALID"], [5, 1, 1, "", "CU_GRAPH_COND_ASSIGN_DEFAULT"], [5, 1, 1, "", "CU_GRAPH_KERNEL_NODE_PORT_DEFAULT"], [5, 1, 1, "", "CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER"], [5, 1, 1, "", "CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC"], [5, 1, 1, "", "CU_IPC_HANDLE_SIZE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_PRIORITY"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_POINTER"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_SIZE"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT"], [5, 1, 1, "", "CU_LAUNCH_PARAM_END"], [5, 1, 1, "", "CU_LAUNCH_PARAM_END_AS_INT"], [5, 1, 1, "", "CU_MEMHOSTALLOC_DEVICEMAP"], [5, 1, 1, "", "CU_MEMHOSTALLOC_PORTABLE"], [5, 1, 1, "", "CU_MEMHOSTALLOC_WRITECOMBINED"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_DEVICEMAP"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_IOMEMORY"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_PORTABLE"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_READ_ONLY"], [5, 1, 1, "", "CU_MEM_CREATE_USAGE_TILE_POOL"], [5, 1, 1, "", "CU_PARAM_TR_DEFAULT"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_PRIORITY"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY"], [5, 1, 1, "", "CU_STREAM_LEGACY"], [5, 1, 1, "", "CU_STREAM_PER_THREAD"], [5, 1, 1, "", "CU_TENSOR_MAP_NUM_QWORDS"], [5, 1, 1, "", "CU_TRSA_OVERRIDE_FORMAT"], [5, 1, 1, "", "CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION"], [5, 1, 1, "", "CU_TRSF_NORMALIZED_COORDINATES"], [5, 1, 1, "", "CU_TRSF_READ_AS_INTEGER"], [5, 1, 1, "", "CU_TRSF_SEAMLESS_CUBEMAP"], [5, 1, 1, "", "CU_TRSF_SRGB"], [5, 0, 1, "", "CUaccessPolicyWindow"], [5, 0, 1, "", "CUaccessPolicyWindow_st"], [5, 0, 1, "", "CUaccessPolicyWindow_v1"], [5, 0, 1, "", "CUaccessProperty"], [5, 0, 1, "", "CUaddress_mode"], [5, 0, 1, "", "CUarray"], [5, 0, 1, "", "CUarrayMapInfo"], [5, 0, 1, "", "CUarrayMapInfo_st"], [5, 0, 1, "", "CUarrayMapInfo_v1"], [5, 0, 1, "", "CUarraySparseSubresourceType"], [5, 0, 1, "", "CUarray_cubemap_face"], [5, 0, 1, "", "CUarray_format"], [5, 0, 1, "", "CUasyncCallback"], [5, 0, 1, "", "CUasyncCallbackHandle"], [5, 0, 1, "", "CUasyncNotificationInfo"], [5, 0, 1, "", "CUasyncNotificationInfo_st"], [5, 0, 1, "", "CUasyncNotificationType"], [5, 0, 1, "", "CUcigDataType"], [5, 0, 1, "", "CUclusterSchedulingPolicy"], [5, 0, 1, "", "CUcomputemode"], [5, 0, 1, "", "CUcontext"], [5, 0, 1, "", "CUcoredumpSettings"], [5, 0, 1, "", "CUctxCigParam"], [5, 0, 1, "", "CUctxCigParam_st"], [5, 0, 1, "", "CUctxCreateParams"], [5, 0, 1, "", "CUctxCreateParams_st"], [5, 0, 1, "", "CUctx_flags"], [5, 0, 1, "", "CUdevResource"], [5, 0, 1, "", "CUdevResourceDesc"], [5, 0, 1, "", "CUdevResourceType"], [5, 0, 1, "", "CUdevResource_st"], [5, 0, 1, "id73", "CUdevSmResource"], [5, 0, 1, "", "CUdevSmResourceSplit_flags"], [5, 0, 1, "", "CUdevSmResource_st"], [5, 0, 1, "", "CUdevice"], [5, 0, 1, "", "CUdeviceNumaConfig"], 
[5, 0, 1, "", "CUdevice_P2PAttribute"], [5, 0, 1, "", "CUdevice_attribute"], [5, 0, 1, "", "CUdevice_v1"], [5, 0, 1, "", "CUdeviceptr"], [5, 0, 1, "", "CUdeviceptr_v2"], [5, 0, 1, "", "CUdevprop"], [5, 0, 1, "", "CUdevprop_st"], [5, 0, 1, "", "CUdevprop_v1"], [5, 0, 1, "", "CUdriverProcAddressQueryResult"], [5, 0, 1, "", "CUdriverProcAddress_flags"], [5, 0, 1, "", "CUeglColorFormat"], [5, 0, 1, "", "CUeglFrame"], [5, 0, 1, "", "CUeglFrameType"], [5, 0, 1, "", "CUeglFrame_st"], [5, 0, 1, "", "CUeglFrame_v1"], [5, 0, 1, "", "CUeglResourceLocationFlags"], [5, 0, 1, "", "CUeglStreamConnection"], [5, 0, 1, "", "CUevent"], [5, 0, 1, "", "CUevent_flags"], [5, 0, 1, "", "CUevent_record_flags"], [5, 0, 1, "", "CUevent_sched_flags"], [5, 0, 1, "", "CUevent_wait_flags"], [5, 0, 1, "", "CUexecAffinityParam"], [5, 0, 1, "", "CUexecAffinityParam_st"], [5, 0, 1, "", "CUexecAffinityParam_v1"], [5, 0, 1, "", "CUexecAffinitySmCount"], [5, 0, 1, "", "CUexecAffinitySmCount_st"], [5, 0, 1, "", "CUexecAffinitySmCount_v1"], [5, 0, 1, "", "CUexecAffinityType"], [5, 0, 1, "", "CUexternalMemory"], [5, 0, 1, "", "CUexternalMemoryHandleType"], [5, 0, 1, "", "CUexternalSemaphore"], [5, 0, 1, "", "CUexternalSemaphoreHandleType"], [5, 0, 1, "", "CUfilter_mode"], [5, 0, 1, "", "CUflushGPUDirectRDMAWritesOptions"], [5, 0, 1, "", "CUflushGPUDirectRDMAWritesScope"], [5, 0, 1, "", "CUflushGPUDirectRDMAWritesTarget"], [5, 0, 1, "", "CUfunc_cache"], [5, 0, 1, "", "CUfunction"], [5, 0, 1, "", "CUfunctionLoadingState"], [5, 0, 1, "", "CUfunction_attribute"], [5, 0, 1, "", "CUgraph"], [5, 0, 1, "", "CUgraphConditionalHandle"], [5, 0, 1, "", "CUgraphConditionalNodeType"], [5, 0, 1, "", "CUgraphDebugDot_flags"], [5, 0, 1, "", "CUgraphDependencyType"], [5, 0, 1, "", "CUgraphDeviceNode"], [5, 0, 1, "", "CUgraphEdgeData"], [5, 0, 1, "", "CUgraphEdgeData_st"], [5, 0, 1, "", "CUgraphExec"], [5, 0, 1, "", "CUgraphExecUpdateResult"], [5, 0, 1, "", "CUgraphExecUpdateResultInfo"], [5, 0, 1, "", "CUgraphExecUpdateResultInfo_st"], [5, 0, 1, "", "CUgraphExecUpdateResultInfo_v1"], [5, 0, 1, "", "CUgraphInstantiateResult"], [5, 0, 1, "", "CUgraphInstantiate_flags"], [5, 0, 1, "", "CUgraphMem_attribute"], [5, 0, 1, "", "CUgraphNode"], [5, 0, 1, "", "CUgraphNodeParams"], [5, 0, 1, "", "CUgraphNodeParams_st"], [5, 0, 1, "", "CUgraphNodeType"], [5, 0, 1, "", "CUgraphicsMapResourceFlags"], [5, 0, 1, "", "CUgraphicsRegisterFlags"], [5, 0, 1, "", "CUgraphicsResource"], [5, 0, 1, "", "CUgreenCtx"], [5, 0, 1, "", "CUgreenCtxCreate_flags"], [5, 0, 1, "", "CUhostFn"], [5, 0, 1, "", "CUipcEventHandle"], [5, 0, 1, "", "CUipcEventHandle_st"], [5, 0, 1, "", "CUipcEventHandle_v1"], [5, 0, 1, "", "CUipcMemHandle"], [5, 0, 1, "", "CUipcMemHandle_st"], [5, 0, 1, "", "CUipcMemHandle_v1"], [5, 0, 1, "", "CUipcMem_flags"], [5, 0, 1, "", "CUjitInputType"], [5, 0, 1, "", "CUjit_cacheMode"], [5, 0, 1, "", "CUjit_fallback"], [5, 0, 1, "", "CUjit_option"], [5, 0, 1, "", "CUjit_target"], [5, 0, 1, "", "CUkernel"], [5, 0, 1, "", "CUkernelNodeAttrID"], [5, 0, 1, "", "CUkernelNodeAttrValue"], [5, 0, 1, "", "CUkernelNodeAttrValue_v1"], [5, 0, 1, "", "CUlaunchAttribute"], [5, 0, 1, "", "CUlaunchAttributeID"], [5, 0, 1, "", "CUlaunchAttributeValue"], [5, 0, 1, "", "CUlaunchAttributeValue_union"], [5, 0, 1, "", "CUlaunchAttribute_st"], [5, 0, 1, "", "CUlaunchConfig"], [5, 0, 1, "", "CUlaunchConfig_st"], [5, 0, 1, "", "CUlaunchMemSyncDomain"], [5, 0, 1, "", "CUlaunchMemSyncDomainMap"], [5, 0, 1, "", "CUlaunchMemSyncDomainMap_st"], [5, 0, 1, "", "CUlibrary"], [5, 0, 1, "", 
"CUlibraryHostUniversalFunctionAndDataTable"], [5, 0, 1, "", "CUlibraryHostUniversalFunctionAndDataTable_st"], [5, 0, 1, "", "CUlibraryOption"], [5, 0, 1, "", "CUlimit"], [5, 0, 1, "", "CUlinkState"], [5, 0, 1, "", "CUmemAccessDesc"], [5, 0, 1, "", "CUmemAccessDesc_st"], [5, 0, 1, "", "CUmemAccessDesc_v1"], [5, 0, 1, "", "CUmemAccess_flags"], [5, 0, 1, "", "CUmemAllocationCompType"], [5, 0, 1, "", "CUmemAllocationGranularity_flags"], [5, 0, 1, "", "CUmemAllocationHandleType"], [5, 0, 1, "", "CUmemAllocationProp"], [5, 0, 1, "", "CUmemAllocationProp_st"], [5, 0, 1, "", "CUmemAllocationProp_v1"], [5, 0, 1, "", "CUmemAllocationType"], [5, 0, 1, "", "CUmemAttach_flags"], [5, 0, 1, "", "CUmemFabricHandle"], [5, 0, 1, "", "CUmemFabricHandle_st"], [5, 0, 1, "", "CUmemFabricHandle_v1"], [5, 0, 1, "", "CUmemGenericAllocationHandle"], [5, 0, 1, "", "CUmemGenericAllocationHandle_v1"], [5, 0, 1, "", "CUmemHandleType"], [5, 0, 1, "", "CUmemLocation"], [5, 0, 1, "", "CUmemLocationType"], [5, 0, 1, "", "CUmemLocation_st"], [5, 0, 1, "", "CUmemLocation_v1"], [5, 0, 1, "", "CUmemOperationType"], [5, 0, 1, "", "CUmemPoolProps"], [5, 0, 1, "", "CUmemPoolProps_st"], [5, 0, 1, "", "CUmemPoolProps_v1"], [5, 0, 1, "", "CUmemPoolPtrExportData"], [5, 0, 1, "", "CUmemPoolPtrExportData_st"], [5, 0, 1, "", "CUmemPoolPtrExportData_v1"], [5, 0, 1, "", "CUmemPool_attribute"], [5, 0, 1, "", "CUmemRangeHandleType"], [5, 0, 1, "", "CUmem_advise"], [5, 0, 1, "", "CUmem_range_attribute"], [5, 0, 1, "", "CUmemoryPool"], [5, 0, 1, "", "CUmemorytype"], [5, 0, 1, "", "CUmipmappedArray"], [5, 0, 1, "", "CUmodule"], [5, 0, 1, "", "CUmoduleLoadingMode"], [5, 0, 1, "", "CUmulticastGranularity_flags"], [5, 0, 1, "", "CUmulticastObjectProp"], [5, 0, 1, "", "CUmulticastObjectProp_st"], [5, 0, 1, "", "CUmulticastObjectProp_v1"], [5, 0, 1, "", "CUoccupancyB2DSize"], [5, 0, 1, "", "CUoccupancy_flags"], [5, 0, 1, "", "CUpointer_attribute"], [5, 0, 1, "", "CUresourceViewFormat"], [5, 0, 1, "", "CUresourcetype"], [5, 0, 1, "", "CUresult"], [5, 0, 1, "", "CUshared_carveout"], [5, 0, 1, "", "CUsharedconfig"], [5, 0, 1, "", "CUstream"], [5, 0, 1, "", "CUstreamAttrID"], [5, 0, 1, "", "CUstreamAttrValue"], [5, 0, 1, "", "CUstreamAttrValue_v1"], [5, 0, 1, "", "CUstreamBatchMemOpParams"], [5, 0, 1, "", "CUstreamBatchMemOpParams_union"], [5, 0, 1, "", "CUstreamBatchMemOpParams_v1"], [5, 0, 1, "", "CUstreamBatchMemOpType"], [5, 0, 1, "", "CUstreamCallback"], [5, 0, 1, "", "CUstreamCaptureMode"], [5, 0, 1, "", "CUstreamCaptureStatus"], [5, 0, 1, "", "CUstreamMemoryBarrier_flags"], [5, 0, 1, "", "CUstreamUpdateCaptureDependencies_flags"], [5, 0, 1, "", "CUstreamWaitValue_flags"], [5, 0, 1, "", "CUstreamWriteValue_flags"], [5, 0, 1, "", "CUstream_flags"], [5, 0, 1, "", "CUsurfObject"], [5, 0, 1, "", "CUsurfObject_v1"], [5, 0, 1, "", "CUsurfref"], [5, 0, 1, "", "CUsynchronizationPolicy"], [5, 0, 1, "", "CUtensorMap"], [5, 0, 1, "", "CUtensorMapDataType"], [5, 0, 1, "", "CUtensorMapFloatOOBfill"], [5, 0, 1, "", "CUtensorMapInterleave"], [5, 0, 1, "", "CUtensorMapL2promotion"], [5, 0, 1, "", "CUtensorMapSwizzle"], [5, 0, 1, "", "CUtensorMap_st"], [5, 0, 1, "", "CUtexObject"], [5, 0, 1, "", "CUtexObject_v1"], [5, 0, 1, "", "CUtexref"], [5, 0, 1, "", "CUuserObject"], [5, 0, 1, "", "CUuserObjectRetain_flags"], [5, 0, 1, "", "CUuserObject_flags"], [5, 0, 1, "", "CUuuid"], [5, 0, 1, "", "CUuuid_st"], [5, 1, 1, "", "MAX_PLANES"], [5, 1, 1, "", "RESOURCE_ABI_EXTERNAL_BYTES"], [5, 1, 1, "", "RESOURCE_ABI_VERSION"], [5, 0, 1, "", "cl_context_flags"], [5, 0, 1, "", 
"cl_event_flags"], [5, 3, 1, "", "cuArray3DCreate"], [5, 3, 1, "", "cuArray3DGetDescriptor"], [5, 3, 1, "", "cuArrayCreate"], [5, 3, 1, "", "cuArrayDestroy"], [5, 3, 1, "", "cuArrayGetDescriptor"], [5, 3, 1, "", "cuArrayGetMemoryRequirements"], [5, 3, 1, "", "cuArrayGetPlane"], [5, 3, 1, "", "cuArrayGetSparseProperties"], [5, 3, 1, "", "cuCoredumpGetAttribute"], [5, 3, 1, "", "cuCoredumpGetAttributeGlobal"], [5, 3, 1, "", "cuCoredumpSetAttribute"], [5, 3, 1, "", "cuCoredumpSetAttributeGlobal"], [5, 3, 1, "", "cuCtxCreate"], [5, 3, 1, "", "cuCtxCreate_v3"], [5, 3, 1, "", "cuCtxCreate_v4"], [5, 3, 1, "", "cuCtxDestroy"], [5, 3, 1, "", "cuCtxDisablePeerAccess"], [5, 3, 1, "", "cuCtxEnablePeerAccess"], [5, 3, 1, "", "cuCtxFromGreenCtx"], [5, 3, 1, "", "cuCtxGetApiVersion"], [5, 3, 1, "", "cuCtxGetCacheConfig"], [5, 3, 1, "", "cuCtxGetCurrent"], [5, 3, 1, "", "cuCtxGetDevResource"], [5, 3, 1, "", "cuCtxGetDevice"], [5, 3, 1, "", "cuCtxGetExecAffinity"], [5, 3, 1, "", "cuCtxGetFlags"], [5, 3, 1, "", "cuCtxGetId"], [5, 3, 1, "", "cuCtxGetLimit"], [5, 3, 1, "", "cuCtxGetStreamPriorityRange"], [5, 3, 1, "", "cuCtxPopCurrent"], [5, 3, 1, "", "cuCtxPushCurrent"], [5, 3, 1, "", "cuCtxRecordEvent"], [5, 3, 1, "", "cuCtxResetPersistingL2Cache"], [5, 3, 1, "", "cuCtxSetCacheConfig"], [5, 3, 1, "", "cuCtxSetCurrent"], [5, 3, 1, "", "cuCtxSetFlags"], [5, 3, 1, "", "cuCtxSetLimit"], [5, 3, 1, "", "cuCtxSynchronize"], [5, 3, 1, "", "cuCtxWaitEvent"], [5, 3, 1, "", "cuDestroyExternalMemory"], [5, 3, 1, "", "cuDestroyExternalSemaphore"], [5, 3, 1, "", "cuDevResourceGenerateDesc"], [5, 3, 1, "", "cuDevSmResourceSplitByCount"], [5, 3, 1, "", "cuDeviceCanAccessPeer"], [5, 3, 1, "", "cuDeviceGet"], [5, 3, 1, "", "cuDeviceGetAttribute"], [5, 3, 1, "", "cuDeviceGetByPCIBusId"], [5, 3, 1, "", "cuDeviceGetCount"], [5, 3, 1, "", "cuDeviceGetDefaultMemPool"], [5, 3, 1, "", "cuDeviceGetDevResource"], [5, 3, 1, "", "cuDeviceGetExecAffinitySupport"], [5, 3, 1, "", "cuDeviceGetGraphMemAttribute"], [5, 3, 1, "", "cuDeviceGetLuid"], [5, 3, 1, "", "cuDeviceGetMemPool"], [5, 3, 1, "", "cuDeviceGetName"], [5, 3, 1, "", "cuDeviceGetNvSciSyncAttributes"], [5, 3, 1, "", "cuDeviceGetP2PAttribute"], [5, 3, 1, "", "cuDeviceGetPCIBusId"], [5, 3, 1, "", "cuDeviceGetTexture1DLinearMaxWidth"], [5, 3, 1, "", "cuDeviceGetUuid"], [5, 3, 1, "", "cuDeviceGetUuid_v2"], [5, 3, 1, "", "cuDeviceGraphMemTrim"], [5, 3, 1, "", "cuDevicePrimaryCtxGetState"], [5, 3, 1, "", "cuDevicePrimaryCtxRelease"], [5, 3, 1, "", "cuDevicePrimaryCtxReset"], [5, 3, 1, "", "cuDevicePrimaryCtxRetain"], [5, 3, 1, "", "cuDevicePrimaryCtxSetFlags"], [5, 3, 1, "", "cuDeviceRegisterAsyncNotification"], [5, 3, 1, "", "cuDeviceSetGraphMemAttribute"], [5, 3, 1, "", "cuDeviceSetMemPool"], [5, 3, 1, "", "cuDeviceTotalMem"], [5, 3, 1, "", "cuDeviceUnregisterAsyncNotification"], [5, 3, 1, "", "cuDriverGetVersion"], [5, 3, 1, "", "cuEGLStreamConsumerAcquireFrame"], [5, 3, 1, "", "cuEGLStreamConsumerConnect"], [5, 3, 1, "", "cuEGLStreamConsumerConnectWithFlags"], [5, 3, 1, "", "cuEGLStreamConsumerDisconnect"], [5, 3, 1, "", "cuEGLStreamConsumerReleaseFrame"], [5, 3, 1, "", "cuEGLStreamProducerConnect"], [5, 3, 1, "", "cuEGLStreamProducerDisconnect"], [5, 3, 1, "", "cuEGLStreamProducerPresentFrame"], [5, 3, 1, "", "cuEGLStreamProducerReturnFrame"], [5, 3, 1, "", "cuEventCreate"], [5, 3, 1, "", "cuEventCreateFromEGLSync"], [5, 3, 1, "", "cuEventDestroy"], [5, 3, 1, "", "cuEventElapsedTime"], [5, 3, 1, "", "cuEventQuery"], [5, 3, 1, "", "cuEventRecord"], [5, 3, 1, "", 
"cuEventRecordWithFlags"], [5, 3, 1, "", "cuEventSynchronize"], [5, 3, 1, "", "cuExternalMemoryGetMappedBuffer"], [5, 3, 1, "", "cuExternalMemoryGetMappedMipmappedArray"], [5, 3, 1, "", "cuFlushGPUDirectRDMAWrites"], [5, 3, 1, "", "cuFuncGetAttribute"], [5, 3, 1, "", "cuFuncGetModule"], [5, 3, 1, "", "cuFuncGetName"], [5, 3, 1, "", "cuFuncGetParamInfo"], [5, 3, 1, "", "cuFuncIsLoaded"], [5, 3, 1, "", "cuFuncLoad"], [5, 3, 1, "", "cuFuncSetAttribute"], [5, 3, 1, "", "cuFuncSetCacheConfig"], [5, 3, 1, "", "cuGLGetDevices"], [5, 3, 1, "", "cuGetErrorName"], [5, 3, 1, "", "cuGetErrorString"], [5, 3, 1, "", "cuGetProcAddress"], [5, 3, 1, "", "cuGraphAddBatchMemOpNode"], [5, 3, 1, "", "cuGraphAddChildGraphNode"], [5, 3, 1, "", "cuGraphAddDependencies"], [5, 3, 1, "", "cuGraphAddDependencies_v2"], [5, 3, 1, "", "cuGraphAddEmptyNode"], [5, 3, 1, "", "cuGraphAddEventRecordNode"], [5, 3, 1, "", "cuGraphAddEventWaitNode"], [5, 3, 1, "", "cuGraphAddExternalSemaphoresSignalNode"], [5, 3, 1, "", "cuGraphAddExternalSemaphoresWaitNode"], [5, 3, 1, "", "cuGraphAddHostNode"], [5, 3, 1, "", "cuGraphAddKernelNode"], [5, 3, 1, "", "cuGraphAddMemAllocNode"], [5, 3, 1, "", "cuGraphAddMemFreeNode"], [5, 3, 1, "", "cuGraphAddMemcpyNode"], [5, 3, 1, "", "cuGraphAddMemsetNode"], [5, 3, 1, "", "cuGraphAddNode"], [5, 3, 1, "", "cuGraphAddNode_v2"], [5, 3, 1, "", "cuGraphBatchMemOpNodeGetParams"], [5, 3, 1, "", "cuGraphBatchMemOpNodeSetParams"], [5, 3, 1, "", "cuGraphChildGraphNodeGetGraph"], [5, 3, 1, "", "cuGraphClone"], [5, 3, 1, "", "cuGraphConditionalHandleCreate"], [5, 3, 1, "", "cuGraphCreate"], [5, 3, 1, "", "cuGraphDebugDotPrint"], [5, 3, 1, "", "cuGraphDestroy"], [5, 3, 1, "", "cuGraphDestroyNode"], [5, 3, 1, "", "cuGraphEventRecordNodeGetEvent"], [5, 3, 1, "", "cuGraphEventRecordNodeSetEvent"], [5, 3, 1, "", "cuGraphEventWaitNodeGetEvent"], [5, 3, 1, "", "cuGraphEventWaitNodeSetEvent"], [5, 3, 1, "", "cuGraphExecBatchMemOpNodeSetParams"], [5, 3, 1, "", "cuGraphExecChildGraphNodeSetParams"], [5, 3, 1, "", "cuGraphExecDestroy"], [5, 3, 1, "", "cuGraphExecEventRecordNodeSetEvent"], [5, 3, 1, "", "cuGraphExecEventWaitNodeSetEvent"], [5, 3, 1, "", "cuGraphExecExternalSemaphoresSignalNodeSetParams"], [5, 3, 1, "", "cuGraphExecExternalSemaphoresWaitNodeSetParams"], [5, 3, 1, "", "cuGraphExecGetFlags"], [5, 3, 1, "", "cuGraphExecHostNodeSetParams"], [5, 3, 1, "", "cuGraphExecKernelNodeSetParams"], [5, 3, 1, "", "cuGraphExecMemcpyNodeSetParams"], [5, 3, 1, "", "cuGraphExecMemsetNodeSetParams"], [5, 3, 1, "", "cuGraphExecNodeSetParams"], [5, 3, 1, "", "cuGraphExecUpdate"], [5, 3, 1, "", "cuGraphExternalSemaphoresSignalNodeGetParams"], [5, 3, 1, "", "cuGraphExternalSemaphoresSignalNodeSetParams"], [5, 3, 1, "", "cuGraphExternalSemaphoresWaitNodeGetParams"], [5, 3, 1, "", "cuGraphExternalSemaphoresWaitNodeSetParams"], [5, 3, 1, "", "cuGraphGetEdges"], [5, 3, 1, "", "cuGraphGetEdges_v2"], [5, 3, 1, "", "cuGraphGetNodes"], [5, 3, 1, "", "cuGraphGetRootNodes"], [5, 3, 1, "", "cuGraphHostNodeGetParams"], [5, 3, 1, "", "cuGraphHostNodeSetParams"], [5, 3, 1, "", "cuGraphInstantiate"], [5, 3, 1, "", "cuGraphInstantiateWithParams"], [5, 3, 1, "", "cuGraphKernelNodeCopyAttributes"], [5, 3, 1, "", "cuGraphKernelNodeGetAttribute"], [5, 3, 1, "", "cuGraphKernelNodeGetParams"], [5, 3, 1, "", "cuGraphKernelNodeSetAttribute"], [5, 3, 1, "", "cuGraphKernelNodeSetParams"], [5, 3, 1, "", "cuGraphLaunch"], [5, 3, 1, "", "cuGraphMemAllocNodeGetParams"], [5, 3, 1, "", "cuGraphMemFreeNodeGetParams"], [5, 3, 1, "", 
"cuGraphMemcpyNodeGetParams"], [5, 3, 1, "", "cuGraphMemcpyNodeSetParams"], [5, 3, 1, "", "cuGraphMemsetNodeGetParams"], [5, 3, 1, "", "cuGraphMemsetNodeSetParams"], [5, 3, 1, "", "cuGraphNodeFindInClone"], [5, 3, 1, "", "cuGraphNodeGetDependencies"], [5, 3, 1, "", "cuGraphNodeGetDependencies_v2"], [5, 3, 1, "", "cuGraphNodeGetDependentNodes"], [5, 3, 1, "", "cuGraphNodeGetDependentNodes_v2"], [5, 3, 1, "", "cuGraphNodeGetEnabled"], [5, 3, 1, "", "cuGraphNodeGetType"], [5, 3, 1, "", "cuGraphNodeSetEnabled"], [5, 3, 1, "", "cuGraphNodeSetParams"], [5, 3, 1, "", "cuGraphReleaseUserObject"], [5, 3, 1, "", "cuGraphRemoveDependencies"], [5, 3, 1, "", "cuGraphRemoveDependencies_v2"], [5, 3, 1, "", "cuGraphRetainUserObject"], [5, 3, 1, "", "cuGraphUpload"], [5, 3, 1, "", "cuGraphicsEGLRegisterImage"], [5, 3, 1, "", "cuGraphicsGLRegisterBuffer"], [5, 3, 1, "", "cuGraphicsGLRegisterImage"], [5, 3, 1, "", "cuGraphicsMapResources"], [5, 3, 1, "", "cuGraphicsResourceGetMappedEglFrame"], [5, 3, 1, "", "cuGraphicsResourceGetMappedMipmappedArray"], [5, 3, 1, "", "cuGraphicsResourceGetMappedPointer"], [5, 3, 1, "", "cuGraphicsResourceSetMapFlags"], [5, 3, 1, "", "cuGraphicsSubResourceGetMappedArray"], [5, 3, 1, "", "cuGraphicsUnmapResources"], [5, 3, 1, "", "cuGraphicsUnregisterResource"], [5, 3, 1, "", "cuGraphicsVDPAURegisterOutputSurface"], [5, 3, 1, "", "cuGraphicsVDPAURegisterVideoSurface"], [5, 3, 1, "", "cuGreenCtxCreate"], [5, 3, 1, "", "cuGreenCtxDestroy"], [5, 3, 1, "", "cuGreenCtxGetDevResource"], [5, 3, 1, "", "cuGreenCtxRecordEvent"], [5, 3, 1, "", "cuGreenCtxStreamCreate"], [5, 3, 1, "", "cuGreenCtxWaitEvent"], [5, 3, 1, "", "cuImportExternalMemory"], [5, 3, 1, "", "cuImportExternalSemaphore"], [5, 3, 1, "", "cuInit"], [5, 3, 1, "", "cuIpcCloseMemHandle"], [5, 3, 1, "", "cuIpcGetEventHandle"], [5, 3, 1, "", "cuIpcGetMemHandle"], [5, 3, 1, "", "cuIpcOpenEventHandle"], [5, 3, 1, "", "cuIpcOpenMemHandle"], [5, 3, 1, "", "cuKernelGetAttribute"], [5, 3, 1, "", "cuKernelGetFunction"], [5, 3, 1, "", "cuKernelGetLibrary"], [5, 3, 1, "", "cuKernelGetName"], [5, 3, 1, "", "cuKernelGetParamInfo"], [5, 3, 1, "", "cuKernelSetAttribute"], [5, 3, 1, "", "cuKernelSetCacheConfig"], [5, 3, 1, "", "cuLaunchCooperativeKernel"], [5, 3, 1, "", "cuLaunchCooperativeKernelMultiDevice"], [5, 3, 1, "", "cuLaunchHostFunc"], [5, 3, 1, "", "cuLaunchKernel"], [5, 3, 1, "", "cuLaunchKernelEx"], [5, 3, 1, "", "cuLibraryEnumerateKernels"], [5, 3, 1, "", "cuLibraryGetGlobal"], [5, 3, 1, "", "cuLibraryGetKernel"], [5, 3, 1, "", "cuLibraryGetKernelCount"], [5, 3, 1, "", "cuLibraryGetManaged"], [5, 3, 1, "", "cuLibraryGetModule"], [5, 3, 1, "", "cuLibraryGetUnifiedFunction"], [5, 3, 1, "", "cuLibraryLoadData"], [5, 3, 1, "", "cuLibraryLoadFromFile"], [5, 3, 1, "", "cuLibraryUnload"], [5, 3, 1, "", "cuLinkAddData"], [5, 3, 1, "", "cuLinkAddFile"], [5, 3, 1, "", "cuLinkComplete"], [5, 3, 1, "", "cuLinkCreate"], [5, 3, 1, "", "cuLinkDestroy"], [5, 3, 1, "", "cuMemAddressFree"], [5, 3, 1, "", "cuMemAddressReserve"], [5, 3, 1, "", "cuMemAdvise"], [5, 3, 1, "", "cuMemAdvise_v2"], [5, 3, 1, "", "cuMemAlloc"], [5, 3, 1, "", "cuMemAllocAsync"], [5, 3, 1, "", "cuMemAllocFromPoolAsync"], [5, 3, 1, "", "cuMemAllocHost"], [5, 3, 1, "", "cuMemAllocManaged"], [5, 3, 1, "", "cuMemAllocPitch"], [5, 3, 1, "", "cuMemCreate"], [5, 3, 1, "", "cuMemExportToShareableHandle"], [5, 3, 1, "", "cuMemFree"], [5, 3, 1, "", "cuMemFreeAsync"], [5, 3, 1, "", "cuMemFreeHost"], [5, 3, 1, "", "cuMemGetAccess"], [5, 3, 1, "", "cuMemGetAddressRange"], [5, 3, 1, "", 
"cuMemGetAllocationGranularity"], [5, 3, 1, "", "cuMemGetAllocationPropertiesFromHandle"], [5, 3, 1, "", "cuMemGetHandleForAddressRange"], [5, 3, 1, "", "cuMemGetInfo"], [5, 3, 1, "", "cuMemHostAlloc"], [5, 3, 1, "", "cuMemHostGetDevicePointer"], [5, 3, 1, "", "cuMemHostGetFlags"], [5, 3, 1, "", "cuMemHostRegister"], [5, 3, 1, "", "cuMemHostUnregister"], [5, 3, 1, "", "cuMemImportFromShareableHandle"], [5, 3, 1, "", "cuMemMap"], [5, 3, 1, "", "cuMemMapArrayAsync"], [5, 3, 1, "", "cuMemPoolCreate"], [5, 3, 1, "", "cuMemPoolDestroy"], [5, 3, 1, "", "cuMemPoolExportPointer"], [5, 3, 1, "", "cuMemPoolExportToShareableHandle"], [5, 3, 1, "", "cuMemPoolGetAccess"], [5, 3, 1, "", "cuMemPoolGetAttribute"], [5, 3, 1, "", "cuMemPoolImportFromShareableHandle"], [5, 3, 1, "", "cuMemPoolImportPointer"], [5, 3, 1, "", "cuMemPoolSetAccess"], [5, 3, 1, "", "cuMemPoolSetAttribute"], [5, 3, 1, "", "cuMemPoolTrimTo"], [5, 3, 1, "", "cuMemPrefetchAsync"], [5, 3, 1, "", "cuMemPrefetchAsync_v2"], [5, 3, 1, "", "cuMemRangeGetAttribute"], [5, 3, 1, "", "cuMemRangeGetAttributes"], [5, 3, 1, "", "cuMemRelease"], [5, 3, 1, "", "cuMemRetainAllocationHandle"], [5, 3, 1, "", "cuMemSetAccess"], [5, 3, 1, "", "cuMemUnmap"], [5, 3, 1, "", "cuMemcpy"], [5, 3, 1, "", "cuMemcpy2D"], [5, 3, 1, "", "cuMemcpy2DAsync"], [5, 3, 1, "", "cuMemcpy2DUnaligned"], [5, 3, 1, "", "cuMemcpy3D"], [5, 3, 1, "", "cuMemcpy3DAsync"], [5, 3, 1, "", "cuMemcpy3DPeer"], [5, 3, 1, "", "cuMemcpy3DPeerAsync"], [5, 3, 1, "", "cuMemcpyAsync"], [5, 3, 1, "", "cuMemcpyAtoA"], [5, 3, 1, "", "cuMemcpyAtoD"], [5, 3, 1, "", "cuMemcpyAtoH"], [5, 3, 1, "", "cuMemcpyAtoHAsync"], [5, 3, 1, "", "cuMemcpyDtoA"], [5, 3, 1, "", "cuMemcpyDtoD"], [5, 3, 1, "", "cuMemcpyDtoDAsync"], [5, 3, 1, "", "cuMemcpyDtoH"], [5, 3, 1, "", "cuMemcpyDtoHAsync"], [5, 3, 1, "", "cuMemcpyHtoA"], [5, 3, 1, "", "cuMemcpyHtoAAsync"], [5, 3, 1, "", "cuMemcpyHtoD"], [5, 3, 1, "", "cuMemcpyHtoDAsync"], [5, 3, 1, "", "cuMemcpyPeer"], [5, 3, 1, "", "cuMemcpyPeerAsync"], [5, 3, 1, "", "cuMemsetD16"], [5, 3, 1, "", "cuMemsetD16Async"], [5, 3, 1, "", "cuMemsetD2D16"], [5, 3, 1, "", "cuMemsetD2D16Async"], [5, 3, 1, "", "cuMemsetD2D32"], [5, 3, 1, "", "cuMemsetD2D32Async"], [5, 3, 1, "", "cuMemsetD2D8"], [5, 3, 1, "", "cuMemsetD2D8Async"], [5, 3, 1, "", "cuMemsetD32"], [5, 3, 1, "", "cuMemsetD32Async"], [5, 3, 1, "", "cuMemsetD8"], [5, 3, 1, "", "cuMemsetD8Async"], [5, 3, 1, "", "cuMipmappedArrayCreate"], [5, 3, 1, "", "cuMipmappedArrayDestroy"], [5, 3, 1, "", "cuMipmappedArrayGetLevel"], [5, 3, 1, "", "cuMipmappedArrayGetMemoryRequirements"], [5, 3, 1, "", "cuMipmappedArrayGetSparseProperties"], [5, 3, 1, "", "cuModuleEnumerateFunctions"], [5, 3, 1, "", "cuModuleGetFunction"], [5, 3, 1, "", "cuModuleGetFunctionCount"], [5, 3, 1, "", "cuModuleGetGlobal"], [5, 3, 1, "", "cuModuleGetLoadingMode"], [5, 3, 1, "", "cuModuleLoad"], [5, 3, 1, "", "cuModuleLoadData"], [5, 3, 1, "", "cuModuleLoadDataEx"], [5, 3, 1, "", "cuModuleLoadFatBinary"], [5, 3, 1, "", "cuModuleUnload"], [5, 3, 1, "", "cuMulticastAddDevice"], [5, 3, 1, "", "cuMulticastBindAddr"], [5, 3, 1, "", "cuMulticastBindMem"], [5, 3, 1, "", "cuMulticastCreate"], [5, 3, 1, "", "cuMulticastGetGranularity"], [5, 3, 1, "", "cuMulticastUnbind"], [5, 3, 1, "", "cuOccupancyAvailableDynamicSMemPerBlock"], [5, 3, 1, "", "cuOccupancyMaxActiveBlocksPerMultiprocessor"], [5, 3, 1, "", "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags"], [5, 3, 1, "", "cuOccupancyMaxActiveClusters"], [5, 3, 1, "", "cuOccupancyMaxPotentialBlockSize"], [5, 3, 1, "", 
"cuOccupancyMaxPotentialBlockSizeWithFlags"], [5, 3, 1, "", "cuOccupancyMaxPotentialClusterSize"], [5, 3, 1, "", "cuPointerGetAttribute"], [5, 3, 1, "", "cuPointerGetAttributes"], [5, 3, 1, "", "cuPointerSetAttribute"], [5, 3, 1, "", "cuProfilerStart"], [5, 3, 1, "", "cuProfilerStop"], [5, 3, 1, "", "cuSignalExternalSemaphoresAsync"], [5, 3, 1, "", "cuStreamAddCallback"], [5, 3, 1, "", "cuStreamAttachMemAsync"], [5, 3, 1, "", "cuStreamBatchMemOp"], [5, 3, 1, "", "cuStreamBeginCapture"], [5, 3, 1, "", "cuStreamBeginCaptureToGraph"], [5, 3, 1, "", "cuStreamCopyAttributes"], [5, 3, 1, "", "cuStreamCreate"], [5, 3, 1, "", "cuStreamCreateWithPriority"], [5, 3, 1, "", "cuStreamDestroy"], [5, 3, 1, "", "cuStreamEndCapture"], [5, 3, 1, "", "cuStreamGetAttribute"], [5, 3, 1, "", "cuStreamGetCaptureInfo"], [5, 3, 1, "", "cuStreamGetCaptureInfo_v3"], [5, 3, 1, "", "cuStreamGetCtx"], [5, 3, 1, "", "cuStreamGetCtx_v2"], [5, 3, 1, "", "cuStreamGetFlags"], [5, 3, 1, "", "cuStreamGetGreenCtx"], [5, 3, 1, "", "cuStreamGetId"], [5, 3, 1, "", "cuStreamGetPriority"], [5, 3, 1, "", "cuStreamIsCapturing"], [5, 3, 1, "", "cuStreamQuery"], [5, 3, 1, "", "cuStreamSetAttribute"], [5, 3, 1, "", "cuStreamSynchronize"], [5, 3, 1, "", "cuStreamUpdateCaptureDependencies"], [5, 3, 1, "", "cuStreamUpdateCaptureDependencies_v2"], [5, 3, 1, "", "cuStreamWaitEvent"], [5, 3, 1, "", "cuStreamWaitValue32"], [5, 3, 1, "", "cuStreamWaitValue64"], [5, 3, 1, "", "cuStreamWriteValue32"], [5, 3, 1, "", "cuStreamWriteValue64"], [5, 3, 1, "", "cuSurfObjectCreate"], [5, 3, 1, "", "cuSurfObjectDestroy"], [5, 3, 1, "", "cuSurfObjectGetResourceDesc"], [5, 3, 1, "", "cuTensorMapEncodeIm2col"], [5, 3, 1, "", "cuTensorMapEncodeTiled"], [5, 3, 1, "", "cuTensorMapReplaceAddress"], [5, 3, 1, "", "cuTexObjectCreate"], [5, 3, 1, "", "cuTexObjectDestroy"], [5, 3, 1, "", "cuTexObjectGetResourceDesc"], [5, 3, 1, "", "cuTexObjectGetResourceViewDesc"], [5, 3, 1, "", "cuTexObjectGetTextureDesc"], [5, 3, 1, "", "cuThreadExchangeStreamCaptureMode"], [5, 3, 1, "", "cuUserObjectCreate"], [5, 3, 1, "", "cuUserObjectRelease"], [5, 3, 1, "", "cuUserObjectRetain"], [5, 3, 1, "", "cuVDPAUCtxCreate"], [5, 3, 1, "", "cuVDPAUGetDevice"], [5, 3, 1, "", "cuWaitExternalSemaphoresAsync"]], "cuda.cuda.CUCoredumpGenerationFlags": [[5, 1, 1, "", "CU_COREDUMP_DEFAULT_FLAGS"], [5, 1, 1, "", "CU_COREDUMP_LIGHTWEIGHT_FLAGS"], [5, 1, 1, "", "CU_COREDUMP_SKIP_ABORT"], [5, 1, 1, "", "CU_COREDUMP_SKIP_CONSTBANK_MEMORY"], [5, 1, 1, "", "CU_COREDUMP_SKIP_GLOBAL_MEMORY"], [5, 1, 1, "", "CU_COREDUMP_SKIP_LOCAL_MEMORY"], [5, 1, 1, "", "CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES"], [5, 1, 1, "", "CU_COREDUMP_SKIP_SHARED_MEMORY"]], "cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Flags"], [5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_st": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Flags"], [5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_v2": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Flags"], [5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_ARRAY_DESCRIPTOR": [[5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_ARRAY_DESCRIPTOR_st": [[5, 1, 1, "", 
"Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_ARRAY_DESCRIPTOR_v2": [[5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS": [[5, 1, 1, "", "alignment"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_st": [[5, 1, 1, "", "alignment"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1": [[5, 1, 1, "", "alignment"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "miptailFirstLevel"], [5, 1, 1, "", "miptailSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "tileExtent"]], "cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "miptailFirstLevel"], [5, 1, 1, "", "miptailSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "tileExtent"]], "cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "miptailFirstLevel"], [5, 1, 1, "", "miptailSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "tileExtent"]], "cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"]], "cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"]], "cuda.cuda.CUDA_CONDITIONAL_NODE_PARAMS": [[5, 1, 1, "id13", "ctx"], [5, 2, 1, "id14", "getPtr"], [5, 1, 1, "id9", "handle"], [5, 1, 1, "id12", "phGraph_out"], [5, 1, 1, "id11", "size"], [5, 1, 1, "id10", "type"]], "cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS_st": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS_st": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", 
"size"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"], [5, 1, 1, "", "type"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"], [5, 1, 1, "", "type"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"], [5, 1, 1, "", "type"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC": [[5, 1, 1, "", "arrayDesc"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numLevels"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st": [[5, 1, 1, "", "arrayDesc"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numLevels"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1": [[5, 1, 1, "", "arrayDesc"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numLevels"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "type"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "type"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "type"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", 
"getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hErrNode_out"], [5, 1, 1, "", "hUploadStream"], [5, 1, 1, "", "result_out"]], "cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hErrNode_out"], [5, 1, 1, "", "hUploadStream"], [5, 1, 1, "", "result_out"]], "cuda.cuda.CUDA_HOST_NODE_PARAMS": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.cuda.CUDA_HOST_NODE_PARAMS_st": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.cuda.CUDA_HOST_NODE_PARAMS_v1": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.cuda.CUDA_HOST_NODE_PARAMS_v2": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.cuda.CUDA_HOST_NODE_PARAMS_v2_st": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.cuda.CUDA_KERNEL_NODE_PARAMS": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_KERNEL_NODE_PARAMS_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v1": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3": 
[[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_LAUNCH_PARAMS": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "function"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_LAUNCH_PARAMS_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "function"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_LAUNCH_PARAMS_v1": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "function"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUDA_MEMCPY2D": [[5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"]], "cuda.cuda.CUDA_MEMCPY2D_st": [[5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"]], "cuda.cuda.CUDA_MEMCPY2D_v2": [[5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"]], "cuda.cuda.CUDA_MEMCPY3D": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", 
"dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.cuda.CUDA_MEMCPY3D_PEER": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstContext"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcContext"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.cuda.CUDA_MEMCPY3D_PEER_st": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstContext"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcContext"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.cuda.CUDA_MEMCPY3D_PEER_v1": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstContext"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcContext"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.cuda.CUDA_MEMCPY3D_st": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.cuda.CUDA_MEMCPY3D_v2": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 
1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.cuda.CUDA_MEMCPY_NODE_PARAMS": [[5, 1, 1, "", "copyCtx"], [5, 1, 1, "", "copyParams"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_MEMCPY_NODE_PARAMS_st": [[5, 1, 1, "", "copyCtx"], [5, 1, 1, "", "copyParams"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_MEMSET_NODE_PARAMS": [[5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_MEMSET_NODE_PARAMS_st": [[5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v1": [[5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2": [[5, 1, 1, "", "ctx"], [5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2_st": [[5, 1, 1, "", "ctx"], [5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS": [[5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS_st": [[5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS": [[5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE"]], "cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS": [[5, 2, 1, "", 
"getPtr"], [5, 1, 1, "", "p2pToken"], [5, 1, 1, "", "vaSpaceToken"]], "cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "p2pToken"], [5, 1, 1, "", "vaSpaceToken"]], "cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "p2pToken"], [5, 1, 1, "", "vaSpaceToken"]], "cuda.cuda.CUDA_RESOURCE_DESC": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "res"], [5, 1, 1, "", "resType"]], "cuda.cuda.CUDA_RESOURCE_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "res"], [5, 1, 1, "", "resType"]], "cuda.cuda.CUDA_RESOURCE_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "res"], [5, 1, 1, "", "resType"]], "cuda.cuda.CUDA_RESOURCE_VIEW_DESC": [[5, 1, 1, "", "depth"], [5, 1, 1, "", "firstLayer"], [5, 1, 1, "", "firstMipmapLevel"], [5, 1, 1, "", "format"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "lastLayer"], [5, 1, 1, "", "lastMipmapLevel"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_RESOURCE_VIEW_DESC_st": [[5, 1, 1, "", "depth"], [5, 1, 1, "", "firstLayer"], [5, 1, 1, "", "firstMipmapLevel"], [5, 1, 1, "", "format"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "lastLayer"], [5, 1, 1, "", "lastMipmapLevel"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_RESOURCE_VIEW_DESC_v1": [[5, 1, 1, "", "depth"], [5, 1, 1, "", "firstLayer"], [5, 1, 1, "", "firstMipmapLevel"], [5, 1, 1, "", "format"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "lastLayer"], [5, 1, 1, "", "lastMipmapLevel"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "width"]], "cuda.cuda.CUDA_TEXTURE_DESC": [[5, 1, 1, "", "addressMode"], [5, 1, 1, "", "borderColor"], [5, 1, 1, "", "filterMode"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxAnisotropy"], [5, 1, 1, "", "maxMipmapLevelClamp"], [5, 1, 1, "", "minMipmapLevelClamp"], [5, 1, 1, "", "mipmapFilterMode"], [5, 1, 1, "", "mipmapLevelBias"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_TEXTURE_DESC_st": [[5, 1, 1, "", "addressMode"], [5, 1, 1, "", "borderColor"], [5, 1, 1, "", "filterMode"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxAnisotropy"], [5, 1, 1, "", "maxMipmapLevelClamp"], [5, 1, 1, "", "minMipmapLevelClamp"], [5, 1, 1, "", "mipmapFilterMode"], [5, 1, 1, "", "mipmapLevelBias"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUDA_TEXTURE_DESC_v1": [[5, 1, 1, "", "addressMode"], [5, 1, 1, "", "borderColor"], [5, 1, 1, "", "filterMode"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxAnisotropy"], [5, 1, 1, "", "maxMipmapLevelClamp"], [5, 1, 1, "", "minMipmapLevelClamp"], [5, 1, 1, "", "mipmapFilterMode"], [5, 1, 1, "", "mipmapLevelBias"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUGLDeviceList": [[5, 1, 1, "", "CU_GL_DEVICE_LIST_ALL"], [5, 1, 1, "", "CU_GL_DEVICE_LIST_CURRENT_FRAME"], [5, 1, 1, "", "CU_GL_DEVICE_LIST_NEXT_FRAME"]], "cuda.cuda.CUGPUDirectRDMAWritesOrdering": [[5, 1, 1, "", "CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES"], [5, 1, 1, "", "CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE"], [5, 1, 1, "", "CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER"]], "cuda.cuda.CUaccessPolicyWindow": [[5, 1, 1, "", "base_ptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hitProp"], [5, 1, 1, "", "hitRatio"], [5, 1, 1, "", "missProp"], [5, 1, 1, "", "num_bytes"]], "cuda.cuda.CUaccessPolicyWindow_st": [[5, 1, 1, "", "base_ptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hitProp"], [5, 1, 1, "", "hitRatio"], 
[5, 1, 1, "", "missProp"], [5, 1, 1, "", "num_bytes"]], "cuda.cuda.CUaccessPolicyWindow_v1": [[5, 1, 1, "", "base_ptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hitProp"], [5, 1, 1, "", "hitRatio"], [5, 1, 1, "", "missProp"], [5, 1, 1, "", "num_bytes"]], "cuda.cuda.CUaccessProperty": [[5, 1, 1, "", "CU_ACCESS_PROPERTY_NORMAL"], [5, 1, 1, "", "CU_ACCESS_PROPERTY_PERSISTING"], [5, 1, 1, "", "CU_ACCESS_PROPERTY_STREAMING"]], "cuda.cuda.CUaddress_mode": [[5, 1, 1, "", "CU_TR_ADDRESS_MODE_BORDER"], [5, 1, 1, "", "CU_TR_ADDRESS_MODE_CLAMP"], [5, 1, 1, "", "CU_TR_ADDRESS_MODE_MIRROR"], [5, 1, 1, "", "CU_TR_ADDRESS_MODE_WRAP"]], "cuda.cuda.CUarray": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUarrayMapInfo": [[5, 1, 1, "", "deviceBitMask"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memHandle"], [5, 1, 1, "", "memHandleType"], [5, 1, 1, "", "memOperationType"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "resource"], [5, 1, 1, "", "resourceType"], [5, 1, 1, "", "subresource"], [5, 1, 1, "", "subresourceType"]], "cuda.cuda.CUarrayMapInfo_st": [[5, 1, 1, "", "deviceBitMask"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memHandle"], [5, 1, 1, "", "memHandleType"], [5, 1, 1, "", "memOperationType"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "resource"], [5, 1, 1, "", "resourceType"], [5, 1, 1, "", "subresource"], [5, 1, 1, "", "subresourceType"]], "cuda.cuda.CUarrayMapInfo_v1": [[5, 1, 1, "", "deviceBitMask"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memHandle"], [5, 1, 1, "", "memHandleType"], [5, 1, 1, "", "memOperationType"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "resource"], [5, 1, 1, "", "resourceType"], [5, 1, 1, "", "subresource"], [5, 1, 1, "", "subresourceType"]], "cuda.cuda.CUarraySparseSubresourceType": [[5, 1, 1, "", "CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL"], [5, 1, 1, "", "CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL"]], "cuda.cuda.CUarray_cubemap_face": [[5, 1, 1, "", "CU_CUBEMAP_FACE_NEGATIVE_X"], [5, 1, 1, "", "CU_CUBEMAP_FACE_NEGATIVE_Y"], [5, 1, 1, "", "CU_CUBEMAP_FACE_NEGATIVE_Z"], [5, 1, 1, "", "CU_CUBEMAP_FACE_POSITIVE_X"], [5, 1, 1, "", "CU_CUBEMAP_FACE_POSITIVE_Y"], [5, 1, 1, "", "CU_CUBEMAP_FACE_POSITIVE_Z"]], "cuda.cuda.CUarray_format": [[5, 1, 1, "", "CU_AD_FORMAT_AYUV"], [5, 1, 1, "", "CU_AD_FORMAT_BC1_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC1_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_BC2_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC2_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_BC3_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC3_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_BC4_SNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC4_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC5_SNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC5_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC6H_SF16"], [5, 1, 1, "", "CU_AD_FORMAT_BC6H_UF16"], [5, 1, 1, "", "CU_AD_FORMAT_BC7_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC7_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_FLOAT"], [5, 1, 1, "", "CU_AD_FORMAT_HALF"], [5, 1, 1, "", "CU_AD_FORMAT_MAX"], [5, 1, 1, "", "CU_AD_FORMAT_NV12"], [5, 1, 1, "", "CU_AD_FORMAT_NV16"], [5, 1, 1, "", "CU_AD_FORMAT_P010"], [5, 1, 1, "", "CU_AD_FORMAT_P016"], [5, 1, 1, "", "CU_AD_FORMAT_P210"], [5, 1, 1, "", "CU_AD_FORMAT_P216"], [5, 1, 1, "", "CU_AD_FORMAT_SIGNED_INT16"], [5, 1, 1, "", "CU_AD_FORMAT_SIGNED_INT32"], [5, 1, 1, "", "CU_AD_FORMAT_SIGNED_INT8"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT16X1"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT16X2"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT16X4"], [5, 1, 1, 
"", "CU_AD_FORMAT_SNORM_INT8X1"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT8X2"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT8X4"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT16X1"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT16X2"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT16X4"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT8X1"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT8X2"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT8X4"], [5, 1, 1, "", "CU_AD_FORMAT_UNSIGNED_INT16"], [5, 1, 1, "", "CU_AD_FORMAT_UNSIGNED_INT32"], [5, 1, 1, "", "CU_AD_FORMAT_UNSIGNED_INT8"], [5, 1, 1, "", "CU_AD_FORMAT_Y210"], [5, 1, 1, "", "CU_AD_FORMAT_Y216"], [5, 1, 1, "", "CU_AD_FORMAT_Y410"], [5, 1, 1, "", "CU_AD_FORMAT_Y416"], [5, 1, 1, "", "CU_AD_FORMAT_Y444_PLANAR10"], [5, 1, 1, "", "CU_AD_FORMAT_Y444_PLANAR8"], [5, 1, 1, "", "CU_AD_FORMAT_YUY2"]], "cuda.cuda.CUasyncCallback": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUasyncCallbackHandle": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUasyncNotificationInfo": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "info"], [5, 1, 1, "", "type"]], "cuda.cuda.CUasyncNotificationInfo_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "info"], [5, 1, 1, "", "type"]], "cuda.cuda.CUasyncNotificationType": [[5, 1, 1, "", "CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET"]], "cuda.cuda.CUcigDataType": [[5, 1, 1, "", "CIG_DATA_TYPE_D3D12_COMMAND_QUEUE"]], "cuda.cuda.CUclusterSchedulingPolicy": [[5, 1, 1, "", "CU_CLUSTER_SCHEDULING_POLICY_DEFAULT"], [5, 1, 1, "", "CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING"], [5, 1, 1, "", "CU_CLUSTER_SCHEDULING_POLICY_SPREAD"]], "cuda.cuda.CUcomputemode": [[5, 1, 1, "", "CU_COMPUTEMODE_DEFAULT"], [5, 1, 1, "", "CU_COMPUTEMODE_EXCLUSIVE_PROCESS"], [5, 1, 1, "", "CU_COMPUTEMODE_PROHIBITED"]], "cuda.cuda.CUcontext": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUcoredumpSettings": [[5, 1, 1, "", "CU_COREDUMP_ENABLE_ON_EXCEPTION"], [5, 1, 1, "", "CU_COREDUMP_ENABLE_USER_TRIGGER"], [5, 1, 1, "", "CU_COREDUMP_FILE"], [5, 1, 1, "", "CU_COREDUMP_GENERATION_FLAGS"], [5, 1, 1, "", "CU_COREDUMP_LIGHTWEIGHT"], [5, 1, 1, "", "CU_COREDUMP_MAX"], [5, 1, 1, "", "CU_COREDUMP_PIPE"], [5, 1, 1, "", "CU_COREDUMP_TRIGGER_HOST"]], "cuda.cuda.CUctxCigParam": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sharedData"], [5, 1, 1, "", "sharedDataType"]], "cuda.cuda.CUctxCigParam_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sharedData"], [5, 1, 1, "", "sharedDataType"]], "cuda.cuda.CUctxCreateParams": [[5, 1, 1, "", "cigParams"], [5, 1, 1, "", "execAffinityParams"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExecAffinityParams"]], "cuda.cuda.CUctxCreateParams_st": [[5, 1, 1, "", "cigParams"], [5, 1, 1, "", "execAffinityParams"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExecAffinityParams"]], "cuda.cuda.CUctx_flags": [[5, 1, 1, "", "CU_CTX_BLOCKING_SYNC"], [5, 1, 1, "", "CU_CTX_COREDUMP_ENABLE"], [5, 1, 1, "", "CU_CTX_FLAGS_MASK"], [5, 1, 1, "", "CU_CTX_LMEM_RESIZE_TO_MAX"], [5, 1, 1, "", "CU_CTX_MAP_HOST"], [5, 1, 1, "", "CU_CTX_SCHED_AUTO"], [5, 1, 1, "", "CU_CTX_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "CU_CTX_SCHED_MASK"], [5, 1, 1, "", "CU_CTX_SCHED_SPIN"], [5, 1, 1, "", "CU_CTX_SCHED_YIELD"], [5, 1, 1, "", "CU_CTX_SYNC_MEMOPS"], [5, 1, 1, "", "CU_CTX_USER_COREDUMP_ENABLE"]], "cuda.cuda.CUdevResource": [[5, 1, 1, "", "_internal_padding"], [5, 1, 1, "", "_oversize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sm"], [5, 1, 1, "", "type"]], "cuda.cuda.CUdevResourceDesc": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUdevResourceType": [[5, 1, 1, "", "CU_DEV_RESOURCE_TYPE_INVALID"], [5, 1, 1, "", "CU_DEV_RESOURCE_TYPE_SM"]], "cuda.cuda.CUdevResource_st": [[5, 1, 1, 
"", "_internal_padding"], [5, 1, 1, "", "_oversize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sm"], [5, 1, 1, "", "type"]], "cuda.cuda.CUdevSmResource": [[5, 2, 1, "id75", "getPtr"], [5, 1, 1, "id74", "smCount"]], "cuda.cuda.CUdevSmResourceSplit_flags": [[5, 1, 1, "", "CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING"], [5, 1, 1, "", "CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE"]], "cuda.cuda.CUdevSmResource_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "smCount"]], "cuda.cuda.CUdevice": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUdeviceNumaConfig": [[5, 1, 1, "", "CU_DEVICE_NUMA_CONFIG_NONE"], [5, 1, 1, "", "CU_DEVICE_NUMA_CONFIG_NUMA_NODE"]], "cuda.cuda.CUdevice_P2PAttribute": [[5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK"]], "cuda.cuda.CUdevice_attribute": [[5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CLOCK_RATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_MODE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_ECC_ENABLED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_OVERLAP"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID"], 
[5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_INTEGRATED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR"], [5, 1, 1, "", 
"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_PITCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MPS_ENABLED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_NUMA_CONFIG"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_NUMA_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PCI_BUS_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TCC_DRIVER"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_WARP_SIZE"]], "cuda.cuda.CUdevice_v1": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUdeviceptr": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUdeviceptr_v2": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUdevprop": [[5, 1, 1, "", "SIMDWidth"], [5, 1, 1, "", "clockRate"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxGridSize"], [5, 1, 1, "", "maxThreadsDim"], [5, 1, 1, "", "maxThreadsPerBlock"], [5, 1, 1, "", "memPitch"], [5, 1, 1, "", "regsPerBlock"], [5, 1, 1, "", "sharedMemPerBlock"], [5, 1, 1, "", "textureAlign"], [5, 1, 1, "", 
"totalConstantMemory"]], "cuda.cuda.CUdevprop_st": [[5, 1, 1, "", "SIMDWidth"], [5, 1, 1, "", "clockRate"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxGridSize"], [5, 1, 1, "", "maxThreadsDim"], [5, 1, 1, "", "maxThreadsPerBlock"], [5, 1, 1, "", "memPitch"], [5, 1, 1, "", "regsPerBlock"], [5, 1, 1, "", "sharedMemPerBlock"], [5, 1, 1, "", "textureAlign"], [5, 1, 1, "", "totalConstantMemory"]], "cuda.cuda.CUdevprop_v1": [[5, 1, 1, "", "SIMDWidth"], [5, 1, 1, "", "clockRate"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxGridSize"], [5, 1, 1, "", "maxThreadsDim"], [5, 1, 1, "", "maxThreadsPerBlock"], [5, 1, 1, "", "memPitch"], [5, 1, 1, "", "regsPerBlock"], [5, 1, 1, "", "sharedMemPerBlock"], [5, 1, 1, "", "textureAlign"], [5, 1, 1, "", "totalConstantMemory"]], "cuda.cuda.CUdriverProcAddressQueryResult": [[5, 1, 1, "", "CU_GET_PROC_ADDRESS_SUCCESS"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT"]], "cuda.cuda.CUdriverProcAddress_flags": [[5, 1, 1, "", "CU_GET_PROC_ADDRESS_DEFAULT"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_LEGACY_STREAM"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM"]], "cuda.cuda.CUeglColorFormat": [[5, 1, 1, "", "CU_EGL_COLOR_FORMAT_A"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_ABGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_ARGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_AYUV"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_AYUV_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_CCCC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_BCCR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_CBRC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_CCCC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_CRBC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_RCCB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_BCCR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_CBRC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_CRBC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_RCCB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BGRA"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_L"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_MAX"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_R"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_RG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_RGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_RGBA"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_UYVY_422"], [5, 1, 1, "", 
"CU_EGL_COLOR_FORMAT_UYVY_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_VYUY"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_VYUY_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUVA"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUVA_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUYV_422"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUYV_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU422_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER"], [5, 1, 
1, "", "CU_EGL_COLOR_FORMAT_YVYU"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVYU_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y_ER"]], "cuda.cuda.CUeglFrame": [[5, 1, 1, "", "cuFormat"], [5, 1, 1, "", "depth"], [5, 1, 1, "", "eglColorFormat"], [5, 1, 1, "", "frame"], [5, 1, 1, "", "frameType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "numChannels"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "planeCount"], [5, 1, 1, "", "width"]], "cuda.cuda.CUeglFrameType": [[5, 1, 1, "", "CU_EGL_FRAME_TYPE_ARRAY"], [5, 1, 1, "", "CU_EGL_FRAME_TYPE_PITCH"]], "cuda.cuda.CUeglFrame_st": [[5, 1, 1, "", "cuFormat"], [5, 1, 1, "", "depth"], [5, 1, 1, "", "eglColorFormat"], [5, 1, 1, "", "frame"], [5, 1, 1, "", "frameType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "numChannels"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "planeCount"], [5, 1, 1, "", "width"]], "cuda.cuda.CUeglFrame_v1": [[5, 1, 1, "", "cuFormat"], [5, 1, 1, "", "depth"], [5, 1, 1, "", "eglColorFormat"], [5, 1, 1, "", "frame"], [5, 1, 1, "", "frameType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "numChannels"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "planeCount"], [5, 1, 1, "", "width"]], "cuda.cuda.CUeglResourceLocationFlags": [[5, 1, 1, "", "CU_EGL_RESOURCE_LOCATION_SYSMEM"], [5, 1, 1, "", "CU_EGL_RESOURCE_LOCATION_VIDMEM"]], "cuda.cuda.CUeglStreamConnection": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUevent": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUevent_flags": [[5, 1, 1, "", "CU_EVENT_BLOCKING_SYNC"], [5, 1, 1, "", "CU_EVENT_DEFAULT"], [5, 1, 1, "", "CU_EVENT_DISABLE_TIMING"], [5, 1, 1, "", "CU_EVENT_INTERPROCESS"]], "cuda.cuda.CUevent_record_flags": [[5, 1, 1, "", "CU_EVENT_RECORD_DEFAULT"], [5, 1, 1, "", "CU_EVENT_RECORD_EXTERNAL"]], "cuda.cuda.CUevent_sched_flags": [[5, 1, 1, "", "CU_EVENT_SCHED_AUTO"], [5, 1, 1, "", "CU_EVENT_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "CU_EVENT_SCHED_SPIN"], [5, 1, 1, "", "CU_EVENT_SCHED_YIELD"]], "cuda.cuda.CUevent_wait_flags": [[5, 1, 1, "", "CU_EVENT_WAIT_DEFAULT"], [5, 1, 1, "", "CU_EVENT_WAIT_EXTERNAL"]], "cuda.cuda.CUexecAffinityParam": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "param"], [5, 1, 1, "", "type"]], "cuda.cuda.CUexecAffinityParam_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "param"], [5, 1, 1, "", "type"]], "cuda.cuda.CUexecAffinityParam_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "param"], [5, 1, 1, "", "type"]], "cuda.cuda.CUexecAffinitySmCount": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "val"]], "cuda.cuda.CUexecAffinitySmCount_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "val"]], "cuda.cuda.CUexecAffinitySmCount_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "val"]], "cuda.cuda.CUexecAffinityType": [[5, 1, 1, "", "CU_EXEC_AFFINITY_TYPE_MAX"], [5, 1, 1, "", "CU_EXEC_AFFINITY_TYPE_SM_COUNT"]], "cuda.cuda.CUexternalMemory": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUexternalMemoryHandleType": [[5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT"]], "cuda.cuda.CUexternalSemaphore": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUexternalSemaphoreHandleType": [[5, 1, 1, "", 
"CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32"]], "cuda.cuda.CUfilter_mode": [[5, 1, 1, "", "CU_TR_FILTER_MODE_LINEAR"], [5, 1, 1, "", "CU_TR_FILTER_MODE_POINT"]], "cuda.cuda.CUflushGPUDirectRDMAWritesOptions": [[5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST"], [5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS"]], "cuda.cuda.CUflushGPUDirectRDMAWritesScope": [[5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES"], [5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER"]], "cuda.cuda.CUflushGPUDirectRDMAWritesTarget": [[5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX"]], "cuda.cuda.CUfunc_cache": [[5, 1, 1, "", "CU_FUNC_CACHE_PREFER_EQUAL"], [5, 1, 1, "", "CU_FUNC_CACHE_PREFER_L1"], [5, 1, 1, "", "CU_FUNC_CACHE_PREFER_NONE"], [5, 1, 1, "", "CU_FUNC_CACHE_PREFER_SHARED"]], "cuda.cuda.CUfunction": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUfunctionLoadingState": [[5, 1, 1, "", "CU_FUNCTION_LOADING_STATE_LOADED"], [5, 1, 1, "", "CU_FUNCTION_LOADING_STATE_MAX"], [5, 1, 1, "", "CU_FUNCTION_LOADING_STATE_UNLOADED"]], "cuda.cuda.CUfunction_attribute": [[5, 1, 1, "", "CU_FUNC_ATTRIBUTE_BINARY_VERSION"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CACHE_MODE_CA"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_MAX"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_NUM_REGS"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_PTX_VERSION"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES"]], "cuda.cuda.CUgraph": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUgraphConditionalHandle": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUgraphConditionalNodeType": [[5, 1, 1, "", "CU_GRAPH_COND_TYPE_IF"], [5, 1, 1, "", "CU_GRAPH_COND_TYPE_WHILE"]], "cuda.cuda.CUgraphDebugDot_flags": [[5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES"], [5, 1, 1, 
"", "CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE"]], "cuda.cuda.CUgraphDependencyType": [[5, 1, 1, "", "CU_GRAPH_DEPENDENCY_TYPE_DEFAULT"], [5, 1, 1, "", "CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC"]], "cuda.cuda.CUgraphDeviceNode": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUgraphEdgeData": [[5, 1, 1, "", "from_port"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "to_port"], [5, 1, 1, "", "type"]], "cuda.cuda.CUgraphEdgeData_st": [[5, 1, 1, "", "from_port"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "to_port"], [5, 1, 1, "", "type"]], "cuda.cuda.CUgraphExec": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUgraphExecUpdateResult": [[5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_SUCCESS"]], "cuda.cuda.CUgraphExecUpdateResultInfo": [[5, 1, 1, "", "errorFromNode"], [5, 1, 1, "", "errorNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "result"]], "cuda.cuda.CUgraphExecUpdateResultInfo_st": [[5, 1, 1, "", "errorFromNode"], [5, 1, 1, "", "errorNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "result"]], "cuda.cuda.CUgraphExecUpdateResultInfo_v1": [[5, 1, 1, "", "errorFromNode"], [5, 1, 1, "", "errorNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "result"]], "cuda.cuda.CUgraphInstantiateResult": [[5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_ERROR"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_SUCCESS"]], "cuda.cuda.CUgraphInstantiate_flags": [[5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY"]], "cuda.cuda.CUgraphMem_attribute": [[5, 1, 1, "", "CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT"], [5, 1, 1, "", "CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH"], [5, 1, 1, "", "CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT"], [5, 1, 1, "", "CU_GRAPH_MEM_ATTR_USED_MEM_HIGH"]], "cuda.cuda.CUgraphNode": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUgraphNodeParams": [[5, 1, 1, "", "alloc"], [5, 1, 1, "", "conditional"], [5, 1, 1, "", "eventRecord"], [5, 1, 1, "", "eventWait"], [5, 1, 1, "", "extSemSignal"], [5, 1, 1, "", "extSemWait"], [5, 1, 1, "", "free"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"], [5, 1, 1, "", "host"], [5, 1, 1, "", "kernel"], [5, 1, 1, "", "memOp"], [5, 1, 1, "", "memcpy"], [5, 1, 1, "", "memset"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "reserved2"], [5, 1, 1, "", "type"]], "cuda.cuda.CUgraphNodeParams_st": [[5, 1, 1, "", "alloc"], [5, 1, 1, "", "conditional"], [5, 1, 
1, "", "eventRecord"], [5, 1, 1, "", "eventWait"], [5, 1, 1, "", "extSemSignal"], [5, 1, 1, "", "extSemWait"], [5, 1, 1, "", "free"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"], [5, 1, 1, "", "host"], [5, 1, 1, "", "kernel"], [5, 1, 1, "", "memOp"], [5, 1, 1, "", "memcpy"], [5, 1, 1, "", "memset"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "reserved2"], [5, 1, 1, "", "type"]], "cuda.cuda.CUgraphNodeType": [[5, 1, 1, "", "CU_GRAPH_NODE_TYPE_BATCH_MEM_OP"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_CONDITIONAL"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EMPTY"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EVENT_RECORD"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_GRAPH"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_HOST"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_KERNEL"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEMCPY"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEMSET"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEM_ALLOC"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEM_FREE"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_WAIT_EVENT"]], "cuda.cuda.CUgraphicsMapResourceFlags": [[5, 1, 1, "", "CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE"], [5, 1, 1, "", "CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY"], [5, 1, 1, "", "CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD"]], "cuda.cuda.CUgraphicsRegisterFlags": [[5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_NONE"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD"]], "cuda.cuda.CUgraphicsResource": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUgreenCtx": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUgreenCtxCreate_flags": [[5, 1, 1, "", "CU_GREEN_CTX_DEFAULT_STREAM"]], "cuda.cuda.CUhostFn": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUipcEventHandle": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUipcEventHandle_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUipcEventHandle_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUipcMemHandle": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUipcMemHandle_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUipcMemHandle_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUipcMem_flags": [[5, 1, 1, "", "CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS"]], "cuda.cuda.CUjitInputType": [[5, 1, 1, "", "CU_JIT_INPUT_CUBIN"], [5, 1, 1, "", "CU_JIT_INPUT_FATBINARY"], [5, 1, 1, "", "CU_JIT_INPUT_LIBRARY"], [5, 1, 1, "", "CU_JIT_INPUT_NVVM"], [5, 1, 1, "", "CU_JIT_INPUT_OBJECT"], [5, 1, 1, "", "CU_JIT_INPUT_PTX"], [5, 1, 1, "", "CU_JIT_NUM_INPUT_TYPES"]], "cuda.cuda.CUjit_cacheMode": [[5, 1, 1, "", "CU_JIT_CACHE_OPTION_CA"], [5, 1, 1, "", "CU_JIT_CACHE_OPTION_CG"], [5, 1, 1, "", "CU_JIT_CACHE_OPTION_NONE"]], "cuda.cuda.CUjit_fallback": [[5, 1, 1, "", "CU_PREFER_BINARY"], [5, 1, 1, "", "CU_PREFER_PTX"]], "cuda.cuda.CUjit_option": [[5, 1, 1, "", "CU_JIT_CACHE_MODE"], [5, 1, 1, "", "CU_JIT_ERROR_LOG_BUFFER"], [5, 1, 1, "", "CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES"], [5, 1, 1, "", "CU_JIT_FALLBACK_STRATEGY"], [5, 1, 1, "", "CU_JIT_FAST_COMPILE"], [5, 1, 1, "", "CU_JIT_FMA"], [5, 1, 1, "", "CU_JIT_FTZ"], [5, 1, 1, "", "CU_JIT_GENERATE_DEBUG_INFO"], [5, 1, 1, "", "CU_JIT_GENERATE_LINE_INFO"], [5, 1, 1, "", "CU_JIT_GLOBAL_SYMBOL_ADDRESSES"], [5, 1, 1, "", "CU_JIT_GLOBAL_SYMBOL_COUNT"], [5, 1, 1, "", "CU_JIT_GLOBAL_SYMBOL_NAMES"], [5, 
1, 1, "", "CU_JIT_INFO_LOG_BUFFER"], [5, 1, 1, "", "CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES"], [5, 1, 1, "", "CU_JIT_LOG_VERBOSE"], [5, 1, 1, "", "CU_JIT_LTO"], [5, 1, 1, "", "CU_JIT_MAX_REGISTERS"], [5, 1, 1, "", "CU_JIT_MAX_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_JIT_MIN_CTA_PER_SM"], [5, 1, 1, "", "CU_JIT_NEW_SM3X_OPT"], [5, 1, 1, "", "CU_JIT_NUM_OPTIONS"], [5, 1, 1, "", "CU_JIT_OPTIMIZATION_LEVEL"], [5, 1, 1, "", "CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES"], [5, 1, 1, "", "CU_JIT_OVERRIDE_DIRECTIVE_VALUES"], [5, 1, 1, "", "CU_JIT_POSITION_INDEPENDENT_CODE"], [5, 1, 1, "", "CU_JIT_PREC_DIV"], [5, 1, 1, "", "CU_JIT_PREC_SQRT"], [5, 1, 1, "", "CU_JIT_REFERENCED_KERNEL_COUNT"], [5, 1, 1, "", "CU_JIT_REFERENCED_KERNEL_NAMES"], [5, 1, 1, "", "CU_JIT_REFERENCED_VARIABLE_COUNT"], [5, 1, 1, "", "CU_JIT_REFERENCED_VARIABLE_NAMES"], [5, 1, 1, "", "CU_JIT_TARGET"], [5, 1, 1, "", "CU_JIT_TARGET_FROM_CUCONTEXT"], [5, 1, 1, "", "CU_JIT_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_JIT_WALL_TIME"]], "cuda.cuda.CUjit_target": [[5, 1, 1, "", "CU_TARGET_COMPUTE_30"], [5, 1, 1, "", "CU_TARGET_COMPUTE_32"], [5, 1, 1, "", "CU_TARGET_COMPUTE_35"], [5, 1, 1, "", "CU_TARGET_COMPUTE_37"], [5, 1, 1, "", "CU_TARGET_COMPUTE_50"], [5, 1, 1, "", "CU_TARGET_COMPUTE_52"], [5, 1, 1, "", "CU_TARGET_COMPUTE_53"], [5, 1, 1, "", "CU_TARGET_COMPUTE_60"], [5, 1, 1, "", "CU_TARGET_COMPUTE_61"], [5, 1, 1, "", "CU_TARGET_COMPUTE_62"], [5, 1, 1, "", "CU_TARGET_COMPUTE_70"], [5, 1, 1, "", "CU_TARGET_COMPUTE_72"], [5, 1, 1, "", "CU_TARGET_COMPUTE_75"], [5, 1, 1, "", "CU_TARGET_COMPUTE_80"], [5, 1, 1, "", "CU_TARGET_COMPUTE_86"], [5, 1, 1, "", "CU_TARGET_COMPUTE_87"], [5, 1, 1, "", "CU_TARGET_COMPUTE_89"], [5, 1, 1, "", "CU_TARGET_COMPUTE_90"], [5, 1, 1, "", "CU_TARGET_COMPUTE_90A"]], "cuda.cuda.CUkernel": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUkernelNodeAttrValue": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.cuda.CUkernelNodeAttrValue_v1": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.cuda.CUlaunchAttribute": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "value"]], "cuda.cuda.CUlaunchAttributeID": [[5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_COOPERATIVE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_IGNORE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN"], [5, 
1, 1, "", "CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PRIORITY"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY"]], "cuda.cuda.CUlaunchAttributeValue": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.cuda.CUlaunchAttributeValue_union": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.cuda.CUlaunchAttribute_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "value"]], "cuda.cuda.CUlaunchConfig": [[5, 1, 1, "", "attrs"], [5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "numAttrs"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUlaunchConfig_st": [[5, 1, 1, "", "attrs"], [5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "numAttrs"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.cuda.CUlaunchMemSyncDomain": [[5, 1, 1, "", "CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT"], [5, 1, 1, "", "CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE"]], "cuda.cuda.CUlaunchMemSyncDomainMap": [[5, 1, 1, "", "default_"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "remote"]], "cuda.cuda.CUlaunchMemSyncDomainMap_st": [[5, 1, 1, "", "default_"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "remote"]], "cuda.cuda.CUlibrary": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable": [[5, 1, 1, "", "dataTable"], [5, 1, 1, "", "dataWindowSize"], [5, 1, 1, "", "functionTable"], [5, 1, 1, "", "functionWindowSize"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable_st": [[5, 1, 1, "", "dataTable"], [5, 1, 1, "", "dataWindowSize"], [5, 1, 1, "", "functionTable"], [5, 1, 1, "", "functionWindowSize"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUlibraryOption": [[5, 1, 1, "", "CU_LIBRARY_BINARY_IS_PRESERVED"], [5, 1, 1, "", "CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE"], [5, 1, 1, "", "CU_LIBRARY_NUM_OPTIONS"]], "cuda.cuda.CUlimit": [[5, 1, 1, "", "CU_LIMIT_CIG_ENABLED"], [5, 1, 1, "", "CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED"], [5, 1, 1, "", "CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT"], [5, 1, 1, "", "CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH"], [5, 1, 
1, "", "CU_LIMIT_MALLOC_HEAP_SIZE"], [5, 1, 1, "", "CU_LIMIT_MAX"], [5, 1, 1, "", "CU_LIMIT_MAX_L2_FETCH_GRANULARITY"], [5, 1, 1, "", "CU_LIMIT_PERSISTING_L2_CACHE_SIZE"], [5, 1, 1, "", "CU_LIMIT_PRINTF_FIFO_SIZE"], [5, 1, 1, "", "CU_LIMIT_SHMEM_SIZE"], [5, 1, 1, "", "CU_LIMIT_STACK_SIZE"]], "cuda.cuda.CUlinkState": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmemAccessDesc": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"]], "cuda.cuda.CUmemAccessDesc_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"]], "cuda.cuda.CUmemAccessDesc_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"]], "cuda.cuda.CUmemAccess_flags": [[5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_MAX"], [5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_NONE"], [5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_READ"], [5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_READWRITE"]], "cuda.cuda.CUmemAllocationCompType": [[5, 1, 1, "", "CU_MEM_ALLOCATION_COMP_GENERIC"], [5, 1, 1, "", "CU_MEM_ALLOCATION_COMP_NONE"]], "cuda.cuda.CUmemAllocationGranularity_flags": [[5, 1, 1, "", "CU_MEM_ALLOC_GRANULARITY_MINIMUM"], [5, 1, 1, "", "CU_MEM_ALLOC_GRANULARITY_RECOMMENDED"]], "cuda.cuda.CUmemAllocationHandleType": [[5, 1, 1, "", "CU_MEM_HANDLE_TYPE_FABRIC"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_MAX"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_NONE"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_WIN32"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_WIN32_KMT"]], "cuda.cuda.CUmemAllocationProp": [[5, 1, 1, "", "allocFlags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"], [5, 1, 1, "", "requestedHandleTypes"], [5, 1, 1, "", "type"], [5, 1, 1, "", "win32HandleMetaData"]], "cuda.cuda.CUmemAllocationProp_st": [[5, 1, 1, "", "allocFlags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"], [5, 1, 1, "", "requestedHandleTypes"], [5, 1, 1, "", "type"], [5, 1, 1, "", "win32HandleMetaData"]], "cuda.cuda.CUmemAllocationProp_v1": [[5, 1, 1, "", "allocFlags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"], [5, 1, 1, "", "requestedHandleTypes"], [5, 1, 1, "", "type"], [5, 1, 1, "", "win32HandleMetaData"]], "cuda.cuda.CUmemAllocationType": [[5, 1, 1, "", "CU_MEM_ALLOCATION_TYPE_INVALID"], [5, 1, 1, "", "CU_MEM_ALLOCATION_TYPE_MAX"], [5, 1, 1, "", "CU_MEM_ALLOCATION_TYPE_PINNED"]], "cuda.cuda.CUmemAttach_flags": [[5, 1, 1, "", "CU_MEM_ATTACH_GLOBAL"], [5, 1, 1, "", "CU_MEM_ATTACH_HOST"], [5, 1, 1, "", "CU_MEM_ATTACH_SINGLE"]], "cuda.cuda.CUmemFabricHandle": [[5, 1, 1, "", "data"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmemFabricHandle_st": [[5, 1, 1, "", "data"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmemFabricHandle_v1": [[5, 1, 1, "", "data"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmemGenericAllocationHandle": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmemGenericAllocationHandle_v1": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmemHandleType": [[5, 1, 1, "", "CU_MEM_HANDLE_TYPE_GENERIC"]], "cuda.cuda.CUmemLocation": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "type"]], "cuda.cuda.CUmemLocationType": [[5, 1, 1, "", "CU_MEM_LOCATION_TYPE_DEVICE"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_HOST"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_HOST_NUMA"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_INVALID"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_MAX"]], "cuda.cuda.CUmemLocation_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "type"]], "cuda.cuda.CUmemLocation_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 
1, "", "type"]], "cuda.cuda.CUmemOperationType": [[5, 1, 1, "", "CU_MEM_OPERATION_TYPE_MAP"], [5, 1, 1, "", "CU_MEM_OPERATION_TYPE_UNMAP"]], "cuda.cuda.CUmemPoolProps": [[5, 1, 1, "", "allocType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "location"], [5, 1, 1, "", "maxSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "usage"], [5, 1, 1, "", "win32SecurityAttributes"]], "cuda.cuda.CUmemPoolProps_st": [[5, 1, 1, "", "allocType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "location"], [5, 1, 1, "", "maxSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "usage"], [5, 1, 1, "", "win32SecurityAttributes"]], "cuda.cuda.CUmemPoolProps_v1": [[5, 1, 1, "", "allocType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "location"], [5, 1, 1, "", "maxSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "usage"], [5, 1, 1, "", "win32SecurityAttributes"]], "cuda.cuda.CUmemPoolPtrExportData": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUmemPoolPtrExportData_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUmemPoolPtrExportData_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.cuda.CUmemPool_attribute": [[5, 1, 1, "", "CU_MEMPOOL_ATTR_RELEASE_THRESHOLD"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_USED_MEM_CURRENT"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_USED_MEM_HIGH"]], "cuda.cuda.CUmemRangeHandleType": [[5, 1, 1, "", "CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD"], [5, 1, 1, "", "CU_MEM_RANGE_HANDLE_TYPE_MAX"]], "cuda.cuda.CUmem_advise": [[5, 1, 1, "", "CU_MEM_ADVISE_SET_ACCESSED_BY"], [5, 1, 1, "", "CU_MEM_ADVISE_SET_PREFERRED_LOCATION"], [5, 1, 1, "", "CU_MEM_ADVISE_SET_READ_MOSTLY"], [5, 1, 1, "", "CU_MEM_ADVISE_UNSET_ACCESSED_BY"], [5, 1, 1, "", "CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION"], [5, 1, 1, "", "CU_MEM_ADVISE_UNSET_READ_MOSTLY"]], "cuda.cuda.CUmem_range_attribute": [[5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY"]], "cuda.cuda.CUmemoryPool": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmemorytype": [[5, 1, 1, "", "CU_MEMORYTYPE_ARRAY"], [5, 1, 1, "", "CU_MEMORYTYPE_DEVICE"], [5, 1, 1, "", "CU_MEMORYTYPE_HOST"], [5, 1, 1, "", "CU_MEMORYTYPE_UNIFIED"]], "cuda.cuda.CUmipmappedArray": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmodule": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUmoduleLoadingMode": [[5, 1, 1, "", "CU_MODULE_EAGER_LOADING"], [5, 1, 1, "", "CU_MODULE_LAZY_LOADING"]], "cuda.cuda.CUmulticastGranularity_flags": [[5, 1, 1, "", "CU_MULTICAST_GRANULARITY_MINIMUM"], [5, 1, 1, "", "CU_MULTICAST_GRANULARITY_RECOMMENDED"]], "cuda.cuda.CUmulticastObjectProp": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "numDevices"], [5, 1, 1, "", "size"]], "cuda.cuda.CUmulticastObjectProp_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", 
"getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "numDevices"], [5, 1, 1, "", "size"]], "cuda.cuda.CUmulticastObjectProp_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "numDevices"], [5, 1, 1, "", "size"]], "cuda.cuda.CUoccupancyB2DSize": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUoccupancy_flags": [[5, 1, 1, "", "CU_OCCUPANCY_DEFAULT"], [5, 1, 1, "", "CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE"]], "cuda.cuda.CUpointer_attribute": [[5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAGS"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_BUFFER_ID"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_CONTEXT"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_DEVICE_POINTER"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_HOST_POINTER"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_IS_MANAGED"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MAPPED"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MAPPING_SIZE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MEMORY_TYPE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_P2P_TOKENS"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_RANGE_SIZE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_RANGE_START_ADDR"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_SYNC_MEMOPS"]], "cuda.cuda.CUresourceViewFormat": [[5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_1X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_1X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_2X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_2X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_4X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_4X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_NONE"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SIGNED_BC4"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SIGNED_BC5"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SIGNED_BC6H"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_1X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_1X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_1X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_2X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_2X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_2X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_4X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_4X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_4X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_1X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_1X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_1X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_2X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_2X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_2X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_4X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_4X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_4X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC1"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC2"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC3"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC4"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC5"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC6H"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC7"]], "cuda.cuda.CUresourcetype": [[5, 1, 1, "", "CU_RESOURCE_TYPE_ARRAY"], [5, 1, 1, "", "CU_RESOURCE_TYPE_LINEAR"], [5, 1, 1, "", "CU_RESOURCE_TYPE_MIPMAPPED_ARRAY"], [5, 1, 1, "", "CU_RESOURCE_TYPE_PITCH2D"]], "cuda.cuda.CUresult": [[5, 1, 1, "", "CUDA_ERROR_ALREADY_ACQUIRED"], [5, 1, 1, "", 
"CUDA_ERROR_ALREADY_MAPPED"], [5, 1, 1, "", "CUDA_ERROR_ARRAY_IS_MAPPED"], [5, 1, 1, "", "CUDA_ERROR_ASSERT"], [5, 1, 1, "", "CUDA_ERROR_CAPTURED_EVENT"], [5, 1, 1, "", "CUDA_ERROR_CDP_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_CDP_VERSION_MISMATCH"], [5, 1, 1, "", "CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_CONTEXT_ALREADY_CURRENT"], [5, 1, 1, "", "CUDA_ERROR_CONTEXT_ALREADY_IN_USE"], [5, 1, 1, "", "CUDA_ERROR_CONTEXT_IS_DESTROYED"], [5, 1, 1, "", "CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE"], [5, 1, 1, "", "CUDA_ERROR_DEINITIALIZED"], [5, 1, 1, "", "CUDA_ERROR_DEVICE_NOT_LICENSED"], [5, 1, 1, "", "CUDA_ERROR_DEVICE_UNAVAILABLE"], [5, 1, 1, "", "CUDA_ERROR_ECC_UNCORRECTABLE"], [5, 1, 1, "", "CUDA_ERROR_EXTERNAL_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_FILE_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_FUNCTION_NOT_LOADED"], [5, 1, 1, "", "CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE"], [5, 1, 1, "", "CUDA_ERROR_HARDWARE_STACK_ERROR"], [5, 1, 1, "", "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED"], [5, 1, 1, "", "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED"], [5, 1, 1, "", "CUDA_ERROR_ILLEGAL_ADDRESS"], [5, 1, 1, "", "CUDA_ERROR_ILLEGAL_INSTRUCTION"], [5, 1, 1, "", "CUDA_ERROR_ILLEGAL_STATE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_ADDRESS_SPACE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_CLUSTER_SIZE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_CONTEXT"], [5, 1, 1, "", "CUDA_ERROR_INVALID_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_GRAPHICS_CONTEXT"], [5, 1, 1, "", "CUDA_ERROR_INVALID_HANDLE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_IMAGE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_PC"], [5, 1, 1, "", "CUDA_ERROR_INVALID_PTX"], [5, 1, 1, "", "CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION"], [5, 1, 1, "", "CUDA_ERROR_INVALID_RESOURCE_TYPE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_SOURCE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_VALUE"], [5, 1, 1, "", "CUDA_ERROR_JIT_COMPILATION_DISABLED"], [5, 1, 1, "", "CUDA_ERROR_JIT_COMPILER_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_FAILED"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_TIMEOUT"], [5, 1, 1, "", "CUDA_ERROR_LOSSY_QUERY"], [5, 1, 1, "", "CUDA_ERROR_MAP_FAILED"], [5, 1, 1, "", "CUDA_ERROR_MISALIGNED_ADDRESS"], [5, 1, 1, "", "CUDA_ERROR_MPS_CLIENT_TERMINATED"], [5, 1, 1, "", "CUDA_ERROR_MPS_CONNECTION_FAILED"], [5, 1, 1, "", "CUDA_ERROR_MPS_MAX_CLIENTS_REACHED"], [5, 1, 1, "", "CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED"], [5, 1, 1, "", "CUDA_ERROR_MPS_RPC_FAILURE"], [5, 1, 1, "", "CUDA_ERROR_MPS_SERVER_NOT_READY"], [5, 1, 1, "", "CUDA_ERROR_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_NOT_INITIALIZED"], [5, 1, 1, "", "CUDA_ERROR_NOT_MAPPED"], [5, 1, 1, "", "CUDA_ERROR_NOT_MAPPED_AS_ARRAY"], [5, 1, 1, "", "CUDA_ERROR_NOT_MAPPED_AS_POINTER"], [5, 1, 1, "", "CUDA_ERROR_NOT_PERMITTED"], [5, 1, 1, "", "CUDA_ERROR_NOT_READY"], [5, 1, 1, "", "CUDA_ERROR_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_NO_BINARY_FOR_GPU"], [5, 1, 1, "", "CUDA_ERROR_NO_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_NVLINK_UNCORRECTABLE"], [5, 1, 1, "", "CUDA_ERROR_OPERATING_SYSTEM"], [5, 1, 1, "", "CUDA_ERROR_OUT_OF_MEMORY"], [5, 1, 1, "", "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED"], [5, 1, 1, "", "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED"], [5, 1, 1, "", "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE"], [5, 1, 1, "", "CUDA_ERROR_PROFILER_ALREADY_STARTED"], [5, 1, 1, "", "CUDA_ERROR_PROFILER_ALREADY_STOPPED"], [5, 1, 1, "", "CUDA_ERROR_PROFILER_DISABLED"], [5, 1, 1, "", 
"CUDA_ERROR_PROFILER_NOT_INITIALIZED"], [5, 1, 1, "", "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"], [5, 1, 1, "", "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_IMPLICIT"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_INVALIDATED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_ISOLATION"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_MERGE"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_UNJOINED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_UNMATCHED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD"], [5, 1, 1, "", "CUDA_ERROR_STUB_LIBRARY"], [5, 1, 1, "", "CUDA_ERROR_SYSTEM_DRIVER_MISMATCH"], [5, 1, 1, "", "CUDA_ERROR_SYSTEM_NOT_READY"], [5, 1, 1, "", "CUDA_ERROR_TIMEOUT"], [5, 1, 1, "", "CUDA_ERROR_TOO_MANY_PEERS"], [5, 1, 1, "", "CUDA_ERROR_UNKNOWN"], [5, 1, 1, "", "CUDA_ERROR_UNMAP_FAILED"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_LIMIT"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_PTX_VERSION"], [5, 1, 1, "", "CUDA_SUCCESS"]], "cuda.cuda.CUshared_carveout": [[5, 1, 1, "", "CU_SHAREDMEM_CARVEOUT_DEFAULT"], [5, 1, 1, "", "CU_SHAREDMEM_CARVEOUT_MAX_L1"], [5, 1, 1, "", "CU_SHAREDMEM_CARVEOUT_MAX_SHARED"]], "cuda.cuda.CUsharedconfig": [[5, 1, 1, "", "CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE"], [5, 1, 1, "", "CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE"], [5, 1, 1, "", "CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE"]], "cuda.cuda.CUstream": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUstreamAttrValue": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.cuda.CUstreamAttrValue_v1": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.cuda.CUstreamBatchMemOpParams": [[5, 1, 1, "", "flushRemoteWrites"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memoryBarrier"], [5, 1, 1, "", "operation"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "waitValue"], [5, 1, 1, "", "writeValue"]], "cuda.cuda.CUstreamBatchMemOpParams_union": [[5, 1, 1, "", "flushRemoteWrites"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memoryBarrier"], [5, 1, 1, "", "operation"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "waitValue"], [5, 1, 1, "", "writeValue"]], "cuda.cuda.CUstreamBatchMemOpParams_v1": [[5, 1, 1, "", "flushRemoteWrites"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memoryBarrier"], [5, 1, 1, "", "operation"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "waitValue"], [5, 1, 1, "", "writeValue"]], "cuda.cuda.CUstreamBatchMemOpType": [[5, 1, 1, "", "CU_STREAM_MEM_OP_BARRIER"], [5, 1, 1, "", 
"CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WAIT_VALUE_32"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WAIT_VALUE_64"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WRITE_VALUE_32"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WRITE_VALUE_64"]], "cuda.cuda.CUstreamCallback": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUstreamCaptureMode": [[5, 1, 1, "", "CU_STREAM_CAPTURE_MODE_GLOBAL"], [5, 1, 1, "", "CU_STREAM_CAPTURE_MODE_RELAXED"], [5, 1, 1, "", "CU_STREAM_CAPTURE_MODE_THREAD_LOCAL"]], "cuda.cuda.CUstreamCaptureStatus": [[5, 1, 1, "", "CU_STREAM_CAPTURE_STATUS_ACTIVE"], [5, 1, 1, "", "CU_STREAM_CAPTURE_STATUS_INVALIDATED"], [5, 1, 1, "", "CU_STREAM_CAPTURE_STATUS_NONE"]], "cuda.cuda.CUstreamMemoryBarrier_flags": [[5, 1, 1, "", "CU_STREAM_MEMORY_BARRIER_TYPE_GPU"], [5, 1, 1, "", "CU_STREAM_MEMORY_BARRIER_TYPE_SYS"]], "cuda.cuda.CUstreamUpdateCaptureDependencies_flags": [[5, 1, 1, "", "CU_STREAM_ADD_CAPTURE_DEPENDENCIES"], [5, 1, 1, "", "CU_STREAM_SET_CAPTURE_DEPENDENCIES"]], "cuda.cuda.CUstreamWaitValue_flags": [[5, 1, 1, "", "CU_STREAM_WAIT_VALUE_AND"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_EQ"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_FLUSH"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_GEQ"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_NOR"]], "cuda.cuda.CUstreamWriteValue_flags": [[5, 1, 1, "", "CU_STREAM_WRITE_VALUE_DEFAULT"], [5, 1, 1, "", "CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER"]], "cuda.cuda.CUstream_flags": [[5, 1, 1, "", "CU_STREAM_DEFAULT"], [5, 1, 1, "", "CU_STREAM_NON_BLOCKING"]], "cuda.cuda.CUsurfObject": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUsurfObject_v1": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUsurfref": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUsynchronizationPolicy": [[5, 1, 1, "", "CU_SYNC_POLICY_AUTO"], [5, 1, 1, "", "CU_SYNC_POLICY_BLOCKING_SYNC"], [5, 1, 1, "", "CU_SYNC_POLICY_SPIN"], [5, 1, 1, "", "CU_SYNC_POLICY_YIELD"]], "cuda.cuda.CUtensorMap": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "opaque"]], "cuda.cuda.CUtensorMapDataType": [[5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_BFLOAT16"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT16"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT64"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_INT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_INT64"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_TFLOAT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT16"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT64"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT8"]], "cuda.cuda.CUtensorMapFloatOOBfill": [[5, 1, 1, "", "CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA"], [5, 1, 1, "", "CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE"]], "cuda.cuda.CUtensorMapInterleave": [[5, 1, 1, "", "CU_TENSOR_MAP_INTERLEAVE_16B"], [5, 1, 1, "", "CU_TENSOR_MAP_INTERLEAVE_32B"], [5, 1, 1, "", "CU_TENSOR_MAP_INTERLEAVE_NONE"]], "cuda.cuda.CUtensorMapL2promotion": [[5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_L2_128B"], [5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_L2_256B"], [5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_L2_64B"], [5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_NONE"]], "cuda.cuda.CUtensorMapSwizzle": [[5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_128B"], [5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_32B"], [5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_64B"], [5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_NONE"]], "cuda.cuda.CUtensorMap_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "opaque"]], "cuda.cuda.CUtexObject": [[5, 2, 1, "", "getPtr"]], 
"cuda.cuda.CUtexObject_v1": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUtexref": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUuserObject": [[5, 2, 1, "", "getPtr"]], "cuda.cuda.CUuserObjectRetain_flags": [[5, 1, 1, "", "CU_GRAPH_USER_OBJECT_MOVE"]], "cuda.cuda.CUuserObject_flags": [[5, 1, 1, "", "CU_USER_OBJECT_NO_DESTRUCTOR_SYNC"]], "cuda.cuda.CUuuid": [[5, 1, 1, "", "bytes"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.CUuuid_st": [[5, 1, 1, "", "bytes"], [5, 2, 1, "", "getPtr"]], "cuda.cuda.cl_context_flags": [[5, 1, 1, "", "NVCL_CTX_SCHED_AUTO"], [5, 1, 1, "", "NVCL_CTX_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "NVCL_CTX_SCHED_SPIN"], [5, 1, 1, "", "NVCL_CTX_SCHED_YIELD"]], "cuda.cuda.cl_event_flags": [[5, 1, 1, "", "NVCL_EVENT_SCHED_AUTO"], [5, 1, 1, "", "NVCL_EVENT_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "NVCL_EVENT_SCHED_SPIN"], [5, 1, 1, "", "NVCL_EVENT_SCHED_YIELD"]], "cuda.cudart": [[6, 1, 1, "", "CUDA_EGL_MAX_PLANES"], [6, 1, 1, "", "CUDA_IPC_HANDLE_SIZE"], [6, 0, 1, "", "CUuuid"], [6, 0, 1, "", "CUuuid_st"], [6, 0, 1, "", "cudaAccessPolicyWindow"], [6, 0, 1, "", "cudaAccessProperty"], [6, 1, 1, "", "cudaArrayColorAttachment"], [6, 1, 1, "", "cudaArrayCubemap"], [6, 1, 1, "", "cudaArrayDefault"], [6, 1, 1, "", "cudaArrayDeferredMapping"], [6, 3, 1, "", "cudaArrayGetInfo"], [6, 3, 1, "", "cudaArrayGetMemoryRequirements"], [6, 3, 1, "", "cudaArrayGetPlane"], [6, 3, 1, "", "cudaArrayGetSparseProperties"], [6, 1, 1, "", "cudaArrayLayered"], [6, 0, 1, "", "cudaArrayMemoryRequirements"], [6, 1, 1, "", "cudaArraySparse"], [6, 0, 1, "", "cudaArraySparseProperties"], [6, 1, 1, "", "cudaArraySparsePropertiesSingleMipTail"], [6, 1, 1, "", "cudaArraySurfaceLoadStore"], [6, 1, 1, "", "cudaArrayTextureGather"], [6, 0, 1, "", "cudaArray_const_t"], [6, 0, 1, "", "cudaArray_t"], [6, 0, 1, "", "cudaAsyncCallback"], [6, 0, 1, "", "cudaAsyncCallbackHandle_t"], [6, 0, 1, "", "cudaAsyncNotificationInfo"], [6, 0, 1, "", "cudaAsyncNotificationInfo_t"], [6, 0, 1, "", "cudaAsyncNotificationType"], [6, 0, 1, "", "cudaCGScope"], [6, 0, 1, "", "cudaChannelFormatDesc"], [6, 0, 1, "", "cudaChannelFormatKind"], [6, 0, 1, "", "cudaChildGraphNodeParams"], [6, 3, 1, "", "cudaChooseDevice"], [6, 0, 1, "", "cudaClusterSchedulingPolicy"], [6, 0, 1, "", "cudaComputeMode"], [6, 0, 1, "", "cudaConditionalNodeParams"], [6, 1, 1, "", "cudaCooperativeLaunchMultiDeviceNoPostSync"], [6, 1, 1, "", "cudaCooperativeLaunchMultiDeviceNoPreSync"], [6, 1, 1, "", "cudaCpuDeviceId"], [6, 3, 1, "", "cudaCreateChannelDesc"], [6, 3, 1, "", "cudaCreateSurfaceObject"], [6, 3, 1, "", "cudaCreateTextureObject"], [6, 3, 1, "", "cudaCtxResetPersistingL2Cache"], [6, 3, 1, "", "cudaDestroyExternalMemory"], [6, 3, 1, "", "cudaDestroyExternalSemaphore"], [6, 3, 1, "", "cudaDestroySurfaceObject"], [6, 3, 1, "", "cudaDestroyTextureObject"], [6, 0, 1, "", "cudaDeviceAttr"], [6, 1, 1, "", "cudaDeviceBlockingSync"], [6, 3, 1, "", "cudaDeviceCanAccessPeer"], [6, 3, 1, "", "cudaDeviceDisablePeerAccess"], [6, 3, 1, "", "cudaDeviceEnablePeerAccess"], [6, 3, 1, "", "cudaDeviceFlushGPUDirectRDMAWrites"], [6, 3, 1, "", "cudaDeviceGetAttribute"], [6, 3, 1, "", "cudaDeviceGetByPCIBusId"], [6, 3, 1, "", "cudaDeviceGetCacheConfig"], [6, 3, 1, "", "cudaDeviceGetDefaultMemPool"], [6, 3, 1, "", "cudaDeviceGetGraphMemAttribute"], [6, 3, 1, "", "cudaDeviceGetLimit"], [6, 3, 1, "", "cudaDeviceGetMemPool"], [6, 3, 1, "", "cudaDeviceGetNvSciSyncAttributes"], [6, 3, 1, "", "cudaDeviceGetP2PAttribute"], [6, 3, 1, "", "cudaDeviceGetPCIBusId"], [6, 3, 1, "", "cudaDeviceGetStreamPriorityRange"], 
[6, 3, 1, "", "cudaDeviceGetTexture1DLinearMaxWidth"], [6, 3, 1, "", "cudaDeviceGraphMemTrim"], [6, 1, 1, "", "cudaDeviceLmemResizeToMax"], [6, 1, 1, "", "cudaDeviceMapHost"], [6, 1, 1, "", "cudaDeviceMask"], [6, 0, 1, "", "cudaDeviceNumaConfig"], [6, 0, 1, "", "cudaDeviceP2PAttr"], [6, 0, 1, "", "cudaDeviceProp"], [6, 3, 1, "", "cudaDeviceRegisterAsyncNotification"], [6, 3, 1, "", "cudaDeviceReset"], [6, 1, 1, "", "cudaDeviceScheduleAuto"], [6, 1, 1, "", "cudaDeviceScheduleBlockingSync"], [6, 1, 1, "", "cudaDeviceScheduleMask"], [6, 1, 1, "", "cudaDeviceScheduleSpin"], [6, 1, 1, "", "cudaDeviceScheduleYield"], [6, 3, 1, "", "cudaDeviceSetCacheConfig"], [6, 3, 1, "", "cudaDeviceSetGraphMemAttribute"], [6, 3, 1, "", "cudaDeviceSetLimit"], [6, 3, 1, "", "cudaDeviceSetMemPool"], [6, 1, 1, "", "cudaDeviceSyncMemops"], [6, 3, 1, "", "cudaDeviceSynchronize"], [6, 3, 1, "", "cudaDeviceUnregisterAsyncNotification"], [6, 0, 1, "", "cudaDriverEntryPointQueryResult"], [6, 3, 1, "", "cudaDriverGetVersion"], [6, 3, 1, "", "cudaEGLStreamConsumerAcquireFrame"], [6, 3, 1, "", "cudaEGLStreamConsumerConnect"], [6, 3, 1, "", "cudaEGLStreamConsumerConnectWithFlags"], [6, 3, 1, "", "cudaEGLStreamConsumerDisconnect"], [6, 3, 1, "", "cudaEGLStreamConsumerReleaseFrame"], [6, 3, 1, "", "cudaEGLStreamProducerConnect"], [6, 3, 1, "", "cudaEGLStreamProducerDisconnect"], [6, 3, 1, "", "cudaEGLStreamProducerPresentFrame"], [6, 3, 1, "", "cudaEGLStreamProducerReturnFrame"], [6, 0, 1, "", "cudaEglColorFormat"], [6, 0, 1, "", "cudaEglFrame"], [6, 0, 1, "", "cudaEglFrameType"], [6, 0, 1, "", "cudaEglFrame_st"], [6, 0, 1, "", "cudaEglPlaneDesc"], [6, 0, 1, "", "cudaEglPlaneDesc_st"], [6, 0, 1, "", "cudaEglResourceLocationFlags"], [6, 0, 1, "", "cudaEglStreamConnection"], [6, 0, 1, "", "cudaError_t"], [6, 1, 1, "", "cudaEventBlockingSync"], [6, 3, 1, "", "cudaEventCreate"], [6, 3, 1, "", "cudaEventCreateFromEGLSync"], [6, 3, 1, "", "cudaEventCreateWithFlags"], [6, 1, 1, "", "cudaEventDefault"], [6, 3, 1, "", "cudaEventDestroy"], [6, 1, 1, "", "cudaEventDisableTiming"], [6, 3, 1, "", "cudaEventElapsedTime"], [6, 1, 1, "", "cudaEventInterprocess"], [6, 3, 1, "", "cudaEventQuery"], [6, 3, 1, "", "cudaEventRecord"], [6, 1, 1, "", "cudaEventRecordDefault"], [6, 1, 1, "", "cudaEventRecordExternal"], [6, 0, 1, "", "cudaEventRecordNodeParams"], [6, 3, 1, "", "cudaEventRecordWithFlags"], [6, 3, 1, "", "cudaEventSynchronize"], [6, 1, 1, "", "cudaEventWaitDefault"], [6, 1, 1, "", "cudaEventWaitExternal"], [6, 0, 1, "", "cudaEventWaitNodeParams"], [6, 0, 1, "", "cudaEvent_t"], [6, 0, 1, "", "cudaExtent"], [6, 0, 1, "", "cudaExternalMemoryBufferDesc"], [6, 1, 1, "", "cudaExternalMemoryDedicated"], [6, 3, 1, "", "cudaExternalMemoryGetMappedBuffer"], [6, 3, 1, "", "cudaExternalMemoryGetMappedMipmappedArray"], [6, 0, 1, "", "cudaExternalMemoryHandleDesc"], [6, 0, 1, "", "cudaExternalMemoryHandleType"], [6, 0, 1, "", "cudaExternalMemoryMipmappedArrayDesc"], [6, 0, 1, "", "cudaExternalMemory_t"], [6, 0, 1, "", "cudaExternalSemaphoreHandleDesc"], [6, 0, 1, "", "cudaExternalSemaphoreHandleType"], [6, 0, 1, "", "cudaExternalSemaphoreSignalNodeParams"], [6, 0, 1, "", "cudaExternalSemaphoreSignalNodeParamsV2"], [6, 0, 1, "", "cudaExternalSemaphoreSignalParams"], [6, 1, 1, "", "cudaExternalSemaphoreSignalSkipNvSciBufMemSync"], [6, 0, 1, "", "cudaExternalSemaphoreWaitNodeParams"], [6, 0, 1, "", "cudaExternalSemaphoreWaitNodeParamsV2"], [6, 0, 1, "", "cudaExternalSemaphoreWaitParams"], [6, 1, 1, "", "cudaExternalSemaphoreWaitSkipNvSciBufMemSync"], 
[6, 0, 1, "", "cudaExternalSemaphore_t"], [6, 0, 1, "", "cudaFlushGPUDirectRDMAWritesOptions"], [6, 0, 1, "", "cudaFlushGPUDirectRDMAWritesScope"], [6, 0, 1, "", "cudaFlushGPUDirectRDMAWritesTarget"], [6, 3, 1, "", "cudaFree"], [6, 3, 1, "", "cudaFreeArray"], [6, 3, 1, "", "cudaFreeAsync"], [6, 3, 1, "", "cudaFreeHost"], [6, 3, 1, "", "cudaFreeMipmappedArray"], [6, 0, 1, "", "cudaFuncAttribute"], [6, 0, 1, "", "cudaFuncAttributes"], [6, 0, 1, "", "cudaFuncCache"], [6, 3, 1, "", "cudaFuncGetAttributes"], [6, 3, 1, "", "cudaFuncSetAttribute"], [6, 3, 1, "", "cudaFuncSetCacheConfig"], [6, 0, 1, "", "cudaFunction_t"], [6, 0, 1, "", "cudaGLDeviceList"], [6, 3, 1, "", "cudaGLGetDevices"], [6, 0, 1, "", "cudaGPUDirectRDMAWritesOrdering"], [6, 3, 1, "", "cudaGetChannelDesc"], [6, 3, 1, "", "cudaGetDevice"], [6, 3, 1, "", "cudaGetDeviceCount"], [6, 3, 1, "", "cudaGetDeviceFlags"], [6, 3, 1, "", "cudaGetDeviceProperties"], [6, 3, 1, "", "cudaGetDriverEntryPoint"], [6, 3, 1, "", "cudaGetDriverEntryPointByVersion"], [6, 0, 1, "", "cudaGetDriverEntryPointFlags"], [6, 3, 1, "", "cudaGetErrorName"], [6, 3, 1, "", "cudaGetErrorString"], [6, 3, 1, "", "cudaGetKernel"], [6, 3, 1, "", "cudaGetLastError"], [6, 3, 1, "", "cudaGetMipmappedArrayLevel"], [6, 3, 1, "", "cudaGetSurfaceObjectResourceDesc"], [6, 3, 1, "", "cudaGetTextureObjectResourceDesc"], [6, 3, 1, "", "cudaGetTextureObjectResourceViewDesc"], [6, 3, 1, "", "cudaGetTextureObjectTextureDesc"], [6, 3, 1, "", "cudaGraphAddChildGraphNode"], [6, 3, 1, "", "cudaGraphAddDependencies"], [6, 3, 1, "", "cudaGraphAddDependencies_v2"], [6, 3, 1, "", "cudaGraphAddEmptyNode"], [6, 3, 1, "", "cudaGraphAddEventRecordNode"], [6, 3, 1, "", "cudaGraphAddEventWaitNode"], [6, 3, 1, "", "cudaGraphAddExternalSemaphoresSignalNode"], [6, 3, 1, "", "cudaGraphAddExternalSemaphoresWaitNode"], [6, 3, 1, "", "cudaGraphAddHostNode"], [6, 3, 1, "", "cudaGraphAddKernelNode"], [6, 3, 1, "", "cudaGraphAddMemAllocNode"], [6, 3, 1, "", "cudaGraphAddMemFreeNode"], [6, 3, 1, "", "cudaGraphAddMemcpyNode"], [6, 3, 1, "", "cudaGraphAddMemcpyNode1D"], [6, 3, 1, "", "cudaGraphAddMemsetNode"], [6, 3, 1, "", "cudaGraphAddNode"], [6, 3, 1, "", "cudaGraphAddNode_v2"], [6, 3, 1, "", "cudaGraphChildGraphNodeGetGraph"], [6, 3, 1, "", "cudaGraphClone"], [6, 0, 1, "", "cudaGraphConditionalHandle"], [6, 3, 1, "", "cudaGraphConditionalHandleCreate"], [6, 0, 1, "", "cudaGraphConditionalHandleFlags"], [6, 0, 1, "", "cudaGraphConditionalNodeType"], [6, 3, 1, "", "cudaGraphCreate"], [6, 0, 1, "", "cudaGraphDebugDotFlags"], [6, 3, 1, "", "cudaGraphDebugDotPrint"], [6, 0, 1, "", "cudaGraphDependencyType"], [6, 3, 1, "", "cudaGraphDestroy"], [6, 3, 1, "", "cudaGraphDestroyNode"], [6, 0, 1, "", "cudaGraphDeviceNode_t"], [6, 0, 1, "", "cudaGraphEdgeData"], [6, 0, 1, "", "cudaGraphEdgeData_st"], [6, 3, 1, "", "cudaGraphEventRecordNodeGetEvent"], [6, 3, 1, "", "cudaGraphEventRecordNodeSetEvent"], [6, 3, 1, "", "cudaGraphEventWaitNodeGetEvent"], [6, 3, 1, "", "cudaGraphEventWaitNodeSetEvent"], [6, 3, 1, "", "cudaGraphExecChildGraphNodeSetParams"], [6, 3, 1, "", "cudaGraphExecDestroy"], [6, 3, 1, "", "cudaGraphExecEventRecordNodeSetEvent"], [6, 3, 1, "", "cudaGraphExecEventWaitNodeSetEvent"], [6, 3, 1, "", "cudaGraphExecExternalSemaphoresSignalNodeSetParams"], [6, 3, 1, "", "cudaGraphExecExternalSemaphoresWaitNodeSetParams"], [6, 3, 1, "", "cudaGraphExecGetFlags"], [6, 3, 1, "", "cudaGraphExecHostNodeSetParams"], [6, 3, 1, "", "cudaGraphExecKernelNodeSetParams"], [6, 3, 1, "", "cudaGraphExecMemcpyNodeSetParams"], 
[6, 3, 1, "", "cudaGraphExecMemcpyNodeSetParams1D"], [6, 3, 1, "", "cudaGraphExecMemsetNodeSetParams"], [6, 3, 1, "", "cudaGraphExecNodeSetParams"], [6, 3, 1, "", "cudaGraphExecUpdate"], [6, 0, 1, "", "cudaGraphExecUpdateResult"], [6, 0, 1, "", "cudaGraphExecUpdateResultInfo"], [6, 0, 1, "", "cudaGraphExecUpdateResultInfo_st"], [6, 0, 1, "", "cudaGraphExec_t"], [6, 3, 1, "", "cudaGraphExternalSemaphoresSignalNodeGetParams"], [6, 3, 1, "", "cudaGraphExternalSemaphoresSignalNodeSetParams"], [6, 3, 1, "", "cudaGraphExternalSemaphoresWaitNodeGetParams"], [6, 3, 1, "", "cudaGraphExternalSemaphoresWaitNodeSetParams"], [6, 3, 1, "", "cudaGraphGetEdges"], [6, 3, 1, "", "cudaGraphGetEdges_v2"], [6, 3, 1, "", "cudaGraphGetNodes"], [6, 3, 1, "", "cudaGraphGetRootNodes"], [6, 3, 1, "", "cudaGraphHostNodeGetParams"], [6, 3, 1, "", "cudaGraphHostNodeSetParams"], [6, 3, 1, "", "cudaGraphInstantiate"], [6, 0, 1, "", "cudaGraphInstantiateFlags"], [6, 0, 1, "", "cudaGraphInstantiateParams"], [6, 0, 1, "", "cudaGraphInstantiateParams_st"], [6, 0, 1, "", "cudaGraphInstantiateResult"], [6, 3, 1, "", "cudaGraphInstantiateWithFlags"], [6, 3, 1, "", "cudaGraphInstantiateWithParams"], [6, 3, 1, "", "cudaGraphKernelNodeCopyAttributes"], [6, 0, 1, "", "cudaGraphKernelNodeField"], [6, 3, 1, "", "cudaGraphKernelNodeGetAttribute"], [6, 3, 1, "", "cudaGraphKernelNodeGetParams"], [6, 1, 1, "", "cudaGraphKernelNodePortDefault"], [6, 1, 1, "", "cudaGraphKernelNodePortLaunchCompletion"], [6, 1, 1, "", "cudaGraphKernelNodePortProgrammatic"], [6, 3, 1, "", "cudaGraphKernelNodeSetAttribute"], [6, 3, 1, "", "cudaGraphKernelNodeSetParams"], [6, 0, 1, "", "cudaGraphKernelNodeUpdate"], [6, 3, 1, "", "cudaGraphLaunch"], [6, 3, 1, "", "cudaGraphMemAllocNodeGetParams"], [6, 0, 1, "", "cudaGraphMemAttributeType"], [6, 3, 1, "", "cudaGraphMemFreeNodeGetParams"], [6, 3, 1, "", "cudaGraphMemcpyNodeGetParams"], [6, 3, 1, "", "cudaGraphMemcpyNodeSetParams"], [6, 3, 1, "", "cudaGraphMemcpyNodeSetParams1D"], [6, 3, 1, "", "cudaGraphMemsetNodeGetParams"], [6, 3, 1, "", "cudaGraphMemsetNodeSetParams"], [6, 3, 1, "", "cudaGraphNodeFindInClone"], [6, 3, 1, "", "cudaGraphNodeGetDependencies"], [6, 3, 1, "", "cudaGraphNodeGetDependencies_v2"], [6, 3, 1, "", "cudaGraphNodeGetDependentNodes"], [6, 3, 1, "", "cudaGraphNodeGetDependentNodes_v2"], [6, 3, 1, "", "cudaGraphNodeGetEnabled"], [6, 3, 1, "", "cudaGraphNodeGetType"], [6, 0, 1, "", "cudaGraphNodeParams"], [6, 3, 1, "", "cudaGraphNodeSetEnabled"], [6, 3, 1, "", "cudaGraphNodeSetParams"], [6, 0, 1, "", "cudaGraphNodeType"], [6, 0, 1, "", "cudaGraphNode_t"], [6, 3, 1, "", "cudaGraphReleaseUserObject"], [6, 3, 1, "", "cudaGraphRemoveDependencies"], [6, 3, 1, "", "cudaGraphRemoveDependencies_v2"], [6, 3, 1, "", "cudaGraphRetainUserObject"], [6, 3, 1, "", "cudaGraphUpload"], [6, 0, 1, "", "cudaGraph_t"], [6, 0, 1, "", "cudaGraphicsCubeFace"], [6, 3, 1, "", "cudaGraphicsEGLRegisterImage"], [6, 3, 1, "", "cudaGraphicsGLRegisterBuffer"], [6, 3, 1, "", "cudaGraphicsGLRegisterImage"], [6, 0, 1, "", "cudaGraphicsMapFlags"], [6, 3, 1, "", "cudaGraphicsMapResources"], [6, 0, 1, "", "cudaGraphicsRegisterFlags"], [6, 3, 1, "", "cudaGraphicsResourceGetMappedEglFrame"], [6, 3, 1, "", "cudaGraphicsResourceGetMappedMipmappedArray"], [6, 3, 1, "", "cudaGraphicsResourceGetMappedPointer"], [6, 3, 1, "", "cudaGraphicsResourceSetMapFlags"], [6, 0, 1, "", "cudaGraphicsResource_t"], [6, 3, 1, "", "cudaGraphicsSubResourceGetMappedArray"], [6, 3, 1, "", "cudaGraphicsUnmapResources"], [6, 3, 1, "", 
"cudaGraphicsUnregisterResource"], [6, 3, 1, "", "cudaGraphicsVDPAURegisterOutputSurface"], [6, 3, 1, "", "cudaGraphicsVDPAURegisterVideoSurface"], [6, 3, 1, "", "cudaHostAlloc"], [6, 1, 1, "", "cudaHostAllocDefault"], [6, 1, 1, "", "cudaHostAllocMapped"], [6, 1, 1, "", "cudaHostAllocPortable"], [6, 1, 1, "", "cudaHostAllocWriteCombined"], [6, 0, 1, "", "cudaHostFn_t"], [6, 3, 1, "", "cudaHostGetDevicePointer"], [6, 3, 1, "", "cudaHostGetFlags"], [6, 0, 1, "", "cudaHostNodeParams"], [6, 0, 1, "", "cudaHostNodeParamsV2"], [6, 3, 1, "", "cudaHostRegister"], [6, 1, 1, "", "cudaHostRegisterDefault"], [6, 1, 1, "", "cudaHostRegisterIoMemory"], [6, 1, 1, "", "cudaHostRegisterMapped"], [6, 1, 1, "", "cudaHostRegisterPortable"], [6, 1, 1, "", "cudaHostRegisterReadOnly"], [6, 3, 1, "", "cudaHostUnregister"], [6, 3, 1, "", "cudaImportExternalMemory"], [6, 3, 1, "", "cudaImportExternalSemaphore"], [6, 3, 1, "", "cudaInitDevice"], [6, 1, 1, "", "cudaInitDeviceFlagsAreValid"], [6, 1, 1, "", "cudaInvalidDeviceId"], [6, 3, 1, "", "cudaIpcCloseMemHandle"], [6, 0, 1, "", "cudaIpcEventHandle_st"], [6, 0, 1, "", "cudaIpcEventHandle_t"], [6, 3, 1, "", "cudaIpcGetEventHandle"], [6, 3, 1, "", "cudaIpcGetMemHandle"], [6, 0, 1, "", "cudaIpcMemHandle_st"], [6, 0, 1, "", "cudaIpcMemHandle_t"], [6, 1, 1, "", "cudaIpcMemLazyEnablePeerAccess"], [6, 3, 1, "", "cudaIpcOpenEventHandle"], [6, 3, 1, "", "cudaIpcOpenMemHandle"], [6, 1, 1, "", "cudaKernelNodeAttrID"], [6, 1, 1, "", "cudaKernelNodeAttrValue"], [6, 1, 1, "", "cudaKernelNodeAttributeAccessPolicyWindow"], [6, 1, 1, "", "cudaKernelNodeAttributeClusterDimension"], [6, 1, 1, "", "cudaKernelNodeAttributeClusterSchedulingPolicyPreference"], [6, 1, 1, "", "cudaKernelNodeAttributeCooperative"], [6, 1, 1, "", "cudaKernelNodeAttributeDeviceUpdatableKernelNode"], [6, 1, 1, "", "cudaKernelNodeAttributeMemSyncDomain"], [6, 1, 1, "", "cudaKernelNodeAttributeMemSyncDomainMap"], [6, 1, 1, "", "cudaKernelNodeAttributePreferredSharedMemoryCarveout"], [6, 1, 1, "", "cudaKernelNodeAttributePriority"], [6, 0, 1, "", "cudaKernelNodeParams"], [6, 0, 1, "", "cudaKernelNodeParamsV2"], [6, 0, 1, "", "cudaKernel_t"], [6, 0, 1, "", "cudaLaunchAttribute"], [6, 0, 1, "", "cudaLaunchAttributeID"], [6, 0, 1, "id0", "cudaLaunchAttributeValue"], [6, 0, 1, "", "cudaLaunchAttribute_st"], [6, 3, 1, "", "cudaLaunchHostFunc"], [6, 0, 1, "", "cudaLaunchMemSyncDomain"], [6, 0, 1, "", "cudaLaunchMemSyncDomainMap"], [6, 0, 1, "", "cudaLaunchMemSyncDomainMap_st"], [6, 0, 1, "", "cudaLimit"], [6, 3, 1, "", "cudaMalloc"], [6, 3, 1, "", "cudaMalloc3D"], [6, 3, 1, "", "cudaMalloc3DArray"], [6, 3, 1, "", "cudaMallocArray"], [6, 3, 1, "", "cudaMallocAsync"], [6, 3, 1, "", "cudaMallocFromPoolAsync"], [6, 3, 1, "", "cudaMallocHost"], [6, 3, 1, "", "cudaMallocManaged"], [6, 3, 1, "", "cudaMallocMipmappedArray"], [6, 3, 1, "", "cudaMallocPitch"], [6, 0, 1, "", "cudaMemAccessDesc"], [6, 0, 1, "", "cudaMemAccessFlags"], [6, 3, 1, "", "cudaMemAdvise"], [6, 3, 1, "", "cudaMemAdvise_v2"], [6, 0, 1, "", "cudaMemAllocNodeParams"], [6, 0, 1, "", "cudaMemAllocNodeParamsV2"], [6, 0, 1, "", "cudaMemAllocationHandleType"], [6, 0, 1, "", "cudaMemAllocationType"], [6, 1, 1, "", "cudaMemAttachGlobal"], [6, 1, 1, "", "cudaMemAttachHost"], [6, 1, 1, "", "cudaMemAttachSingle"], [6, 0, 1, "", "cudaMemFabricHandle_st"], [6, 0, 1, "", "cudaMemFabricHandle_t"], [6, 0, 1, "", "cudaMemFreeNodeParams"], [6, 3, 1, "", "cudaMemGetInfo"], [6, 0, 1, "", "cudaMemLocation"], [6, 0, 1, "", "cudaMemLocationType"], [6, 0, 1, "", 
"cudaMemPoolAttr"], [6, 3, 1, "", "cudaMemPoolCreate"], [6, 3, 1, "", "cudaMemPoolDestroy"], [6, 3, 1, "", "cudaMemPoolExportPointer"], [6, 3, 1, "", "cudaMemPoolExportToShareableHandle"], [6, 3, 1, "", "cudaMemPoolGetAccess"], [6, 3, 1, "", "cudaMemPoolGetAttribute"], [6, 3, 1, "", "cudaMemPoolImportFromShareableHandle"], [6, 3, 1, "", "cudaMemPoolImportPointer"], [6, 0, 1, "", "cudaMemPoolProps"], [6, 0, 1, "", "cudaMemPoolPtrExportData"], [6, 3, 1, "", "cudaMemPoolSetAccess"], [6, 3, 1, "", "cudaMemPoolSetAttribute"], [6, 3, 1, "", "cudaMemPoolTrimTo"], [6, 0, 1, "", "cudaMemPool_t"], [6, 3, 1, "", "cudaMemPrefetchAsync"], [6, 3, 1, "", "cudaMemPrefetchAsync_v2"], [6, 0, 1, "", "cudaMemRangeAttribute"], [6, 3, 1, "", "cudaMemRangeGetAttribute"], [6, 3, 1, "", "cudaMemRangeGetAttributes"], [6, 3, 1, "", "cudaMemcpy"], [6, 3, 1, "", "cudaMemcpy2D"], [6, 3, 1, "", "cudaMemcpy2DArrayToArray"], [6, 3, 1, "", "cudaMemcpy2DAsync"], [6, 3, 1, "", "cudaMemcpy2DFromArray"], [6, 3, 1, "", "cudaMemcpy2DFromArrayAsync"], [6, 3, 1, "", "cudaMemcpy2DToArray"], [6, 3, 1, "", "cudaMemcpy2DToArrayAsync"], [6, 3, 1, "", "cudaMemcpy3D"], [6, 3, 1, "", "cudaMemcpy3DAsync"], [6, 0, 1, "", "cudaMemcpy3DParms"], [6, 3, 1, "", "cudaMemcpy3DPeer"], [6, 3, 1, "", "cudaMemcpy3DPeerAsync"], [6, 0, 1, "", "cudaMemcpy3DPeerParms"], [6, 3, 1, "", "cudaMemcpyAsync"], [6, 0, 1, "", "cudaMemcpyKind"], [6, 0, 1, "", "cudaMemcpyNodeParams"], [6, 3, 1, "", "cudaMemcpyPeer"], [6, 3, 1, "", "cudaMemcpyPeerAsync"], [6, 0, 1, "", "cudaMemoryAdvise"], [6, 0, 1, "", "cudaMemoryType"], [6, 3, 1, "", "cudaMemset"], [6, 3, 1, "", "cudaMemset2D"], [6, 3, 1, "", "cudaMemset2DAsync"], [6, 3, 1, "", "cudaMemset3D"], [6, 3, 1, "", "cudaMemset3DAsync"], [6, 3, 1, "", "cudaMemsetAsync"], [6, 0, 1, "", "cudaMemsetParams"], [6, 0, 1, "", "cudaMemsetParamsV2"], [6, 3, 1, "", "cudaMipmappedArrayGetMemoryRequirements"], [6, 3, 1, "", "cudaMipmappedArrayGetSparseProperties"], [6, 0, 1, "", "cudaMipmappedArray_const_t"], [6, 0, 1, "", "cudaMipmappedArray_t"], [6, 1, 1, "", "cudaNvSciSyncAttrSignal"], [6, 1, 1, "", "cudaNvSciSyncAttrWait"], [6, 3, 1, "", "cudaOccupancyAvailableDynamicSMemPerBlock"], [6, 1, 1, "", "cudaOccupancyDefault"], [6, 1, 1, "", "cudaOccupancyDisableCachingOverride"], [6, 3, 1, "", "cudaOccupancyMaxActiveBlocksPerMultiprocessor"], [6, 3, 1, "", "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags"], [6, 3, 1, "", "cudaPeekAtLastError"], [6, 1, 1, "", "cudaPeerAccessDefault"], [6, 0, 1, "", "cudaPitchedPtr"], [6, 0, 1, "", "cudaPointerAttributes"], [6, 3, 1, "", "cudaPointerGetAttributes"], [6, 0, 1, "", "cudaPos"], [6, 3, 1, "", "cudaProfilerStart"], [6, 3, 1, "", "cudaProfilerStop"], [6, 0, 1, "", "cudaResourceDesc"], [6, 0, 1, "", "cudaResourceType"], [6, 0, 1, "", "cudaResourceViewDesc"], [6, 0, 1, "", "cudaResourceViewFormat"], [6, 3, 1, "", "cudaRuntimeGetVersion"], [6, 3, 1, "", "cudaSetDevice"], [6, 3, 1, "", "cudaSetDeviceFlags"], [6, 0, 1, "", "cudaSharedCarveout"], [6, 0, 1, "", "cudaSharedMemConfig"], [6, 3, 1, "", "cudaSignalExternalSemaphoresAsync"], [6, 3, 1, "", "cudaStreamAddCallback"], [6, 3, 1, "", "cudaStreamAttachMemAsync"], [6, 1, 1, "", "cudaStreamAttrID"], [6, 1, 1, "", "cudaStreamAttrValue"], [6, 1, 1, "", "cudaStreamAttributeAccessPolicyWindow"], [6, 1, 1, "", "cudaStreamAttributeMemSyncDomain"], [6, 1, 1, "", "cudaStreamAttributeMemSyncDomainMap"], [6, 1, 1, "", "cudaStreamAttributePriority"], [6, 1, 1, "", "cudaStreamAttributeSynchronizationPolicy"], [6, 3, 1, "", "cudaStreamBeginCapture"], [6, 
3, 1, "", "cudaStreamBeginCaptureToGraph"], [6, 0, 1, "", "cudaStreamCallback_t"], [6, 0, 1, "", "cudaStreamCaptureMode"], [6, 0, 1, "", "cudaStreamCaptureStatus"], [6, 3, 1, "", "cudaStreamCopyAttributes"], [6, 3, 1, "", "cudaStreamCreate"], [6, 3, 1, "", "cudaStreamCreateWithFlags"], [6, 3, 1, "", "cudaStreamCreateWithPriority"], [6, 1, 1, "", "cudaStreamDefault"], [6, 3, 1, "", "cudaStreamDestroy"], [6, 3, 1, "", "cudaStreamEndCapture"], [6, 3, 1, "", "cudaStreamGetAttribute"], [6, 3, 1, "", "cudaStreamGetCaptureInfo"], [6, 3, 1, "", "cudaStreamGetCaptureInfo_v3"], [6, 3, 1, "", "cudaStreamGetFlags"], [6, 3, 1, "", "cudaStreamGetId"], [6, 3, 1, "", "cudaStreamGetPriority"], [6, 3, 1, "", "cudaStreamIsCapturing"], [6, 1, 1, "", "cudaStreamLegacy"], [6, 1, 1, "", "cudaStreamNonBlocking"], [6, 1, 1, "", "cudaStreamPerThread"], [6, 3, 1, "", "cudaStreamQuery"], [6, 3, 1, "", "cudaStreamSetAttribute"], [6, 3, 1, "", "cudaStreamSynchronize"], [6, 3, 1, "", "cudaStreamUpdateCaptureDependencies"], [6, 0, 1, "", "cudaStreamUpdateCaptureDependenciesFlags"], [6, 3, 1, "", "cudaStreamUpdateCaptureDependencies_v2"], [6, 3, 1, "", "cudaStreamWaitEvent"], [6, 0, 1, "", "cudaStream_t"], [6, 0, 1, "", "cudaSurfaceBoundaryMode"], [6, 0, 1, "", "cudaSurfaceFormatMode"], [6, 0, 1, "", "cudaSurfaceObject_t"], [6, 1, 1, "", "cudaSurfaceType1D"], [6, 1, 1, "", "cudaSurfaceType1DLayered"], [6, 1, 1, "", "cudaSurfaceType2D"], [6, 1, 1, "", "cudaSurfaceType2DLayered"], [6, 1, 1, "", "cudaSurfaceType3D"], [6, 1, 1, "", "cudaSurfaceTypeCubemap"], [6, 1, 1, "", "cudaSurfaceTypeCubemapLayered"], [6, 0, 1, "", "cudaSynchronizationPolicy"], [6, 0, 1, "", "cudaTextureAddressMode"], [6, 0, 1, "", "cudaTextureDesc"], [6, 0, 1, "", "cudaTextureFilterMode"], [6, 0, 1, "", "cudaTextureObject_t"], [6, 0, 1, "", "cudaTextureReadMode"], [6, 1, 1, "", "cudaTextureType1D"], [6, 1, 1, "", "cudaTextureType1DLayered"], [6, 1, 1, "", "cudaTextureType2D"], [6, 1, 1, "", "cudaTextureType2DLayered"], [6, 1, 1, "", "cudaTextureType3D"], [6, 1, 1, "", "cudaTextureTypeCubemap"], [6, 1, 1, "", "cudaTextureTypeCubemapLayered"], [6, 3, 1, "", "cudaThreadExchangeStreamCaptureMode"], [6, 0, 1, "", "cudaUUID_t"], [6, 3, 1, "", "cudaUserObjectCreate"], [6, 0, 1, "", "cudaUserObjectFlags"], [6, 3, 1, "", "cudaUserObjectRelease"], [6, 3, 1, "", "cudaUserObjectRetain"], [6, 0, 1, "", "cudaUserObjectRetainFlags"], [6, 0, 1, "", "cudaUserObject_t"], [6, 3, 1, "", "cudaVDPAUGetDevice"], [6, 3, 1, "", "cudaVDPAUSetVDPAUDevice"], [6, 3, 1, "", "cudaWaitExternalSemaphoresAsync"], [6, 3, 1, "", "getLocalRuntimeVersion"], [6, 3, 1, "", "make_cudaExtent"], [6, 3, 1, "", "make_cudaPitchedPtr"], [6, 3, 1, "", "make_cudaPos"]], "cuda.cudart.CUuuid": [[6, 1, 1, "", "bytes"], [6, 2, 1, "", "getPtr"]], "cuda.cudart.CUuuid_st": [[6, 1, 1, "", "bytes"], [6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaAccessPolicyWindow": [[6, 1, 1, "", "base_ptr"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "hitProp"], [6, 1, 1, "", "hitRatio"], [6, 1, 1, "", "missProp"], [6, 1, 1, "", "num_bytes"]], "cuda.cudart.cudaAccessProperty": [[6, 1, 1, "", "cudaAccessPropertyNormal"], [6, 1, 1, "", "cudaAccessPropertyPersisting"], [6, 1, 1, "", "cudaAccessPropertyStreaming"]], "cuda.cudart.cudaArrayMemoryRequirements": [[6, 1, 1, "", "alignment"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "size"]], "cuda.cudart.cudaArraySparseProperties": [[6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "miptailFirstLevel"], [6, 1, 1, "", "miptailSize"], [6, 1, 1, "", 
"reserved"], [6, 1, 1, "", "tileExtent"]], "cuda.cudart.cudaArray_const_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaArray_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaAsyncCallback": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaAsyncCallbackHandle_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaAsyncNotificationInfo": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "info"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaAsyncNotificationInfo_t": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "info"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaAsyncNotificationType": [[6, 1, 1, "", "cudaAsyncNotificationTypeOverBudget"]], "cuda.cudart.cudaCGScope": [[6, 1, 1, "", "cudaCGScopeGrid"], [6, 1, 1, "", "cudaCGScopeInvalid"], [6, 1, 1, "", "cudaCGScopeMultiGrid"]], "cuda.cudart.cudaChannelFormatDesc": [[6, 1, 1, "", "f"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "w"], [6, 1, 1, "", "x"], [6, 1, 1, "", "y"], [6, 1, 1, "", "z"]], "cuda.cudart.cudaChannelFormatKind": [[6, 1, 1, "", "cudaChannelFormatKindFloat"], [6, 1, 1, "", "cudaChannelFormatKindNV12"], [6, 1, 1, "", "cudaChannelFormatKindNone"], [6, 1, 1, "", "cudaChannelFormatKindSigned"], [6, 1, 1, "", "cudaChannelFormatKindSignedBlockCompressed4"], [6, 1, 1, "", "cudaChannelFormatKindSignedBlockCompressed5"], [6, 1, 1, "", "cudaChannelFormatKindSignedBlockCompressed6H"], [6, 1, 1, "", "cudaChannelFormatKindSignedNormalized16X1"], [6, 1, 1, "", "cudaChannelFormatKindSignedNormalized16X2"], [6, 1, 1, "", "cudaChannelFormatKindSignedNormalized16X4"], [6, 1, 1, "", "cudaChannelFormatKindSignedNormalized8X1"], [6, 1, 1, "", "cudaChannelFormatKindSignedNormalized8X2"], [6, 1, 1, "", "cudaChannelFormatKindSignedNormalized8X4"], [6, 1, 1, "", "cudaChannelFormatKindUnsigned"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed1"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed1SRGB"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed2"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed2SRGB"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed3"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed3SRGB"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed4"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed5"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed6H"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed7"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed7SRGB"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized16X1"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized16X2"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized16X4"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized8X1"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized8X2"], [6, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized8X4"]], "cuda.cudart.cudaChildGraphNodeParams": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "graph"]], "cuda.cudart.cudaClusterSchedulingPolicy": [[6, 1, 1, "", "cudaClusterSchedulingPolicyDefault"], [6, 1, 1, "", "cudaClusterSchedulingPolicyLoadBalancing"], [6, 1, 1, "", "cudaClusterSchedulingPolicySpread"]], "cuda.cudart.cudaComputeMode": [[6, 1, 1, "", "cudaComputeModeDefault"], [6, 1, 1, "", "cudaComputeModeExclusive"], [6, 1, 1, "", "cudaComputeModeExclusiveProcess"], [6, 1, 1, "", "cudaComputeModeProhibited"]], "cuda.cudart.cudaConditionalNodeParams": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "handle"], [6, 1, 1, "", "phGraph_out"], [6, 1, 1, "", "size"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaDeviceAttr": [[6, 1, 1, "", 
"cudaDevAttrAsyncEngineCount"], [6, 1, 1, "", "cudaDevAttrCanFlushRemoteWrites"], [6, 1, 1, "", "cudaDevAttrCanMapHostMemory"], [6, 1, 1, "", "cudaDevAttrCanUseHostPointerForRegisteredMem"], [6, 1, 1, "", "cudaDevAttrClockRate"], [6, 1, 1, "", "cudaDevAttrClusterLaunch"], [6, 1, 1, "", "cudaDevAttrComputeCapabilityMajor"], [6, 1, 1, "", "cudaDevAttrComputeCapabilityMinor"], [6, 1, 1, "", "cudaDevAttrComputeMode"], [6, 1, 1, "", "cudaDevAttrComputePreemptionSupported"], [6, 1, 1, "", "cudaDevAttrConcurrentKernels"], [6, 1, 1, "", "cudaDevAttrConcurrentManagedAccess"], [6, 1, 1, "", "cudaDevAttrCooperativeLaunch"], [6, 1, 1, "", "cudaDevAttrCooperativeMultiDeviceLaunch"], [6, 1, 1, "", "cudaDevAttrD3D12CigSupported"], [6, 1, 1, "", "cudaDevAttrDeferredMappingCudaArraySupported"], [6, 1, 1, "", "cudaDevAttrDirectManagedMemAccessFromHost"], [6, 1, 1, "", "cudaDevAttrEccEnabled"], [6, 1, 1, "", "cudaDevAttrGPUDirectRDMAFlushWritesOptions"], [6, 1, 1, "", "cudaDevAttrGPUDirectRDMASupported"], [6, 1, 1, "", "cudaDevAttrGPUDirectRDMAWritesOrdering"], [6, 1, 1, "", "cudaDevAttrGlobalL1CacheSupported"], [6, 1, 1, "", "cudaDevAttrGlobalMemoryBusWidth"], [6, 1, 1, "", "cudaDevAttrGpuOverlap"], [6, 1, 1, "", "cudaDevAttrHostNativeAtomicSupported"], [6, 1, 1, "", "cudaDevAttrHostNumaId"], [6, 1, 1, "", "cudaDevAttrHostRegisterReadOnlySupported"], [6, 1, 1, "", "cudaDevAttrHostRegisterSupported"], [6, 1, 1, "", "cudaDevAttrIntegrated"], [6, 1, 1, "", "cudaDevAttrIpcEventSupport"], [6, 1, 1, "", "cudaDevAttrIsMultiGpuBoard"], [6, 1, 1, "", "cudaDevAttrKernelExecTimeout"], [6, 1, 1, "", "cudaDevAttrL2CacheSize"], [6, 1, 1, "", "cudaDevAttrLocalL1CacheSupported"], [6, 1, 1, "", "cudaDevAttrManagedMemory"], [6, 1, 1, "", "cudaDevAttrMax"], [6, 1, 1, "", "cudaDevAttrMaxAccessPolicyWindowSize"], [6, 1, 1, "", "cudaDevAttrMaxBlockDimX"], [6, 1, 1, "", "cudaDevAttrMaxBlockDimY"], [6, 1, 1, "", "cudaDevAttrMaxBlockDimZ"], [6, 1, 1, "", "cudaDevAttrMaxBlocksPerMultiprocessor"], [6, 1, 1, "", "cudaDevAttrMaxGridDimX"], [6, 1, 1, "", "cudaDevAttrMaxGridDimY"], [6, 1, 1, "", "cudaDevAttrMaxGridDimZ"], [6, 1, 1, "", "cudaDevAttrMaxPersistingL2CacheSize"], [6, 1, 1, "", "cudaDevAttrMaxPitch"], [6, 1, 1, "", "cudaDevAttrMaxRegistersPerBlock"], [6, 1, 1, "", "cudaDevAttrMaxRegistersPerMultiprocessor"], [6, 1, 1, "", "cudaDevAttrMaxSharedMemoryPerBlock"], [6, 1, 1, "", "cudaDevAttrMaxSharedMemoryPerBlockOptin"], [6, 1, 1, "", "cudaDevAttrMaxSharedMemoryPerMultiprocessor"], [6, 1, 1, "", "cudaDevAttrMaxSurface1DLayeredLayers"], [6, 1, 1, "", "cudaDevAttrMaxSurface1DLayeredWidth"], [6, 1, 1, "", "cudaDevAttrMaxSurface1DWidth"], [6, 1, 1, "", "cudaDevAttrMaxSurface2DHeight"], [6, 1, 1, "", "cudaDevAttrMaxSurface2DLayeredHeight"], [6, 1, 1, "", "cudaDevAttrMaxSurface2DLayeredLayers"], [6, 1, 1, "", "cudaDevAttrMaxSurface2DLayeredWidth"], [6, 1, 1, "", "cudaDevAttrMaxSurface2DWidth"], [6, 1, 1, "", "cudaDevAttrMaxSurface3DDepth"], [6, 1, 1, "", "cudaDevAttrMaxSurface3DHeight"], [6, 1, 1, "", "cudaDevAttrMaxSurface3DWidth"], [6, 1, 1, "", "cudaDevAttrMaxSurfaceCubemapLayeredLayers"], [6, 1, 1, "", "cudaDevAttrMaxSurfaceCubemapLayeredWidth"], [6, 1, 1, "", "cudaDevAttrMaxSurfaceCubemapWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture1DLayeredLayers"], [6, 1, 1, "", "cudaDevAttrMaxTexture1DLayeredWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture1DLinearWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture1DMipmappedWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture1DWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DGatherHeight"], [6, 1, 1, "", 
"cudaDevAttrMaxTexture2DGatherWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DHeight"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DLayeredHeight"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DLayeredLayers"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DLayeredWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DLinearHeight"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DLinearPitch"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DLinearWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DMipmappedHeight"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DMipmappedWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture2DWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture3DDepth"], [6, 1, 1, "", "cudaDevAttrMaxTexture3DDepthAlt"], [6, 1, 1, "", "cudaDevAttrMaxTexture3DHeight"], [6, 1, 1, "", "cudaDevAttrMaxTexture3DHeightAlt"], [6, 1, 1, "", "cudaDevAttrMaxTexture3DWidth"], [6, 1, 1, "", "cudaDevAttrMaxTexture3DWidthAlt"], [6, 1, 1, "", "cudaDevAttrMaxTextureCubemapLayeredLayers"], [6, 1, 1, "", "cudaDevAttrMaxTextureCubemapLayeredWidth"], [6, 1, 1, "", "cudaDevAttrMaxTextureCubemapWidth"], [6, 1, 1, "", "cudaDevAttrMaxThreadsPerBlock"], [6, 1, 1, "", "cudaDevAttrMaxThreadsPerMultiProcessor"], [6, 1, 1, "", "cudaDevAttrMaxTimelineSemaphoreInteropSupported"], [6, 1, 1, "", "cudaDevAttrMemSyncDomainCount"], [6, 1, 1, "", "cudaDevAttrMemoryClockRate"], [6, 1, 1, "", "cudaDevAttrMemoryPoolSupportedHandleTypes"], [6, 1, 1, "", "cudaDevAttrMemoryPoolsSupported"], [6, 1, 1, "", "cudaDevAttrMpsEnabled"], [6, 1, 1, "", "cudaDevAttrMultiGpuBoardGroupID"], [6, 1, 1, "", "cudaDevAttrMultiProcessorCount"], [6, 1, 1, "", "cudaDevAttrNumaConfig"], [6, 1, 1, "", "cudaDevAttrNumaId"], [6, 1, 1, "", "cudaDevAttrPageableMemoryAccess"], [6, 1, 1, "", "cudaDevAttrPageableMemoryAccessUsesHostPageTables"], [6, 1, 1, "", "cudaDevAttrPciBusId"], [6, 1, 1, "", "cudaDevAttrPciDeviceId"], [6, 1, 1, "", "cudaDevAttrPciDomainId"], [6, 1, 1, "", "cudaDevAttrReserved122"], [6, 1, 1, "", "cudaDevAttrReserved123"], [6, 1, 1, "", "cudaDevAttrReserved124"], [6, 1, 1, "", "cudaDevAttrReserved127"], [6, 1, 1, "", "cudaDevAttrReserved128"], [6, 1, 1, "", "cudaDevAttrReserved129"], [6, 1, 1, "", "cudaDevAttrReserved132"], [6, 1, 1, "", "cudaDevAttrReserved92"], [6, 1, 1, "", "cudaDevAttrReserved93"], [6, 1, 1, "", "cudaDevAttrReserved94"], [6, 1, 1, "", "cudaDevAttrReservedSharedMemoryPerBlock"], [6, 1, 1, "", "cudaDevAttrSingleToDoublePrecisionPerfRatio"], [6, 1, 1, "", "cudaDevAttrSparseCudaArraySupported"], [6, 1, 1, "", "cudaDevAttrStreamPrioritiesSupported"], [6, 1, 1, "", "cudaDevAttrSurfaceAlignment"], [6, 1, 1, "", "cudaDevAttrTccDriver"], [6, 1, 1, "", "cudaDevAttrTextureAlignment"], [6, 1, 1, "", "cudaDevAttrTexturePitchAlignment"], [6, 1, 1, "", "cudaDevAttrTimelineSemaphoreInteropSupported"], [6, 1, 1, "", "cudaDevAttrTotalConstantMemory"], [6, 1, 1, "", "cudaDevAttrUnifiedAddressing"], [6, 1, 1, "", "cudaDevAttrWarpSize"]], "cuda.cudart.cudaDeviceNumaConfig": [[6, 1, 1, "", "cudaDeviceNumaConfigNone"], [6, 1, 1, "", "cudaDeviceNumaConfigNumaNode"]], "cuda.cudart.cudaDeviceP2PAttr": [[6, 1, 1, "", "cudaDevP2PAttrAccessSupported"], [6, 1, 1, "", "cudaDevP2PAttrCudaArrayAccessSupported"], [6, 1, 1, "", "cudaDevP2PAttrNativeAtomicSupported"], [6, 1, 1, "", "cudaDevP2PAttrPerformanceRank"]], "cuda.cudart.cudaDeviceProp": [[6, 1, 1, "", "ECCEnabled"], [6, 1, 1, "", "accessPolicyMaxWindowSize"], [6, 1, 1, "", "asyncEngineCount"], [6, 1, 1, "", "canMapHostMemory"], [6, 1, 1, "", "canUseHostPointerForRegisteredMem"], [6, 1, 1, "", "clockRate"], [6, 1, 1, "", "clusterLaunch"], [6, 1, 1, "", 
"computeMode"], [6, 1, 1, "", "computePreemptionSupported"], [6, 1, 1, "", "concurrentKernels"], [6, 1, 1, "", "concurrentManagedAccess"], [6, 1, 1, "", "cooperativeLaunch"], [6, 1, 1, "", "cooperativeMultiDeviceLaunch"], [6, 1, 1, "", "deferredMappingCudaArraySupported"], [6, 1, 1, "", "deviceOverlap"], [6, 1, 1, "", "directManagedMemAccessFromHost"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "globalL1CacheSupported"], [6, 1, 1, "", "gpuDirectRDMAFlushWritesOptions"], [6, 1, 1, "", "gpuDirectRDMASupported"], [6, 1, 1, "", "gpuDirectRDMAWritesOrdering"], [6, 1, 1, "", "hostNativeAtomicSupported"], [6, 1, 1, "", "hostRegisterReadOnlySupported"], [6, 1, 1, "", "hostRegisterSupported"], [6, 1, 1, "", "integrated"], [6, 1, 1, "", "ipcEventSupported"], [6, 1, 1, "", "isMultiGpuBoard"], [6, 1, 1, "", "kernelExecTimeoutEnabled"], [6, 1, 1, "", "l2CacheSize"], [6, 1, 1, "", "localL1CacheSupported"], [6, 1, 1, "", "luid"], [6, 1, 1, "", "luidDeviceNodeMask"], [6, 1, 1, "", "major"], [6, 1, 1, "", "managedMemory"], [6, 1, 1, "", "maxBlocksPerMultiProcessor"], [6, 1, 1, "", "maxGridSize"], [6, 1, 1, "", "maxSurface1D"], [6, 1, 1, "", "maxSurface1DLayered"], [6, 1, 1, "", "maxSurface2D"], [6, 1, 1, "", "maxSurface2DLayered"], [6, 1, 1, "", "maxSurface3D"], [6, 1, 1, "", "maxSurfaceCubemap"], [6, 1, 1, "", "maxSurfaceCubemapLayered"], [6, 1, 1, "", "maxTexture1D"], [6, 1, 1, "", "maxTexture1DLayered"], [6, 1, 1, "", "maxTexture1DLinear"], [6, 1, 1, "", "maxTexture1DMipmap"], [6, 1, 1, "", "maxTexture2D"], [6, 1, 1, "", "maxTexture2DGather"], [6, 1, 1, "", "maxTexture2DLayered"], [6, 1, 1, "", "maxTexture2DLinear"], [6, 1, 1, "", "maxTexture2DMipmap"], [6, 1, 1, "", "maxTexture3D"], [6, 1, 1, "", "maxTexture3DAlt"], [6, 1, 1, "", "maxTextureCubemap"], [6, 1, 1, "", "maxTextureCubemapLayered"], [6, 1, 1, "", "maxThreadsDim"], [6, 1, 1, "", "maxThreadsPerBlock"], [6, 1, 1, "", "maxThreadsPerMultiProcessor"], [6, 1, 1, "", "memPitch"], [6, 1, 1, "", "memoryBusWidth"], [6, 1, 1, "", "memoryClockRate"], [6, 1, 1, "", "memoryPoolSupportedHandleTypes"], [6, 1, 1, "", "memoryPoolsSupported"], [6, 1, 1, "", "minor"], [6, 1, 1, "", "multiGpuBoardGroupID"], [6, 1, 1, "", "multiProcessorCount"], [6, 1, 1, "", "name"], [6, 1, 1, "", "pageableMemoryAccess"], [6, 1, 1, "", "pageableMemoryAccessUsesHostPageTables"], [6, 1, 1, "", "pciBusID"], [6, 1, 1, "", "pciDeviceID"], [6, 1, 1, "", "pciDomainID"], [6, 1, 1, "", "persistingL2CacheMaxSize"], [6, 1, 1, "", "regsPerBlock"], [6, 1, 1, "", "regsPerMultiprocessor"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "reserved1"], [6, 1, 1, "", "reserved2"], [6, 1, 1, "", "reservedSharedMemPerBlock"], [6, 1, 1, "", "sharedMemPerBlock"], [6, 1, 1, "", "sharedMemPerBlockOptin"], [6, 1, 1, "", "sharedMemPerMultiprocessor"], [6, 1, 1, "", "singleToDoublePrecisionPerfRatio"], [6, 1, 1, "", "sparseCudaArraySupported"], [6, 1, 1, "", "streamPrioritiesSupported"], [6, 1, 1, "", "surfaceAlignment"], [6, 1, 1, "", "tccDriver"], [6, 1, 1, "", "textureAlignment"], [6, 1, 1, "", "texturePitchAlignment"], [6, 1, 1, "", "timelineSemaphoreInteropSupported"], [6, 1, 1, "", "totalConstMem"], [6, 1, 1, "", "totalGlobalMem"], [6, 1, 1, "", "unifiedAddressing"], [6, 1, 1, "", "unifiedFunctionPointers"], [6, 1, 1, "", "uuid"], [6, 1, 1, "", "warpSize"]], "cuda.cudart.cudaDriverEntryPointQueryResult": [[6, 1, 1, "", "cudaDriverEntryPointSuccess"], [6, 1, 1, "", "cudaDriverEntryPointSymbolNotFound"], [6, 1, 1, "", "cudaDriverEntryPointVersionNotSufficent"]], "cuda.cudart.cudaEglColorFormat": [[6, 1, 1, "", 
"cudaEglColorFormatA"], [6, 1, 1, "", "cudaEglColorFormatABGR"], [6, 1, 1, "", "cudaEglColorFormatARGB"], [6, 1, 1, "", "cudaEglColorFormatAYUV"], [6, 1, 1, "", "cudaEglColorFormatAYUV_ER"], [6, 1, 1, "", "cudaEglColorFormatBGRA"], [6, 1, 1, "", "cudaEglColorFormatBayer10BGGR"], [6, 1, 1, "", "cudaEglColorFormatBayer10CCCC"], [6, 1, 1, "", "cudaEglColorFormatBayer10GBRG"], [6, 1, 1, "", "cudaEglColorFormatBayer10GRBG"], [6, 1, 1, "", "cudaEglColorFormatBayer10RGGB"], [6, 1, 1, "", "cudaEglColorFormatBayer12BCCR"], [6, 1, 1, "", "cudaEglColorFormatBayer12BGGR"], [6, 1, 1, "", "cudaEglColorFormatBayer12CBRC"], [6, 1, 1, "", "cudaEglColorFormatBayer12CCCC"], [6, 1, 1, "", "cudaEglColorFormatBayer12CRBC"], [6, 1, 1, "", "cudaEglColorFormatBayer12GBRG"], [6, 1, 1, "", "cudaEglColorFormatBayer12GRBG"], [6, 1, 1, "", "cudaEglColorFormatBayer12RCCB"], [6, 1, 1, "", "cudaEglColorFormatBayer12RGGB"], [6, 1, 1, "", "cudaEglColorFormatBayer14BGGR"], [6, 1, 1, "", "cudaEglColorFormatBayer14GBRG"], [6, 1, 1, "", "cudaEglColorFormatBayer14GRBG"], [6, 1, 1, "", "cudaEglColorFormatBayer14RGGB"], [6, 1, 1, "", "cudaEglColorFormatBayer20BGGR"], [6, 1, 1, "", "cudaEglColorFormatBayer20GBRG"], [6, 1, 1, "", "cudaEglColorFormatBayer20GRBG"], [6, 1, 1, "", "cudaEglColorFormatBayer20RGGB"], [6, 1, 1, "", "cudaEglColorFormatBayerBCCR"], [6, 1, 1, "", "cudaEglColorFormatBayerBGGR"], [6, 1, 1, "", "cudaEglColorFormatBayerCBRC"], [6, 1, 1, "", "cudaEglColorFormatBayerCRBC"], [6, 1, 1, "", "cudaEglColorFormatBayerGBRG"], [6, 1, 1, "", "cudaEglColorFormatBayerGRBG"], [6, 1, 1, "", "cudaEglColorFormatBayerIspBGGR"], [6, 1, 1, "", "cudaEglColorFormatBayerIspGBRG"], [6, 1, 1, "", "cudaEglColorFormatBayerIspGRBG"], [6, 1, 1, "", "cudaEglColorFormatBayerIspRGGB"], [6, 1, 1, "", "cudaEglColorFormatBayerRCCB"], [6, 1, 1, "", "cudaEglColorFormatBayerRGGB"], [6, 1, 1, "", "cudaEglColorFormatL"], [6, 1, 1, "", "cudaEglColorFormatR"], [6, 1, 1, "", "cudaEglColorFormatRG"], [6, 1, 1, "", "cudaEglColorFormatRGBA"], [6, 1, 1, "", "cudaEglColorFormatUYVY422"], [6, 1, 1, "", "cudaEglColorFormatUYVY_ER"], [6, 1, 1, "", "cudaEglColorFormatVYUY"], [6, 1, 1, "", "cudaEglColorFormatVYUY_ER"], [6, 1, 1, "", "cudaEglColorFormatY"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_2020"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_709"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_422SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_422SemiPlanar_2020"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_422SemiPlanar_709"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_444SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER"], [6, 1, 1, "", "cudaEglColorFormatY10V10U10_444SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatY10_709_ER"], [6, 1, 1, "", "cudaEglColorFormatY10_ER"], [6, 1, 1, "", "cudaEglColorFormatY12V12U12_420SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER"], [6, 1, 1, "", "cudaEglColorFormatY12V12U12_420SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatY12V12U12_444SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER"], [6, 1, 1, "", "cudaEglColorFormatY12V12U12_444SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatY12_709_ER"], [6, 1, 1, "", "cudaEglColorFormatY12_ER"], [6, 1, 1, "", "cudaEglColorFormatYUV420Planar"], [6, 1, 1, "", 
"cudaEglColorFormatYUV420Planar_2020"], [6, 1, 1, "", "cudaEglColorFormatYUV420Planar_709"], [6, 1, 1, "", "cudaEglColorFormatYUV420Planar_ER"], [6, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar_2020"], [6, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar_709"], [6, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatYUV422Planar"], [6, 1, 1, "", "cudaEglColorFormatYUV422Planar_ER"], [6, 1, 1, "", "cudaEglColorFormatYUV422SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatYUV422SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatYUV444Planar"], [6, 1, 1, "", "cudaEglColorFormatYUV444Planar_ER"], [6, 1, 1, "", "cudaEglColorFormatYUV444SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatYUV444SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatYUVA"], [6, 1, 1, "", "cudaEglColorFormatYUVA_ER"], [6, 1, 1, "", "cudaEglColorFormatYUYV422"], [6, 1, 1, "", "cudaEglColorFormatYUYV_ER"], [6, 1, 1, "", "cudaEglColorFormatYVU420Planar"], [6, 1, 1, "", "cudaEglColorFormatYVU420Planar_2020"], [6, 1, 1, "", "cudaEglColorFormatYVU420Planar_709"], [6, 1, 1, "", "cudaEglColorFormatYVU420Planar_ER"], [6, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar_2020"], [6, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar_709"], [6, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatYVU422Planar"], [6, 1, 1, "", "cudaEglColorFormatYVU422Planar_ER"], [6, 1, 1, "", "cudaEglColorFormatYVU422SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatYVU422SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatYVU444Planar"], [6, 1, 1, "", "cudaEglColorFormatYVU444Planar_ER"], [6, 1, 1, "", "cudaEglColorFormatYVU444SemiPlanar"], [6, 1, 1, "", "cudaEglColorFormatYVU444SemiPlanar_ER"], [6, 1, 1, "", "cudaEglColorFormatYVYU"], [6, 1, 1, "", "cudaEglColorFormatYVYU_ER"], [6, 1, 1, "", "cudaEglColorFormatY_709_ER"], [6, 1, 1, "", "cudaEglColorFormatY_ER"]], "cuda.cudart.cudaEglFrame": [[6, 1, 1, "", "eglColorFormat"], [6, 1, 1, "", "frame"], [6, 1, 1, "", "frameType"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "planeCount"], [6, 1, 1, "", "planeDesc"]], "cuda.cudart.cudaEglFrameType": [[6, 1, 1, "", "cudaEglFrameTypeArray"], [6, 1, 1, "", "cudaEglFrameTypePitch"]], "cuda.cudart.cudaEglFrame_st": [[6, 1, 1, "", "eglColorFormat"], [6, 1, 1, "", "frame"], [6, 1, 1, "", "frameType"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "planeCount"], [6, 1, 1, "", "planeDesc"]], "cuda.cudart.cudaEglPlaneDesc": [[6, 1, 1, "", "channelDesc"], [6, 1, 1, "", "depth"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "height"], [6, 1, 1, "", "numChannels"], [6, 1, 1, "", "pitch"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "width"]], "cuda.cudart.cudaEglPlaneDesc_st": [[6, 1, 1, "", "channelDesc"], [6, 1, 1, "", "depth"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "height"], [6, 1, 1, "", "numChannels"], [6, 1, 1, "", "pitch"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "width"]], "cuda.cudart.cudaEglResourceLocationFlags": [[6, 1, 1, "", "cudaEglResourceLocationSysmem"], [6, 1, 1, "", "cudaEglResourceLocationVidmem"]], "cuda.cudart.cudaEglStreamConnection": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaError_t": [[6, 1, 1, "", "cudaErrorAddressOfConstant"], [6, 1, 1, "", "cudaErrorAlreadyAcquired"], [6, 1, 1, "", "cudaErrorAlreadyMapped"], [6, 1, 1, "", "cudaErrorApiFailureBase"], [6, 1, 1, "", "cudaErrorArrayIsMapped"], [6, 1, 1, "", "cudaErrorAssert"], [6, 1, 1, "", "cudaErrorCallRequiresNewerDriver"], [6, 1, 1, "", 
"cudaErrorCapturedEvent"], [6, 1, 1, "", "cudaErrorCdpNotSupported"], [6, 1, 1, "", "cudaErrorCdpVersionMismatch"], [6, 1, 1, "", "cudaErrorCompatNotSupportedOnDevice"], [6, 1, 1, "", "cudaErrorContextIsDestroyed"], [6, 1, 1, "", "cudaErrorCooperativeLaunchTooLarge"], [6, 1, 1, "", "cudaErrorCudartUnloading"], [6, 1, 1, "", "cudaErrorDeviceAlreadyInUse"], [6, 1, 1, "", "cudaErrorDeviceNotLicensed"], [6, 1, 1, "", "cudaErrorDeviceUninitialized"], [6, 1, 1, "", "cudaErrorDevicesUnavailable"], [6, 1, 1, "", "cudaErrorDuplicateSurfaceName"], [6, 1, 1, "", "cudaErrorDuplicateTextureName"], [6, 1, 1, "", "cudaErrorDuplicateVariableName"], [6, 1, 1, "", "cudaErrorECCUncorrectable"], [6, 1, 1, "", "cudaErrorExternalDevice"], [6, 1, 1, "", "cudaErrorFileNotFound"], [6, 1, 1, "", "cudaErrorFunctionNotLoaded"], [6, 1, 1, "", "cudaErrorGraphExecUpdateFailure"], [6, 1, 1, "", "cudaErrorHardwareStackError"], [6, 1, 1, "", "cudaErrorHostMemoryAlreadyRegistered"], [6, 1, 1, "", "cudaErrorHostMemoryNotRegistered"], [6, 1, 1, "", "cudaErrorIllegalAddress"], [6, 1, 1, "", "cudaErrorIllegalInstruction"], [6, 1, 1, "", "cudaErrorIllegalState"], [6, 1, 1, "", "cudaErrorIncompatibleDriverContext"], [6, 1, 1, "", "cudaErrorInitializationError"], [6, 1, 1, "", "cudaErrorInsufficientDriver"], [6, 1, 1, "", "cudaErrorInvalidAddressSpace"], [6, 1, 1, "", "cudaErrorInvalidChannelDescriptor"], [6, 1, 1, "", "cudaErrorInvalidClusterSize"], [6, 1, 1, "", "cudaErrorInvalidConfiguration"], [6, 1, 1, "", "cudaErrorInvalidDevice"], [6, 1, 1, "", "cudaErrorInvalidDeviceFunction"], [6, 1, 1, "", "cudaErrorInvalidDevicePointer"], [6, 1, 1, "", "cudaErrorInvalidFilterSetting"], [6, 1, 1, "", "cudaErrorInvalidGraphicsContext"], [6, 1, 1, "", "cudaErrorInvalidHostPointer"], [6, 1, 1, "", "cudaErrorInvalidKernelImage"], [6, 1, 1, "", "cudaErrorInvalidMemcpyDirection"], [6, 1, 1, "", "cudaErrorInvalidNormSetting"], [6, 1, 1, "", "cudaErrorInvalidPc"], [6, 1, 1, "", "cudaErrorInvalidPitchValue"], [6, 1, 1, "", "cudaErrorInvalidPtx"], [6, 1, 1, "", "cudaErrorInvalidResourceConfiguration"], [6, 1, 1, "", "cudaErrorInvalidResourceHandle"], [6, 1, 1, "", "cudaErrorInvalidResourceType"], [6, 1, 1, "", "cudaErrorInvalidSource"], [6, 1, 1, "", "cudaErrorInvalidSurface"], [6, 1, 1, "", "cudaErrorInvalidSymbol"], [6, 1, 1, "", "cudaErrorInvalidTexture"], [6, 1, 1, "", "cudaErrorInvalidTextureBinding"], [6, 1, 1, "", "cudaErrorInvalidValue"], [6, 1, 1, "", "cudaErrorJitCompilationDisabled"], [6, 1, 1, "", "cudaErrorJitCompilerNotFound"], [6, 1, 1, "", "cudaErrorLaunchFailure"], [6, 1, 1, "", "cudaErrorLaunchFileScopedSurf"], [6, 1, 1, "", "cudaErrorLaunchFileScopedTex"], [6, 1, 1, "", "cudaErrorLaunchIncompatibleTexturing"], [6, 1, 1, "", "cudaErrorLaunchMaxDepthExceeded"], [6, 1, 1, "", "cudaErrorLaunchOutOfResources"], [6, 1, 1, "", "cudaErrorLaunchPendingCountExceeded"], [6, 1, 1, "", "cudaErrorLaunchTimeout"], [6, 1, 1, "", "cudaErrorLossyQuery"], [6, 1, 1, "", "cudaErrorMapBufferObjectFailed"], [6, 1, 1, "", "cudaErrorMemoryAllocation"], [6, 1, 1, "", "cudaErrorMemoryValueTooLarge"], [6, 1, 1, "", "cudaErrorMisalignedAddress"], [6, 1, 1, "", "cudaErrorMissingConfiguration"], [6, 1, 1, "", "cudaErrorMixedDeviceExecution"], [6, 1, 1, "", "cudaErrorMpsClientTerminated"], [6, 1, 1, "", "cudaErrorMpsConnectionFailed"], [6, 1, 1, "", "cudaErrorMpsMaxClientsReached"], [6, 1, 1, "", "cudaErrorMpsMaxConnectionsReached"], [6, 1, 1, "", "cudaErrorMpsRpcFailure"], [6, 1, 1, "", "cudaErrorMpsServerNotReady"], [6, 1, 1, "", "cudaErrorNoDevice"], [6, 1, 
1, "", "cudaErrorNoKernelImageForDevice"], [6, 1, 1, "", "cudaErrorNotMapped"], [6, 1, 1, "", "cudaErrorNotMappedAsArray"], [6, 1, 1, "", "cudaErrorNotMappedAsPointer"], [6, 1, 1, "", "cudaErrorNotPermitted"], [6, 1, 1, "", "cudaErrorNotReady"], [6, 1, 1, "", "cudaErrorNotSupported"], [6, 1, 1, "", "cudaErrorNotYetImplemented"], [6, 1, 1, "", "cudaErrorNvlinkUncorrectable"], [6, 1, 1, "", "cudaErrorOperatingSystem"], [6, 1, 1, "", "cudaErrorPeerAccessAlreadyEnabled"], [6, 1, 1, "", "cudaErrorPeerAccessNotEnabled"], [6, 1, 1, "", "cudaErrorPeerAccessUnsupported"], [6, 1, 1, "", "cudaErrorPriorLaunchFailure"], [6, 1, 1, "", "cudaErrorProfilerAlreadyStarted"], [6, 1, 1, "", "cudaErrorProfilerAlreadyStopped"], [6, 1, 1, "", "cudaErrorProfilerDisabled"], [6, 1, 1, "", "cudaErrorProfilerNotInitialized"], [6, 1, 1, "", "cudaErrorSetOnActiveProcess"], [6, 1, 1, "", "cudaErrorSharedObjectInitFailed"], [6, 1, 1, "", "cudaErrorSharedObjectSymbolNotFound"], [6, 1, 1, "", "cudaErrorSoftwareValidityNotEstablished"], [6, 1, 1, "", "cudaErrorStartupFailure"], [6, 1, 1, "", "cudaErrorStreamCaptureImplicit"], [6, 1, 1, "", "cudaErrorStreamCaptureInvalidated"], [6, 1, 1, "", "cudaErrorStreamCaptureIsolation"], [6, 1, 1, "", "cudaErrorStreamCaptureMerge"], [6, 1, 1, "", "cudaErrorStreamCaptureUnjoined"], [6, 1, 1, "", "cudaErrorStreamCaptureUnmatched"], [6, 1, 1, "", "cudaErrorStreamCaptureUnsupported"], [6, 1, 1, "", "cudaErrorStreamCaptureWrongThread"], [6, 1, 1, "", "cudaErrorStubLibrary"], [6, 1, 1, "", "cudaErrorSymbolNotFound"], [6, 1, 1, "", "cudaErrorSyncDepthExceeded"], [6, 1, 1, "", "cudaErrorSynchronizationError"], [6, 1, 1, "", "cudaErrorSystemDriverMismatch"], [6, 1, 1, "", "cudaErrorSystemNotReady"], [6, 1, 1, "", "cudaErrorTextureFetchFailed"], [6, 1, 1, "", "cudaErrorTextureNotBound"], [6, 1, 1, "", "cudaErrorTimeout"], [6, 1, 1, "", "cudaErrorTooManyPeers"], [6, 1, 1, "", "cudaErrorUnknown"], [6, 1, 1, "", "cudaErrorUnmapBufferObjectFailed"], [6, 1, 1, "", "cudaErrorUnsupportedDevSideSync"], [6, 1, 1, "", "cudaErrorUnsupportedExecAffinity"], [6, 1, 1, "", "cudaErrorUnsupportedLimit"], [6, 1, 1, "", "cudaErrorUnsupportedPtxVersion"], [6, 1, 1, "", "cudaSuccess"]], "cuda.cudart.cudaEventRecordNodeParams": [[6, 1, 1, "", "event"], [6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaEventWaitNodeParams": [[6, 1, 1, "", "event"], [6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaEvent_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaExtent": [[6, 1, 1, "", "depth"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "height"], [6, 1, 1, "", "width"]], "cuda.cudart.cudaExternalMemoryBufferDesc": [[6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "offset"], [6, 1, 1, "", "size"]], "cuda.cudart.cudaExternalMemoryHandleDesc": [[6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "handle"], [6, 1, 1, "", "size"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaExternalMemoryHandleType": [[6, 1, 1, "", "cudaExternalMemoryHandleTypeD3D11Resource"], [6, 1, 1, "", "cudaExternalMemoryHandleTypeD3D11ResourceKmt"], [6, 1, 1, "", "cudaExternalMemoryHandleTypeD3D12Heap"], [6, 1, 1, "", "cudaExternalMemoryHandleTypeD3D12Resource"], [6, 1, 1, "", "cudaExternalMemoryHandleTypeNvSciBuf"], [6, 1, 1, "", "cudaExternalMemoryHandleTypeOpaqueFd"], [6, 1, 1, "", "cudaExternalMemoryHandleTypeOpaqueWin32"], [6, 1, 1, "", "cudaExternalMemoryHandleTypeOpaqueWin32Kmt"]], "cuda.cudart.cudaExternalMemoryMipmappedArrayDesc": [[6, 1, 1, "", "extent"], [6, 1, 1, "", "flags"], [6, 1, 1, "", "formatDesc"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", 
"numLevels"], [6, 1, 1, "", "offset"]], "cuda.cudart.cudaExternalMemory_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaExternalSemaphoreHandleDesc": [[6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "handle"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaExternalSemaphoreHandleType": [[6, 1, 1, "", "cudaExternalSemaphoreHandleTypeD3D11Fence"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeD3D12Fence"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeKeyedMutex"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeKeyedMutexKmt"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeNvSciSync"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeOpaqueFd"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeOpaqueWin32"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd"], [6, 1, 1, "", "cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32"]], "cuda.cudart.cudaExternalSemaphoreSignalNodeParams": [[6, 1, 1, "", "extSemArray"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "numExtSems"], [6, 1, 1, "", "paramsArray"]], "cuda.cudart.cudaExternalSemaphoreSignalNodeParamsV2": [[6, 1, 1, "", "extSemArray"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "numExtSems"], [6, 1, 1, "", "paramsArray"]], "cuda.cudart.cudaExternalSemaphoreSignalParams": [[6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "params"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaExternalSemaphoreWaitNodeParams": [[6, 1, 1, "", "extSemArray"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "numExtSems"], [6, 1, 1, "", "paramsArray"]], "cuda.cudart.cudaExternalSemaphoreWaitNodeParamsV2": [[6, 1, 1, "", "extSemArray"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "numExtSems"], [6, 1, 1, "", "paramsArray"]], "cuda.cudart.cudaExternalSemaphoreWaitParams": [[6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "params"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaExternalSemaphore_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions": [[6, 1, 1, "", "cudaFlushGPUDirectRDMAWritesOptionHost"], [6, 1, 1, "", "cudaFlushGPUDirectRDMAWritesOptionMemOps"]], "cuda.cudart.cudaFlushGPUDirectRDMAWritesScope": [[6, 1, 1, "", "cudaFlushGPUDirectRDMAWritesToAllDevices"], [6, 1, 1, "", "cudaFlushGPUDirectRDMAWritesToOwner"]], "cuda.cudart.cudaFlushGPUDirectRDMAWritesTarget": [[6, 1, 1, "", "cudaFlushGPUDirectRDMAWritesTargetCurrentDevice"]], "cuda.cudart.cudaFuncAttribute": [[6, 1, 1, "", "cudaFuncAttributeClusterDimMustBeSet"], [6, 1, 1, "", "cudaFuncAttributeClusterSchedulingPolicyPreference"], [6, 1, 1, "", "cudaFuncAttributeMax"], [6, 1, 1, "", "cudaFuncAttributeMaxDynamicSharedMemorySize"], [6, 1, 1, "", "cudaFuncAttributeNonPortableClusterSizeAllowed"], [6, 1, 1, "", "cudaFuncAttributePreferredSharedMemoryCarveout"], [6, 1, 1, "", "cudaFuncAttributeRequiredClusterDepth"], [6, 1, 1, "", "cudaFuncAttributeRequiredClusterHeight"], [6, 1, 1, "", "cudaFuncAttributeRequiredClusterWidth"]], "cuda.cudart.cudaFuncAttributes": [[6, 1, 1, "", "binaryVersion"], [6, 1, 1, "", "cacheModeCA"], [6, 1, 1, "", "clusterDimMustBeSet"], [6, 1, 1, "", "clusterSchedulingPolicyPreference"], [6, 1, 1, "", "constSizeBytes"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "localSizeBytes"], [6, 1, 1, "", "maxDynamicSharedSizeBytes"], [6, 1, 1, "", "maxThreadsPerBlock"], [6, 1, 1, "", "nonPortableClusterSizeAllowed"], [6, 1, 1, "", "numRegs"], [6, 1, 1, "", "preferredShmemCarveout"], [6, 1, 1, "", "ptxVersion"], [6, 1, 1, "", "requiredClusterDepth"], [6, 1, 1, "", 
"requiredClusterHeight"], [6, 1, 1, "", "requiredClusterWidth"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "sharedSizeBytes"]], "cuda.cudart.cudaFuncCache": [[6, 1, 1, "", "cudaFuncCachePreferEqual"], [6, 1, 1, "", "cudaFuncCachePreferL1"], [6, 1, 1, "", "cudaFuncCachePreferNone"], [6, 1, 1, "", "cudaFuncCachePreferShared"]], "cuda.cudart.cudaFunction_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaGLDeviceList": [[6, 1, 1, "", "cudaGLDeviceListAll"], [6, 1, 1, "", "cudaGLDeviceListCurrentFrame"], [6, 1, 1, "", "cudaGLDeviceListNextFrame"]], "cuda.cudart.cudaGPUDirectRDMAWritesOrdering": [[6, 1, 1, "", "cudaGPUDirectRDMAWritesOrderingAllDevices"], [6, 1, 1, "", "cudaGPUDirectRDMAWritesOrderingNone"], [6, 1, 1, "", "cudaGPUDirectRDMAWritesOrderingOwner"]], "cuda.cudart.cudaGetDriverEntryPointFlags": [[6, 1, 1, "", "cudaEnableDefault"], [6, 1, 1, "", "cudaEnableLegacyStream"], [6, 1, 1, "", "cudaEnablePerThreadDefaultStream"]], "cuda.cudart.cudaGraphConditionalHandle": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaGraphConditionalHandleFlags": [[6, 1, 1, "", "cudaGraphCondAssignDefault"]], "cuda.cudart.cudaGraphConditionalNodeType": [[6, 1, 1, "", "cudaGraphCondTypeIf"], [6, 1, 1, "", "cudaGraphCondTypeWhile"]], "cuda.cudart.cudaGraphDebugDotFlags": [[6, 1, 1, "", "cudaGraphDebugDotFlagsConditionalNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsEventNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsExtSemasSignalNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsExtSemasWaitNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsHandles"], [6, 1, 1, "", "cudaGraphDebugDotFlagsHostNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsKernelNodeAttributes"], [6, 1, 1, "", "cudaGraphDebugDotFlagsKernelNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsMemcpyNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsMemsetNodeParams"], [6, 1, 1, "", "cudaGraphDebugDotFlagsVerbose"]], "cuda.cudart.cudaGraphDependencyType": [[6, 1, 1, "", "cudaGraphDependencyTypeDefault"], [6, 1, 1, "", "cudaGraphDependencyTypeProgrammatic"]], "cuda.cudart.cudaGraphDeviceNode_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaGraphEdgeData": [[6, 1, 1, "", "from_port"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "to_port"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaGraphEdgeData_st": [[6, 1, 1, "", "from_port"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "to_port"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaGraphExecUpdateResult": [[6, 1, 1, "", "cudaGraphExecUpdateError"], [6, 1, 1, "", "cudaGraphExecUpdateErrorAttributesChanged"], [6, 1, 1, "", "cudaGraphExecUpdateErrorFunctionChanged"], [6, 1, 1, "", "cudaGraphExecUpdateErrorNodeTypeChanged"], [6, 1, 1, "", "cudaGraphExecUpdateErrorNotSupported"], [6, 1, 1, "", "cudaGraphExecUpdateErrorParametersChanged"], [6, 1, 1, "", "cudaGraphExecUpdateErrorTopologyChanged"], [6, 1, 1, "", "cudaGraphExecUpdateErrorUnsupportedFunctionChange"], [6, 1, 1, "", "cudaGraphExecUpdateSuccess"]], "cuda.cudart.cudaGraphExecUpdateResultInfo": [[6, 1, 1, "", "errorFromNode"], [6, 1, 1, "", "errorNode"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "result"]], "cuda.cudart.cudaGraphExecUpdateResultInfo_st": [[6, 1, 1, "", "errorFromNode"], [6, 1, 1, "", "errorNode"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "result"]], "cuda.cudart.cudaGraphExec_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaGraphInstantiateFlags": [[6, 1, 1, "", "cudaGraphInstantiateFlagAutoFreeOnLaunch"], [6, 1, 1, "", "cudaGraphInstantiateFlagDeviceLaunch"], [6, 1, 1, "", 
"cudaGraphInstantiateFlagUpload"], [6, 1, 1, "", "cudaGraphInstantiateFlagUseNodePriority"]], "cuda.cudart.cudaGraphInstantiateParams": [[6, 1, 1, "", "errNode_out"], [6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "result_out"], [6, 1, 1, "", "uploadStream"]], "cuda.cudart.cudaGraphInstantiateParams_st": [[6, 1, 1, "", "errNode_out"], [6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "result_out"], [6, 1, 1, "", "uploadStream"]], "cuda.cudart.cudaGraphInstantiateResult": [[6, 1, 1, "", "cudaGraphInstantiateError"], [6, 1, 1, "", "cudaGraphInstantiateInvalidStructure"], [6, 1, 1, "", "cudaGraphInstantiateMultipleDevicesNotSupported"], [6, 1, 1, "", "cudaGraphInstantiateNodeOperationNotSupported"], [6, 1, 1, "", "cudaGraphInstantiateSuccess"]], "cuda.cudart.cudaGraphKernelNodeField": [[6, 1, 1, "", "cudaGraphKernelNodeFieldEnabled"], [6, 1, 1, "", "cudaGraphKernelNodeFieldGridDim"], [6, 1, 1, "", "cudaGraphKernelNodeFieldInvalid"], [6, 1, 1, "", "cudaGraphKernelNodeFieldParam"]], "cuda.cudart.cudaGraphKernelNodeUpdate": [[6, 1, 1, "", "field"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "node"], [6, 1, 1, "", "updateData"]], "cuda.cudart.cudaGraphMemAttributeType": [[6, 1, 1, "", "cudaGraphMemAttrReservedMemCurrent"], [6, 1, 1, "", "cudaGraphMemAttrReservedMemHigh"], [6, 1, 1, "", "cudaGraphMemAttrUsedMemCurrent"], [6, 1, 1, "", "cudaGraphMemAttrUsedMemHigh"]], "cuda.cudart.cudaGraphNodeParams": [[6, 1, 1, "", "alloc"], [6, 1, 1, "", "conditional"], [6, 1, 1, "", "eventRecord"], [6, 1, 1, "", "eventWait"], [6, 1, 1, "", "extSemSignal"], [6, 1, 1, "", "extSemWait"], [6, 1, 1, "", "free"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "graph"], [6, 1, 1, "", "host"], [6, 1, 1, "", "kernel"], [6, 1, 1, "", "memcpy"], [6, 1, 1, "", "memset"], [6, 1, 1, "", "reserved0"], [6, 1, 1, "", "reserved1"], [6, 1, 1, "", "reserved2"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaGraphNodeType": [[6, 1, 1, "", "cudaGraphNodeTypeConditional"], [6, 1, 1, "", "cudaGraphNodeTypeCount"], [6, 1, 1, "", "cudaGraphNodeTypeEmpty"], [6, 1, 1, "", "cudaGraphNodeTypeEventRecord"], [6, 1, 1, "", "cudaGraphNodeTypeExtSemaphoreSignal"], [6, 1, 1, "", "cudaGraphNodeTypeExtSemaphoreWait"], [6, 1, 1, "", "cudaGraphNodeTypeGraph"], [6, 1, 1, "", "cudaGraphNodeTypeHost"], [6, 1, 1, "", "cudaGraphNodeTypeKernel"], [6, 1, 1, "", "cudaGraphNodeTypeMemAlloc"], [6, 1, 1, "", "cudaGraphNodeTypeMemFree"], [6, 1, 1, "", "cudaGraphNodeTypeMemcpy"], [6, 1, 1, "", "cudaGraphNodeTypeMemset"], [6, 1, 1, "", "cudaGraphNodeTypeWaitEvent"]], "cuda.cudart.cudaGraphNode_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaGraph_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaGraphicsCubeFace": [[6, 1, 1, "", "cudaGraphicsCubeFaceNegativeX"], [6, 1, 1, "", "cudaGraphicsCubeFaceNegativeY"], [6, 1, 1, "", "cudaGraphicsCubeFaceNegativeZ"], [6, 1, 1, "", "cudaGraphicsCubeFacePositiveX"], [6, 1, 1, "", "cudaGraphicsCubeFacePositiveY"], [6, 1, 1, "", "cudaGraphicsCubeFacePositiveZ"]], "cuda.cudart.cudaGraphicsMapFlags": [[6, 1, 1, "", "cudaGraphicsMapFlagsNone"], [6, 1, 1, "", "cudaGraphicsMapFlagsReadOnly"], [6, 1, 1, "", "cudaGraphicsMapFlagsWriteDiscard"]], "cuda.cudart.cudaGraphicsRegisterFlags": [[6, 1, 1, "", "cudaGraphicsRegisterFlagsNone"], [6, 1, 1, "", "cudaGraphicsRegisterFlagsReadOnly"], [6, 1, 1, "", "cudaGraphicsRegisterFlagsSurfaceLoadStore"], [6, 1, 1, "", "cudaGraphicsRegisterFlagsTextureGather"], [6, 1, 1, "", "cudaGraphicsRegisterFlagsWriteDiscard"]], "cuda.cudart.cudaGraphicsResource_t": [[6, 2, 1, "", "getPtr"]], 
"cuda.cudart.cudaHostFn_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaHostNodeParams": [[6, 1, 1, "", "fn"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "userData"]], "cuda.cudart.cudaHostNodeParamsV2": [[6, 1, 1, "", "fn"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "userData"]], "cuda.cudart.cudaIpcEventHandle_st": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaIpcEventHandle_t": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaIpcMemHandle_st": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaIpcMemHandle_t": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaKernelNodeParams": [[6, 1, 1, "", "blockDim"], [6, 1, 1, "", "extra"], [6, 1, 1, "", "func"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "gridDim"], [6, 1, 1, "", "kernelParams"], [6, 1, 1, "", "sharedMemBytes"]], "cuda.cudart.cudaKernelNodeParamsV2": [[6, 1, 1, "", "blockDim"], [6, 1, 1, "", "extra"], [6, 1, 1, "", "func"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "gridDim"], [6, 1, 1, "", "kernelParams"], [6, 1, 1, "", "sharedMemBytes"]], "cuda.cudart.cudaKernel_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaLaunchAttribute": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "id"], [6, 1, 1, "", "val"]], "cuda.cudart.cudaLaunchAttributeID": [[6, 1, 1, "", "cudaLaunchAttributeAccessPolicyWindow"], [6, 1, 1, "", "cudaLaunchAttributeClusterDimension"], [6, 1, 1, "", "cudaLaunchAttributeClusterSchedulingPolicyPreference"], [6, 1, 1, "", "cudaLaunchAttributeCooperative"], [6, 1, 1, "", "cudaLaunchAttributeDeviceUpdatableKernelNode"], [6, 1, 1, "", "cudaLaunchAttributeIgnore"], [6, 1, 1, "", "cudaLaunchAttributeLaunchCompletionEvent"], [6, 1, 1, "", "cudaLaunchAttributeMemSyncDomain"], [6, 1, 1, "", "cudaLaunchAttributeMemSyncDomainMap"], [6, 1, 1, "", "cudaLaunchAttributePreferredSharedMemoryCarveout"], [6, 1, 1, "", "cudaLaunchAttributePriority"], [6, 1, 1, "", "cudaLaunchAttributeProgrammaticEvent"], [6, 1, 1, "", "cudaLaunchAttributeProgrammaticStreamSerialization"], [6, 1, 1, "", "cudaLaunchAttributeSynchronizationPolicy"]], "cuda.cudart.cudaLaunchAttributeValue": [[6, 1, 1, "id28", "accessPolicyWindow"], [6, 1, 1, "id31", "clusterDim"], [6, 1, 1, "id32", "clusterSchedulingPolicyPreference"], [6, 1, 1, "id29", "cooperative"], [6, 1, 1, "id39", "deviceUpdatableKernelNode"], [6, 2, 1, "id41", "getPtr"], [6, 1, 1, "id38", "launchCompletionEvent"], [6, 1, 1, "id37", "memSyncDomain"], [6, 1, 1, "id36", "memSyncDomainMap"], [6, 1, 1, "id27", "pad"], [6, 1, 1, "id35", "priority"], [6, 1, 1, "id34", "programmaticEvent"], [6, 1, 1, "id33", "programmaticStreamSerializationAllowed"], [6, 1, 1, "id40", "sharedMemCarveout"], [6, 1, 1, "id30", "syncPolicy"]], "cuda.cudart.cudaLaunchAttribute_st": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "id"], [6, 1, 1, "", "val"]], "cuda.cudart.cudaLaunchMemSyncDomain": [[6, 1, 1, "", "cudaLaunchMemSyncDomainDefault"], [6, 1, 1, "", "cudaLaunchMemSyncDomainRemote"]], "cuda.cudart.cudaLaunchMemSyncDomainMap": [[6, 1, 1, "", "default_"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "remote"]], "cuda.cudart.cudaLaunchMemSyncDomainMap_st": [[6, 1, 1, "", "default_"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "remote"]], "cuda.cudart.cudaLimit": [[6, 1, 1, "", "cudaLimitDevRuntimePendingLaunchCount"], [6, 1, 1, "", "cudaLimitDevRuntimeSyncDepth"], [6, 1, 1, "", "cudaLimitMallocHeapSize"], [6, 1, 1, "", "cudaLimitMaxL2FetchGranularity"], [6, 1, 1, "", "cudaLimitPersistingL2CacheSize"], [6, 1, 1, "", "cudaLimitPrintfFifoSize"], [6, 1, 1, "", 
"cudaLimitStackSize"]], "cuda.cudart.cudaMemAccessDesc": [[6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "location"]], "cuda.cudart.cudaMemAccessFlags": [[6, 1, 1, "", "cudaMemAccessFlagsProtNone"], [6, 1, 1, "", "cudaMemAccessFlagsProtRead"], [6, 1, 1, "", "cudaMemAccessFlagsProtReadWrite"]], "cuda.cudart.cudaMemAllocNodeParams": [[6, 1, 1, "", "accessDescCount"], [6, 1, 1, "", "accessDescs"], [6, 1, 1, "", "bytesize"], [6, 1, 1, "", "dptr"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "poolProps"]], "cuda.cudart.cudaMemAllocNodeParamsV2": [[6, 1, 1, "", "accessDescCount"], [6, 1, 1, "", "accessDescs"], [6, 1, 1, "", "bytesize"], [6, 1, 1, "", "dptr"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "poolProps"]], "cuda.cudart.cudaMemAllocationHandleType": [[6, 1, 1, "", "cudaMemHandleTypeFabric"], [6, 1, 1, "", "cudaMemHandleTypeNone"], [6, 1, 1, "", "cudaMemHandleTypePosixFileDescriptor"], [6, 1, 1, "", "cudaMemHandleTypeWin32"], [6, 1, 1, "", "cudaMemHandleTypeWin32Kmt"]], "cuda.cudart.cudaMemAllocationType": [[6, 1, 1, "", "cudaMemAllocationTypeInvalid"], [6, 1, 1, "", "cudaMemAllocationTypeMax"], [6, 1, 1, "", "cudaMemAllocationTypePinned"]], "cuda.cudart.cudaMemFabricHandle_st": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaMemFabricHandle_t": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaMemFreeNodeParams": [[6, 1, 1, "", "dptr"], [6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaMemLocation": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "id"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaMemLocationType": [[6, 1, 1, "", "cudaMemLocationTypeDevice"], [6, 1, 1, "", "cudaMemLocationTypeHost"], [6, 1, 1, "", "cudaMemLocationTypeHostNuma"], [6, 1, 1, "", "cudaMemLocationTypeHostNumaCurrent"], [6, 1, 1, "", "cudaMemLocationTypeInvalid"]], "cuda.cudart.cudaMemPoolAttr": [[6, 1, 1, "", "cudaMemPoolAttrReleaseThreshold"], [6, 1, 1, "", "cudaMemPoolAttrReservedMemCurrent"], [6, 1, 1, "", "cudaMemPoolAttrReservedMemHigh"], [6, 1, 1, "", "cudaMemPoolAttrUsedMemCurrent"], [6, 1, 1, "", "cudaMemPoolAttrUsedMemHigh"], [6, 1, 1, "", "cudaMemPoolReuseAllowInternalDependencies"], [6, 1, 1, "", "cudaMemPoolReuseAllowOpportunistic"], [6, 1, 1, "", "cudaMemPoolReuseFollowEventDependencies"]], "cuda.cudart.cudaMemPoolProps": [[6, 1, 1, "", "allocType"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "handleTypes"], [6, 1, 1, "", "location"], [6, 1, 1, "", "maxSize"], [6, 1, 1, "", "reserved"], [6, 1, 1, "", "usage"], [6, 1, 1, "", "win32SecurityAttributes"]], "cuda.cudart.cudaMemPoolPtrExportData": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaMemPool_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaMemRangeAttribute": [[6, 1, 1, "", "cudaMemRangeAttributeAccessedBy"], [6, 1, 1, "", "cudaMemRangeAttributeLastPrefetchLocation"], [6, 1, 1, "", "cudaMemRangeAttributeLastPrefetchLocationId"], [6, 1, 1, "", "cudaMemRangeAttributeLastPrefetchLocationType"], [6, 1, 1, "", "cudaMemRangeAttributePreferredLocation"], [6, 1, 1, "", "cudaMemRangeAttributePreferredLocationId"], [6, 1, 1, "", "cudaMemRangeAttributePreferredLocationType"], [6, 1, 1, "", "cudaMemRangeAttributeReadMostly"]], "cuda.cudart.cudaMemcpy3DParms": [[6, 1, 1, "", "dstArray"], [6, 1, 1, "", "dstPos"], [6, 1, 1, "", "dstPtr"], [6, 1, 1, "", "extent"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "kind"], [6, 1, 1, "", "srcArray"], [6, 1, 1, "", "srcPos"], [6, 1, 1, "", "srcPtr"]], "cuda.cudart.cudaMemcpy3DPeerParms": [[6, 1, 1, "", "dstArray"], [6, 1, 1, "", "dstDevice"], [6, 1, 1, "", "dstPos"], [6, 
1, 1, "", "dstPtr"], [6, 1, 1, "", "extent"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "srcArray"], [6, 1, 1, "", "srcDevice"], [6, 1, 1, "", "srcPos"], [6, 1, 1, "", "srcPtr"]], "cuda.cudart.cudaMemcpyKind": [[6, 1, 1, "", "cudaMemcpyDefault"], [6, 1, 1, "", "cudaMemcpyDeviceToDevice"], [6, 1, 1, "", "cudaMemcpyDeviceToHost"], [6, 1, 1, "", "cudaMemcpyHostToDevice"], [6, 1, 1, "", "cudaMemcpyHostToHost"]], "cuda.cudart.cudaMemcpyNodeParams": [[6, 1, 1, "", "copyParams"], [6, 1, 1, "", "flags"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "reserved"]], "cuda.cudart.cudaMemoryAdvise": [[6, 1, 1, "", "cudaMemAdviseSetAccessedBy"], [6, 1, 1, "", "cudaMemAdviseSetPreferredLocation"], [6, 1, 1, "", "cudaMemAdviseSetReadMostly"], [6, 1, 1, "", "cudaMemAdviseUnsetAccessedBy"], [6, 1, 1, "", "cudaMemAdviseUnsetPreferredLocation"], [6, 1, 1, "", "cudaMemAdviseUnsetReadMostly"]], "cuda.cudart.cudaMemoryType": [[6, 1, 1, "", "cudaMemoryTypeDevice"], [6, 1, 1, "", "cudaMemoryTypeHost"], [6, 1, 1, "", "cudaMemoryTypeManaged"], [6, 1, 1, "", "cudaMemoryTypeUnregistered"]], "cuda.cudart.cudaMemsetParams": [[6, 1, 1, "", "dst"], [6, 1, 1, "", "elementSize"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "height"], [6, 1, 1, "", "pitch"], [6, 1, 1, "", "value"], [6, 1, 1, "", "width"]], "cuda.cudart.cudaMemsetParamsV2": [[6, 1, 1, "", "dst"], [6, 1, 1, "", "elementSize"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "height"], [6, 1, 1, "", "pitch"], [6, 1, 1, "", "value"], [6, 1, 1, "", "width"]], "cuda.cudart.cudaMipmappedArray_const_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaMipmappedArray_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaPitchedPtr": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "pitch"], [6, 1, 1, "", "ptr"], [6, 1, 1, "", "xsize"], [6, 1, 1, "", "ysize"]], "cuda.cudart.cudaPointerAttributes": [[6, 1, 1, "", "device"], [6, 1, 1, "", "devicePointer"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "hostPointer"], [6, 1, 1, "", "type"]], "cuda.cudart.cudaPos": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "x"], [6, 1, 1, "", "y"], [6, 1, 1, "", "z"]], "cuda.cudart.cudaResourceDesc": [[6, 2, 1, "", "getPtr"], [6, 1, 1, "", "res"], [6, 1, 1, "", "resType"]], "cuda.cudart.cudaResourceType": [[6, 1, 1, "", "cudaResourceTypeArray"], [6, 1, 1, "", "cudaResourceTypeLinear"], [6, 1, 1, "", "cudaResourceTypeMipmappedArray"], [6, 1, 1, "", "cudaResourceTypePitch2D"]], "cuda.cudart.cudaResourceViewDesc": [[6, 1, 1, "", "depth"], [6, 1, 1, "", "firstLayer"], [6, 1, 1, "", "firstMipmapLevel"], [6, 1, 1, "", "format"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "height"], [6, 1, 1, "", "lastLayer"], [6, 1, 1, "", "lastMipmapLevel"], [6, 1, 1, "", "width"]], "cuda.cudart.cudaResourceViewFormat": [[6, 1, 1, "", "cudaResViewFormatFloat1"], [6, 1, 1, "", "cudaResViewFormatFloat2"], [6, 1, 1, "", "cudaResViewFormatFloat4"], [6, 1, 1, "", "cudaResViewFormatHalf1"], [6, 1, 1, "", "cudaResViewFormatHalf2"], [6, 1, 1, "", "cudaResViewFormatHalf4"], [6, 1, 1, "", "cudaResViewFormatNone"], [6, 1, 1, "", "cudaResViewFormatSignedBlockCompressed4"], [6, 1, 1, "", "cudaResViewFormatSignedBlockCompressed5"], [6, 1, 1, "", "cudaResViewFormatSignedBlockCompressed6H"], [6, 1, 1, "", "cudaResViewFormatSignedChar1"], [6, 1, 1, "", "cudaResViewFormatSignedChar2"], [6, 1, 1, "", "cudaResViewFormatSignedChar4"], [6, 1, 1, "", "cudaResViewFormatSignedInt1"], [6, 1, 1, "", "cudaResViewFormatSignedInt2"], [6, 1, 1, "", "cudaResViewFormatSignedInt4"], [6, 1, 1, "", "cudaResViewFormatSignedShort1"], [6, 1, 1, "", "cudaResViewFormatSignedShort2"], [6, 1, 1, "", 
"cudaResViewFormatSignedShort4"], [6, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed1"], [6, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed2"], [6, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed3"], [6, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed4"], [6, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed5"], [6, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed6H"], [6, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed7"], [6, 1, 1, "", "cudaResViewFormatUnsignedChar1"], [6, 1, 1, "", "cudaResViewFormatUnsignedChar2"], [6, 1, 1, "", "cudaResViewFormatUnsignedChar4"], [6, 1, 1, "", "cudaResViewFormatUnsignedInt1"], [6, 1, 1, "", "cudaResViewFormatUnsignedInt2"], [6, 1, 1, "", "cudaResViewFormatUnsignedInt4"], [6, 1, 1, "", "cudaResViewFormatUnsignedShort1"], [6, 1, 1, "", "cudaResViewFormatUnsignedShort2"], [6, 1, 1, "", "cudaResViewFormatUnsignedShort4"]], "cuda.cudart.cudaSharedCarveout": [[6, 1, 1, "", "cudaSharedmemCarveoutDefault"], [6, 1, 1, "", "cudaSharedmemCarveoutMaxL1"], [6, 1, 1, "", "cudaSharedmemCarveoutMaxShared"]], "cuda.cudart.cudaSharedMemConfig": [[6, 1, 1, "", "cudaSharedMemBankSizeDefault"], [6, 1, 1, "", "cudaSharedMemBankSizeEightByte"], [6, 1, 1, "", "cudaSharedMemBankSizeFourByte"]], "cuda.cudart.cudaStreamCallback_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaStreamCaptureMode": [[6, 1, 1, "", "cudaStreamCaptureModeGlobal"], [6, 1, 1, "", "cudaStreamCaptureModeRelaxed"], [6, 1, 1, "", "cudaStreamCaptureModeThreadLocal"]], "cuda.cudart.cudaStreamCaptureStatus": [[6, 1, 1, "", "cudaStreamCaptureStatusActive"], [6, 1, 1, "", "cudaStreamCaptureStatusInvalidated"], [6, 1, 1, "", "cudaStreamCaptureStatusNone"]], "cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags": [[6, 1, 1, "", "cudaStreamAddCaptureDependencies"], [6, 1, 1, "", "cudaStreamSetCaptureDependencies"]], "cuda.cudart.cudaStream_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaSurfaceBoundaryMode": [[6, 1, 1, "", "cudaBoundaryModeClamp"], [6, 1, 1, "", "cudaBoundaryModeTrap"], [6, 1, 1, "", "cudaBoundaryModeZero"]], "cuda.cudart.cudaSurfaceFormatMode": [[6, 1, 1, "", "cudaFormatModeAuto"], [6, 1, 1, "", "cudaFormatModeForced"]], "cuda.cudart.cudaSurfaceObject_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaSynchronizationPolicy": [[6, 1, 1, "", "cudaSyncPolicyAuto"], [6, 1, 1, "", "cudaSyncPolicyBlockingSync"], [6, 1, 1, "", "cudaSyncPolicySpin"], [6, 1, 1, "", "cudaSyncPolicyYield"]], "cuda.cudart.cudaTextureAddressMode": [[6, 1, 1, "", "cudaAddressModeBorder"], [6, 1, 1, "", "cudaAddressModeClamp"], [6, 1, 1, "", "cudaAddressModeMirror"], [6, 1, 1, "", "cudaAddressModeWrap"]], "cuda.cudart.cudaTextureDesc": [[6, 1, 1, "", "addressMode"], [6, 1, 1, "", "borderColor"], [6, 1, 1, "", "disableTrilinearOptimization"], [6, 1, 1, "", "filterMode"], [6, 2, 1, "", "getPtr"], [6, 1, 1, "", "maxAnisotropy"], [6, 1, 1, "", "maxMipmapLevelClamp"], [6, 1, 1, "", "minMipmapLevelClamp"], [6, 1, 1, "", "mipmapFilterMode"], [6, 1, 1, "", "mipmapLevelBias"], [6, 1, 1, "", "normalizedCoords"], [6, 1, 1, "", "readMode"], [6, 1, 1, "", "sRGB"], [6, 1, 1, "", "seamlessCubemap"]], "cuda.cudart.cudaTextureFilterMode": [[6, 1, 1, "", "cudaFilterModeLinear"], [6, 1, 1, "", "cudaFilterModePoint"]], "cuda.cudart.cudaTextureObject_t": [[6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaTextureReadMode": [[6, 1, 1, "", "cudaReadModeElementType"], [6, 1, 1, "", "cudaReadModeNormalizedFloat"]], "cuda.cudart.cudaUUID_t": [[6, 1, 1, "", "bytes"], [6, 2, 1, "", "getPtr"]], "cuda.cudart.cudaUserObjectFlags": [[6, 1, 1, 
"", "cudaUserObjectNoDestructorSync"]], "cuda.cudart.cudaUserObjectRetainFlags": [[6, 1, 1, "", "cudaGraphUserObjectMove"]], "cuda.cudart.cudaUserObject_t": [[6, 2, 1, "", "getPtr"]], "cuda.nvrtc": [[7, 3, 1, "", "nvrtcAddNameExpression"], [7, 3, 1, "", "nvrtcCompileProgram"], [7, 3, 1, "", "nvrtcCreateProgram"], [7, 3, 1, "", "nvrtcDestroyProgram"], [7, 3, 1, "", "nvrtcGetCUBIN"], [7, 3, 1, "", "nvrtcGetCUBINSize"], [7, 3, 1, "", "nvrtcGetErrorString"], [7, 3, 1, "", "nvrtcGetLTOIR"], [7, 3, 1, "", "nvrtcGetLTOIRSize"], [7, 3, 1, "", "nvrtcGetLoweredName"], [7, 3, 1, "", "nvrtcGetNVVM"], [7, 3, 1, "", "nvrtcGetNVVMSize"], [7, 3, 1, "", "nvrtcGetNumSupportedArchs"], [7, 3, 1, "", "nvrtcGetOptiXIR"], [7, 3, 1, "", "nvrtcGetOptiXIRSize"], [7, 3, 1, "", "nvrtcGetPTX"], [7, 3, 1, "", "nvrtcGetPTXSize"], [7, 3, 1, "", "nvrtcGetProgramLog"], [7, 3, 1, "", "nvrtcGetProgramLogSize"], [7, 3, 1, "", "nvrtcGetSupportedArchs"], [7, 0, 1, "", "nvrtcProgram"], [7, 0, 1, "", "nvrtcResult"], [7, 3, 1, "", "nvrtcVersion"]], "cuda.nvrtc.nvrtcProgram": [[7, 2, 1, "", "getPtr"]], "cuda.nvrtc.nvrtcResult": [[7, 1, 1, "", "NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"], [7, 1, 1, "", "NVRTC_ERROR_COMPILATION"], [7, 1, 1, "", "NVRTC_ERROR_INTERNAL_ERROR"], [7, 1, 1, "", "NVRTC_ERROR_INVALID_INPUT"], [7, 1, 1, "", "NVRTC_ERROR_INVALID_OPTION"], [7, 1, 1, "", "NVRTC_ERROR_INVALID_PROGRAM"], [7, 1, 1, "", "NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID"], [7, 1, 1, "", "NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION"], [7, 1, 1, "", "NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION"], [7, 1, 1, "", "NVRTC_ERROR_OUT_OF_MEMORY"], [7, 1, 1, "", "NVRTC_ERROR_PROGRAM_CREATION_FAILURE"], [7, 1, 1, "", "NVRTC_ERROR_TIME_FILE_WRITE_FAILED"], [7, 1, 1, "", "NVRTC_SUCCESS"]]}, "objtypes": {"0": "py:class", "1": "py:attribute", "2": "py:method", "3": "py:function"}, "objnames": {"0": ["py", "class", "Python class"], "1": ["py", "attribute", "Python attribute"], "2": ["py", "method", "Python method"], "3": ["py", "function", "Python function"]}, "titleterms": {"cuda": [0, 3, 5, 6, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "python": [0, 3, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "api": [0, 5, 6], "refer": 0, "captionhold": 0, "code": 1, "conduct": 1, "overview": [1, 9], "our": 1, "pledg": 1, "standard": 1, "respons": 1, "scope": 1, "enforc": 1, "attribut": [1, 5], "contribut": 2, "manual": 3, "content": 3, "indic": 3, "tabl": 3, "instal": 4, "runtim": [4, 6], "requir": 4, "from": 4, "pypi": 4, "conda": 4, "sourc": [4, 16, 17], "build": [4, 16, 17], "In": 4, "place": 4, "develop": 4, "doc": 4, "publish": 4, "data": [5, 6], "type": [5, 6], "us": [5, 6], "driver": [5, 6], "error": [5, 6, 7], "handl": [5, 6, 7], "initi": 5, "version": [5, 6], "manag": [5, 6], "devic": [5, 6], "primari": 5, "context": 5, "modul": 5, "librari": 5, "memori": [5, 6], "virtual": 5, "stream": [5, 6, 13], "order": [5, 6], "alloc": [5, 6], "multicast": 5, "object": [5, 6], "unifi": [5, 6], "address": [5, 6], "event": [5, 6], "extern": [5, 6], "resourc": [5, 6], "interoper": [5, 6, 13], "oper": 5, "execut": [5, 6], "control": [5, 6], "graph": [5, 6], "occup": [5, 6], "textur": [5, 6], "surfac": [5, 6], "tensor": 5, "map": 5, "peer": [5, 6], "access": [5, 6], "graphic": [5, 6], "entri": [5, 6], "point": [5, 6], "coredump": 5, "green": 5, "egl": [5, 6], "opengl": [5, 6], "profil": [5, 6], "vdpau": [5, 6], "cudart": 6, "direct3d": 6, "9": 6, "10": 6, "11": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 
20], "c": 6, "routin": 6, "interact": 6, "nvrtc": 7, "gener": 7, "inform": 7, "queri": 7, "compil": 7, "support": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "option": 7, "motiv": 8, "what": 8, "why": 8, "workflow": 9, "perform": 9, "kernel": 9, "applic": 9, "comparison": 9, "futur": 9, "releas": [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "note": [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "4": [11, 26], "0": [11, 12, 13, 15, 17, 21, 22, 23, 25, 26, 27, 28], "hightlight": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "limit": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "function": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "Not": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "thi": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28], "5": [12, 27], "6": [13, 14, 28], "default": 13, "primit": 13, "1": [14, 16, 18, 22, 24], "7": [15, 16], "8": [17, 18, 19, 20], "2": [19, 23, 24], "3": [20, 25], "12": [21, 22, 23, 24, 25, 26, 27, 28]}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 6, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 56}}) \ No newline at end of file +Search.setIndex({"docnames": ["api", "conduct", "contribute", "index", "install", "module/driver", "module/nvrtc", "module/runtime", "motivation", "overview", "release", "release/11.4.0-notes", "release/11.5.0-notes", "release/11.6.0-notes", "release/11.6.1-notes", "release/11.7.0-notes", "release/11.7.1-notes", "release/11.8.0-notes", "release/11.8.1-notes", "release/11.8.2-notes", "release/11.8.3-notes", "release/11.8.4-notes", "release/12.0.0-notes", "release/12.1.0-notes", "release/12.2.0-notes", "release/12.2.1-notes", "release/12.3.0-notes", "release/12.4.0-notes", "release/12.5.0-notes", "release/12.6.0-notes", "release/12.6.1-notes"], "filenames": ["api.rst", "conduct.md", "contribute.md", "index.rst", "install.md", "module/driver.rst", "module/nvrtc.rst", "module/runtime.rst", "motivation.md", "overview.md", "release.md", "release/11.4.0-notes.md", "release/11.5.0-notes.md", "release/11.6.0-notes.md", "release/11.6.1-notes.md", "release/11.7.0-notes.md", "release/11.7.1-notes.md", "release/11.8.0-notes.md", "release/11.8.1-notes.md", "release/11.8.2-notes.md", "release/11.8.3-notes.md", "release/11.8.4-notes.md", "release/12.0.0-notes.md", "release/12.1.0-notes.md", "release/12.2.0-notes.md", "release/12.2.1-notes.md", "release/12.3.0-notes.md", "release/12.4.0-notes.md", "release/12.5.0-notes.md", "release/12.6.0-notes.md", "release/12.6.1-notes.md"], "titles": ["CUDA Python API Reference", "Code of Conduct", "Contributing", "CUDA Python Manual", "Installation", "driver", "nvrtc", "runtime", "Motivation", "Overview", "Release Notes", "CUDA Python 11.4.0 Release notes", "CUDA Python 11.5.0 Release notes", "CUDA Python 11.6.0 Release notes", "CUDA Python 11.6.1 Release notes", "CUDA Python 11.7.0 Release notes", "CUDA Python 11.7.1 Release notes", "CUDA Python 11.8.0 Release notes", "CUDA Python 11.8.1 Release notes", "CUDA Python 11.8.2 Release notes", "CUDA Python 11.8.3 Release notes", "CUDA Python 11.8.4 Release notes", "CUDA Python 12.0.0 Release notes", "CUDA Python 12.1.0 Release 
notes", "CUDA Python 12.2.0 Release notes", "CUDA Python 12.2.1 Release notes", "CUDA Python 12.3.0 Release notes", "CUDA Python 12.4.0 Release notes", "CUDA Python 12.5.0 Release notes", "CUDA Python 12.6.0 Release notes", "CUDA Python 12.6.1 Release notes"], "terms": {"driver": [0, 3, 4, 6, 8, 9], "data": [0, 6, 8, 9], "type": [0, 2, 4, 6, 9, 16, 17], "us": [0, 1, 4, 6, 8, 9, 16, 17, 18, 21, 30], "error": [0, 9, 16, 24], "handl": [0, 9, 13], "initi": [0, 6, 7, 9, 11], "version": [0, 1, 4, 6, 9, 21, 26], "manag": 0, "devic": [0, 6, 9], "primari": [0, 7, 9], "context": [0, 7, 9], "modul": [0, 3, 4, 7, 8, 9], "librari": [0, 4, 6, 7, 8, 9, 14], "memori": [0, 6, 8, 9], "virtual": [0, 6, 7, 9], "stream": [0, 9, 10, 12], "order": [0, 6], "alloc": [0, 6, 9], "multicast": 0, "object": [0, 9], "unifi": [0, 8, 9], "address": [0, 1, 6], "event": [0, 1, 9], "extern": [0, 9], "resourc": [0, 9], "interoper": [0, 9, 10], "oper": [0, 6, 7, 9], "execut": [0, 6, 8, 9, 13], "control": [0, 9], "graph": 0, "occup": 0, "textur": [0, 6], "surfac": [0, 6], "tensor": 0, "map": [0, 7], "peer": 0, "access": [0, 8, 9, 16], "graphic": [0, 13], "entri": [0, 8], "point": [0, 6, 9], "coredump": 0, "attribut": [0, 3, 7], "green": 0, "egl": [0, 12, 13], "opengl": 0, "profil": [0, 9, 13], "vdpau": [0, 12, 13], "runtim": [0, 3, 5, 6, 8, 9, 16, 26, 28], "direct3d": 0, "9": [0, 5, 6], "10": [0, 5, 6], "11": [0, 3, 5, 6, 10, 23], "c": [0, 4, 5, 6, 9, 16], "routin": 0, "interact": [0, 5, 9], "nvrtc": [0, 3, 4, 9, 16], "gener": [0, 5, 7, 9], "inform": [0, 1, 5, 7, 9], "queri": [0, 5, 7, 9, 26], "compil": [0, 4, 5, 7, 8, 9], "support": [0, 4, 5, 7, 8, 10], "option": [0, 5, 7, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "defin": [1, 5, 6, 7, 16, 17], "follow": [1, 4, 5, 6, 7, 9, 16], "cuda": [1, 2, 4, 6, 10], "python": [1, 2, 4], "project": 1, "In": [1, 5, 6, 7, 8, 9, 16], "interest": [1, 2], "foster": 1, "an": [1, 2, 5, 6, 7, 8, 9, 13], "open": [1, 4, 5, 7, 19], "welcom": 1, "environ": [1, 4, 5, 7, 13], "we": [1, 2, 8, 9], "contributor": 1, "maintain": [1, 4, 5, 7], "make": [1, 4, 5, 6, 7], "particip": [1, 5, 7], "commun": 1, "harass": 1, "free": [1, 5, 7], "experi": [1, 9], "everyon": 1, "regardless": [1, 5, 7], "ag": 1, "bodi": [1, 5, 7], "size": [1, 5, 6, 7, 9], "disabl": [1, 5, 6, 7, 17], "ethnic": 1, "sex": 1, "characterist": [1, 5], "gender": 1, "ident": [1, 5, 6, 7, 9], "express": [1, 5, 6, 7], "level": [1, 5, 7, 9, 11, 12], "educ": 1, "socio": 1, "econom": 1, "statu": [1, 5, 7], "nation": 1, "person": 1, "appear": [1, 5, 6], "race": [1, 5, 7], "religion": 1, "sexual": 1, "orient": [1, 9], "exampl": [1, 5, 7, 9, 13, 22], "behavior": [1, 5, 7, 13], "contribut": [1, 3], "creat": [1, 4, 5, 6, 7, 9], "posit": [1, 5, 7], "includ": [1, 4, 5, 6, 7, 16, 17], "inclus": [1, 6, 8], "languag": [1, 6, 8, 9], "Being": 1, "respect": [1, 5, 7], "differ": [1, 5, 7, 8, 9], "viewpoint": 1, "gracefulli": 1, "accept": [1, 2, 5, 6, 7, 13], "construct": [1, 5, 7, 9], "critic": 1, "focus": 1, "what": [1, 2, 3, 5, 7], "best": [1, 5, 7, 8, 9], "show": [1, 6, 9], "empathi": 1, "toward": [1, 9], "other": [1, 4, 5, 6, 7], "member": [1, 5, 6, 7, 13], "unaccept": 1, "The": [1, 2, 4, 5, 6, 7, 8, 9], "imageri": 1, "unwelcom": 1, "attent": [1, 2], "advanc": [1, 5, 7], "troll": 1, "insult": 1, "derogatori": 1, "comment": [1, 2], "polit": 1, "attack": 1, "public": [1, 5, 7], "privat": 1, "publish": 1, "physic": [1, 5, 7], "electron": 1, "without": [1, 5, 6, 7], "explicit": [1, 5, 7], "permiss": 1, "which": [1, 5, 6, 7, 9], 
"could": [1, 5, 7], "reason": [1, 5, 7], "consid": [1, 5, 6, 7], "inappropri": 1, "profession": 1, "set": [1, 4, 5, 6, 7, 8, 9, 13], "ar": [1, 4, 5, 6, 7, 8, 9, 13, 16], "clarifi": 1, "expect": [1, 5, 7, 9], "take": [1, 5, 6, 7, 9], "appropri": [1, 5, 6, 7], "fair": 1, "correct": [1, 5, 7, 9], "action": [1, 5, 7], "ani": [1, 5, 6, 7, 9], "instanc": [1, 5, 6, 7], "have": [1, 5, 6, 7, 8, 9, 16, 17], "right": [1, 5], "remov": [1, 5, 6, 7, 13, 17], "edit": [1, 4], "reject": [1, 5, 7], "commit": [1, 5, 7, 9], "wiki": 1, "issu": [1, 2, 5, 7, 16, 17, 18, 21, 23, 24, 26, 28, 29, 30], "align": [1, 5, 7], "thi": [1, 2, 5, 6, 7, 9, 10], "ban": 1, "temporarili": 1, "perman": [1, 5], "thei": [1, 5, 6, 7], "deem": [1, 6], "threaten": 1, "offens": 1, "harm": 1, "appli": [1, 5, 7, 22], "both": [1, 5, 7, 8, 29], "within": [1, 5, 7, 9], "space": [1, 5, 6, 7], "when": [1, 5, 6, 7, 8, 9, 13, 24], "individu": [1, 5, 6, 7, 9], "repres": [1, 5, 7], "its": [1, 5, 7, 8, 9], "offici": 1, "e": [1, 4, 5, 6, 7, 17], "mail": 1, "post": [1, 5], "via": [1, 4, 5, 6, 7], "social": 1, "media": 1, "account": [1, 5], "act": 1, "appoint": 1, "onlin": [1, 5], "offlin": [1, 5], "represent": [1, 5, 7], "mai": [1, 5, 6, 7, 9, 15, 16, 19, 28], "further": [1, 5, 7], "abus": 1, "otherwis": [1, 5, 7], "report": [1, 2, 5, 6, 7], "contact": 1, "team": [1, 2, 5], "nvidia": [1, 2, 4, 5, 7, 8, 9, 16, 26], "com": [1, 5, 7, 16], "all": [1, 4, 5, 6, 7, 9, 11, 12, 16, 17], "complaint": 1, "review": 1, "investig": 1, "result": [1, 5, 6, 7, 9], "necessari": [1, 5, 7], "circumst": [1, 5, 7], "oblig": 1, "confidenti": 1, "regard": [1, 5, 7], "incid": 1, "detail": [1, 5, 7, 9], "specif": [1, 4, 5, 6, 7, 9], "polici": [1, 5, 7], "separ": [1, 5, 6, 7], "who": 1, "do": [1, 2, 5, 6, 7, 9], "good": [1, 5, 6, 7], "faith": 1, "face": [1, 5, 7, 16], "temporari": [1, 16], "repercuss": 1, "determin": [1, 5, 6, 7], "s": [1, 5, 6, 7, 8, 9, 17], "leadership": 1, "adapt": 1, "from": [1, 3, 5, 6, 7, 8, 9, 13, 16, 22], "coven": 1, "1": [1, 3, 5, 6, 7, 9, 10, 13, 17, 19, 29], "4": [1, 3, 5, 6, 7, 10, 18], "avail": [1, 5, 6, 7], "http": [1, 5, 7, 16], "www": 1, "org": 1, "html": [1, 4, 5], "For": [1, 5, 6, 7, 9], "answer": 1, "common": [1, 5, 7, 9], "question": 1, "about": [1, 5, 7], "see": [1, 2, 5, 6, 7, 9, 13], "faq": 1, "thank": 2, "you": [2, 4, 5, 7, 8, 9], "your": [2, 4, 7, 9], "base": [2, 5, 7, 8], "fall": [2, 5, 7], "two": [2, 5, 6, 7], "categori": 2, "want": [2, 5, 7, 8, 9], "bug": [2, 6], "featur": [2, 5, 6, 7], "request": [2, 5, 7], "document": [2, 4, 5, 7], "file": [2, 5, 6, 7], "describ": [2, 5, 6, 7], "encount": [2, 5, 7], "chang": [2, 5, 6, 7, 9, 12, 13], "evalu": 2, "triag": 2, "them": [2, 5, 6, 7, 9], "schedul": [2, 5, 7], "releas": [2, 3, 5, 6, 7, 8, 9], "If": [2, 5, 6, 7], "believ": 2, "need": [2, 5, 7, 9, 16, 17], "prioriti": [2, 5, 7], "notifi": 2, "implement": [2, 5, 6, 7], "fix": [2, 5, 7, 14, 16, 21, 22, 29], "At": [2, 5, 7], "time": [2, 5, 6, 7], "code": [2, 3, 5, 6, 7, 8, 9, 11, 26], "instal": [3, 5, 7, 18], "requir": [3, 5, 6, 7, 9, 17, 29], "pypi": [3, 12, 26, 27], "conda": [3, 12, 23, 26, 27], "sourc": [3, 5, 6, 7, 10, 11], "build": [3, 8, 9, 10, 20], "doc": [3, 5, 7], "overview": [3, 5, 7], "workflow": 3, "perform": [3, 5, 6, 7], "futur": [3, 5, 6, 7, 8, 11, 12, 16], "motiv": 3, "why": [3, 5, 7], "conduct": 3, "our": [3, 8, 9], "pledg": 3, "standard": [3, 5, 7, 8, 9], "respons": [3, 5, 7], "scope": [3, 5, 7], "enforc": [3, 7], "note": [3, 4, 5, 6, 7, 9], "12": [3, 4, 5, 6, 7, 10, 13], "6": [3, 4, 5, 6, 7, 10], "0": [3, 4, 5, 6, 7, 9, 
10], "5": [3, 5, 6, 7, 10, 27], "3": [3, 5, 6, 7, 10, 17, 23, 25], "2": [3, 5, 6, 7, 9, 10, 13, 16], "8": [3, 5, 6, 7, 10, 22, 25, 26], "7": [3, 5, 6, 7, 10, 17, 21, 23, 30], "api": [3, 6, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "refer": [3, 5, 7], "index": [3, 4, 5, 7], "search": [3, 5, 6, 7, 21], "page": [3, 4, 5, 7], "platform": [4, 5, 7, 11, 12, 16, 17], "depend": [4, 5, 7, 16, 23], "linux": [4, 5, 6, 7], "450": 4, "80": [4, 5, 7], "02": 4, "later": [4, 5, 6, 7, 9], "window": [4, 5, 7, 29], "456": 4, "38": [4, 5, 7], "toolkit": [4, 5, 7, 8, 13, 15, 17, 22, 23, 24, 26, 27, 28, 29], "onli": [4, 5, 6, 7, 9, 11], "redistribut": 4, "compon": [4, 5, 7], "guid": [4, 5, 7], "can": [4, 5, 6, 7, 8, 9], "guidanc": 4, "obtain": [4, 5, 7], "local": [4, 5, 6, 7, 9], "pip": 4, "packag": [4, 5, 7, 9, 11, 23, 26], "assign": [4, 5, 6, 7, 9], "cudart": [4, 6, 13, 20, 26], "provid": [4, 5, 6, 7, 8, 9], "header": [4, 5, 6, 7, 16, 17], "enabl": [4, 5, 6, 7, 9, 17], "writ": 4, "kernel": [4, 5, 6, 7], "share": [4, 5, 7, 9], "cython": [4, 8, 11, 12, 20, 25], "pyclibrari": 4, "remain": [4, 5, 7], "test": [4, 5, 7], "outlin": [4, 5, 7], "txt": 4, "must": [4, 5, 6, 7, 9, 24], "match": [4, 5, 7, 9], "major": [4, 5, 6, 7, 9], "minor": [4, 5, 6, 7, 9, 17], "compat": [4, 5, 7, 8, 9, 20, 25], "still": [4, 5, 7, 17], "dure": [4, 5, 6, 7, 16], "process": [4, 5, 6, 7, 8, 9, 29], "variabl": [4, 5, 6, 7, 13], "cuda_hom": [4, 16, 17], "cuda_path": 4, "find": [4, 5, 7], "locat": [4, 5, 7, 9, 17], "particular": [4, 5, 6, 7, 16], "path": [4, 5, 6, 7, 8], "usr": 4, "should": [4, 5, 6, 7, 9], "export": [4, 5, 7, 13], "To": [4, 5, 6, 7, 9], "extens": [4, 5, 6, 7], "run": [4, 5, 6, 7, 9], "setup": [4, 5, 7], "py": [4, 5, 7], "build_ext": 4, "inplac": 4, "debug": [4, 5, 6, 7], "gdb": [4, 5], "pass": [4, 5, 6, 7, 9], "argument": [4, 5, 6, 7, 9], "current": [4, 5, 6, 7, 9], "g": [4, 5, 6, 7], "port": [4, 5, 7], "bind": [4, 5, 6, 7, 8, 9, 11, 12, 16], "env": [4, 5], "f": [4, 5, 7, 9], "docs_src": 4, "yml": 4, "activ": [4, 5, 7], "Then": 4, "step": [4, 5, 9], "abov": [4, 5, 7, 9], "cd": 4, "git": 4, "checkout": 4, "gh": 4, "cp": 4, "class": [5, 6, 7, 13], "cuuuid_st": [5, 7], "void_ptr": [5, 7], "_ptr": [5, 7], "byte": [5, 6, 7, 9], "definit": [5, 6, 7], "uuid": [5, 7], "getptr": [5, 6, 7], "get": [5, 6, 7, 8, 9], "cumemfabrichandle_st": 5, "fabric": [5, 7], "opaqu": [5, 6, 7], "same": [5, 6, 7, 9], "node": [5, 7], "ipc": [5, 7], "between": [5, 6, 7, 9, 28], "connect": [5, 7], "nvswitch": 5, "cuipceventhandle_st": 5, "reserv": [5, 7], "cuipcmemhandle_st": 5, "mem": 5, "custreambatchmemopparams_union": 5, "per": [5, 7, 9, 13], "paramet": [5, 6, 7, 9, 29], "custreambatchmemop": 5, "custreambatchmemoptyp": 5, "waitvalu": 5, "custreammemopwaitvalueparams_st": [5, 13], "writevalu": 5, "custreammemopwritevalueparams_st": 5, "flushremotewrit": 5, "custreammemopflushremotewritesparams_st": 5, "memorybarri": 5, "custreammemopmemorybarrierparams_st": 5, "pad": [5, 7], "list": [5, 6, 7, 9, 29], "cuuint64_t": [5, 7, 13], "cuda_batch_mem_op_node_params_v1_st": 5, "ctx": 5, "cucontext": [5, 7], "count": [5, 7], "unsign": [5, 7], "int": [5, 6, 7, 9], "paramarrai": 5, "custreambatchmemopparam": 5, "flag": [5, 7], "cuda_batch_mem_op_node_params_v2_st": 5, "batch": 5, "number": [5, 6, 7], "arrai": [5, 6, 7, 8, 9], "cuasyncnotificationinfo_st": 5, "user": [5, 7, 8], "async": [5, 7], "notif": [5, 7], "callback": [5, 7], "cuasyncnotificationtyp": 5, "info": [5, 6, 7], "anon_union2": [5, 7], "cudevprop_st": 5, 
"legaci": [5, 7, 13], "properti": [5, 7], "maxthreadsperblock": [5, 7], "maximum": [5, 6, 7], "thread": [5, 6, 7, 9, 13], "block": [5, 6, 7, 9], "maxthreadsdim": [5, 7], "each": [5, 6, 7, 9], "dimens": [5, 7], "maxgrids": [5, 7], "grid": [5, 7, 9], "sharedmemperblock": [5, 7], "totalconstantmemori": 5, "constant": [5, 6, 7], "simdwidth": 5, "warp": [5, 7], "mempitch": [5, 7], "pitch": [5, 7], "allow": [5, 6, 7, 8, 9, 17, 29], "copi": [5, 7, 9], "regsperblock": [5, 7], "32": [5, 7, 29], "bit": [5, 7], "regist": [5, 6, 7], "clockrat": [5, 7], "clock": [5, 7], "frequenc": [5, 7], "kilohertz": [5, 7], "texturealign": [5, 7], "cuaccesspolicywindow_st": 5, "specifi": [5, 6, 7], "contigu": [5, 7, 9], "extent": [5, 7], "begin": [5, 7], "base_ptr": [5, 7], "end": [5, 6, 7, 24], "num_byt": [5, 7], "limit": [5, 6, 7, 10], "cu_device_attribute_max_access_policy_window_s": 5, "partit": [5, 7], "mani": [5, 7], "segment": [5, 7], "sum": [5, 7], "hit": [5, 7], "approx": [5, 7], "ratio": [5, 7], "miss": [5, 7, 17], "fit": [5, 7], "capabl": [5, 7, 8, 9], "architectur": [5, 6, 7, 9], "hitprop": [5, 7], "missprop": [5, 7], "start": [5, 7], "restrict": [5, 6, 7], "size_t": [5, 7, 9], "hitratio": [5, 7], "percentag": [5, 6, 7], "line": [5, 6, 7, 24], "rest": [5, 7], "float": [5, 6, 7, 9], "cuaccessproperti": [5, 7], "either": [5, 6, 7], "normal": [5, 7], "cuda_kernel_node_params_st": 5, "gpu": [5, 6, 7, 8, 9], "func": [5, 7], "launch": [5, 6, 7, 9, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cufunct": [5, 7], "griddimx": 5, "width": [5, 7], "griddimi": 5, "height": [5, 7], "griddimz": 5, "depth": [5, 7], "blockdimx": 5, "x": [5, 7, 9, 11, 12], "blockdimi": 5, "y": [5, 7, 9], "blockdimz": 5, "z": [5, 7, 9], "sharedmembyt": [5, 7], "dynam": [5, 7, 9], "kernelparam": [5, 7], "pointer": [5, 6, 7, 9], "extra": [5, 6, 7, 9], "cuda_kernel_node_params_v2_st": 5, "kern": 5, "referenc": [5, 7], "null": [5, 6, 7], "cukernel": 5, "task": [5, 7], "valu": [5, 6, 7, 9, 13], "indic": [5, 6, 7], "field": [5, 7], "ignor": [5, 6, 7, 9], "cuda_kernel_node_params_v3_st": 5, "cuda_memset_node_params_st": 5, "memset": [5, 7], "dst": [5, 7], "destin": [5, 7], "cudeviceptr": [5, 9, 28], "unus": [5, 7], "elements": [5, 7], "element": [5, 7], "row": [5, 6, 7], "cuda_memset_node_params_v2_st": 5, "cuda_host_node_params_st": 5, "host": [5, 7, 8, 9], "fn": [5, 7], "function": [5, 6, 7, 9, 10], "call": [5, 6, 7, 9], "cuhostfn": 5, "userdata": [5, 7], "cuda_host_node_params_v2_st": 5, "cuda_conditional_node_param": 5, "condit": [5, 7], "cugraphconditionalhandlecr": 5, "cugraphconditionalhandl": 5, "cugraphconditionalnodetyp": 5, "output": [5, 6, 7, 9], "phgraph_out": [5, 7], "own": [5, 7, 8, 9], "popul": [5, 6, 7], "child": [5, 7], "creation": [5, 7, 9], "valid": [5, 6, 7], "lifetim": [5, 7], "content": [5, 6, 7], "subject": [5, 7, 9], "constraint": [5, 7], "empti": [5, 7], "memcopi": [5, 7], "recurs": [5, 7], "nest": [5, 7], "belong": [5, 7], "These": [5, 6, 7], "custreambegincapturetograph": 5, "cugraph": [5, 7], "cugraphedgedata_st": 5, "annot": [5, 6, 7], "edg": [5, 7], "implicitli": [5, 7], "default": [5, 6, 7, 10, 12], "zero": [5, 6, 7, 9], "A": [5, 7, 9, 16], "struct": [5, 7], "full": [5, 7, 8, 9], "serial": [5, 7, 9], "visibl": [5, 7], "from_port": [5, 7], "trigger": [5, 7], "upstream": [5, 7], "mean": [5, 7], "specfic": [5, 7], "case": [5, 6, 7], "complet": [5, 7, 9], "downstream": [5, 7], "portion": [5, 7], "thereof": [5, 7], "to_port": [5, 7], "non": [5, 6, 7], "cu_graph_kernel_node_port_default": 5, 
"cu_graph_kernel_node_port_programmat": 5, "cu_graph_kernel_node_port_launch_ord": 5, "entireti": [5, 7], "work": [5, 7, 9], "accordingli": [5, 7], "cugraphdependencytyp": 5, "It": [5, 6, 7, 9, 17], "char": [5, 6, 7, 9], "due": [5, 7], "layout": [5, 7, 21, 30], "bitfield": [5, 7], "ensur": [5, 7, 9], "addit": [5, 7], "ad": [5, 7], "cuda_graph_instantiate_params_st": 5, "instanti": [5, 7], "huploadstream": 5, "upload": [5, 7], "custream": [5, 7], "herrnode_out": [5, 7], "caus": [5, 6, 7], "fail": [5, 7], "cugraphnod": [5, 7], "result_out": [5, 7], "whether": [5, 6, 7], "wa": [5, 6, 7, 9], "success": [5, 7], "cugraphinstantiateresult": 5, "culaunchmemsyncdomainmap_st": 5, "synchron": [5, 7, 13], "domain": [5, 7], "cudalaunchmemsyncdomain": [5, 7], "By": [5, 7, 17], "cu_launch_mem_sync_domain_remot": 5, "id": [5, 7], "also": [5, 6, 7, 8, 9], "alter": [5, 7], "culaunchmemsyncdomainmap": 5, "cu_launch_attribute_mem_sync_domain_map": 5, "rang": [5, 6, 7], "through": [5, 6, 7, 13], "cu_device_attribute_mem_sync_domain_count": 5, "default_": [5, 7], "design": [5, 7, 9], "remot": [5, 7], "culaunchattributevalue_union": 5, "union": [5, 7], "culaunchattribut": 5, "accesspolicywindow": [5, 7], "cu_launch_attribute_access_policy_window": 5, "cuaccesspolicywindow": 5, "cooper": [5, 7], "cu_launch_attribute_coop": 5, "nonzero": [5, 7], "culaunchcooperativekernel": 5, "syncpolici": [5, 7], "cu_launch_attribute_synchronization_polici": 5, "cusynchronizationpolici": 5, "queu": [5, 7], "up": [5, 6, 7, 9], "clusterdim": [5, 7], "cu_launch_attribute_cluster_dimens": 5, "desir": [5, 7], "cluster": [5, 7], "divisor": [5, 7], "anon_struct1": 5, "clusterschedulingpolicyprefer": [5, 7], "cu_launch_attribute_cluster_scheduling_policy_prefer": 5, "prefer": [5, 7, 8], "cuclusterschedulingpolici": 5, "programmaticstreamserializationallow": [5, 7], "cu_launch_attribute_programmatic_stream_seri": 5, "programmaticev": [5, 7], "cu_launch_attribute_programmatic_ev": 5, "cuevent": [5, 7], "fire": [5, 7], "record": [5, 7], "cueventrecordwithflag": 5, "doe": [5, 6, 7, 9], "cu_event_record_extern": 5, "triggeratblockstart": [5, 7], "automat": [5, 7, 9], "anon_struct2": 5, "launchcompletionev": [5, 7], "cu_launch_attribute_launch_completion_ev": 5, "last": [5, 7], "anon_struct3": 5, "cu_launch_attribute_prior": 5, "memsyncdomainmap": [5, 7], "memsyncdomain": [5, 7], "cu_launch_attribute_mem_sync_domain": 5, "culaunchmemsyncdomain": 5, "deviceupdatablekernelnod": [5, 7], "cu_launch_attribute_device_updatable_kernel_nod": 5, "deviceupdat": [5, 7], "updat": [5, 6, 7, 18], "cugraphdevicenod": 5, "devnod": [5, 7], "return": [5, 6, 7, 9], "variou": [5, 7], "side": [5, 7], "anon_struct4": 5, "sharedmemcarveout": [5, 7], "cu_launch_attribute_preferred_shared_memory_carveout": 5, "culaunchattribute_st": 5, "culaunchattributeid": 5, "culaunchattributevalu": 5, "culaunchconfig_st": 5, "configur": [5, 7, 9], "hstream": [5, 7], "identifi": [5, 7], "attr": [5, 7], "nullabl": [5, 7], "culaunchconfig": 5, "numattr": 5, "cuexecaffinitysmcount_st": 5, "cu_exec_affinity_type_sm_count": 5, "val": [5, 7], "sm": [5, 7], "cuexecaffinityparam_st": 5, "affin": [5, 7], "cuexecaffinitytyp": 5, "param": [5, 7], "anon_union3": 5, "cuctxcigparam_st": 5, "cig": [5, 7], "shareddatatyp": 5, "cucigdatatyp": 5, "shareddata": 5, "cuctxcreateparams_st": 5, "exactli": [5, 7], "one": [5, 6, 7, 8], "execaffinityparam": 5, "cigparam": 5, "cuexecaffinityparam": 5, "numexecaffinityparam": 5, "cuctxcigparam": 5, "culibraryhostuniversalfunctionanddatatable_st": 5, 
"functiont": 5, "functionwindows": 5, "datat": 5, "datawindows": 5, "cuda_memcpy2d_st": 5, "2d": [5, 7], "srcxinbyt": 5, "srcy": 5, "srcmemorytyp": 5, "cumemorytyp": 5, "srchost": 5, "srcdevic": [5, 7], "srcarrai": [5, 7], "cuarrai": [5, 7], "srcpitch": 5, "src": [5, 6, 7], "dstxinbyt": 5, "dsty": 5, "dstmemorytyp": 5, "dsthost": 5, "dstdevic": [5, 7], "dstarrai": [5, 7], "dstpitch": 5, "widthinbyt": 5, "cuda_memcpy3d_st": 5, "3d": [5, 7], "srcz": 5, "srclod": 5, "lod": 5, "reserved0": [5, 7], "srcheight": 5, "dstz": 5, "dstlod": 5, "reserved1": [5, 7], "dstheight": 5, "cuda_memcpy3d_peer_st": 5, "cross": [5, 7], "srccontext": 5, "cu_memorytype_arrai": [5, 7], "dstcontext": 5, "cuda_memcpy_node_params_st": 5, "memcpi": [5, 7], "copyctx": 5, "copyparam": [5, 7], "cuda_memcpy3d": 5, "cuda_array_descriptor_st": 5, "descriptor": [5, 7], "format": [5, 7, 9], "cuarray_format": 5, "numchannel": [5, 7], "channel": [5, 7, 26], "cuda_array3d_descriptor_st": 5, "cuda_array_sparse_properties_st": 5, "spars": [5, 7], "tileext": [5, 7], "anon_struct5": 5, "miptailfirstlevel": [5, 7], "first": [5, 6, 7, 9], "mip": [5, 7], "tail": [5, 7], "miptails": [5, 7], "total": [5, 7], "long": [5, 6, 7, 9], "cu_array_sparse_properties_single_miptail": 5, "cuda_array_memory_requirements_st": 5, "cuda_resource_desc_st": 5, "restyp": [5, 7], "curesourcetyp": [5, 7], "re": [5, 7, 9, 16], "anon_union4": 5, "cuda_texture_desc_st": 5, "addressmod": [5, 7], "mode": [5, 6, 7], "cuaddress_mod": 5, "filtermod": [5, 7], "filter": [5, 7], "cufilter_mod": 5, "maxanisotropi": [5, 7], "anisotropi": [5, 7], "mipmapfiltermod": [5, 7], "mipmap": [5, 7], "mipmaplevelbia": [5, 7], "bia": 5, "minmipmaplevelclamp": [5, 7], "minimum": [5, 6, 7, 8], "clamp": [5, 7], "maxmipmaplevelclamp": [5, 7], "bordercolor": [5, 7], "border": [5, 7], "color": [5, 7], "cuda_resource_view_desc_st": 5, "view": [5, 7], "curesourceviewformat": 5, "firstmipmaplevel": [5, 7], "lastmipmaplevel": [5, 7], "firstlay": [5, 7], "layer": [5, 7], "lastlay": [5, 7], "cutensormap_st": 5, "64": [5, 7], "cuda_pointer_attribute_p2p_tokens_st": 5, "direct": [5, 6, 7], "v3": 5, "token": [5, 6], "p2ptoken": 5, "vaspacetoken": 5, "cuda_launch_params_st": 5, "cuda_external_memory_handle_desc_st": 5, "cuexternalmemoryhandletyp": 5, "anon_union5": 5, "cuda_external_memory_ded": 5, "cuda_external_memory_buffer_desc_st": 5, "buffer": [5, 7], "offset": [5, 7], "where": [5, 7], "cuda_external_memory_mipmapped_array_desc_st": 5, "chain": [5, 7], "arraydesc": 5, "cuda_array3d_descriptor": 5, "numlevel": [5, 7], "cuda_external_semaphore_handle_desc_st": 5, "semaphor": [5, 7], "cuexternalsemaphorehandletyp": 5, "anon_union6": 5, "cuda_external_semaphore_signal_params_st": 5, "signal": [5, 7], "anon_struct15": [5, 7], "cuda_external_semaphore_signal_param": 5, "cuexternalsemaphor": 5, "cu_external_semaphore_handle_type_nvscisync": 5, "cuda_external_semaphore_signal_skip_nvscibuf_memsync": 5, "while": [5, 7], "import": [5, 7, 8, 9], "cu_external_memory_handle_type_nvscibuf": 5, "cuda_external_semaphore_wait_params_st": 5, "wait": [5, 7], "anon_struct18": [5, 7], "cuda_external_semaphore_wait_param": 5, "cuda_external_semaphore_wait_skip_nvscibuf_memsync": 5, "cuda_ext_sem_signal_node_params_st": 5, "extsemarrai": [5, 7], "paramsarrai": [5, 7], "numextsem": [5, 7], "suppli": [5, 7], "cuda_ext_sem_signal_node_params_v2_st": 5, "cuda_ext_sem_wait_node_params_st": 5, "cuda_ext_sem_wait_node_params_v2_st": 5, "cuarraymapinfo_st": 5, "resourcetyp": 5, "anon_union9": [5, 7], "subresourcetyp": 5, 
"subresourc": [5, 7], "cuarraysparsesubresourcetyp": 5, "anon_union10": [5, 7], "memoperationtyp": 5, "cumemoperationtyp": 5, "memhandletyp": 5, "cumemhandletyp": 5, "memhandl": 5, "anon_union11": 5, "devicebitmask": 5, "ordin": [5, 7], "mask": [5, 7], "now": [5, 7, 8, 9, 13, 16, 17], "cumemlocation_st": 5, "modifi": [5, 6, 7], "cumemlocationtyp": [5, 7], "given": [5, 6, 7], "cumemallocationprop_st": 5, "cumemallocationtyp": 5, "requestedhandletyp": 5, "cumemallocationhandletyp": [5, 7], "cumemloc": 5, "win32handlemetadata": 5, "pobject_attribut": 5, "cu_mem_handle_type_win32": 5, "structur": [5, 7, 13], "secur": [5, 7], "transfer": [5, 7, 9], "allocflag": 5, "anon_struct21": [5, 7], "cumulticastobjectprop_st": 5, "numdevic": 5, "amount": [5, 6, 7], "bound": [5, 7], "handletyp": [5, 7], "bitmask": [5, 7], "cumemaccessdesc_st": 5, "cumemprot": [5, 7], "cumemaccess_flag": 5, "cugraphexecupdateresultinfo_st": 5, "cugraphexecupd": 5, "give": [5, 7], "more": [5, 6, 7, 8, 9], "cugraphexecupdateresult": 5, "errornod": [5, 7], "topolog": [5, 7], "associ": [5, 6, 7, 9], "errorfromnod": [5, 7], "cumempoolprops_st": 5, "made": [5, 7], "pool": [5, 6, 7], "alloctyp": [5, 7], "cu_mem_allocation_type_pin": 5, "resid": [5, 7, 9], "win32securityattribut": [5, 7], "lpsecurityattribut": [5, 7], "maxsiz": [5, 7], "system": [5, 6, 7, 9], "usag": [5, 7], "intend": [5, 6, 7], "short": [5, 6, 7, 16], "cumempoolptrexportdata_st": 5, "cuda_mem_alloc_node_params_v1_st": 5, "poolprop": [5, 7], "cu_mem_handle_type_non": 5, "cumempoolprop": 5, "accessdesc": [5, 7], "cumemaccessdesc": 5, "accessdesccount": [5, 7], "exce": [5, 7], "bytes": [5, 7], "dptr": [5, 7], "out": [5, 7, 9], "cuda_mem_alloc_node_params_v2_st": 5, "cuda_mem_free_node_params_st": 5, "cuda_child_graph_node_params_st": 5, "clone": [5, 7], "cuda_event_record_node_params_st": 5, "cuda_event_wait_node_params_st": 5, "cugraphnodeparams_st": 5, "cugraphaddnod": [5, 7], "cugraphnodetyp": 5, "cuda_kernel_node_params_v3": 5, "cuda_memcpy_node_param": 5, "cuda_memset_node_params_v2": 5, "cuda_host_node_params_v2": 5, "cuda_child_graph_node_param": 5, "eventwait": [5, 7], "cuda_event_wait_node_param": 5, "eventrecord": [5, 7], "cuda_event_record_node_param": 5, "extsemsign": [5, 7], "cuda_ext_sem_signal_node_params_v2": 5, "extsemwait": [5, 7], "cuda_ext_sem_wait_node_params_v2": 5, "cuda_mem_alloc_node_params_v2": 5, "cuda_mem_free_node_param": 5, "memop": [5, 7], "cuda_batch_mem_op_node_params_v2": 5, "reserved2": [5, 7], "cueglframe_st": 5, "eglfram": [5, 7], "frame": [5, 7], "contain": [5, 6, 7], "plane": [5, 7], "multiplanar": [5, 7], "anon_union14": 5, "planecount": [5, 7], "frametyp": [5, 7], "cueglframetyp": 5, "eglcolorformat": [5, 7], "cueglcolorformat": 5, "cuformat": 5, "cuipcmem_flag": 5, "cu_ipc_mem_lazy_enable_peer_access": 5, "cumemattach_flag": 5, "attach": [5, 7], "cu_mem_attach_glob": 5, "cu_mem_attach_host": 5, "cannot": [5, 7], "cu_mem_attach_singl": 5, "singl": [5, 6, 7, 8, 9], "cuctx_flag": 5, "cu_ctx_sched_auto": 5, "cu_ctx_sched_spin": 5, "spin": [5, 7], "cu_ctx_sched_yield": 5, "yield": [5, 7], "cu_ctx_sched_blocking_sync": 5, "cu_ctx_blocking_sync": 5, "deprec": [5, 6, 7, 13], "cu_ctx_sched_mask": 5, "cu_ctx_map_host": 5, "cu_ctx_lmem_resize_to_max": 5, "16": [5, 7, 11, 26], "keep": [5, 7], "after": [5, 6, 7, 9], "cu_ctx_coredump_en": 5, "except": [5, 7, 9], "cu_ctx_user_coredump_en": 5, "pipe": 5, "cu_ctx_sync_memop": 5, "128": [5, 7], "cu_ctx_flags_mask": 5, "255": [5, 7], "cuevent_sched_flag": 5, "sched": 5, "cu_event_sched_auto": 
5, "cu_event_sched_spin": 5, "cu_event_sched_yield": 5, "cu_event_sched_blocking_sync": 5, "cl_event_flag": 5, "nvcl": 5, "nvcl_event_sched_auto": 5, "nvcl_event_sched_spin": 5, "nvcl_event_sched_yield": 5, "nvcl_event_sched_blocking_sync": 5, "cl_context_flag": 5, "nvcl_ctx_sched_auto": 5, "nvcl_ctx_sched_spin": 5, "nvcl_ctx_sched_yield": 5, "nvcl_ctx_sched_blocking_sync": 5, "custream_flag": 5, "cu_stream_default": 5, "cu_stream_non_block": 5, "cuevent_flag": 5, "cu_event_default": 5, "cu_event_blocking_sync": 5, "cu_event_disable_tim": 5, "cu_event_interprocess": 5, "suitabl": [5, 7], "interprocess": [5, 7], "cuevent_record_flag": 5, "cu_event_record_default": 5, "captur": [5, 7], "instead": [5, 6, 7, 16, 19], "invalid": [5, 7], "outsid": [5, 7, 9], "cuevent_wait_flag": 5, "cu_event_wait_default": 5, "cu_event_wait_extern": 5, "custreamwaitvalue_flag": 5, "custreamwaitvalue32": 5, "custreamwaitvalue64": 5, "cu_stream_wait_value_geq": 5, "until": [5, 6, 7, 9], "int32_t": 5, "addr": 5, "int64_t": 5, "cyclic": 5, "comparison": 5, "wraparound": 5, "cu_stream_wait_value_eq": 5, "cu_stream_wait_value_and": 5, "cu_stream_wait_value_nor": 5, "cudevicegetattribut": [5, 7, 9], "cu_device_attribute_can_use_stream_wait_value_nor": 5, "cu_stream_wait_value_flush": [5, 7], "1073741824": 5, "flush": [5, 6, 7], "outstand": [5, 7], "write": [5, 7, 9], "guarante": [5, 7], "reach": [5, 7], "befor": [5, 6, 7, 9, 13], "satisfi": [5, 7], "permit": [5, 7], "reorder": 5, "intern": [5, 7], "would": [5, 7, 9], "arriv": 5, "second": [5, 7], "observ": [5, 7], "select": [5, 6, 7], "cu_device_attribute_can_flush_remote_writ": 5, "custreamwritevalue_flag": 5, "custreamwritevalue32": 5, "cu_stream_write_value_default": 5, "cu_stream_write_value_no_memory_barri": 5, "were": [5, 7], "optim": [5, 6, 7], "fenc": [5, 7], "ha": [5, 7, 8, 9, 16, 17], "similar": [5, 7], "semant": [5, 7], "__threadfence_system": 5, "rather": [5, 7, 26], "than": [5, 6, 7, 26], "v2": 5, "cu_stream_mem_op_wait_value_32": 5, "cu_stream_mem_op_write_value_32": 5, "cu_stream_mem_op_wait_value_64": 5, "cu_stream_mem_op_write_value_64": 5, "custreamwritevalue64": 5, "cu_stream_mem_op_barri": 5, "insert": [5, 7], "barrier": [5, 7, 8], "cu_stream_mem_op_flush_remote_writ": [5, 7], "effect": [5, 7], "standalon": 5, "custreammemorybarrier_flag": 5, "custreammemorybarri": 5, "cu_stream_memory_barrier_type_si": 5, "wide": [5, 7], "cu_stream_memory_barrier_type_gpu": 5, "cuoccupancy_flag": 5, "calcul": [5, 7], "cu_occupancy_default": 5, "cu_occupancy_disable_caching_overrid": 5, "assum": [5, 6, 7], "global": [5, 6, 7], "cach": [5, 7], "turn": [5, 6, 7], "off": [5, 6, 7], "custreamupdatecapturedependencies_flag": 5, "custreamupdatecapturedepend": 5, "cu_stream_add_capture_depend": 5, "add": [5, 6, 7, 23, 27, 29], "new": [5, 6, 7, 8, 20, 21, 26, 30], "cu_stream_set_capture_depend": 5, "replac": [5, 7, 8], "sent": [5, 7], "cu_async_notification_type_over_budget": 5, "cu_ad_format_unsigned_int8": 5, "integ": [5, 7], "cu_ad_format_unsigned_int16": 5, "cu_ad_format_unsigned_int32": 5, "cu_ad_format_signed_int8": 5, "sign": [5, 7], "cu_ad_format_signed_int16": 5, "cu_ad_format_signed_int32": 5, "cu_ad_format_half": 5, "cu_ad_format_float": 5, "cu_ad_format_nv12": 5, "176": 5, "yuv": [5, 7], "planar": [5, 7], "sampl": [5, 7], "cu_ad_format_unorm_int8x1": 5, "192": 5, "cu_ad_format_unorm_int8x2": 5, "193": 5, "cu_ad_format_unorm_int8x4": 5, "194": 5, "cu_ad_format_unorm_int16x1": 5, "195": 5, "cu_ad_format_unorm_int16x2": 5, "196": 5, "cu_ad_format_unorm_int16x4": 5, 
"197": 5, "cu_ad_format_snorm_int8x1": 5, "198": 5, "cu_ad_format_snorm_int8x2": 5, "199": 5, "cu_ad_format_snorm_int8x4": 5, "200": [5, 7], "cu_ad_format_snorm_int16x1": 5, "201": [5, 7], "cu_ad_format_snorm_int16x2": 5, "202": 5, "cu_ad_format_snorm_int16x4": 5, "203": 5, "cu_ad_format_bc1_unorm": 5, "145": 5, "compress": [5, 7], "bc1": [5, 7], "cu_ad_format_bc1_unorm_srgb": 5, "146": 5, "srgb": [5, 7], "encod": [5, 7, 9], "cu_ad_format_bc2_unorm": 5, "147": 5, "bc2": [5, 7], "cu_ad_format_bc2_unorm_srgb": 5, "148": 5, "cu_ad_format_bc3_unorm": 5, "149": 5, "bc3": [5, 7], "cu_ad_format_bc3_unorm_srgb": 5, "150": 5, "cu_ad_format_bc4_unorm": 5, "151": 5, "bc4": [5, 7], "cu_ad_format_bc4_snorm": 5, "152": 5, "cu_ad_format_bc5_unorm": 5, "153": 5, "bc5": [5, 7], "cu_ad_format_bc5_snorm": 5, "154": 5, "cu_ad_format_bc6h_uf16": 5, "155": 5, "half": [5, 7], "bc6h": [5, 7], "cu_ad_format_bc6h_sf16": 5, "156": 5, "cu_ad_format_bc7_unorm": 5, "157": 5, "bc7": [5, 7], "cu_ad_format_bc7_unorm_srgb": 5, "158": 5, "cu_ad_format_p010": 5, "159": 5, "cu_ad_format_p016": 5, "161": 5, "cu_ad_format_nv16": 5, "162": 5, "cu_ad_format_p210": 5, "163": 5, "cu_ad_format_p216": 5, "164": 5, "cu_ad_format_yuy2": 5, "165": 5, "pack": 5, "cu_ad_format_y210": 5, "166": 5, "cu_ad_format_y216": 5, "167": 5, "cu_ad_format_ayuv": 5, "168": 5, "cu_ad_format_y410": 5, "169": 5, "cu_ad_format_y416": 5, "177": 5, "cu_ad_format_y444_planar8": 5, "178": 5, "cu_ad_format_y444_planar10": 5, "179": 5, "cu_ad_format_max": 5, "2147483647": [5, 7], "cu_tr_address_mode_wrap": 5, "wrap": [5, 7, 9, 13], "cu_tr_address_mode_clamp": 5, "cu_tr_address_mode_mirror": 5, "mirror": [5, 7], "cu_tr_address_mode_bord": 5, "cu_tr_filter_mode_point": 5, "cu_tr_filter_mode_linear": 5, "linear": [5, 7], "cudevice_attribut": [5, 9], "cu_device_attribute_max_threads_per_block": 5, "cu_device_attribute_max_block_dim_x": 5, "cu_device_attribute_max_block_dim_i": 5, "cu_device_attribute_max_block_dim_z": 5, "cu_device_attribute_max_grid_dim_x": 5, "cu_device_attribute_max_grid_dim_i": 5, "cu_device_attribute_max_grid_dim_z": 5, "cu_device_attribute_max_shared_memory_per_block": 5, "cu_device_attribute_shared_memory_per_block": 5, "cu_device_attribute_total_constant_memori": 5, "cu_device_attribute_warp_s": 5, "cu_device_attribute_max_pitch": 5, "cu_device_attribute_max_registers_per_block": 5, "cu_device_attribute_registers_per_block": 5, "cu_device_attribute_clock_r": 5, "13": [5, 7], "typic": [5, 6, 7], "cu_device_attribute_texture_align": 5, "14": [5, 6, 7], "cu_device_attribute_gpu_overlap": 5, "15": [5, 7], "possibl": [5, 7, 9], "concurr": [5, 6, 7], "cu_device_attribute_async_engine_count": 5, "cu_device_attribute_multiprocessor_count": 5, "multiprocessor": [5, 7], "cu_device_attribute_kernel_exec_timeout": 5, "17": [5, 6, 7], "cu_device_attribute_integr": 5, "18": [5, 7, 12, 14, 19], "integr": [5, 7], "cu_device_attribute_can_map_host_memori": 5, "19": [5, 7, 26], "cu_device_attribute_compute_mod": 5, "20": [5, 6, 7], "comput": [5, 7, 8, 9], "cucomputemod": 5, "cu_device_attribute_maximum_texture1d_width": 5, "21": [5, 7, 28], "1d": [5, 7], "cu_device_attribute_maximum_texture2d_width": 5, "22": [5, 7, 16], "cu_device_attribute_maximum_texture2d_height": 5, "23": [5, 7, 20], "cu_device_attribute_maximum_texture3d_width": 5, "24": [5, 7, 17], "cu_device_attribute_maximum_texture3d_height": 5, "25": [5, 7], "cu_device_attribute_maximum_texture3d_depth": 5, "26": [5, 7], "cu_device_attribute_maximum_texture2d_layered_width": 5, "27": [5, 7, 18], 
"cu_device_attribute_maximum_texture2d_layered_height": 5, "28": [5, 7, 23, 24], "cu_device_attribute_maximum_texture2d_layered_lay": 5, "29": [5, 7, 16], "cu_device_attribute_maximum_texture2d_array_width": 5, "cu_device_attribute_maximum_texture2d_array_height": 5, "cu_device_attribute_maximum_texture2d_array_numslic": 5, "cu_device_attribute_surface_align": 5, "30": [5, 7], "cu_device_attribute_concurrent_kernel": 5, "31": [5, 7], "multipl": [5, 7, 9], "cu_device_attribute_ecc_en": 5, "ecc": [5, 7], "cu_device_attribute_pci_bus_id": 5, "33": [5, 7], "pci": [5, 7], "bu": [5, 7], "cu_device_attribute_pci_device_id": 5, "34": [5, 7], "cu_device_attribute_tcc_driv": 5, "35": [5, 7], "tcc": [5, 7], "model": [5, 7, 9], "cu_device_attribute_memory_clock_r": 5, "36": [5, 7], "peak": [5, 7], "cu_device_attribute_global_memory_bus_width": 5, "37": [5, 7], "cu_device_attribute_l2_cache_s": 5, "l2": [5, 7], "cu_device_attribute_max_threads_per_multiprocessor": 5, "39": [5, 7], "40": [5, 7], "asynchron": [5, 7, 9], "engin": [5, 7, 8, 9], "cu_device_attribute_unified_address": 5, "41": [5, 7, 23], "cu_device_attribute_maximum_texture1d_layered_width": 5, "42": [5, 7, 23], "cu_device_attribute_maximum_texture1d_layered_lay": 5, "43": [5, 7, 23], "cu_device_attribute_can_tex2d_gath": 5, "44": [5, 7, 24], "cu_device_attribute_maximum_texture2d_gather_width": 5, "45": [5, 7, 24], "cuda_array3d_texture_gath": 5, "cu_device_attribute_maximum_texture2d_gather_height": 5, "46": [5, 7], "cu_device_attribute_maximum_texture3d_width_altern": 5, "47": [5, 7], "altern": [5, 6, 7], "cu_device_attribute_maximum_texture3d_height_altern": 5, "48": [5, 7, 26], "cu_device_attribute_maximum_texture3d_depth_altern": 5, "49": [5, 7], "cu_device_attribute_pci_domain_id": 5, "50": [5, 7], "cu_device_attribute_texture_pitch_align": 5, "51": [5, 7, 26], "cu_device_attribute_maximum_texturecubemap_width": 5, "52": [5, 7], "cubemap": [5, 7], "cu_device_attribute_maximum_texturecubemap_layered_width": 5, "53": [5, 7], "cu_device_attribute_maximum_texturecubemap_layered_lay": 5, "54": [5, 7], "cu_device_attribute_maximum_surface1d_width": 5, "55": [5, 7], "cu_device_attribute_maximum_surface2d_width": 5, "56": [5, 7], "cu_device_attribute_maximum_surface2d_height": 5, "57": [5, 7], "cu_device_attribute_maximum_surface3d_width": 5, "58": [5, 7, 28], "cu_device_attribute_maximum_surface3d_height": 5, "59": [5, 7], "cu_device_attribute_maximum_surface3d_depth": 5, "60": [5, 7], "cu_device_attribute_maximum_surface1d_layered_width": 5, "61": [5, 7], "cu_device_attribute_maximum_surface1d_layered_lay": 5, "62": [5, 7], "cu_device_attribute_maximum_surface2d_layered_width": 5, "63": [5, 7], "cu_device_attribute_maximum_surface2d_layered_height": 5, "cu_device_attribute_maximum_surface2d_layered_lay": 5, "65": [5, 7], "cu_device_attribute_maximum_surfacecubemap_width": 5, "66": [5, 7], "cu_device_attribute_maximum_surfacecubemap_layered_width": 5, "67": [5, 7], "cu_device_attribute_maximum_surfacecubemap_layered_lay": 5, "68": [5, 7], "cu_device_attribute_maximum_texture1d_linear_width": 5, "69": [5, 7], "cudadevicegettexture1dlinearmaxwidth": [5, 7], "cudevicegettexture1dlinearmaxwidth": [5, 7], "cu_device_attribute_maximum_texture2d_linear_width": 5, "70": [5, 7], "cu_device_attribute_maximum_texture2d_linear_height": 5, "71": [5, 7], "cu_device_attribute_maximum_texture2d_linear_pitch": 5, "72": [5, 7, 29], "cu_device_attribute_maximum_texture2d_mipmapped_width": 5, "73": [5, 7, 29], 
"cu_device_attribute_maximum_texture2d_mipmapped_height": 5, "74": [5, 7], "cu_device_attribute_compute_capability_major": [5, 9], "75": [5, 7], "cu_device_attribute_compute_capability_minor": [5, 9], "76": [5, 7], "cu_device_attribute_maximum_texture1d_mipmapped_width": 5, "77": [5, 7], "cu_device_attribute_stream_priorities_support": 5, "78": [5, 7], "cu_device_attribute_global_l1_cache_support": 5, "79": [5, 7], "l1": [5, 7], "cu_device_attribute_local_l1_cache_support": 5, "cu_device_attribute_max_shared_memory_per_multiprocessor": 5, "81": [5, 7], "cu_device_attribute_max_registers_per_multiprocessor": 5, "82": [5, 7], "cu_device_attribute_managed_memori": 5, "83": [5, 7], "cu_device_attribute_multi_gpu_board": 5, "84": [5, 7], "multi": [5, 7], "board": [5, 7], "cu_device_attribute_multi_gpu_board_group_id": 5, "85": [5, 7], "uniqu": [5, 7], "group": [5, 7], "cu_device_attribute_host_native_atomic_support": 5, "86": [5, 7], "link": [5, 6, 7], "nativ": [5, 7], "atom": [5, 7], "placehold": 5, "hardwar": [5, 7], "cu_device_attribute_single_to_double_precision_perf_ratio": 5, "87": [5, 7], "precis": [5, 6, 7], "doubl": [5, 7], "cu_device_attribute_pageable_memory_access": 5, "88": [5, 7], "coher": [5, 7], "pageabl": [5, 7], "cudahostregist": [5, 7], "cu_device_attribute_concurrent_managed_access": 5, "89": [5, 7, 21], "cpu": [5, 7, 8, 9], "cu_device_attribute_compute_preemption_support": 5, "90": [5, 7, 21, 30], "preemption": [5, 7], "cu_device_attribute_can_use_host_pointer_for_registered_mem": 5, "91": [5, 7], "cu_device_attribute_can_use_stream_mem_ops_v1": 5, "92": [5, 7], "along": [5, 6, 7], "v1": 5, "relat": [5, 7], "cu_device_attribute_can_use_64_bit_stream_mem_ops_v1": 5, "93": [5, 7], "cu_device_attribute_can_use_stream_wait_value_nor_v1": 5, "94": [5, 7], "cu_device_attribute_cooperative_launch": 5, "95": [5, 7], "cu_device_attribute_cooperative_multi_device_launch": 5, "96": [5, 7], "culaunchcooperativekernelmultidevic": 5, "cu_device_attribute_max_shared_memory_per_block_optin": 5, "97": [5, 7], "optin": [5, 7], "98": [5, 7], "cu_device_attribute_host_register_support": 5, "99": [5, 7], "registr": [5, 7], "cu_device_attribute_pageable_memory_access_uses_host_page_t": 5, "100": [5, 7], "tabl": [5, 6, 7, 9], "cu_device_attribute_direct_managed_mem_access_from_host": 5, "101": [5, 6, 7], "directli": [5, 6, 7], "migrat": [5, 7], "cu_device_attribute_virtual_address_management_support": 5, "102": [5, 7], "cu_device_attribute_virtual_memory_management_support": 5, "like": [5, 7, 9], "cumemaddressreserv": 5, "cumemcr": [5, 7], "cumemmap": 5, "cu_device_attribute_handle_type_posix_file_descriptor_support": 5, "103": [5, 7], "posix": [5, 7], "cumemexporttoshareablehandl": 5, "cu_device_attribute_handle_type_win32_handle_support": 5, "104": [5, 7], "win32": [5, 7], "nt": [5, 7], "cu_device_attribute_handle_type_win32_kmt_handle_support": 5, "105": [5, 7], "kmt": [5, 7], "cu_device_attribute_max_blocks_per_multiprocessor": 5, "106": [5, 7], "cu_device_attribute_generic_compression_support": 5, "107": [5, 7], "cu_device_attribute_max_persisting_l2_cache_s": 5, "108": [5, 7], "persist": [5, 7], "capac": [5, 7], "109": [5, 7], "cu_device_attribute_gpu_direct_rdma_with_cuda_vmm_support": 5, "110": [5, 7], "gpudirect": [5, 7], "rdma": [5, 7], "cu_device_attribute_reserved_shared_memory_per_block": 5, "111": [5, 7], "cu_device_attribute_sparse_cuda_array_support": 5, "112": [5, 7], "cu_device_attribute_read_only_host_register_support": 5, "113": [5, 7], "cumemhostregist": [5, 7], 
"cu_memhostergister_read_onli": 5, "read": [5, 7], "cu_device_attribute_timeline_semaphore_interop_support": 5, "114": [5, 7], "timelin": [5, 7], "interop": [5, 7, 28], "cu_device_attribute_memory_pools_support": 5, "115": [5, 7], "cumemallocasync": [5, 7], "cumempool": 5, "famili": [5, 7], "cu_device_attribute_gpu_direct_rdma_support": 5, "116": [5, 7], "nvidia_p2p_get_pag": [5, 7], "cu_device_attribute_gpu_direct_rdma_flush_writes_opt": 5, "117": [5, 7], "shall": [5, 7], "interpret": [5, 7, 9], "cuflushgpudirectrdmawritesopt": 5, "enum": [5, 7], "cu_device_attribute_gpu_direct_rdma_writes_ord": 5, "118": [5, 7], "consum": [5, 7], "cugpudirectrdmawritesord": 5, "numer": [5, 6, 7], "here": [5, 6, 7, 9], "cu_device_attribute_mempool_supported_handle_typ": 5, "119": [5, 7], "mempool": [5, 7], "cu_device_attribute_cluster_launch": 5, "120": [5, 7], "cu_device_attribute_deferred_mapping_cuda_array_support": 5, "121": [5, 7], "defer": [5, 7], "cu_device_attribute_can_use_64_bit_stream_mem_op": 5, "122": [5, 7], "123": [5, 7], "cu_device_attribute_dma_buf_support": 5, "124": [5, 7], "dma_buf": 5, "mechan": [5, 7], "cu_device_attribute_ipc_event_support": 5, "125": [5, 7], "126": [5, 7], "cu_device_attribute_tensor_map_access_support": 5, "127": [5, 7], "cu_device_attribute_handle_type_fabric_support": 5, "cu_device_attribute_unified_function_point": 5, "129": [5, 7], "cu_device_attribute_numa_config": 5, "130": [5, 7], "numa": [5, 7], "cudevicenumaconfig": 5, "cu_device_attribute_numa_id": 5, "131": [5, 7], "cu_device_attribute_multicast_support": 5, "132": [5, 7], "switch": [5, 6, 7], "reduct": 5, "cu_device_attribute_mps_en": 5, "133": [5, 7], "mp": [5, 7], "cu_device_attribute_host_numa_id": 5, "134": [5, 7], "closest": [5, 7], "cu_device_attribute_d3d12_cig_support": 5, "135": [5, 7], "d3d12": [5, 7], "cu_device_attribute_max": 5, "136": [5, 7], "cupointer_attribut": 5, "cu_pointer_attribute_context": 5, "cu_pointer_attribute_memory_typ": 5, "cu_pointer_attribute_device_point": 5, "cu_pointer_attribute_host_point": 5, "cu_pointer_attribute_p2p_token": 5, "pair": [5, 7], "nv": 5, "p2p": [5, 7], "h": [5, 6, 7, 16], "interfac": [5, 7, 8, 9], "cu_pointer_attribute_sync_memop": 5, "everi": [5, 7], "region": [5, 7], "cu_pointer_attribute_buffer_id": 5, "cu_pointer_attribute_is_manag": 5, "cu_pointer_attribute_device_ordin": 5, "cu_pointer_attribute_is_legacy_cuda_ipc_cap": 5, "cudaipcgetmemhandl": [5, 7], "cu_pointer_attribute_range_start_addr": 5, "cu_pointer_attribute_range_s": 5, "cu_pointer_attribute_map": 5, "back": [5, 7, 9], "cu_pointer_attribute_allowed_handle_typ": 5, "cu_pointer_attribute_is_gpu_direct_rdma_cap": 5, "cu_pointer_attribute_access_flag": 5, "correspond": [5, 6, 7], "cu_pointer_attribute_mempool_handl": 5, "cu_pointer_attribute_mapping_s": 5, "actual": [5, 6, 7], "underli": [5, 7], "cu_pointer_attribute_mapping_base_addr": 5, "cu_pointer_attribute_memory_block_id": 5, "cufunction_attribut": 5, "cu_func_attribute_max_threads_per_block": 5, "beyond": [5, 7], "load": [5, 7, 9, 14], "cu_func_attribute_shared_size_byt": 5, "static": [5, 7], "cu_func_attribute_const_size_byt": 5, "cu_func_attribute_local_size_byt": 5, "cu_func_attribute_num_reg": 5, "cu_func_attribute_ptx_vers": 5, "ptx": [5, 6, 7, 9], "so": [5, 7, 9, 19], "undefin": [5, 6, 7], "cubin": [5, 6], "prior": [5, 7], "cu_func_attribute_binary_vers": 5, "binari": [5, 7], "properli": [5, 7], "cu_func_attribute_cache_mode_ca": 5, "been": [5, 6, 7, 9, 16], "xptxa": [5, 6, 7], "dlcm": [5, 7], "ca": [5, 7], 
"cu_func_attribute_max_dynamic_shared_size_byt": 5, "larger": [5, 7], "cufuncsetattribut": 5, "cukernelsetattribut": 5, "cu_func_attribute_preferred_shared_memory_carveout": 5, "On": [5, 6, 7], "carveout": [5, 7], "percent": [5, 7], "hint": [5, 7], "choos": [5, 7], "cu_func_attribute_cluster_size_must_be_set": 5, "cu_func_attribute_required_cluster_width": 5, "check": [5, 6, 7, 9], "cuda_error_not_permit": 5, "cu_func_attribute_required_cluster_height": 5, "cu_func_attribute_required_cluster_depth": 5, "cu_func_attribute_non_portable_cluster_size_allow": 5, "portabl": [5, 7, 9], "disallow": [5, 7], "sku": [5, 7], "program": [5, 6, 7, 8, 9], "might": [5, 7], "cudaoccupancymaxactiveclust": [5, 7], "assist": [5, 7], "higher": [5, 6, 7, 9], "target": [5, 6, 7, 8, 9], "sm_90": [5, 6, 7], "increas": [5, 6, 7, 9], "unit": [5, 6, 7, 9], "cu_func_attribute_cluster_scheduling_policy_prefer": 5, "cudaclusterschedulingpolici": [5, 7], "cu_func_attribute_max": 5, "cufunc_cach": 5, "cu_func_cache_prefer_non": 5, "cu_func_cache_prefer_shar": 5, "smaller": [5, 7, 8], "cu_func_cache_prefer_l1": 5, "cu_func_cache_prefer_equ": 5, "equal": [5, 6, 7], "cusharedconfig": 5, "cu_shared_mem_config_default_bank_s": 5, "bank": 5, "cu_shared_mem_config_four_byte_bank_s": 5, "four": [5, 7], "cu_shared_mem_config_eight_byte_bank_s": 5, "eight": 5, "cushared_carveout": 5, "cu_sharedmem_carveout_default": 5, "No": [5, 6, 7], "cu_sharedmem_carveout_max_shar": 5, "cu_sharedmem_carveout_max_l1": 5, "cu_memorytype_host": 5, "cu_memorytype_devic": [5, 7], "cu_memorytype_unifi": 5, "cu_computemode_default": 5, "cu_computemode_prohibit": 5, "prohibit": [5, 7], "cu_computemode_exclusive_process": 5, "exclus": [5, 7], "present": [5, 6, 7], "cumem_advis": 5, "advis": [5, 7], "cu_mem_advise_set_read_mostli": 5, "mostli": [5, 7], "occasion": [5, 7], "written": [5, 6, 7], "cu_mem_advise_unset_read_mostli": 5, "undo": [5, 7], "cu_mem_advise_set_preferred_loc": [5, 7], "cu_mem_advise_unset_preferred_loc": 5, "clear": [5, 7], "cu_mem_advise_set_accessed_bi": 5, "prevent": [5, 7], "fault": [5, 7], "much": [5, 7], "cu_mem_advise_unset_accessed_bi": [5, 7], "let": [5, 7], "subsystem": [5, 7], "decid": [5, 7], "cumem_range_attribut": 5, "cu_mem_range_attribute_read_mostli": 5, "cu_mem_range_attribute_preferred_loc": 5, "cu_mem_range_attribute_accessed_bi": 5, "cu_mem_range_attribute_last_prefetch_loc": 5, "prefetch": [5, 7], "cu_mem_range_attribute_preferred_location_typ": 5, "cu_mem_range_attribute_preferred_location_id": 5, "cu_mem_range_attribute_last_prefetch_location_typ": 5, "cu_mem_range_attribute_last_prefetch_location_id": 5, "cujit_opt": 5, "linker": 5, "cu_jit_max_regist": 5, "max": [5, 7], "cu_jit_threads_per_block": 5, "IN": 5, "util": [5, 7, 8], "abl": [5, 7], "combin": [5, 7], "cu_jit_target": 5, "cu_jit_wall_tim": 5, "overwrit": [5, 7], "wall": 5, "millisecond": [5, 7], "spent": 5, "cu_jit_info_log_buff": 5, "print": 5, "log": [5, 6], "messag": [5, 6], "natur": [5, 7, 9], "cu_jit_info_log_buffer_size_byt": 5, "cap": 5, "termin": [5, 7], "fill": [5, 7], "cu_jit_error_log_buff": 5, "reflect": [5, 7], "cu_jit_error_log_buffer_size_byt": 5, "cu_jit_optimization_level": 5, "being": [5, 7], "highest": [5, 7], "cu_jit_target_from_cucontext": 5, "chosen": [5, 7], "cujit_target": 5, "enumer": [5, 6, 7], "cu_jit_fallback_strategi": 5, "choic": 5, "fallback": [5, 7], "strategi": 5, "found": [5, 7, 16, 24], "cujit_fallback": 5, "culink": 5, "exact": [5, 6, 7], "cu_jit_generate_debug_info": 5, "fals": [5, 6, 9], "cu_jit_log_verbos": 5, 
"verbos": 5, "cu_jit_generate_line_info": 5, "lineinfo": [5, 6], "cu_jit_cache_mod": 5, "explicitli": [5, 6, 7], "cujit_cachemode_enum": 5, "cu_jit_new_sm3x_opt": 5, "cu_jit_fast_compil": 5, "jit": [5, 7], "purpos": [5, 7], "cu_jit_global_symbol_nam": 5, "symbol": [5, 7, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "name": [5, 6, 7, 9], "reloc": 5, "store": [5, 6, 7, 9], "cu_jit_global_symbol_address": 5, "cu_jit_global_symbol_count": 5, "unresolv": 5, "illeg": [5, 7], "const": 5, "void": [5, 7, 9], "cu_jit_lto": 5, "lto": [5, 6], "ir": [5, 6], "cu_jit_ftz": 5, "cu_jit_prec_div": 5, "cu_jit_prec_sqrt": 5, "cu_jit_fma": 5, "cu_jit_referenced_kernel_nam": 5, "cu_jit_referenced_kernel_count": 5, "cu_jit_referenced_variable_nam": 5, "cu_jit_referenced_variable_count": 5, "cu_jit_optimize_unused_device_vari": 5, "cu_jit_position_independent_cod": 5, "independ": [5, 7], "cu_jit_min_cta_per_sm": 5, "cta": 5, "togeth": [5, 8], "cu_jit_max_threads_per_block": 5, "well": [5, 7], "alreadi": [5, 7], "minnctapersm": 5, "cu_jit_override_directive_valu": 5, "preced": [5, 6, 7], "over": [5, 7], "product": [5, 7], "exeed": 5, "invoc": 5, "exceed": [5, 7], "failur": [5, 7], "maxntid": 5, "cu_jit_num_opt": 5, "cu_target_compute_30": 5, "cu_target_compute_32": 5, "cu_target_compute_35": 5, "cu_target_compute_37": 5, "cu_target_compute_50": 5, "cu_target_compute_52": 5, "cu_target_compute_53": 5, "cu_target_compute_60": 5, "cu_target_compute_61": 5, "cu_target_compute_62": 5, "cu_target_compute_70": 5, "cu_target_compute_72": 5, "cu_target_compute_75": 5, "cu_target_compute_80": 5, "cu_target_compute_86": 5, "cu_target_compute_87": 5, "cu_target_compute_89": 5, "cu_target_compute_90": 5, "acceler": [5, 7, 8, 9], "cu_target_compute_90a": 5, "65626": 5, "cu_prefer_ptx": 5, "cu_prefer_binari": 5, "cujit_cachemod": 5, "cu_jit_cache_option_non": 5, "cu_jit_cache_option_cg": 5, "cu_jit_cache_option_ca": 5, "cujitinputtyp": 5, "cu_jit_input_cubin": 5, "applic": [5, 7, 8], "none": [5, 6, 7, 9], "cu_jit_input_ptx": 5, "cu_jit_input_fatbinari": 5, "bundl": 5, "some": [5, 7, 9], "cu_jit_input_object": 5, "embed": [5, 6, 7], "cu_jit_input_librari": 5, "archiv": 5, "cu_jit_input_nvvm": 5, "cu_jit_num_input_typ": 5, "cugraphicsregisterflag": 5, "cu_graphics_register_flags_non": 5, "cu_graphics_register_flags_read_onli": 5, "cu_graphics_register_flags_write_discard": 5, "cu_graphics_register_flags_surface_ldst": 5, "cu_graphics_register_flags_texture_gath": 5, "cugraphicsmapresourceflag": 5, "unmap": [5, 7], "cu_graphics_map_resource_flags_non": 5, "cu_graphics_map_resource_flags_read_onli": 5, "cu_graphics_map_resource_flags_write_discard": 5, "cuarray_cubemap_fac": 5, "cube": [5, 7], "cu_cubemap_face_positive_x": 5, "cu_cubemap_face_negative_x": 5, "neg": [5, 7], "cu_cubemap_face_positive_i": 5, "cu_cubemap_face_negative_i": 5, "cu_cubemap_face_positive_z": 5, "cu_cubemap_face_negative_z": 5, "culimit": 5, "cu_limit_stack_s": 5, "stack": [5, 6, 7], "cu_limit_printf_fifo_s": 5, "printf": [5, 7], "fifo": [5, 7], "cu_limit_malloc_heap_s": 5, "malloc": [5, 7], "heap": [5, 7], "cu_limit_dev_runtime_sync_depth": 5, "cu_limit_dev_runtime_pending_launch_count": 5, "pend": [5, 7], "cu_limit_max_l2_fetch_granular": 5, "fetch": [5, 7], "granular": [5, 7], "cu_limit_persisting_l2_cache_s": 5, "cu_limit_shmem_s": 5, "cu_limit_cig_en": 5, "cu_limit_cig_shmem_fallback_en": 5, "cu_limit_max": 5, "cu_resource_type_arrai": 5, "cu_resource_type_mipmapped_arrai": 5, "cu_resource_type_linear": 5, 
"cu_resource_type_pitch2d": 5, "cu_access_property_norm": 5, "cu_access_property_stream": 5, "less": [5, 6, 7], "persit": [5, 7], "cu_access_property_persist": 5, "cu_graph_cond_type_if": 5, "onc": [5, 7], "cu_graph_cond_type_whil": 5, "repeatedli": [5, 7], "cu_graph_node_type_kernel": 5, "cu_graph_node_type_memcpi": 5, "cu_graph_node_type_memset": 5, "cu_graph_node_type_host": 5, "cu_graph_node_type_graph": 5, "cu_graph_node_type_empti": 5, "op": [5, 7], "cu_graph_node_type_wait_ev": 5, "cu_graph_node_type_event_record": 5, "cu_graph_node_type_ext_semas_sign": 5, "cu_graph_node_type_ext_semas_wait": 5, "cu_graph_node_type_mem_alloc": 5, "cu_graph_node_type_mem_fre": 5, "cu_graph_node_type_batch_mem_op": 5, "cu_graph_node_type_condit": 5, "loop": [5, 7], "insid": [5, 7], "iter": [5, 7, 8], "upon": [5, 7], "exist": [5, 6, 7, 8], "cudagraphsetcondit": [5, 7], "part": [5, 7, 9], "cugraphedgedata": 5, "cu_graph_dependency_type_default": 5, "ordinari": [5, 7], "cu_graph_dependency_type_programmat": 5, "cudagriddependencysynchron": [5, 7], "outgo": [5, 7], "cuda_graph_instantiate_success": 5, "succeed": [5, 7], "cuda_graph_instantiate_error": 5, "unexpect": [5, 7], "cuda_graph_instantiate_invalid_structur": 5, "cycl": [5, 7], "cuda_graph_instantiate_node_operation_not_support": 5, "becaus": [5, 6, 7, 9], "unsupport": [5, 7], "cuda_graph_instantiate_multiple_ctxs_not_support": 5, "cu_sync_policy_auto": 5, "cu_sync_policy_spin": 5, "cu_sync_policy_yield": 5, "cu_sync_policy_blocking_sync": 5, "cu_cluster_scheduling_policy_default": 5, "cu_cluster_scheduling_policy_spread": 5, "spread": [5, 7], "cu_cluster_scheduling_policy_load_balanc": 5, "balanc": [5, 7], "affect": [5, 7], "thu": [5, 7], "elimin": [5, 7, 9], "latenc": [5, 7], "unrel": [5, 7], "traffic": [5, 7], "custreamsetattribut": 5, "culaunchkernelex": 5, "cugraphkernelnodesetattribut": 5, "done": [5, 7, 13], "distanc": [5, 7], "word": [5, 7], "suffici": [5, 7], "anoth": [5, 7, 9], "even": [5, 6, 7, 9], "cu_launch_mem_sync_domain_default": 5, "cu_launch_attribute_ignor": 5, "conveni": [5, 7], "composit": [5, 7], "programmat": [5, 7], "resolv": [5, 7, 16, 17, 18, 21, 23, 24, 26, 28, 29, 30], "opportunist": [5, 7], "overlap": [5, 7, 9], "previou": [5, 6, 7, 17], "sync": [5, 7], "equival": [5, 6, 7], "instruct": [5, 6, 7, 9, 18], "launchdep": 5, "builtin": [5, 6], "cudatriggerprogrammaticlaunchcomplet": [5, 7], "cueventsynchron": [5, 7], "primarili": [5, 7], "meant": [5, 7], "establish": [5, 7], "i": [5, 6, 7, 17], "nomin": [5, 7], "begun": [5, 7], "effort": [5, 7], "b": [5, 7, 9], "claim": [5, 7], "unavail": [5, 7], "exercis": [5, 7], "caution": [5, 7], "invers": [5, 7], "lead": [5, 7], "deadlock": [5, 7], "relev": [5, 7], "cudagraphkernelnodeupdatesappli": [5, 7], "compar": [5, 7, 9], "regular": [5, 7], "firstli": [5, 7], "cugraphdestroynod": 5, "addition": [5, 7], "opt": [5, 6, 7, 9], "attempt": [5, 6, 7], "cugraphkernelnodecopyattribut": 5, "neither": [5, 7], "nor": [5, 7], "those": [5, 6, 7], "cugraphupload": [5, 7], "again": [5, 7], "custreamcapturestatu": 5, "status": [5, 7], "custreamiscaptur": 5, "cu_stream_capture_status_non": 5, "cu_stream_capture_status_act": 5, "cu_stream_capture_status_invalid": 5, "sequenc": [5, 7], "custreamcapturemod": 5, "custreambegincaptur": [5, 7], "cuthreadexchangestreamcapturemod": 5, "cu_stream_capture_mode_glob": 5, "cu_stream_capture_mode_thread_loc": 5, "cu_stream_capture_mode_relax": 5, "cudriverprocaddress_flag": 5, "cugetprocaddress": [5, 7], "cu_get_proc_address_default": 5, 
"cu_get_proc_address_legacy_stream": 5, "cu_get_proc_address_per_thread_default_stream": 5, "cudriverprocaddressqueryresult": 5, "cu_get_proc_address_success": 5, "succesfulli": [5, 7], "cu_get_proc_address_symbol_not_found": 5, "cu_get_proc_address_version_not_suffici": 5, "cu_exec_affinity_type_max": 5, "cig_data_type_d3d12_command_queu": 5, "culibraryopt": 5, "culibraryloaddata": [5, 29], "culibraryloadfromfil": 5, "cu_library_host_universal_function_and_data_t": 5, "cu_library_binary_is_preserv": 5, "preserv": [5, 6, 7], "know": [5, 7], "culibraryunload": 5, "cuda_error_invalid_valu": [5, 7], "cu_library_num_opt": 5, "curesult": [5, 9], "cuda_success": [5, 7, 9], "cueventqueri": [5, 7], "custreamqueri": [5, 7], "cuda_error_out_of_memori": [5, 7], "unabl": [5, 7], "enough": [5, 7], "cuda_error_not_initi": [5, 7], "cuinit": [5, 9], "cuda_error_deiniti": 5, "shut": [5, 7], "down": [5, 7], "cuda_error_profiler_dis": 5, "happen": [5, 7], "tool": [5, 6, 7, 9], "visual": [5, 7], "cuda_error_profiler_not_initi": 5, "cuda_error_profiler_already_start": 5, "cuda_error_profiler_already_stop": 5, "cuda_error_stub_librari": 5, "stub": [5, 7], "real": [5, 7], "cuda_error_device_unavail": 5, "often": [5, 7], "cuda_error_no_devic": 5, "detect": [5, 7, 9], "cuda_error_invalid_devic": 5, "cuda_error_device_not_licens": 5, "licens": [5, 7], "cuda_error_invalid_imag": 5, "imag": [5, 7], "cuda_error_invalid_context": 5, "most": [5, 7, 8, 9], "frequent": [5, 7], "had": [5, 7], "cuctxdestroi": [5, 7, 9], "invok": [5, 7], "mix": [5, 7], "3010": [5, 7], "3020": [5, 7], "cuctxgetapivers": [5, 7], "convert": [5, 7], "cuctxfromgreenctx": 5, "cuda_error_context_already_curr": 5, "cuda_error_map_fail": 5, "205": [5, 7], "cuda_error_unmap_fail": 5, "206": [5, 7], "unregist": [5, 7], "cuda_error_array_is_map": 5, "207": [5, 7], "destroi": [5, 6, 7], "cuda_error_already_map": 5, "208": [5, 7], "cuda_error_no_binary_for_gpu": 5, "209": [5, 7], "occur": [5, 7], "cuda_error_already_acquir": 5, "210": [5, 7], "acquir": [5, 7], "cuda_error_not_map": 5, "211": [5, 7], "cuda_error_not_mapped_as_arrai": 5, "212": [5, 7], "cuda_error_not_mapped_as_point": 5, "213": [5, 7], "cuda_error_ecc_uncorrect": 5, "214": [5, 7], "uncorrect": [5, 7], "cuda_error_unsupported_limit": 5, "215": [5, 7], "cuda_error_context_already_in_us": 5, "216": [5, 7], "cuda_error_peer_access_unsupport": 5, "217": [5, 7], "across": [5, 7], "cuda_error_invalid_ptx": 5, "218": [5, 7], "cuda_error_invalid_graphics_context": 5, "219": [5, 7], "directx": [5, 7], "cuda_error_nvlink_uncorrect": 5, "220": [5, 7], "nvlink": [5, 7], "cuda_error_jit_compiler_not_found": 5, "221": [5, 7], "cuda_error_unsupported_ptx_vers": 5, "222": [5, 7], "toolchain": [5, 7], "cuda_error_jit_compilation_dis": 5, "223": [5, 7], "cuda_error_unsupported_exec_affin": 5, "224": [5, 7], "cuda_error_unsupported_devside_sync": 5, "225": [5, 7], "cudadevicesynchron": [5, 7], "cuda_error_invalid_sourc": 5, "300": [5, 7], "cuda_error_file_not_found": 5, "301": [5, 7], "cuda_error_shared_object_symbol_not_found": 5, "302": [5, 7], "cuda_error_shared_object_init_fail": 5, "303": [5, 7], "cuda_error_operating_system": 5, "304": [5, 7], "os": [5, 7], "cuda_error_invalid_handl": 5, "400": [5, 7], "cuda_error_illegal_st": 5, "401": [5, 7], "state": [5, 7], "cuda_error_lossy_queri": 5, "402": [5, 7], "introspect": [5, 7], "wai": [5, 7, 9], "discard": [5, 7], "funtion": [5, 7], "newer": [5, 7], "omiss": [5, 7], "cuda_error_not_found": 5, "500": [5, 7], "cuda_error_not_readi": 5, "600": [5, 7], 
"previous": [5, 6, 7], "yet": [5, 7, 9], "cuda_error_illegal_address": 5, "700": [5, 7], "leav": [5, 7], "inconsist": [5, 7], "continu": [5, 7], "relaunch": [5, 7], "cuda_error_launch_out_of_resourc": 5, "701": [5, 7], "did": [5, 7], "usual": [5, 7], "too": [5, 7], "wrong": [5, 21], "cuda_error_launch_timeout": 5, "702": [5, 7], "took": [5, 7], "timeout": [5, 7], "cuda_error_launch_incompatible_textur": 5, "703": [5, 7], "incompat": [5, 6, 7, 9], "cuda_error_peer_access_already_en": 5, "704": [5, 7], "cuctxenablepeeraccess": [5, 7], "try": [5, 7, 9], "cuda_error_peer_access_not_en": 5, "705": [5, 7], "cuctxdisablepeeraccess": [5, 7], "cuda_error_primary_context_act": 5, "708": [5, 7], "cuda_error_context_is_destroi": 5, "709": [5, 7], "cuda_error_assert": 5, "710": [5, 7], "assert": [5, 6, 7, 9], "anymor": 5, "reconstruct": 5, "cuda_error_too_many_p": 5, "711": [5, 7], "exhaust": [5, 7], "cuda_error_host_memory_already_regist": 5, "712": [5, 7], "cuda_error_host_memory_not_regist": 5, "713": [5, 7], "cumemhostunregist": [5, 7], "cuda_error_hardware_stack_error": 5, "714": [5, 7], "corrupt": [5, 7], "cuda_error_illegal_instruct": 5, "715": [5, 7], "cuda_error_misaligned_address": 5, "716": [5, 7], "cuda_error_invalid_address_spac": 5, "717": [5, 7], "certain": [5, 6, 7, 16], "cuda_error_invalid_pc": 5, "718": [5, 7], "counter": [5, 7], "cuda_error_launch_fail": 5, "719": [5, 7], "dereferenc": [5, 7], "cuda_error_cooperative_launch_too_larg": 5, "720": [5, 7], "cuoccupancymaxactiveblockspermultiprocessor": [5, 7], "cuoccupancymaxactiveblockspermultiprocessorwithflag": [5, 7], "800": [5, 7], "cuda_error_not_support": [5, 7], "801": [5, 7], "cuda_error_system_not_readi": 5, "802": [5, 7], "readi": [5, 7, 9], "verifi": [5, 7], "daemon": [5, 7], "cuda_error_system_driver_mismatch": 5, "803": [5, 7], "mismatch": [5, 7], "displai": [5, 6, 7], "cuda_error_compat_not_supported_on_devic": 5, "804": [5, 7], "upgrad": [5, 7], "forward": [5, 6, 7], "matrix": [5, 7], "cuda_visible_devic": [5, 7], "cuda_error_mps_connection_fail": 5, "805": [5, 7], "client": [5, 7], "server": [5, 7], "cuda_error_mps_rpc_failur": 5, "806": [5, 7], "procedur": [5, 7], "cuda_error_mps_server_not_readi": 5, "807": [5, 7], "recov": [5, 7], "fatal": [5, 7], "cuda_error_mps_max_clients_reach": 5, "808": [5, 7], "cuda_error_mps_max_connections_reach": 5, "809": [5, 7], "cuda_error_mps_client_termin": 5, "810": [5, 7], "cuda_error_cdp_not_support": 5, "811": [5, 7], "parallel": [5, 6, 7, 8, 9], "cuda_error_cdp_version_mismatch": 5, "812": [5, 7], "cuda_error_stream_capture_unsupport": 5, "900": [5, 7], "cuda_error_stream_capture_invalid": 5, "901": [5, 7], "cuda_error_stream_capture_merg": 5, "902": [5, 7], "merg": [5, 7], "cuda_error_stream_capture_unmatch": 5, "903": [5, 7], "cuda_error_stream_capture_unjoin": 5, "904": [5, 7], "fork": [5, 7], "join": [5, 7], "cuda_error_stream_capture_isol": 5, "905": [5, 7], "boundari": [5, 7], "implicit": [5, 6, 7], "cuda_error_stream_capture_implicit": 5, "906": [5, 7], "cudastreamlegaci": [5, 7], "cuda_error_captured_ev": 5, "907": [5, 7], "cuda_error_stream_capture_wrong_thread": 5, "908": [5, 7], "custreamendcaptur": 5, "cuda_error_timeout": 5, "909": [5, 7], "laps": [5, 7], "cuda_error_graph_exec_update_failur": 5, "910": [5, 7], "violat": [5, 7], "cuda_error_external_devic": 5, "911": [5, 7], "consumpt": [5, 6, 7], "cuda_error_invalid_cluster_s": 5, "912": [5, 7], "misconfigur": [5, 7], "cuda_error_function_not_load": 5, "913": [5, 7], "indici": [5, 7], 
"cuda_error_invalid_resource_typ": 5, "914": [5, 7], "cuda_error_invalid_resource_configur": 5, "915": [5, 7], "insuffici": [5, 7], "cuda_error_unknown": 5, "999": [5, 7], "unknown": [5, 6, 7, 9], "cudevice_p2pattribut": 5, "cu_device_p2p_attribute_performance_rank": 5, "rel": [5, 7], "cu_device_p2p_attribute_access_support": 5, "cu_device_p2p_attribute_native_atomic_support": 5, "cu_device_p2p_attribute_access_access_support": 5, "cu_device_p2p_attribute_cuda_array_access_support": 5, "cu_res_view_format_non": 5, "cu_res_view_format_uint_1x8": 5, "cu_res_view_format_uint_2x8": 5, "cu_res_view_format_uint_4x8": 5, "cu_res_view_format_sint_1x8": 5, "cu_res_view_format_sint_2x8": 5, "cu_res_view_format_sint_4x8": 5, "cu_res_view_format_uint_1x16": 5, "cu_res_view_format_uint_2x16": 5, "cu_res_view_format_uint_4x16": 5, "cu_res_view_format_sint_1x16": 5, "cu_res_view_format_sint_2x16": 5, "cu_res_view_format_sint_4x16": 5, "cu_res_view_format_uint_1x32": 5, "cu_res_view_format_uint_2x32": 5, "cu_res_view_format_uint_4x32": 5, "cu_res_view_format_sint_1x32": 5, "cu_res_view_format_sint_2x32": 5, "cu_res_view_format_sint_4x32": 5, "cu_res_view_format_float_1x16": 5, "cu_res_view_format_float_2x16": 5, "cu_res_view_format_float_4x16": 5, "cu_res_view_format_float_1x32": 5, "cu_res_view_format_float_2x32": 5, "cu_res_view_format_float_4x32": 5, "cu_res_view_format_unsigned_bc1": 5, "cu_res_view_format_unsigned_bc2": 5, "cu_res_view_format_unsigned_bc3": 5, "cu_res_view_format_unsigned_bc4": 5, "cu_res_view_format_signed_bc4": 5, "cu_res_view_format_unsigned_bc5": 5, "cu_res_view_format_signed_bc5": 5, "cu_res_view_format_unsigned_bc6h": 5, "cu_res_view_format_signed_bc6h": 5, "cu_res_view_format_unsigned_bc7": 5, "cutensormapdatatyp": 5, "cu_tensor_map_data_type_uint8": 5, "cu_tensor_map_data_type_uint16": 5, "cu_tensor_map_data_type_uint32": 5, "cu_tensor_map_data_type_int32": 5, "cu_tensor_map_data_type_uint64": 5, "cu_tensor_map_data_type_int64": 5, "cu_tensor_map_data_type_float16": 5, "cu_tensor_map_data_type_float32": 5, "cu_tensor_map_data_type_float64": 5, "cu_tensor_map_data_type_bfloat16": 5, "cu_tensor_map_data_type_float32_ftz": 5, "cu_tensor_map_data_type_tfloat32": 5, "cu_tensor_map_data_type_tfloat32_ftz": 5, "cutensormapinterleav": 5, "interleav": [5, 7], "cu_tensor_map_interleave_non": 5, "cu_tensor_map_interleave_16b": 5, "cu_tensor_map_interleave_32b": 5, "cutensormapswizzl": 5, "swizzl": 5, "cu_tensor_map_swizzle_non": 5, "cu_tensor_map_swizzle_32b": 5, "cu_tensor_map_swizzle_64b": 5, "cu_tensor_map_swizzle_128b": 5, "cutensormapl2promot": 5, "promot": [5, 7], "cu_tensor_map_l2_promotion_non": 5, "cu_tensor_map_l2_promotion_l2_64b": 5, "cu_tensor_map_l2_promotion_l2_128b": 5, "cu_tensor_map_l2_promotion_l2_256b": 5, "cutensormapfloatoobfil": 5, "cu_tensor_map_float_oob_fill_non": 5, "cu_tensor_map_float_oob_fill_nan_request_zero_fma": 5, "cuda_pointer_attribute_access_flag": 5, "cu_pointer_attribute_access_flag_non": 5, "stage": [5, 7], "cu_pointer_attribute_access_flag_read": 5, "cu_pointer_attribute_access_flag_readwrit": 5, "cu_external_memory_handle_type_opaque_fd": 5, "cu_external_memory_handle_type_opaque_win32": 5, "cu_external_memory_handle_type_opaque_win32_kmt": 5, "cu_external_memory_handle_type_d3d12_heap": 5, "cu_external_memory_handle_type_d3d12_resourc": 5, "cu_external_memory_handle_type_d3d11_resourc": 5, "d3d11": [5, 7], "cu_external_memory_handle_type_d3d11_resource_kmt": 5, "nvscibuf": [5, 7], "cu_external_semaphore_handle_type_opaque_fd": 5, 
"cu_external_semaphore_handle_type_opaque_win32": 5, "cu_external_semaphore_handle_type_opaque_win32_kmt": 5, "cu_external_semaphore_handle_type_d3d12_f": 5, "cu_external_semaphore_handle_type_d3d11_f": 5, "nvscisync": [5, 7], "cu_external_semaphore_handle_type_d3d11_keyed_mutex": 5, "kei": [5, 7, 9], "mutex": [5, 7], "cu_external_semaphore_handle_type_d3d11_keyed_mutex_kmt": 5, "cu_external_semaphore_handle_type_timeline_semaphore_fd": 5, "cu_external_semaphore_handle_type_timeline_semaphore_win32": 5, "cu_mem_handle_type_posix_file_descriptor": 5, "cu_mem_handle_type_win32_kmt": 5, "d3dkmt_handl": [5, 7], "cu_mem_handle_type_fabr": 5, "cumemfabrichandl": 5, "cu_mem_handle_type_max": 5, "protect": [5, 7], "cu_mem_access_flags_prot_non": 5, "cu_mem_access_flags_prot_read": 5, "cu_mem_access_flags_prot_readwrit": 5, "cu_mem_access_flags_prot_max": 5, "cu_mem_location_type_invalid": 5, "cu_mem_location_type_devic": 5, "cu_mem_location_type_host": 5, "cu_mem_location_type_host_numa": 5, "cu_mem_location_type_host_numa_curr": 5, "cu_mem_location_type_max": 5, "cu_mem_allocation_type_invalid": 5, "pin": [5, 7], "cu_mem_allocation_type_max": 5, "cumemallocationgranularity_flag": 5, "cu_mem_alloc_granularity_minimum": 5, "cu_mem_alloc_granularity_recommend": 5, "recommend": [5, 7], "cumemrangehandletyp": 5, "cu_mem_range_handle_type_dma_buf_fd": 5, "cu_mem_range_handle_type_max": 5, "cu_array_sparse_subresource_type_sparse_level": 5, "cu_array_sparse_subresource_type_miptail": 5, "cu_mem_operation_type_map": 5, "cu_mem_operation_type_unmap": 5, "cu_mem_handle_type_gener": 5, "cumemallocationcomptyp": 5, "cu_mem_allocation_comp_non": 5, "cu_mem_allocation_comp_gener": 5, "cumulticastgranularity_flag": 5, "cu_multicast_granularity_minimum": 5, "cu_multicast_granularity_recommend": 5, "cu_graph_exec_update_success": 5, "cu_graph_exec_update_error": 5, "cu_graph_exec_update_error_topology_chang": 5, "cu_graph_exec_update_error_node_type_chang": 5, "cu_graph_exec_update_error_function_chang": 5, "cu_graph_exec_update_error_parameters_chang": 5, "cu_graph_exec_update_error_not_support": 5, "someth": [5, 7], "cu_graph_exec_update_error_unsupported_function_chang": 5, "cu_graph_exec_update_error_attributes_chang": 5, "cumempool_attribut": 5, "cu_mempool_attr_reuse_follow_event_depend": 5, "freed": [5, 7], "cu_mempool_attr_reuse_allow_opportunist": 5, "reus": [5, 7], "cu_mempool_attr_reuse_allow_internal_depend": 5, "piec": [5, 7], "cufreeasync": [5, 7], "cu_mempool_attr_release_threshold": 5, "hold": [5, 7], "onto": [5, 7], "threshold": [5, 6, 7], "held": [5, 7], "next": [5, 7, 9], "cu_mempool_attr_reserved_mem_curr": 5, "cu_mempool_attr_reserved_mem_high": 5, "high": [5, 6, 7], "watermark": [5, 7], "sinc": [5, 7], "reset": [5, 6, 7], "cu_mempool_attr_used_mem_curr": 5, "cu_mempool_attr_used_mem_high": 5, "cugraphmem_attribut": 5, "cu_graph_mem_attr_used_mem_curr": 5, "cu_graph_mem_attr_used_mem_high": 5, "cu_graph_mem_attr_reserved_mem_curr": 5, "cu_graph_mem_attr_reserved_mem_high": 5, "cu_flush_gpu_direct_rdma_writes_option_host": 5, "cuflushgpudirectrdmawrit": [5, 7], "counterpart": [5, 7], "cu_flush_gpu_direct_rdma_writes_option_memop": 5, "cu_gpu_direct_rdma_writes_ordering_non": 5, "leverag": [5, 7, 9], "cu_gpu_direct_rdma_writes_ordering_own": 5, "consist": [5, 7], "although": [5, 7], "cu_gpu_direct_rdma_writes_ordering_all_devic": 5, "cuflushgpudirectrdmawritesscop": 5, "cu_flush_gpu_direct_rdma_writes_to_own": 5, "cu_flush_gpu_direct_rdma_writes_to_all_devic": 5, 
"cuflushgpudirectrdmawritestarget": 5, "cu_flush_gpu_direct_rdma_writes_target_current_ctx": 5, "cugraphdebugdot_flag": 5, "cugraphdebugdotprint": 5, "cu_graph_debug_dot_flags_verbos": 5, "cu_graph_debug_dot_flags_runtime_typ": 5, "cu_graph_debug_dot_flags_kernel_node_param": 5, "cuda_kernel_node_param": 5, "cu_graph_debug_dot_flags_memcpy_node_param": 5, "cu_graph_debug_dot_flags_memset_node_param": 5, "cuda_memset_node_param": 5, "cu_graph_debug_dot_flags_host_node_param": 5, "cuda_host_node_param": 5, "cu_graph_debug_dot_flags_event_node_param": 5, "cu_graph_debug_dot_flags_ext_semas_signal_node_param": 5, "cuda_ext_sem_signal_node_param": 5, "cu_graph_debug_dot_flags_ext_semas_wait_node_param": 5, "256": [5, 7], "cuda_ext_sem_wait_node_param": 5, "cu_graph_debug_dot_flags_kernel_node_attribut": 5, "512": [5, 7, 9], "cukernelnodeattrvalu": 5, "cu_graph_debug_dot_flags_handl": 5, "1024": [5, 7], "cu_graph_debug_dot_flags_mem_alloc_node_param": 5, "2048": [5, 7], "cu_graph_debug_dot_flags_mem_free_node_param": 5, "4096": 5, "cu_graph_debug_dot_flags_batch_mem_op_node_param": 5, "8192": 5, "cu_graph_debug_dot_flags_extra_topo_info": 5, "16384": 5, "cu_graph_debug_dot_flags_conditional_node_param": 5, "32768": [5, 7, 9], "cuuserobject_flag": 5, "cu_user_object_no_destructor_sync": 5, "destructor": [5, 7], "cuuserobjectretain_flag": 5, "retain": [5, 7], "cu_graph_user_object_mov": 5, "caller": [5, 7], "cugraphinstantiate_flag": [5, 7], "cuda_graph_instantiate_flag_auto_free_on_launch": 5, "cuda_graph_instantiate_flag_upload": 5, "cugraphinstantiatewithparam": 5, "instantiateparam": [5, 7], "cuda_graph_instantiate_flag_device_launch": 5, "launchabl": [5, 7], "conjunct": [5, 7], "cuda_graph_instantiate_flag_use_node_prior": 5, "cu_device_numa_config_non": 5, "cu_device_numa_config_numa_nod": 5, "cu_egl_frame_type_arrai": 5, "cu_egl_frame_type_pitch": 5, "cueglresourcelocationflag": 5, "sysmem": [5, 7], "vidmem": [5, 7], "igpu": [5, 7], "video": [5, 7], "dgpu": [5, 7], "cu_egl_resource_location_sysmem": 5, "cu_egl_resource_location_vidmem": 5, "dedic": [5, 7], "There": [5, 7, 9], "produc": [5, 7], "cuda_egl": [5, 7], "three": [5, 6, 7, 9], "cu_egl_color_format_yuv420_planar": 5, "u": [5, 6, 7], "v": [5, 7], "cu_egl_color_format_yuv420_semiplanar": 5, "uv": [5, 7], "vu": [5, 7], "yuv420planar": [5, 7], "cu_egl_color_format_yuv422_planar": 5, "cu_egl_color_format_yuv422_semiplanar": 5, "yuv422planar": [5, 7], "cu_egl_color_format_rgb": 5, "r": [5, 7], "bgr": 5, "cu_egl_color_format_bgr": 5, "rgb": 5, "cu_egl_color_format_argb": 5, "bgra": [5, 7], "cu_egl_color_format_rgba": 5, "abgr": [5, 7], "cu_egl_color_format_l": 5, "lumin": [5, 7], "cu_egl_color_format_r": 5, "cu_egl_color_format_yuv444_planar": 5, "cu_egl_color_format_yuv444_semiplanar": 5, "yuv444planar": [5, 7], "cu_egl_color_format_yuyv_422": 5, "uyvi": [5, 7], "cu_egl_color_format_uyvy_422": 5, "yuyv": [5, 7], "cu_egl_color_format_abgr": 5, "rgba": [5, 7], "cu_egl_color_format_bgra": 5, "argb": [5, 7], "cu_egl_color_format_a": 5, "alpha": [5, 7], "cu_egl_color_format_rg": 5, "gr": [5, 7], "cu_egl_color_format_ayuv": 5, "vuya": [5, 7], "cu_egl_color_format_yvu444_semiplanar": 5, "cu_egl_color_format_yvu422_semiplanar": 5, "cu_egl_color_format_yvu420_semiplanar": 5, "cu_egl_color_format_y10v10u10_444_semiplanar": 5, "y10": [5, 7], "v10u10": [5, 7], "cu_egl_color_format_y10v10u10_420_semiplanar": 5, "cu_egl_color_format_y12v12u12_444_semiplanar": 5, "y12": [5, 7], "v12u12": [5, 7], "cu_egl_color_format_y12v12u12_420_semiplanar": 5, 
"cu_egl_color_format_vyuy_": 5, "extend": [5, 7], "yvyu": [5, 7], "cu_egl_color_format_uyvy_": 5, "cu_egl_color_format_yuyv_": 5, "cu_egl_color_format_yvyu_": 5, "vyui": [5, 7], "cu_egl_color_format_yuv_": 5, "vui": 5, "cu_egl_color_format_yuva_": 5, "avui": [5, 7], "cu_egl_color_format_ayuv_": 5, "cu_egl_color_format_yuv444_planar_": 5, "cu_egl_color_format_yuv422_planar_": 5, "cu_egl_color_format_yuv420_planar_": 5, "cu_egl_color_format_yuv444_semiplanar_": 5, "cu_egl_color_format_yuv422_semiplanar_": 5, "cu_egl_color_format_yuv420_semiplanar_": 5, "cu_egl_color_format_yvu444_planar_": 5, "cu_egl_color_format_yvu422_planar_": 5, "cu_egl_color_format_yvu420_planar_": 5, "cu_egl_color_format_yvu444_semiplanar_": 5, "cu_egl_color_format_yvu422_semiplanar_": 5, "cu_egl_color_format_yvu420_semiplanar_": 5, "cu_egl_color_format_bayer_rggb": 5, "bayer": [5, 7], "rggb": [5, 7], "cu_egl_color_format_bayer_bggr": 5, "bggr": [5, 7], "cu_egl_color_format_bayer_grbg": 5, "grbg": [5, 7], "cu_egl_color_format_bayer_gbrg": 5, "gbrg": [5, 7], "cu_egl_color_format_bayer10_rggb": 5, "bayer10": [5, 7], "cu_egl_color_format_bayer10_bggr": 5, "cu_egl_color_format_bayer10_grbg": 5, "cu_egl_color_format_bayer10_gbrg": 5, "cu_egl_color_format_bayer12_rggb": 5, "bayer12": [5, 7], "cu_egl_color_format_bayer12_bggr": 5, "cu_egl_color_format_bayer12_grbg": 5, "cu_egl_color_format_bayer12_gbrg": 5, "cu_egl_color_format_bayer14_rggb": 5, "bayer14": [5, 7], "cu_egl_color_format_bayer14_bggr": 5, "cu_egl_color_format_bayer14_grbg": 5, "cu_egl_color_format_bayer14_gbrg": 5, "cu_egl_color_format_bayer20_rggb": 5, "bayer20": [5, 7], "cu_egl_color_format_bayer20_bggr": 5, "cu_egl_color_format_bayer20_grbg": 5, "cu_egl_color_format_bayer20_gbrg": 5, "cu_egl_color_format_yvu444_planar": 5, "cu_egl_color_format_yvu422_planar": 5, "cu_egl_color_format_yvu420_planar": 5, "cu_egl_color_format_bayer_isp_rggb": 5, "proprietari": [5, 7], "isp": [5, 7], "datatyp": [5, 7], "cu_egl_color_format_bayer_isp_bggr": 5, "cu_egl_color_format_bayer_isp_grbg": 5, "cu_egl_color_format_bayer_isp_gbrg": 5, "cu_egl_color_format_bayer_bccr": 5, "bccr": [5, 7], "cu_egl_color_format_bayer_rccb": 5, "rccb": [5, 7], "cu_egl_color_format_bayer_crbc": 5, "crbc": [5, 7], "cu_egl_color_format_bayer_cbrc": 5, "cbrc": [5, 7], "cu_egl_color_format_bayer10_cccc": 5, "cccc": [5, 7], "cu_egl_color_format_bayer12_bccr": 5, "cu_egl_color_format_bayer12_rccb": 5, "cu_egl_color_format_bayer12_crbc": 5, "cu_egl_color_format_bayer12_cbrc": 5, "cu_egl_color_format_bayer12_cccc": 5, "cu_egl_color_format_i": 5, "cu_egl_color_format_yuv420_semiplanar_2020": 5, "cu_egl_color_format_yvu420_semiplanar_2020": 5, "cu_egl_color_format_yuv420_planar_2020": 5, "cu_egl_color_format_yvu420_planar_2020": 5, "cu_egl_color_format_yuv420_semiplanar_709": 5, "cu_egl_color_format_yvu420_semiplanar_709": 5, "cu_egl_color_format_yuv420_planar_709": 5, "cu_egl_color_format_yvu420_planar_709": 5, "cu_egl_color_format_y10v10u10_420_semiplanar_709": 5, "cu_egl_color_format_y10v10u10_420_semiplanar_2020": 5, "cu_egl_color_format_y10v10u10_422_semiplanar_2020": 5, "cu_egl_color_format_y10v10u10_422_semiplanar": 5, "cu_egl_color_format_y10v10u10_422_semiplanar_709": 5, "cu_egl_color_format_y_": 5, "cu_egl_color_format_y_709_": 5, "cu_egl_color_format_y10_": 5, "cu_egl_color_format_y10_709_": 5, "cu_egl_color_format_y12_": 5, "cu_egl_color_format_y12_709_": 5, "cu_egl_color_format_yuva": 5, "cu_egl_color_format_yuv": 5, "cu_egl_color_format_yvyu": 5, "cu_egl_color_format_vyui": 5, 
"cu_egl_color_format_y10v10u10_420_semiplanar_": 5, "cu_egl_color_format_y10v10u10_420_semiplanar_709_": 5, "cu_egl_color_format_y10v10u10_444_semiplanar_": 5, "cu_egl_color_format_y10v10u10_444_semiplanar_709_": 5, "cu_egl_color_format_y12v12u12_420_semiplanar_": 5, "cu_egl_color_format_y12v12u12_420_semiplanar_709_": 5, "cu_egl_color_format_y12v12u12_444_semiplanar_": 5, "cu_egl_color_format_y12v12u12_444_semiplanar_709_": 5, "cu_egl_color_format_max": 5, "cudeviceptr_v2": 5, "whose": [5, 7], "cudevice_v1": 5, "cudevic": [5, 9], "arg": [5, 6, 7, 9], "kwarg": [5, 6, 7], "cumodul": 5, "culibrari": 5, "cumipmappedarrai": 5, "cutexref": 5, "cusurfref": 5, "cugraphicsresourc": [5, 7], "cutexobject_v1": 5, "cutexobject": [5, 7], "cusurfobject_v1": 5, "cusurfobject": [5, 7], "cuexternalmemori": 5, "cugraphexec": [5, 7], "cumemorypool": [5, 7], "cuuserobject": 5, "cuasynccallbackhandl": 5, "cugreenctx": 5, "safe": [5, 7], "cugreenctxcr": 5, "cuuuid": [5, 7], "cumemfabrichandle_v1": 5, "cuipceventhandle_v1": 5, "cuipceventhandl": 5, "cuipcmemhandle_v1": 5, "cuipcmemhandl": 5, "custreambatchmemopparams_v1": 5, "cuda_batch_mem_op_node_params_v1": 5, "cuda_batch_mem_op_node_param": 5, "cuasyncnotificationinfo": 5, "cuasynccallback": 5, "cudevprop_v1": 5, "cudevprop": 5, "culinkst": 5, "cuaccesspolicywindow_v1": 5, "cuda_kernel_node_params_v1": 5, "cuda_kernel_node_params_v2": 5, "cuda_memset_node_params_v1": 5, "cuda_host_node_params_v1": 5, "cuda_graph_instantiate_param": 5, "cukernelnodeattrid": 5, "cukernelnodeattrvalue_v1": 5, "custreamattrid": 5, "custreamattrvalue_v1": 5, "custreamattrvalu": 5, "cuexecaffinitysmcount_v1": 5, "cuexecaffinitysmcount": 5, "cuexecaffinityparam_v1": 5, "cuctxcreateparam": 5, "culibraryhostuniversalfunctionanddatat": 5, "custreamcallback": 5, "cuoccupancyb2ds": 5, "cuda_memcpy2d_v2": 5, "cuda_memcpy2d": 5, "cuda_memcpy3d_v2": 5, "cuda_memcpy3d_peer_v1": 5, "cuda_memcpy3d_p": 5, "cuda_array_descriptor_v2": 5, "cuda_array_descriptor": 5, "cuda_array3d_descriptor_v2": 5, "cuda_array_sparse_properties_v1": 5, "cuda_array_sparse_properti": 5, "cuda_array_memory_requirements_v1": 5, "cuda_array_memory_requir": 5, "cuda_resource_desc_v1": 5, "cuda_resource_desc": 5, "cuda_texture_desc_v1": 5, "cuda_texture_desc": 5, "cuda_resource_view_desc_v1": 5, "cuda_resource_view_desc": 5, "cutensormap": 5, "cuda_pointer_attribute_p2p_tokens_v1": 5, "cuda_pointer_attribute_p2p_token": 5, "cuda_launch_params_v1": 5, "cuda_launch_param": 5, "cuda_external_memory_handle_desc_v1": 5, "cuda_external_memory_handle_desc": 5, "cuda_external_memory_buffer_desc_v1": 5, "cuda_external_memory_buffer_desc": 5, "cuda_external_memory_mipmapped_array_desc_v1": 5, "cuda_external_memory_mipmapped_array_desc": 5, "cuda_external_semaphore_handle_desc_v1": 5, "cuda_external_semaphore_handle_desc": 5, "cuda_external_semaphore_signal_params_v1": 5, "cuda_external_semaphore_wait_params_v1": 5, "cuda_ext_sem_signal_node_params_v1": 5, "cuda_ext_sem_wait_node_params_v1": 5, "cumemgenericallocationhandle_v1": 5, "cumemgenericallocationhandl": 5, "cuarraymapinfo_v1": 5, "cuarraymapinfo": 5, "cumemlocation_v1": 5, "cumemallocationprop_v1": 5, "cumemallocationprop": 5, "cumulticastobjectprop_v1": 5, "cumulticastobjectprop": 5, "cumemaccessdesc_v1": 5, "cugraphexecupdateresultinfo_v1": 5, "cugraphexecupdateresultinfo": 5, "cumempoolprops_v1": 5, "cumempoolptrexportdata_v1": 5, "cumempoolptrexportdata": 5, "cuda_mem_alloc_node_params_v1": 5, "cuda_mem_alloc_node_param": 5, "cugraphnodeparam": 5, "cueglframe_v1": 5, 
"cueglfram": 5, "cueglstreamconnect": 5, "eglsream": [5, 7], "cuda_vers": 5, "12060": 5, "cu_ipc_handle_s": 5, "cu_stream_legaci": [5, 7], "link_sync_behavior": [5, 7], "cu_stream_per_thread": [5, 7], "cu_compute_accelerated_target_bas": 5, "65536": 5, "cu_graph_cond_assign_default": 5, "finish": [5, 7, 9], "cu_kernel_node_attribute_access_policy_window": 5, "cu_kernel_node_attribute_coop": 5, "cu_kernel_node_attribute_cluster_dimens": 5, "cu_kernel_node_attribute_cluster_scheduling_policy_prefer": 5, "cu_kernel_node_attribute_prior": 5, "cu_kernel_node_attribute_mem_sync_domain_map": 5, "cu_kernel_node_attribute_mem_sync_domain": 5, "cu_kernel_node_attribute_device_updatable_kernel_nod": 5, "cu_kernel_node_attribute_preferred_shared_memory_carveout": 5, "cu_stream_attribute_access_policy_window": 5, "cu_stream_attribute_synchronization_polici": 5, "cu_stream_attribute_prior": 5, "cu_stream_attribute_mem_sync_domain_map": 5, "cu_stream_attribute_mem_sync_domain": 5, "cu_memhostalloc_port": 5, "cumemhostalloc": [5, 7], "cu_memhostalloc_devicemap": 5, "cumemhostgetdevicepoint": [5, 7], "cu_memhostalloc_writecombin": 5, "fast": [5, 6], "faster": [5, 6, 7, 8], "dma": 5, "slow": 5, "sse4": 5, "movntdqa": 5, "cu_memhostregister_port": 5, "cu_memhostregister_devicemap": 5, "cu_memhostregister_iomemori": 5, "treat": [5, 6, 7], "o": [5, 7], "third": [5, 7], "parti": [5, 7], "pcie": [5, 7], "mark": [5, 7], "unprivileg": 5, "older": [5, 7], "cu_memhostregister_read_onli": 5, "cu_tensor_map_num_qword": 5, "skip": [5, 7], "cuda_nvscisync_attr_sign": 5, "cudevicegetnvscisyncattribut": 5, "nvscisyncattr": [5, 7], "cuda_nvscisync_attr_wait": 5, "waiter": [5, 7], "cu_mem_create_usage_tile_pool": 5, "tile": [5, 7], "cuda_cooperative_launch_multi_device_no_pre_launch_sync": 5, "cuda_cooperative_launch_multi_device_no_post_launch_sync": 5, "subsequ": [5, 6, 7], "push": [5, 7], "cuda_array3d_lay": 5, "collect": [5, 7], "cuda_array3d_2darrai": 5, "cuda_array3d_surface_ldst": 5, "cuda_array3d_cubemap": 5, "six": [5, 7], "gather": [5, 7], "cuda_array3d_depth_textur": 5, "depth_textur": 5, "cuda_array3d_color_attach": 5, "cuda_array3d_spars": 5, "cuda_array3d_deferred_map": 5, "cuda_array3d_video_encode_decod": 5, "decod": 5, "cu_trsa_override_format": 5, "overrid": [5, 7], "texref": 5, "infer": [5, 7], "cutexrefsetarrai": 5, "cu_trsf_read_as_integ": 5, "cutexrefsetflag": 5, "cutexobjectcr": [5, 7], "cu_trsf_normalized_coordin": 5, "coordin": [5, 7], "dim": [5, 9], "cu_trsf_srgb": 5, "convers": [5, 7], "cu_trsf_disable_trilinear_optim": 5, "trilinear": [5, 7], "cu_trsf_seamless_cubemap": 5, "seamless": [5, 7], "cu_launch_param_end_as_int": 5, "cu_launch_param_end": [5, 7], "culaunchkernel": [5, 9], "cu_launch_param_buffer_pointer_as_int": 5, "cu_launch_param_buffer_point": [5, 7], "honor": 5, "cu_launch_param_buffer_s": [5, 7], "cu_launch_param_buffer_size_as_int": 5, "cu_param_tr_default": 5, "texunit": 5, "cu_device_cpu": 5, "cu_device_invalid": 5, "max_plan": 5, "cuda_egl_infinite_timeout": 5, "cueglstreamconsumeracquirefram": [5, 7, 12], "infinit": [5, 7], "section": [5, 7], "low": [5, 7, 9, 11, 12], "cugeterrorstr": [5, 7], "string": [5, 6, 7, 9, 14], "descript": [5, 7], "pstr": 5, "recogn": [5, 7], "cudageterrorstr": [5, 7], "cugeterrornam": [5, 7, 9], "cudageterrornam": [5, 7], "cudrivergetvers": [5, 7], "latest": [5, 7, 16, 17, 18], "driververs": [5, 7], "1000": [5, 7], "9020": [5, 7], "cudadrivergetvers": [5, 7], "cudaruntimegetvers": [5, 7, 26], "cudeviceget": [5, 9], "cudevicegetcount": [5, 7], 
"cudevicegetnam": [5, 7], "cudevicegetuuid": 5, "cudevicegetluid": 5, "cudevicetotalmem": 5, "cudevicegetexecaffinitysupport": 5, "greater": [5, 6, 7], "cudagetdevicecount": [5, 7], "length": [5, 7], "dev": [5, 7], "ascii": [5, 7, 9], "cudagetdeviceproperti": [5, 7], "cudevicegetuuid_v2": 5, "supplant": 5, "octet": 5, "obj": [5, 7], "mig": 5, "subscrib": 5, "luid": [5, 7], "devicenodemask": 5, "numbyt": 5, "cudamemgetinfo": [5, 7], "pformat": 5, "allocat": [5, 7], "maxwidthinel": [5, 7], "attrib": [5, 7], "pi": 5, "involv": [5, 6, 7], "cumemallocpitch": [5, 7], "simultan": [5, 7], "reli": [5, 7], "known": [5, 7], "slot": [5, 7], "tesla": [5, 7], "vista": [5, 7], "doesn": [5, 7, 9], "t": [5, 7, 9], "cudadevicegetattribut": [5, 7], "nvscisyncattrlist": [5, 7], "nvscisyncattrkey_requiredperm": [5, 7], "how": [5, 7, 9], "least": [5, 7], "orthogon": [5, 7], "develop": [5, 7, 8, 9], "input": [5, 6, 7, 9], "nvscisyncaccessperm_signalonli": [5, 7], "nvscisyncaccessperm_waitonli": [5, 7], "nvscisyncaccessperm_waitsign": [5, 7], "nvscisyncattrkey_primitiveinfo": [5, 7], "nvscisyncattrvalprimitivetype_sysmemsemaphor": [5, 7], "nvscisyncattrvalprimitivetype_syncpoint": [5, 7], "tegra": [5, 7], "nvscisyncattrvalprimitivetype_sysmemsemaphorepayload64b": [5, 7], "ga10x": [5, 7], "nvscisyncattrkey_gpuid": [5, 7], "cuimportexternalsemaphor": 5, "cudestroyexternalsemaphor": 5, "cusignalexternalsemaphoresasync": 5, "cuwaitexternalsemaphoresasync": 5, "cudevicesetmempool": [5, 7], "cudamempool_t": [5, 7], "cudevicegetdefaultmempool": [5, 7], "cudevicegetmempool": [5, 7], "cumempoolcr": [5, 7], "cumempooldestroi": [5, 7], "cumemallocfrompoolasync": [5, 7], "never": [5, 7], "pool_out": [5, 7], "cumempooltrimto": [5, 7], "cumempoolgetattribut": [5, 7], "cumempoolsetattribut": [5, 7], "cumempoolsetaccess": [5, 7], "typenam": [5, 7], "li": [5, 7], "omit": [5, 6, 7], "cu_device_attribute_flush_flush_gpu_direct_rdma_opt": 5, "cudeviceprimaryctxretain": 5, "successfulli": [5, 7], "cudeviceprimaryctxreleas": 5, "cudeviceprimaryctxreset": 5, "unlik": [5, 7], "cuctxcreat": [5, 9], "newli": [5, 7], "smi": 5, "pleas": [5, 6, 7], "alwai": [5, 7], "cudeviceprimaryctxsetflag": [5, 7], "pctx": 5, "cuctxgetcacheconfig": [5, 7], "cuctxgetdevic": 5, "cuctxgetflag": [5, 7], "cuctxgetlimit": [5, 7], "cuctxpopcurr": 5, "cuctxpushcurr": 5, "cuctxsetcacheconfig": [5, 7], "cuctxsetlimit": [5, 7], "cuctxsynchron": [5, 7], "earlier": [5, 7, 9], "method": 5, "pop": [5, 7], "pervious": 5, "ones": 5, "lsb": [5, 7], "decreas": [5, 7], "lower": [5, 6, 7, 8], "primit": [5, 7, 10], "heurist": [5, 6, 7], "logic": [5, 7], "processor": [5, 7], "p": [5, 7], "power": [5, 7], "reduc": [5, 6, 7], "resiz": [5, 7], "thrash": [5, 7], "cost": [5, 7], "potenti": [5, 7], "cucoredumpsetattributeglob": 5, "rais": [5, 7, 9], "under": [5, 7], "core": 5, "dump": 5, "taken": [5, 6, 7], "cucoredumpsetattribut": 5, "becom": [5, 7, 21, 30], "impli": [5, 6, 7], "titl": [5, 7], "learn": [5, 7, 8, 9], "exhibit": [5, 7], "cudeviceprimaryctxgetst": [5, 7], "cuctxsetflag": 5, "cudasetdeviceflag": [5, 7], "inact": 5, "cudagetdeviceflag": [5, 7], "clean": [5, 7, 9], "howev": [5, 6, 7], "cudadevicereset": [5, 7], "below": [5, 6, 7], "restor": [5, 7], "cu_coredump_enable_user_trigg": 5, "true": [5, 6, 7, 9], "cuctxcreate_v3": 5, "tupl": [5, 6, 7, 29], "numparam": 5, "latter": 5, "former": 5, "round": [5, 6, 7], "henc": [5, 6, 7], "imper": 5, "cuctxgetexecaffin": 5, "volta": [5, 7], "cuctxcreate_v4": 5, "ctxcreateparam": 5, "execaffin": 5, "softwar": 5, "pcigparam": 5, 
"id3d12commandqueu": 5, "cumemalloc": [5, 7, 9], "cumemallochost": [5, 7], "cumemallocmanag": [5, 7], "though": [5, 7], "old": 5, "That": [5, 9], "cuctxsetcurr": [5, 7], "unbound": [5, 7], "top": [5, 7, 8, 9], "cuctxgetcurr": [5, 7], "cudasetdevic": [5, 7], "cudagetdevic": [5, 7], "cuctxgetsharedmemconfig": 5, "cuctxgetstreampriorityrang": [5, 7], "cuctxgetid": 5, "ctxid": 5, "life": [5, 7], "meet": [5, 7], "w": [5, 6, 7], "nearest": [5, 6, 7], "etc": [5, 7], "discuss": [5, 7], "isn": 5, "origin": [5, 7], "immedi": [5, 7, 9], "cudaerrorsyncdepthexceed": [5, 7], "mind": [5, 7], "larg": [5, 7], "longer": [5, 7, 16, 17], "cudaerrorlaunchpendingcountexceed": [5, 7], "cudagetlasterror": [5, 7], "sustain": [5, 7], "upfront": [5, 7], "0b": [5, 7], "128b": [5, 7], "pure": [5, 7], "cudadevicesetlimit": [5, 7], "pvalu": [5, 7], "cudadevicegetlimit": [5, 7], "pconfig": 5, "cufuncsetcacheconfig": [5, 7], "cudadevicegetcacheconfig": [5, 7], "config": [5, 7, 20], "cukernelsetcacheconfig": 5, "unless": [5, 7], "noth": [5, 7], "recent": [5, 7], "cudadevicesetcacheconfig": [5, 7], "introduc": [5, 7, 9], "break": 5, "4020": 5, "greatest": [5, 7], "leastprior": [5, 7], "greatestprior": [5, 7], "convent": [5, 7], "meaning": [5, 7], "custreamcreatewithprior": [5, 7], "custreamgetprior": [5, 7], "cudadevicegetstreampriorityrang": [5, 7], "cuctxresetpersistingl2cach": 5, "pexecaffin": 5, "cuctxrecordev": 5, "hctx": 5, "hevent": 5, "cuctxwaitev": 5, "examin": [5, 7], "cudaevent_t": [5, 7], "cugreenctxrecordev": 5, "cugreenctxwaitev": 5, "cueventrecord": [5, 7], "conflict": [5, 7], "submit": [5, 7], "custreamwaitev": [5, 7], "ongo": [5, 7], "cumoduleloadingmod": 5, "lazi": 5, "cu_module_eager_load": 5, "cu_module_lazy_load": 5, "cumoduleload": 5, "fname": 5, "filenam": 5, "lazili": 5, "nvcc": [5, 7], "handwritten": 5, "fatbin": 5, "cumodulegetfunct": [5, 9], "cumodulegetglob": 5, "cumodulegettexref": 5, "cumoduleloaddata": [5, 9], "cumoduleloaddataex": 5, "cumoduleloadfatbinari": 5, "cumoduleunload": [5, 9], "hand": [5, 7, 8], "numopt": [5, 6], "optionvalu": 5, "fatcubin": 5, "fat": 5, "programm": [5, 6, 7], "hmod": 5, "unload": [5, 7], "culibrarygetmodul": 5, "cumodulegetloadingmod": 5, "cuda_module_load": 5, "hfunc": 5, "retriev": [5, 7, 9], "cumodulegetfunctioncount": 5, "mod": 5, "cumoduleenumeratefunct": 5, "numfunct": 5, "partial": 5, "cufunctionisload": 5, "incur": [5, 7], "cufunctionload": 5, "cufuncisload": 5, "cufuncload": 5, "One": [5, 6, 9], "cudagetsymboladdress": [5, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudagetsymbols": [5, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "culinkcr": 5, "eventu": [5, 7], "culinkdestroi": 5, "machin": [5, 9], "accumul": 5, "culinkadddata": 5, "culinkaddfil": 5, "relocat": [5, 6], "rdc": [5, 6], "final": [5, 9], "culinkcomplet": 5, "consequ": 5, "cast": [5, 7], "stateout": 5, "ownership": [5, 7], "cubinout": 5, "sizeout": 5, "receiv": [5, 7], "jitopt": 5, "jitoptionsvalu": 5, "numjitopt": 5, "libraryopt": 5, "libraryoptionvalu": 5, "numlibraryopt": 5, "eager": 5, "eagerli": 5, "culibrarygetkernel": 5, "pkernel": 5, "cukernelgetfunct": 5, "culibrarygetkernelcount": 5, "lib": 5, "culibraryenumeratekernel": 5, "numkernel": 5, "pmod": 5, "pfunc": 5, "cukernelgetlibrari": 5, "plib": 5, "culibrarygetglob": 5, "culibrarygetmanag": 5, "atleast": 5, "culibrarygetunifiedfunct": 5, "fptr": 5, "denot": [5, 6, 7], "cukernelgetattribut": 5, "cu_func_cache_mode_ca": 5, "split": [5, 6, 7], 
"cufuncgetattribut": [5, 7], "Not": [5, 7, 10], "irrespect": 5, "maxim": [5, 7, 9], "stricter": 5, "lock": [5, 7], "overridden": 5, "cukernelgetnam": 5, "reload": 5, "mangl": [5, 6], "declar": [5, 7, 16], "linkag": 5, "cukernelgetparaminfo": 5, "paramindex": 5, "paramoffset": 5, "params": 5, "cudagraphkernelnodesetparam": [5, 7], "cufuncgetparaminfo": 5, "cumemgetinfo": [5, 7], "accord": [5, 7], "tenet": [5, 7], "situat": [5, 7], "estim": [5, 7], "prone": [5, 7], "deviat": [5, 7], "soc": [5, 7], "exclud": [5, 7], "swap": [5, 7], "move": [5, 6, 7], "area": [5, 7], "app": [5, 7], "cuarray3dcr": [5, 7], "cuarray3dgetdescriptor": [5, 7], "cuarraycr": [5, 7], "cuarraydestroi": [5, 7], "cuarraygetdescriptor": [5, 7], "cumemcpy2d": [5, 7], "cumemcpy2dasync": [5, 7], "cumemcpy2dunalign": [5, 7], "cumemcpy3d": [5, 7], "cumemcpy3dasync": [5, 7], "cumemcpyatoa": 5, "cumemcpyatod": 5, "cumemcpyatoh": 5, "cumemcpyatohasync": 5, "cumemcpydtoa": 5, "cumemcpydtod": [5, 7], "cumemcpydtodasync": [5, 7], "cumemcpydtoh": [5, 7], "cumemcpydtohasync": [5, 7, 9], "cumemcpyhtoa": 5, "cumemcpyhtoaasync": 5, "cumemcpyhtod": [5, 7], "cumemcpyhtodasync": [5, 7, 9], "cumemfre": [5, 7, 9], "cumemfreehost": [5, 7], "cumemgetaddressrang": 5, "cumemsetd2d8": [5, 7], "cumemsetd2d16": [5, 7], "cumemsetd2d32": [5, 7], "cumemsetd8": [5, 7], "cumemsetd16": [5, 7], "cumemsetd32": [5, 7], "kind": [5, 6, 7], "cudamalloc": [5, 6, 7], "elementsizebyt": 5, "coalesc": [5, 7], "largest": 5, "transact": 5, "correctli": 5, "speed": [5, 8], "ppitch": 5, "column": [5, 6, 7], "especi": [5, 7], "cutexrefsetaddress2d": 5, "cudamallocpitch": [5, 7], "implict": 5, "cumemfreeasync": [5, 7], "cudafre": [5, 7], "pbase": 5, "psize": 5, "track": [5, 7, 13], "cumemcpi": [5, 7], "bandwidth": [5, 7], "excess": [5, 7], "degrad": [5, 7], "As": [5, 7, 16], "sparingli": [5, 7], "exchang": [5, 7], "pp": 5, "cudamallochost": [5, 7], "cudafreehost": [5, 7], "just": [5, 7], "wc": [5, 7], "quickli": [5, 7], "effici": [5, 7], "cudahostalloc": [5, 7], "pdptr": 5, "cudahostgetdevicepoint": [5, 7], "cumemhostgetflag": [5, 7], "pflag": [5, 7], "cudahostgetflag": [5, 7], "obei": [5, 7], "custreamattachmemasync": [5, 7], "won": [5, 7], "oversubscript": [5, 7], "evict": [5, 7], "room": [5, 7], "emploi": [5, 7], "pattern": [5, 7], "cumemadvis": [5, 7], "cumemprefetchasync": [5, 7], "storag": [5, 7], "among": [5, 7, 9], "cuda_managed_force_device_alloc": [5, 7], "forc": [5, 7], "arm": 5, "discret": [5, 7], "drive": 5, "px": 5, "cudamallocmanag": [5, 7], "cudeviceregisterasyncnotif": 5, "callbackfunc": [5, 7], "likewis": [5, 7], "distinguish": [5, 7], "10m": [5, 7], "cudeviceunregisterasyncnotif": 5, "stop": [5, 7], "cudevicegetbypcibusid": [5, 7], "pcibusid": [5, 7], "hexadecim": [5, 7], "form": [5, 6, 7, 9], "cudevicegetpcibusid": [5, 7], "cudadevicegetbypcibusid": [5, 7], "charact": [5, 6, 7], "cudadevicegetpcibusid": [5, 7], "cuipcgeteventhandl": [5, 7], "cuipcopeneventhandl": [5, 7], "cueventdestroi": [5, 7], "come": [5, 7, 11, 12], "cuapidevicegetattribut": 5, "phandl": 5, "cueventcr": [5, 7], "cuipcgetmemhandl": [5, 7], "cuipcopenmemhandl": [5, 7], "cuipcclosememhandl": [5, 7], "cudaipcgeteventhandl": [5, 7], "behav": [5, 7], "phevent": [5, 7], "cudaipcopeneventhandl": [5, 7], "lightweight": [5, 7], "advers": [5, 7], "usabl": [5, 7], "cudevicecanaccessp": [5, 7], "increment": [5, 7], "cudaipcopenmemhandl": [5, 7], "close": [5, 7, 9], "decrement": [5, 7], "unaffect": [5, 7], "cudaipcclosememhandl": [5, 7], "ptr": [5, 7], "unpopul": [5, 7], "3rd": 5, 
"cudahostunregist": [5, 7], "bytecount": 5, "cudamemcpi": [5, 7], "cudamemcpytosymbol": [5, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudamemcpyfromsymbol": [5, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cumemcpyp": [5, 7], "cumemcpy3dp": [5, 7], "cumemcpypeerasync": [5, 7], "cumemcpy3dpeerasync": [5, 7], "cudamemcpyp": [5, 7], "dstoffset": 5, "cudamemcpytoarrai": 5, "srcoffset": 5, "evenli": 5, "divis": [5, 6], "cudamemcpyfromarrai": 5, "psrc": 5, "cudamemcpyarraytoarrai": 5, "pcopi": 5, "cumemorytype_enum": 5, "intra": [5, 7], "significantli": [5, 7], "slower": 5, "cudamemcpy2d": [5, 7], "cudamemcpy2dtoarrai": [5, 7], "cudamemcpy2dfromarrai": [5, 7], "slice": [5, 7], "cudamemcpy3d": [5, 7], "cudamemcpy3dp": [5, 7], "cumemcpyasync": [5, 7], "cudastream_t": [5, 7, 13], "cumemsetd2d8async": [5, 7], "cumemsetd2d16async": [5, 7], "cumemsetd2d32async": [5, 7], "cumemsetd8async": [5, 7], "cumemsetd16async": [5, 7], "cumemsetd32async": [5, 7], "cudamemcpyasync": [5, 7], "cudamemcpytosymbolasync": [5, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudamemcpyfromsymbolasync": [5, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudamemcpypeerasync": [5, 7], "cudamemcpytoarrayasync": 5, "cudamemcpyfromarrayasync": 5, "cudamemcpy2dasync": [5, 7], "cudamemcpy2dtoarrayasync": [5, 7], "cudamemcpy2dfromarrayasync": [5, 7], "cudamemcpy3dasync": [5, 7], "cudamemcpy3dpeerasync": [5, 7], "uc": 5, "n": [5, 6, 7, 9], "cudamemset": [5, 7], "ui": 5, "fastest": [5, 7], "cudamemset2d": [5, 7], "cudamemsetasync": [5, 7], "cudamemset2dasync": [5, 7], "pallocatearrai": 5, "dimension": [5, 7], "4x16": 5, "float16": 5, "cudamallocarrai": [5, 7], "harrai": [5, 7], "parraydescriptor": 5, "subroutin": 5, "cudaarraygetinfo": [5, 7], "cuarraygetsparseproperti": 5, "sparseproperti": [5, 7], "cumipmappedarraygetlevel": [5, 7], "cumipmappedarraygetsparseproperti": 5, "entir": [5, 6, 7, 9], "cumemmaparrayasync": [5, 7], "cuarraygetmemoryrequir": 5, "memoryrequir": [5, 7], "cumipmappedarraygetmemoryrequir": 5, "cuarraygetplan": [5, 7], "planeidx": [5, 7], "pplanearrai": [5, 7], "cudaarraygetplan": [5, 7], "cudafreearrai": [5, 7], "special": [5, 7], "cusurfrefsetarrai": 5, "breviti": [5, 7], "sake": [5, 7], "ex": [5, 7], "texture1d_width": 5, "cudamalloc3darrai": [5, 7], "cumipmappedarraycr": [5, 7], "pmipmappedarraydesc": 5, "nummipmaplevel": 5, "floor": [5, 7], "log2": [5, 7], "texture1d_mipmapped_width": 5, "cumipmappedarraydestroi": [5, 7], "cudamallocmipmappedarrai": [5, 7], "hmipmappedarrai": 5, "plevelarrai": 5, "cudagetmipmappedarraylevel": [5, 7], "cudafreemipmappedarrai": [5, 7], "cumemgethandleforaddressrang": 5, "fulli": [5, 7], "va": 5, "uva": 5, "cumemaddressfre": 5, "memaddressreserv": 5, "prop": [5, 7], "transmit": 5, "recipi": [5, 7], "cumemgetallocationgranular": 5, "imex": [5, 7], "entiti": [5, 6, 7], "aim": [5, 7], "fine": [5, 7], "grain": [5, 7], "grant": [5, 7], "modprob": [5, 7], "cli": [5, 7, 9], "cumemreleas": 5, "cumemimportfromshareablehandl": 5, "shareabl": [5, 7], "cumulticastgetgranular": 5, "cu_multicast_minimum_granular": 5, "cu_multicast_recommended_granular": 5, "cumemsetaccess": 5, "cumemunmap": 5, "mapinfolist": 5, "subregion": [5, 7], "cuarraysparsesubresourcetype_enum": 5, "miplevel": [5, 7], "span": 5, "small": [5, 6], "constitut": 5, "sparselevel": 5, "offsetx": 5, "offseti": 5, "offsetz": 5, "extentwidth": 5, "extentheight": 5, 
"extentdepth": 5, "miptail": 5, "don": [5, 7], "sub": [5, 7], "unreleas": 5, "desc": [5, 7], "cumemgetaccess": 5, "afterward": 5, "vulkan": [5, 7], "shareablehandl": [5, 7], "oshandl": 5, "shhandletyp": 5, "sli": [5, 7], "minim": [5, 6, 7], "cumemgetallocationpropertiesfromhandl": 5, "cumemretainallocationhandl": 5, "necessarili": [5, 7], "expos": 5, "promis": [5, 7], "realloc": [5, 7], "compliant": [5, 7], "tempor": [5, 7], "inter": [5, 7], "contract": [5, 6, 7], "therefor": [5, 7, 9, 17], "basic": [5, 7, 9], "minbytestokeep": [5, 7], "tri": [5, 7], "fewer": [5, 7, 8], "trim": [5, 7, 23], "trimto": [5, 7], "eg": [5, 7], "cumempoolgetaccess": [5, 7], "cumempoolexporttoshareablehandl": [5, 7], "haven": [5, 7], "cumempoolimportfromshareablehandl": [5, 7], "cumempoolexportpoint": [5, 7], "cumempoolimportpoint": [5, 7], "handle_out": [5, 7], "sharedata_out": [5, 7], "sharedata": [5, 7], "ptr_out": [5, 7], "cumulticastcr": 5, "broadcast": 5, "cumulticastadddevic": 5, "cumulticastbindmem": 5, "cumulticastbindaddr": 5, "memmori": 5, "cumulticastunbind": 5, "mchandl": 5, "similarli": 5, "mcoffset": 5, "memoffset": 5, "beeen": 5, "memptr": 5, "cudamallocasync": [5, 7], "unbind": 5, "upto": 5, "warn": [5, 6], "distinct": [5, 7], "look": [5, 7], "cupointergetattribut": [5, 7], "unnecessari": 5, "multidimension": [5, 7], "disjoint": [5, 7], "boolean": 5, "against": [5, 7], "mappabl": 5, "cupointersetattribut": 5, "cudapointergetattribut": [5, 7], "devptr": [5, 7], "cumemprefetchasync_v2": 5, "enqueu": [5, 7], "subset": [5, 7], "serv": [5, 7], "improv": [5, 7, 9], "etih": [5, 7], "OR": [5, 7], "advic": [5, 7], "cumemadvise_v2": [5, 7], "impos": [5, 7], "go": [5, 7, 9], "duplic": [5, 7], "collaps": [5, 7], "arbitrari": [5, 7], "avoid": [5, 7], "resolut": [5, 7], "constantli": [5, 7], "But": [5, 7], "indefinit": [5, 7], "impact": [5, 7], "se": [5, 7], "scenario": [5, 7], "infrequ": [5, 7], "overhead": [5, 7, 8], "help": [5, 7, 8, 9], "procesor": [5, 7], "alu": [5, 7], "cumemrangegetattribut": [5, 7], "datas": [5, 7], "simpli": [5, 7], "sizeof": [5, 7], "cudamemrangegetattribut": [5, 7], "numattribut": [5, 7], "unset": [5, 7], "custreamcr": [5, 7, 9], "phstream": 5, "custreamdestroi": [5, 7, 9], "cugreenctxstreamcr": 5, "custreamgetflag": [5, 7], "custreamsynchron": [5, 7, 9], "custreamaddcallback": [5, 7], "cudastreamcr": [5, 7], "cudastreamcreatewithflag": [5, 7], "preferenti": [5, 7], "preempt": [5, 7], "lowest": [5, 7], "cudastreamcreatewithprior": [5, 7], "cudastreamgetprior": [5, 7], "cudastreamgetflag": [5, 7], "custreamgetid": [5, 7], "streamid": [5, 7], "cudastreamperthread": [5, 7], "cudastreamgetid": [5, 7], "custreamgetctx": 5, "custreamgetctx_v2": 5, "till": 5, "pgreenctx": 5, "cuevent_capture_flag": 5, "cudastreamwaitev": [5, 7], "item": [5, 7], "mandat": [5, 7], "idl": [5, 7], "durat": [5, 7], "behind": [5, 7], "consecut": [5, 7], "culaunchhostfunc": [5, 7], "cudastreamaddcallback": [5, 7], "slate": [5, 7], "custreamgetcaptureinfo": 5, "unsaf": [5, 7], "hgraph": [5, 7], "dependencydata": [5, 7], "numdepend": [5, 7], "place": [5, 7, 9], "cudagraph_t": [5, 7], "facilit": [5, 7], "determinist": [5, 7], "encourag": [5, 7], "fashion": [5, 7], "replai": [5, 7], "whenev": [5, 7], "phgraph": 5, "rule": [5, 7], "cugraphdestroi": 5, "capturestatu": 5, "unspecifi": [5, 7], "capturestatus_out": [5, 7], "id_out": [5, 7], "graph_out": [5, 7], "progress": [5, 7], "unreach": [5, 7], "dependencies_out": [5, 7], "absent": [5, 7], "numdependencies_out": [5, 7], "custreamgetcaptureinfo_v3": 5, "edgedata_out": 
[5, 7], "custreamupdatecapturedependencies_v2": 5, "keyword": [5, 7], "singli": [5, 7], "constrain": [5, 7], "whole": [5, 6, 7], "legal": [5, 7], "revert": [5, 7], "cudastreamattachmemasync": [5, 7], "cudastreamqueri": [5, 7, 13], "ote_null_stream": 5, "cudastreamsynchron": [5, 7], "cudastreamdestroi": [5, 7], "custreamcopyattribut": 5, "custreamgetattribut": 5, "value_out": [5, 7], "cueventelapsedtim": [5, 7], "cudaeventcr": [5, 7], "cudaeventcreatewithflag": [5, 7], "cudaeventrecord": [5, 7], "incomplet": [5, 7], "cudaeventqueri": [5, 7], "busi": [5, 7], "cudaeventsynchron": [5, 7], "cudaeventdestroi": [5, 7], "hstart": 5, "hend": 5, "elaps": [5, 7], "around": [5, 7], "microsecond": [5, 7], "measur": [5, 7], "signific": [5, 7], "pmillisecond": 5, "ms": [5, 7], "cudaeventelapsedtim": [5, 7], "cuimportexternalmemori": 5, "memhandledesc": [5, 7], "extmem_out": [5, 7], "fd": [5, 7], "utf": [5, 7], "id3d12devic": [5, 7], "createsharedhandl": [5, 7], "id3d12heap": [5, 7], "id3d12resourc": [5, 7], "idxgiresource1": [5, 7], "id3d11resourc": [5, 7], "idxgiresourc": [5, 7], "getsharedhandl": [5, 7], "nvscibufobject": [5, 7], "cudestroyexternalmemori": 5, "cuexternalmemorygetmappedbuff": 5, "cuexternalmemorygetmappedmipmappedarrai": 5, "vkinvalidatemappedmemoryrang": [5, 7], "vkflushmappedmemoryrang": [5, 7], "pipelin": [5, 7], "chapter": [5, 7], "extmem": [5, 7], "bufferdesc": [5, 7], "volatil": [5, 7], "deriv": [5, 7, 9], "mipmapdesc": [5, 7], "semhandledesc": [5, 7], "extsem_out": [5, 7], "id3d12fenc": [5, 7], "id3d11fenc": [5, 7], "nvscisyncobj": [5, 7], "idxgikeyedmutex": [5, 7], "Such": [5, 7], "nvscisyncf": 5, "nvscisyncattrkey_requiredeterministicf": [5, 7], "indeterminist": [5, 7], "unblock": [5, 7], "incorrectli": [5, 7], "amongst": [5, 7], "cuda_external_semaphore_param": 5, "keyedmutex": [5, 7], "timeoutm": [5, 7], "interv": [5, 7], "finit": [5, 7], "macro": [5, 6, 7], "extsem": [5, 7], "cu_device_attribute_can_use_stream_wait_value_nor_v2": 5, "improp": 5, "indirectli": 5, "versu": 5, "expans": 5, "cufunctionloadingst": 5, "cu_function_loading_state_unload": 5, "cu_function_loading_state_load": 5, "cu_function_loading_state_max": 5, "With": [5, 8, 9], "few": 5, "execept": 5, "cudafuncgetattribut": [5, 7], "cudafuncsetattribut": [5, 7], "cudafuncsetcacheconfig": [5, 7], "cufuncgetmodul": 5, "cufuncgetnam": 5, "burden": [5, 7], "manner": 5, "commonli": [5, 7, 9], "cufuncsetblockshap": 5, "cufuncsetshareds": 5, "cuparamsets": 5, "cuparamseti": 5, "cuparamsetf": 5, "cuparamsetv": 5, "met": [5, 7], "cudalaunchkernel": [5, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "contingu": 5, "unavai": 5, "cudalaunchkernelex": [5, 7], "shape": [5, 7], "overwritten": [5, 7], "cudalaunchcooperativekernel": [5, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "launchparamslist": 5, "themselv": 5, "cudalaunchcooperativekernelmultidevic": [5, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "contrast": 5, "cugraphcr": 5, "cugraphaddchildgraphnod": 5, "cugraphaddemptynod": 5, "cugraphaddkernelnod": 5, "cugraphaddhostnod": 5, "cugraphaddmemcpynod": 5, "cugraphaddmemsetnod": 5, "cugraphinstanti": 5, "cugraphgetnod": 5, "cugraphgetrootnod": 5, "cugraphgetedg": 5, "cugraphclon": 5, "nodeparam": [5, 7], "root": [5, 6, 7], "phgraphnod": [5, 7], "cugraphkernelnodegetparam": 5, "cugraphkernelnodesetparam": 5, "hnode": [5, 7], "cudagraphnode_t": [5, 7], "cugraphnodesetparam": 5, "operand": [5, 7], 
"cugraphmemcpynodegetparam": 5, "cugraphmemcpynodesetparam": 5, "memsetparam": 5, "cugraphmemsetnodegetparam": 5, "cugraphmemsetnodesetparam": 5, "pre": [5, 6, 7], "cugraphhostnodegetparam": 5, "cugraphhostnodesetparam": 5, "childgraph": [5, 7], "cugraphchildgraphnodegetgraph": 5, "cugraphnodefindinclon": 5, "transit": [5, 7], "phase": [5, 6, 7], "cugraphaddeventrecordnod": 5, "cugraphaddeventwaitnod": 5, "cugrapheventrecordnodegetev": 5, "event_out": [5, 7], "cugrapheventrecordnodesetev": 5, "cugrapheventwaitnodegetev": 5, "cugrapheventwaitnodesetev": 5, "cugraphaddexternalsemaphoressignalnod": 5, "cugraphexternalsemaphoressignalnodegetparam": 5, "cugraphexternalsemaphoressignalnodesetparam": 5, "cugraphexecexternalsemaphoressignalnodesetparam": 5, "cugraphaddexternalsemaphoreswaitnod": 5, "params_out": [5, 7], "cugraphexternalsemaphoreswaitnodegetparam": 5, "cugraphexternalsemaphoreswaitnodesetparam": 5, "cugraphexecexternalsemaphoreswaitnodesetparam": 5, "cugraphaddbatchmemopnod": 5, "cugraphbatchmemopnodegetparam": 5, "cugraphbatchmemopnodesetparam": 5, "nodeparams_out": 5, "cugraphexecbatchmemopnodesetparam": 5, "hgraphexec": [5, 7], "graphexec": [5, 7], "membar": 5, "modif": [5, 7], "cudagraphexec_t": [5, 7], "cugraphexecnodesetparam": 5, "cugraphaddmemallocnod": 5, "cugraphaddmemfreenod": 5, "unfre": [5, 7], "delet": [5, 7], "cugraphmemallocnodegetparam": 5, "cudevicegraphmemtrim": 5, "cudevicegetgraphmemattribut": 5, "cudevicesetgraphmemattribut": 5, "cugraphmemfreenodegetparam": 5, "twice": [5, 7], "dptr_out": [5, 7], "originalgraph": [5, 7], "phgraphclon": 5, "horiginalnod": 5, "hclonedgraph": 5, "horiginalgraph": 5, "phclonednod": 5, "phnode": 5, "cugraphnodegettyp": 5, "numnod": [5, 7], "cugraphnodegetdepend": 5, "cugraphnodegetdependentnod": 5, "numrootnod": 5, "rootnod": 5, "numedg": [5, 7], "endpoint": [5, 7], "cugraphadddepend": 5, "cugraphremovedepend": 5, "cugraphgetedges_v2": 5, "edgedata": [5, 7], "alon": [5, 7], "lossi": 5, "cugraphnodegetdependencies_v2": 5, "numdependentnod": 5, "dependentnod": 5, "cugraphnodegetdependentnodes_v2": 5, "from_": [5, 7], "cugraphadddependencies_v2": 5, "cugraphremovedependencies_v2": 5, "sever": [5, 7], "vice": [5, 7], "versa": [5, 7], "phgraphexec": 5, "cugraphexecdestroi": 5, "cudagraphlaunch": [5, 7], "cugraphlaunch": 5, "offend": [5, 7], "cugraphexecgetflag": 5, "cugraphexeckernelnodesetparam": 5, "cdp": [5, 7], "cugraphexecmemcpynodesetparam": 5, "cugraphexecmemsetnodesetparam": 5, "cugraphexechostnodesetparam": 5, "cugraphexecchildgraphnodesetparam": 5, "cugraphexeceventrecordnodesetev": 5, "cugraphexeceventwaitnodesetev": 5, "aspect": [5, 7], "oportunist": [5, 7], "cugraphnodeseten": 5, "isen": [5, 7], "reenabl": [5, 7], "cugraphnodegeten": 5, "flight": 5, "assing": [5, 7], "resultinfo": [5, 7], "exit": [5, 7], "herrornode_out": [5, 7], "succe": [5, 6, 7], "cugraphkernelnodegetattribut": 5, "dot": [5, 7], "cuuserobjectcr": 5, "initialrefcount": [5, 7], "refcount": [5, 7], "manual": [5, 7, 9], "object_out": [5, 7], "cuuserobjectretain": 5, "cuuserobjectreleas": 5, "cugraphretainuserobject": 5, "cugraphreleaseuserobject": 5, "int_max": [5, 7], "tag": [5, 6, 7], "past": [5, 7], "brace": [5, 7], "cugraphaddnode_v2": 5, "defaultlaunchvalu": [5, 7], "children": [5, 7], "phandle_out": [5, 7], "blocksiz": [5, 7], "dynamicsmems": [5, 7], "numblock": [5, 7], "cudaoccupancymaxactiveblockspermultiprocessor": [5, 7], "suppress": [5, 6, 7], "maxwel": [5, 7], "tune": [5, 7], "cudaoccupancymaxactiveblockspermultiprocessorwithflag": [5, 7], 
"cuoccupancymaxpotentialblocks": 5, "blocksizetodynamicsmems": 5, "blocksizelimit": 5, "suggest": 5, "achiev": 5, "fewest": 5, "mingrids": 5, "vari": [5, 7], "unari": 5, "signatur": [5, 7, 13, 24], "cudaoccupancymaxpotentialblocks": [5, 7], "cuoccupancymaxpotentialblocksizewithflag": 5, "cudaoccupancymaxpotentialblocksizewithflag": [5, 7], "cuoccupancyavailabledynamicsmemperblock": 5, "cuoccupancymaxpotentialclusters": 5, "clusters": 5, "cuoccupancymaxactiveclust": 5, "co": 5, "numclust": 5, "els": [5, 9], "presdesc": [5, 7], "ptexdesc": [5, 7], "presviewdesc": [5, 7], "ptexobject": [5, 7], "sizeinbyt": [5, 7], "pitch2d": [5, 7], "pitchinbyt": [5, 7], "breadth": 5, "bilinear": 5, "approxim": [5, 6], "anisotrop": [5, 7], "upper": [5, 7], "bc": [5, 7], "cutexobjectdestroi": [5, 7], "cudacreatetextureobject": [5, 7], "texobject": [5, 7], "cudadestroytextureobject": [5, 7], "cutexobjectgetresourcedesc": [5, 7], "cudagettextureobjectresourcedesc": [5, 7], "cutexobjectgettexturedesc": [5, 7], "cudagettextureobjecttexturedesc": [5, 7], "cutexobjectgetresourceviewdesc": [5, 7], "cudagettextureobjectresourceviewdesc": [5, 7], "cusurfobjectcr": [5, 7], "psurfobject": [5, 7], "cusurfobjectdestroi": [5, 7], "cudacreatesurfaceobject": [5, 7], "surfobject": [5, 7], "cudadestroysurfaceobject": [5, 7], "cusurfobjectgetresourcedesc": [5, 7], "cudagetsurfaceobjectresourcedesc": [5, 7], "cutensormapencodetil": 5, "tensordatatyp": 5, "tensorrank": 5, "globaladdress": 5, "globaldim": 5, "globalstrid": 5, "boxdim": 5, "cuuint32_t": 5, "elementstrid": 5, "l2promot": 5, "oobfil": 5, "tma": 5, "tensormap": 5, "stride": 5, "travers": 5, "elementsizeinbyt": 5, "ceil": 5, "th": 5, "nc": 5, "8hwc8": 5, "c8": 5, "16hwc16": 5, "c16": 5, "box": 5, "inner": 5, "multipli": [5, 6], "organ": 5, "problem": [5, 8], "shuffl": 5, "granur": 5, "dram": 5, "nan": 5, "elment": 5, "cutensormapencodeim2col": 5, "cutensormapreplaceaddress": 5, "pixelboxlowercorn": 5, "pixelboxuppercorn": 5, "channelsperpixel": 5, "pixelspercolumn": 5, "im2col": 5, "d": [5, 6, 7], "left": [5, 7], "front": [5, 6], "corner": [5, 7], "32767": 5, "bottom": 5, "dhw": 5, "pixel": 5, "peerdev": 5, "canaccessp": [5, 7], "cudadevicecanaccessp": [5, 7], "peercontext": 5, "unidirect": [5, 7], "symmetr": [5, 7], "cudadeviceenablepeeraccess": [5, 7], "cudadevicedisablepeeraccess": [5, 7], "cudevicegetp2pattribut": [5, 7], "cudaarrai": [5, 7], "cudadevicegetp2pattribut": [5, 7], "cugraphicsunregisterresourc": [5, 7], "cugraphicsd3d9registerresourc": 5, "cugraphicsd3d10registerresourc": 5, "cugraphicsd3d11registerresourc": 5, "cugraphicsglregisterbuff": [5, 7, 12], "cugraphicsglregisterimag": [5, 7, 12], "cudagraphicsunregisterresourc": [5, 7], "cugraphicssubresourcegetmappedarrai": [5, 7], "arrayindex": [5, 7], "parrai": 5, "cugraphicsresourcegetmappedpoint": 5, "cudagraphicssubresourcegetmappedarrai": [5, 7], "cugraphicsresourcegetmappedmipmappedarrai": [5, 7], "pmipmappedarrai": 5, "cudagraphicsresourcegetmappedmipmappedarrai": [5, 7], "pdevptr": 5, "ppointer": 5, "cugraphicsresourcesetmapflag": [5, 7], "cu_graphics_map_resource_flags_readonli": 5, "cu_graphics_map_resource_flags_writediscard": 5, "cugraphicsmapresourc": [5, 7], "cudagraphicsresourcesetmapflag": [5, 7], "cugraphicsunmapresourc": [5, 7], "cudagraphicsmapresourc": [5, 7], "cudagraphicsunmapresourc": [5, 7], "cudavers": [5, 7], "pfn": [5, 7], "11020": [5, 7], "typedef": [5, 7], "pick": [5, 7], "cudatypedef": [5, 7], "abi": [5, 6, 7], "symbolstatu": 5, "cuda_api_per_thread_default_stream": [5, 7], 
"cumemalloc_v2": [5, 7], "_v2": 5, "variant": 5, "cudagetdriverentrypoint": [5, 7], "cucoredumpset": 5, "cu_coredump_enable_on_except": 5, "cu_coredump_trigger_host": 5, "cu_coredump_lightweight": 5, "cu_coredump_fil": 5, "cu_coredump_pip": 5, "cu_coredump_generation_flag": 5, "cu_coredump_max": 5, "cucoredumpgenerationflag": 5, "cu_coredump_default_flag": 5, "cu_coredump_skip_nonrelocated_elf_imag": 5, "cu_coredump_skip_global_memori": 5, "cu_coredump_skip_shared_memori": 5, "cu_coredump_skip_local_memori": 5, "cu_coredump_skip_abort": 5, "cu_coredump_skip_constbank_memori": 5, "cu_coredump_lightweight_flag": 5, "cucoredumpgetattribut": 5, "bool": 5, "abort": 5, "elf": 5, "1023": 5, "hostnam": 5, "pid": 5, "monitor": 5, "corepip": 5, "bitwis": 5, "itself": [5, 7], "scale": 5, "equiavl": 5, "goal": [5, 8, 9], "better": [5, 7], "cucoredumpgetattributeglob": 5, "decis": 5, "basi": 5, "manipul": 5, "tradit": 5, "abil": 5, "spatial": 5, "provis": 5, "main": 5, "cudevicegetdevresourc": 5, "todai": [5, 9], "cudevsmresourcesplitbycount": 5, "cudevresourcegeneratedesc": 5, "cu_dev_resource_type_sm": 5, "mincount": 5, "guidelin": 5, "tradeoff": 5, "finer": 5, "hw": 5, "cuda_device_max_connect": 5, "workload": 5, "cuda_mps_active_thread_percentag": 5, "cudevsmresource_st": 5, "smcount": 5, "cudevresource_st": 5, "dictat": 5, "cudevresourcetyp": 5, "_internal_pad": 5, "cudevsmresourc": 5, "_overs": 5, "cudevresourc": 5, "cugreenctxcreate_flag": 5, "cu_green_ctx_default_stream": 5, "cudevsmresourcesplit_flag": 5, "cu_dev_sm_resource_split_ignore_sm_coschedul": 5, "cu_dev_sm_resource_split_max_potential_cluster_s": 5, "cu_dev_resource_type_invalid": 5, "cudevresourcedesc": 5, "encapsul": 5, "phctx": 5, "heavi": 5, "deiniti": [5, 7], "cugreenctxdestroi": 5, "pcontext": 5, "cuctxgetdevresourc": 5, "cugreenctxgetdevresourc": 5, "nbgroup": 5, "input_": 5, "useflag": 5, "adher": 5, "simul": 5, "divid": 5, "remaind": 5, "Its": 5, "carefulli": 5, "plan": 5, "discourag": [5, 7], "hierarchi": 5, "abid": 5, "cleanli": 5, "ommit": 5, "nbresourc": 5, "phdesc": 5, "came": 5, "custreamgetgreenctx": 5, "greenctx": 5, "resource_abi_vers": 5, "resource_abi_external_byt": 5, "cugraphicseglregisterimag": [5, 7, 12], "eglimagekhr": [5, 7], "pcudaresourc": [5, 7], "cugraphicsresourcegetmappedeglfram": [5, 7, 12], "accomplish": [5, 7], "glfinish": [5, 7], "command": [5, 7, 9], "glcontext": [5, 7], "acces": [5, 7], "eglimag": [5, 7], "cudagraphicseglregisterimag": [5, 7, 12], "cueglstreamconsumerconnect": [5, 7, 12], "eglstream": [5, 7], "eglstreamkhr": [5, 7], "conn": [5, 7], "cueglstreamconsumerdisconnect": [5, 7, 12], "cueglstreamconsumerreleasefram": [5, 7, 12], "cudaeglstreamconsumerconnect": [5, 7, 12], "cueglstreamconsumerconnectwithflag": [5, 7, 12], "cudaeglstreamconsumerconnectwithflag": [5, 7, 12], "disconnect": [5, 7], "conect": [5, 7], "cudaeglstreamconsumerdisconnect": [5, 7, 12], "pstream": [5, 7], "egl_support_reuse_nv": 5, "egl_fals": 5, "egl_tru": 5, "usec": [5, 7], "cudaeglstreamconsumeracquirefram": [5, 7, 12], "cudaeglstreamconsumerreleasefram": [5, 7, 12], "cueglstreamproducerconnect": [5, 7, 12], "eglint": [5, 7], "cueglstreamproducerdisconnect": [5, 7, 12], "cueglstreamproducerpresentfram": [5, 7, 12], "cudaeglstreamproducerconnect": [5, 7, 12], "cudaeglstreamproducerdisconnect": [5, 7, 12], "proucer": [5, 7], "cueglstreamproducerreturnfram": [5, 7, 12], "cudaeglstreamproducerpresentfram": [5, 7, 12], "retri": [5, 7, 9], "cudaeglstreamproducerreturnfram": [5, 7, 12], "cueventcreatefromeglsync": [5, 
12], "eglsync": [5, 7], "eglsynckhr": [5, 7], "timingdata": [5, 7], "agnost": [5, 7], "cugldevicelist": 5, "cu_gl_device_list_al": 5, "cu_gl_device_list_current_fram": 5, "render": [5, 7], "cu_gl_device_list_next_fram": 5, "gluint": [5, 7], "cudagraphicsglregisterbuff": [5, 7, 12], "renderbuff": [5, 7], "gl_texture_2d": [5, 7], "gl_texture_rectangl": [5, 7], "gl_texture_cube_map": [5, 7], "gl_texture_3d": [5, 7], "gl_texture_2d_arrai": [5, 7], "gl_renderbuff": [5, 7], "abbrevi": [5, 7], "gl_r": [5, 7], "gl_rg": [5, 7], "expand": [5, 7], "gl_r8": [5, 7], "gl_r16": [5, 7], "gl_rg8": [5, 7], "gl_rg16": [5, 7], "gl_red": [5, 7], "gl_rgba": [5, 7], "gl_lumin": [5, 7], "gl_alpha": [5, 7], "gl_luminance_alpha": [5, 7], "gl_intens": [5, 7], "16f": [5, 7], "32f": [5, 7], "8ui": [5, 7], "16ui": [5, 7], "32ui": [5, 7], "8i": [5, 7], "16i": [5, 7], "32i": [5, 7], "16f_arb": [5, 7], "32f_arb": [5, 7], "8ui_ext": [5, 7], "16ui_ext": [5, 7], "32ui_ext": [5, 7], "8i_ext": [5, 7], "16i_ext": [5, 7], "32i_ext": [5, 7], "multisampl": [5, 7], "glenum": [5, 7], "cudagraphicsglregisterimag": [5, 7, 12], "cuglgetdevic": [5, 7, 12], "cudadevicecount": [5, 7], "devicelist": [5, 7], "pcudadevicecount": [5, 7], "pcudadevic": [5, 7], "predict": 5, "cudaglgetdevic": [5, 7, 12], "mac": [5, 7], "cuprofilerstart": [5, 7, 12], "cuprofilerstop": [5, 7, 12], "cuprofileriniti": [5, 12], "cudaprofilerstart": [5, 7, 12], "cudaprofilerstop": [5, 7, 12], "cuvdpaugetdevic": [5, 7, 12], "vdpdevic": [5, 7], "vdpgetprocaddress": [5, 7], "pdevic": [5, 7], "cuvdpauctxcr": [5, 12], "cugraphicsvdpauregistervideosurfac": [5, 7, 12], "cugraphicsvdpauregisteroutputsurfac": [5, 7, 12], "cudavdpaugetdevic": [5, 7, 12], "facil": 5, "vdpsurfac": [5, 7], "vdpvideosurfac": [5, 7], "shown": 5, "cudagraphicsvdpauregistervideosurfac": [5, 7, 12], "vdpoutputsurfac": [5, 7], "cudagraphicsvdpauregisteroutputsurfac": [5, 7, 12], "nvrtcresult": [6, 9], "nvrtc_success": 6, "nvrtc_error_out_of_memori": 6, "nvrtc_error_program_creation_failur": 6, "nvrtc_error_invalid_input": 6, "nvrtc_error_invalid_program": 6, "nvrtc_error_invalid_opt": 6, "nvrtc_error_compil": 6, "nvrtc_error_builtin_operation_failur": 6, "nvrtc_error_no_name_expressions_after_compil": 6, "nvrtc_error_no_lowered_names_before_compil": 6, "nvrtc_error_name_expression_not_valid": 6, "nvrtc_error_internal_error": 6, "nvrtc_error_time_file_write_fail": 6, "nvrtcgeterrorstr": [6, 9], "helper": 6, "unrecogn": [6, 7], "nvrtc_error": 6, "nvrtcversion": 6, "nvrtcgetnumsupportedarch": 6, "numarch": 6, "nvrtcgetsupportedarch": 6, "supportedarch": 6, "sort": 6, "ascend": 6, "nvrtcprogram": 6, "nvrtccreateprogram": [6, 9], "nvrtccompileprogram": [6, 9], "numhead": 6, "includenam": 6, "prog": [6, 9], "default_program": 6, "nvrtcdestroyprogram": 6, "nvrtcgetptxsiz": [6, 9], "ptxsizeret": 6, "trail": 6, "nvrtcgetptx": [6, 9], "nvrtcgetcubins": 6, "cubinsizeret": 6, "arch": 6, "nvrtcgetcubin": 6, "assembl": 6, "nvrtcgetnvvmsiz": 6, "notic": 6, "nvrtcgetltoirs": 6, "nvrtcgetltoir": 6, "nvvmsizeret": 6, "nvrtcgetnvvm": 6, "nvvm": 6, "ltoirsizeret": 6, "dlto": 6, "ltoir": 6, "nvrtcgetoptixirs": 6, "optixirsizeret": 6, "optix": 6, "nvrtcgetoptixir": 6, "optixir": 6, "nvrtcgetprogramlogs": 6, "logsizeret": 6, "nvrtcgetprogramlog": 6, "nvrtcaddnameexpress": 6, "name_express": 6, "__constant__": 6, "nvrtcgetlowerednam": 6, "extract": [6, 9], "lowered_nam": 6, "dash": 6, "compute_60": 6, "def": [6, 9], "compute_50": 6, "compute_52": 6, "compute_53": 6, "compute_61": 6, "compute_62": 6, "compute_70": 6, 
"compute_72": 6, "compute_75": 6, "compute_80": 6, "compute_87": 6, "compute_89": 6, "compute_90": 6, "compute_90a": 6, "sm_50": 6, "sm_52": 6, "sm_53": 6, "sm_60": 6, "sm_61": 6, "sm_62": 6, "sm_70": 6, "sm_72": 6, "sm_75": 6, "sm_80": 6, "sm_87": 6, "sm_89": 6, "sm_90a": 6, "dc": 6, "dw": 6, "ewp": 6, "dopt": 6, "ptxa": 6, "maxrregcount": 6, "therebi": 6, "trade": 6, "bump": 6, "ftz": 6, "denorm": 6, "use_fast_math": 6, "prec": 6, "sqrt": 6, "squar": 6, "ieee": 6, "div": 6, "reciproc": 6, "fmad": [6, 9], "subtract": 6, "ffma": 6, "dfma": 6, "math": 6, "vector": [6, 9], "aggress": 6, "setrlimit": 6, "dlink": 6, "intermedi": 6, "gen": 6, "nvrtcgetoptix": 6, "jump": [6, 9], "densiti": 6, "jtd": 6, "statement": 6, "brx": 6, "idx": 6, "protector": 6, "canari": 6, "preprocess": 6, "predefin": 6, "translat": [6, 9], "truncat": 6, "cancel": 6, "dir": 6, "directori": [6, 17], "preinclud": 6, "preprocessor": 6, "dialect": 6, "std": 6, "03": 6, "initializer_list": 6, "misc": 6, "inhibit": 6, "int128": 6, "__int128": 6, "cudacc_rtc_int128": 6, "inlin": 6, "emit": 6, "remark": 6, "err": [6, 9], "diagnost": 6, "diag": 6, "comma": 6, "brief": 6, "append": 6, "head": 6, "fdevic": 6, "syntax": [6, 8], "cudatextureobject_t": [6, 7], "cudadevrt": 6, "prefix": 6, "driver_typ": [6, 16], "cudaerror_t": [6, 7, 13], "difficult": 6, "exploit": 6, "safeti": 6, "assess": 6, "risk": 6, "cudasuccess": [7, 13], "impl_priv": 7, "cudaarray_t": 7, "cudamipmappedarray_t": 7, "cudapitchedptr": 7, "cudasurfaceobject_t": 7, "texturerefer": 7, "surfacerefer": 7, "cudaexternalmemory_t": 7, "cudaexternalsemaphore_t": 7, "cudagraphicsresource_t": 7, "reiniti": 7, "cudamallocfrompoolasync": 7, "rt": 7, "cudadevicescheduleblockingsync": 7, "cudalimit": 7, "cudalimitstacks": 7, "cudalimitprintffifos": 7, "cudaerrorinvalidvalu": 7, "cudalimitmallocheaps": 7, "cudalimitdevruntimesyncdepth": 7, "cudaerrormemoryalloc": 7, "cudaerrorunsupportedlimit": 7, "cudalimitdevruntimependinglaunchcount": 7, "cudalimitmaxl2fetchgranular": 7, "cudalimitpersistingl2caches": 7, "isssu": 7, "cudachannelformatdesc": 7, "fmtdesc": 7, "pcacheconfig": 7, "cudafunccacheprefernon": 7, "cudafunccacheprefershar": 7, "cudafunccachepreferl1": 7, "cudafunccachepreferequ": 7, "cudafunccach": 7, "cacheconfig": 7, "cudaerrorinvaliddevic": 7, "cudaeventinterprocess": 7, "cudaeventdisabletim": 7, "cudadevattripceventsupport": 7, "cudaerrorinvalidresourcehandl": 7, "cudaerrormapbufferobjectfail": 7, "cudaerrornotsupport": 7, "cudaipceventhandle_t": 7, "cudaipceventhandl": 7, "cudaerrordeviceuniniti": 7, "cudaipcmemhandle_t": 7, "cudaipcmemhandl": 7, "cudaipcmemlazyenablepeeraccess": 7, "cudaerrortoomanyp": 7, "returnd": 7, "cudadeviceflushgpudirectrdmawrit": 7, "cudaflushgpudirectrdmawritestarget": 7, "cudaflushgpudirectrdmawritesscop": 7, "cudadevattrgpudirectrdmawritesord": 7, "cudadevattrgpudirectrdmaflushwritesopt": 7, "cudadeviceregisterasyncnotif": 7, "cudadeviceunregisterasyncnotif": 7, "cudaerrornotpermit": 7, "cudaasynccallback": 7, "cudaerrorunknown": 7, "cudaasynccallbackhandle_t": 7, "cudachoosedevic": 7, "cudainitdevic": 7, "cudadeviceprop": 7, "totalglobalmem": 7, "warpsiz": 7, "totalconstmem": 7, "revis": 7, "texturepitchalign": 7, "deviceoverlap": 7, "asyncenginecount": 7, "multiprocessorcount": 7, "kernelexectimeouten": 7, "motherboard": 7, "card": 7, "canmaphostmemori": 7, "computemod": 7, "cudacomputemodedefault": 7, "cudacomputemodeprohibit": 7, "cudacomputemodeexclusiveprocess": 7, "occupi": 7, "cudaerrordevicesunavail": 7, "maxtexture1d": 7, 
"maxtexture1dmipmap": 7, "maxtexture1dlinear": 7, "maxtexture2d": 7, "maxtexture2dmipmap": 7, "maxtexture2dlinear": 7, "maxtexture2dgath": 7, "maxtexture3d": 7, "maxtexture3dalt": 7, "maxtexturecubemap": 7, "maxtexture1dlay": 7, "maxtexture2dlay": 7, "maxtexturecubemaplay": 7, "maxsurface1d": 7, "maxsurface2d": 7, "maxsurface3d": 7, "maxsurface1dlay": 7, "maxsurface2dlay": 7, "maxsurfacecubemap": 7, "maxsurfacecubemaplay": 7, "surfacealign": 7, "concurrentkernel": 7, "eccen": 7, "pcideviceid": 7, "sometim": 7, "pcidomainid": 7, "tccdriver": 7, "unifiedaddress": 7, "memoryclockr": 7, "memorybuswidth": 7, "l2caches": 7, "persistingl2cachemaxs": 7, "maxthreadspermultiprocessor": 7, "streamprioritiessupport": 7, "globall1cachesupport": 7, "locall1cachesupport": 7, "sharedmempermultiprocessor": 7, "regspermultiprocessor": 7, "managedmemori": 7, "ismultigpuboard": 7, "gemini": 7, "multigpuboardgroupid": 7, "hostnativeatomicsupport": 7, "singletodoubleprecisionperfratio": 7, "pageablememoryaccess": 7, "concurrentmanagedaccess": 7, "computepreemptionsupport": 7, "canusehostpointerforregisteredmem": 7, "cooperativelaunch": 7, "cooperativemultidevicelaunch": 7, "sharedmemperblockoptin": 7, "pageablememoryaccessuseshostpaget": 7, "directmanagedmemaccessfromhost": 7, "maxblockspermultiprocessor": 7, "accesspolicymaxwindows": 7, "reservedsharedmemperblock": 7, "hostregistersupport": 7, "sparsecudaarraysupport": 7, "hostregisterreadonlysupport": 7, "cudahostregisterreadonli": 7, "timelinesemaphoreinteropsupport": 7, "memorypoolssupport": 7, "cudamempool": 7, "gpudirectrdmasupport": 7, "gpudirectrdmaflushwritesopt": 7, "cudaflushgpudirectrdmawritesopt": 7, "gpudirectrdmawritesord": 7, "cudagpudirectrdmawritesord": 7, "memorypoolsupportedhandletyp": 7, "deferredmappingcudaarraysupport": 7, "ipceventsupport": 7, "unifiedfunctionpoint": 7, "cudadeviceattr": 7, "cudadevattrmaxthreadsperblock": 7, "cudadevattrmaxblockdimx": 7, "cudadevattrmaxblockdimi": 7, "cudadevattrmaxblockdimz": 7, "cudadevattrmaxgriddimx": 7, "cudadevattrmaxgriddimi": 7, "cudadevattrmaxgriddimz": 7, "cudadevattrmaxsharedmemoryperblock": 7, "cudadevattrtotalconstantmemori": 7, "cudadevattrwarps": 7, "cudadevattrmaxpitch": 7, "cudadevattrmaxtexture1dwidth": 7, "cudadevattrmaxtexture1dlinearwidth": 7, "cudadevattrmaxtexture1dmipmappedwidth": 7, "cudadevattrmaxtexture2dwidth": 7, "cudadevattrmaxtexture2dheight": 7, "cudadevattrmaxtexture2dlinearwidth": 7, "cudadevattrmaxtexture2dlinearheight": 7, "cudadevattrmaxtexture2dlinearpitch": 7, "cudadevattrmaxtexture2dmipmappedwidth": 7, "cudadevattrmaxtexture2dmipmappedheight": 7, "cudadevattrmaxtexture3dwidth": 7, "cudadevattrmaxtexture3dheight": 7, "cudadevattrmaxtexture3ddepth": 7, "cudadevattrmaxtexture3dwidthalt": 7, "cudadevattrmaxtexture3dheightalt": 7, "cudadevattrmaxtexture3ddepthalt": 7, "cudadevattrmaxtexturecubemapwidth": 7, "cudadevattrmaxtexture1dlayeredwidth": 7, "cudadevattrmaxtexture1dlayeredlay": 7, "cudadevattrmaxtexture2dlayeredwidth": 7, "cudadevattrmaxtexture2dlayeredheight": 7, "cudadevattrmaxtexture2dlayeredlay": 7, "cudadevattrmaxtexturecubemaplayeredwidth": 7, "cudadevattrmaxtexturecubemaplayeredlay": 7, "cudadevattrmaxsurface1dwidth": 7, "cudadevattrmaxsurface2dwidth": 7, "cudadevattrmaxsurface2dheight": 7, "cudadevattrmaxsurface3dwidth": 7, "cudadevattrmaxsurface3dheight": 7, "cudadevattrmaxsurface3ddepth": 7, "cudadevattrmaxsurface1dlayeredwidth": 7, "cudadevattrmaxsurface1dlayeredlay": 7, "cudadevattrmaxsurface2dlayeredwidth": 7, "cudadevattrmaxsurface2dlayeredheight": 
7, "cudadevattrmaxsurface2dlayeredlay": 7, "cudadevattrmaxsurfacecubemapwidth": 7, "cudadevattrmaxsurfacecubemaplayeredwidth": 7, "cudadevattrmaxsurfacecubemaplayeredlay": 7, "cudadevattrmaxregistersperblock": 7, "cudadevattrclockr": 7, "cudadevattrtexturealign": 7, "cudadevattrtexturepitchalign": 7, "cudadevattrgpuoverlap": 7, "cudadevattrmultiprocessorcount": 7, "cudadevattrkernelexectimeout": 7, "cudadevattrintegr": 7, "cudadevattrcanmaphostmemori": 7, "cudadevattrcomputemod": 7, "cudadevattrconcurrentkernel": 7, "cudadevattreccen": 7, "cudadevattrpcibusid": 7, "cudadevattrpcideviceid": 7, "cudadevattrtccdriv": 7, "cudadevattrmemoryclockr": 7, "cudadevattrglobalmemorybuswidth": 7, "cudadevattrl2caches": 7, "cudadevattrmaxthreadspermultiprocessor": 7, "cudadevattrunifiedaddress": 7, "cudadevattrcomputecapabilitymajor": 7, "cudadevattrcomputecapabilityminor": 7, "cudadevattrstreamprioritiessupport": 7, "cudadevattrgloball1cachesupport": 7, "cudadevattrlocall1cachesupport": 7, "cudadevattrmaxsharedmemorypermultiprocessor": 7, "cudadevattrmaxregisterspermultiprocessor": 7, "cudadevattrmanagedmemori": 7, "cudadevattrismultigpuboard": 7, "cudadevattrmultigpuboardgroupid": 7, "cudadevattrhostnativeatomicsupport": 7, "cudadevattrsingletodoubleprecisionperfratio": 7, "cudadevattrpageablememoryaccess": 7, "cudadevattrconcurrentmanagedaccess": 7, "cudadevattrcomputepreemptionsupport": 7, "cudadevattrcanusehostpointerforregisteredmem": 7, "cudadevattrcooperativelaunch": 7, "cudadevattrcooperativemultidevicelaunch": 7, "cudadevattrcanflushremotewrit": 7, "cudadevattrhostregistersupport": 7, "cudadevattrpageablememoryaccessuseshostpaget": 7, "cudadevattrdirectmanagedmemaccessfromhost": 7, "cudadevattrmaxsharedmemoryperblockoptin": 7, "cudadevattrmaxblockspermultiprocessor": 7, "cudadevattrmaxpersistingl2caches": 7, "cudadevattrmaxaccesspolicywindows": 7, "cudadevattrreservedsharedmemoryperblock": 7, "cudadevattrsparsecudaarraysupport": 7, "cudadevattrhostregisterreadonlysupport": 7, "cudadevattrmemorypoolssupport": 7, "cudadevattrgpudirectrdmasupport": 7, "cudadevattrmemorypoolsupportedhandletyp": 7, "cudadevattrdeferredmappingcudaarraysupport": 7, "cudadevattrnumaconfig": 7, "cudadevicenumaconfig": 7, "cudadevattrnumaid": 7, "cudadevicegetdefaultmempool": 7, "cudamempooltrimto": 7, "cudamempoolgetattribut": 7, "cudadevicesetmempool": 7, "cudamempoolsetattribut": 7, "cudamempoolsetaccess": 7, "cudadevicegetmempool": 7, "cudamempoolcr": 7, "cudamempooldestroi": 7, "cudadevicegetnvscisyncattribut": 7, "cudaerrorinvalidhandl": 7, "cudanvscisyncattrsign": 7, "cudanvscisyncattrwait": 7, "cudadevicegetproperti": 7, "cudaimportexternalsemaphor": 7, "cudadestroyexternalsemaphor": 7, "cudasignalexternalsemaphoresasync": 7, "cudawaitexternalsemaphoresasync": 7, "cudadevicep2pattr": 7, "cudadevp2pattrperformancerank": 7, "cudadevp2pattraccesssupport": 7, "cudadevp2pattrnativeatomicsupport": 7, "cudadevp2pattrcudaarrayaccesssupport": 7, "criteria": 7, "deviceflag": 7, "cudainitdeviceflagsarevalid": 7, "cudaerrordeviceunavail": 7, "cudadevicescheduleauto": 7, "cudadeviceschedulespin": 7, "cudadevicescheduleyield": 7, "cudadeviceblockingsync": 7, "cudadevicemaphost": 7, "cudadevicelmemresizetomax": 7, "cudadevicesyncmemop": 7, "cudasetvaliddevic": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "seen": 7, "inbetween": 7, "contrari": 7, "assumpt": 7, "cudaerrormissingconfigur": 7, "cudaerrorinitializationerror": 7, "cudaerrorlaunchfailur": 7, "cudaerrorlaunchtimeout": 7, 
"cudaerrorlaunchoutofresourc": 7, "cudaerrorinvaliddevicefunct": 7, "cudaerrorinvalidconfigur": 7, "cudaerrorinvalidpitchvalu": 7, "cudaerrorinvalidsymbol": 7, "cudaerrorunmapbufferobjectfail": 7, "cudaerrorinvaliddevicepoint": 7, "cudaerrorinvalidtextur": 7, "cudaerrorinvalidtexturebind": 7, "cudaerrorinvalidchanneldescriptor": 7, "cudaerrorinvalidmemcpydirect": 7, "cudaerrorinvalidfilterset": 7, "cudaerrorinvalidnormset": 7, "cudaerrorinsufficientdriv": 7, "cudaerrornodevic": 7, "cudaerrorsetonactiveprocess": 7, "cudaerrorstartupfailur": 7, "cudaerrorinvalidptx": 7, "cudaerrorunsupportedptxvers": 7, "cudaerrornokernelimagefordevic": 7, "cudaerrorjitcompilernotfound": 7, "cudaerrorjitcompilationdis": 7, "cudapeekatlasterror": 7, "cudaerror": 7, "cudastreamcallback_t": 7, "cudastreamdefault": 7, "cudastreamnonblock": [7, 13], "cudactxresetpersistingl2cach": 7, "cudaaccesspolicywindow": 7, "cudastreamcopyattribut": 7, "cudastreamattrid": 7, "cudastreamgetattribut": 7, "cudastreamattrvalu": 7, "cudastreamsetattribut": 7, "cudaeventwaitdefault": 7, "cudaeventwaitextern": 7, "cudalaunchhostfunc": 7, "cudastreambegincaptur": 7, "cudastreamendcaptur": 7, "cudaerrornotreadi": 7, "cudamemattachglob": 7, "cudamemattachhost": 7, "cudamemattachsingl": 7, "cudastreamcapturemod": 7, "cudastreamiscaptur": 7, "cudastreamgetcaptureinfo": 7, "cudastreamcapturemoderelax": 7, "cudathreadexchangestreamcapturemod": 7, "cudastreambegincapturetograph": 7, "cudagraphedgedata": 7, "cudastreamcapturemodeglob": 7, "cudastreamcapturemodethreadloc": 7, "pgraph": 7, "cudaerrorstreamcapturewrongthread": 7, "cudagraphdestroi": 7, "pcapturestatu": 7, "cudastreamcapturestatusnon": 7, "cudastreamcapturestatusact": 7, "cudastreamcapturestatusinvalid": 7, "cudaerrorstreamcaptureimplicit": 7, "cudastreamcapturestatu": 7, "cudaerrorstreamcaptureunjoin": 7, "cudastreamgetcaptureinfo_v3": 7, "cudastreamupdatecapturedepend": 7, "cudaerrorlossyqueri": 7, "cudastreamaddcapturedepend": 7, "cudastreamsetcapturedepend": 7, "cudaerrorillegalst": 7, "cudastreamupdatecapturedependencies_v2": 7, "cudaeventdefault": 7, "cudaeventblockingsync": 7, "cudaeventrecordwithflag": 7, "cudaeventrecorddefault": 7, "cudaeventrecordextern": 7, "cudaimportexternalmemori": 7, "cudaexternalmemoryhandledesc": 7, "cudaexternalmemoryhandletyp": 7, "cudaexternalmemoryhandletypeopaquefd": 7, "cudaexternalmemoryhandletypeopaquewin32": 7, "cudaexternalmemoryhandletypeopaquewin32kmt": 7, "cudaexternalmemoryhandletyped3d12heap": 7, "cudaexternalmemoryhandletyped3d12resourc": 7, "cudaexternalmemoryhandletyped3d11resourc": 7, "cudaexternalmemoryhandletyped3d11resourcekmt": 7, "cudaexternalmemoryhandletypenvscibuf": 7, "approprri": 7, "cudaexternalsemaphorewaitskipnvscibufmemsync": 7, "cudaexternalsemaphoresignalskipnvscibufmemsync": 7, "cudaexternalmemoryded": 7, "cudaerroroperatingsystem": 7, "cudadestroyexternalmemori": 7, "cudaexternalmemorygetmappedbuff": 7, "cudaexternalmemorygetmappedmipmappedarrai": 7, "cudaexternalmemorybufferdesc": 7, "cudaexternalmemorymipmappedarraydesc": 7, "formatdesc": 7, "cudaarraycolorattach": 7, "cudaexternalsemaphorehandledesc": 7, "cudaexternalsemaphorehandletyp": 7, "cudaexternalsemaphorehandletypeopaquefd": 7, "cudaexternalsemaphorehandletypeopaquewin32": 7, "cudaexternalsemaphorehandletypeopaquewin32kmt": 7, "cudaexternalsemaphorehandletyped3d12f": 7, "cudaexternalsemaphorehandletyped3d11f": 7, "cudaexternalsemaphorehandletypenvscisync": 7, "cudaexternalsemaphorehandletypekeyedmutex": 7, 
"cudaexternalsemaphorehandletypekeyedmutexkmt": 7, "cudaexternalsemaphorehandletypetimelinesemaphorefd": 7, "cudaexternalsemaphorehandletypetimelinesemaphorewin32": 7, "cudaexternalsemaphoresignalparam": 7, "cudaexternalsemaphorewaitparam": 7, "cudaerrortimeout": 7, "overload": 7, "templat": 7, "func_nam": 7, "template_arg_0": 7, "template_arg_n": 7, "cudafuncattribut": 7, "incorrect": 7, "cudafuncattributemaxdynamicsharedmemorys": 7, "sharedsizebyt": 7, "cudafuncattributepreferredsharedmemorycarveout": 7, "cudafuncattributerequiredclusterwidth": 7, "cudafuncattributerequiredclusterheight": 7, "cudafuncattributerequiredclusterdepth": 7, "cudafuncattributenonportableclustersizeallow": 7, "cudafuncattributeclusterschedulingpolicyprefer": 7, "constrast": 7, "cudahostfn_t": 7, "besid": 7, "cudaoccupancymaxpotentialblocksizevariablesmem": 7, "cudaoccupancyavailabledynamicsmemperblock": 7, "cudaoccupancymaxpotentialblocksizevariablesmemwithflag": 7, "cudaoccupancydefault": 7, "cudaoccupancydisablecachingoverrid": 7, "cudamemadvis": 7, "cudamemprefetchasync": 7, "cudamalloc3d": 7, "cudachannelformatkind": 7, "cudachannelformatkindsign": 7, "cudachannelformatkindunsign": 7, "cudachannelformatkindfloat": 7, "cudaarraydefault": 7, "cudaarraysurfaceloadstor": 7, "cudaarraytexturegath": 7, "cudaarrayspars": 7, "cudaarraydeferredmap": 7, "cudafreeasync": 7, "cudaerrorvalu": 7, "hostptr": 7, "mipmappedarrai": 7, "cudahostallocdefault": 7, "emul": 7, "cudahostallocport": 7, "cudahostallocmap": 7, "cudahostallocwritecombin": 7, "phost": 7, "cudahostregisterdefault": 7, "cudahostregisterport": 7, "cudahostregistermap": 7, "cudahostregisteriomemori": 7, "cudadeviceattrreadonlyhostregistersupport": 7, "cudamaphost": 7, "cudaerrorhostmemoryalreadyregist": 7, "cudaerrorhostmemorynotregist": 7, "cudaext": 7, "pitcheddevptr": 7, "xsize": 7, "ysize": 7, "highli": 7, "cudamemset3d": 7, "make_cudapitchedptr": 7, "make_cudaext": 7, "cudaarraylay": 7, "cudaarraycubemap": 7, "cudagraphicscubefac": 7, "levelarrai": 7, "cudamipmappedarray_const_t": 7, "cudamemcpy3dparm": 7, "betwen": 7, "srcptr": 7, "dstptr": 7, "srcpo": 7, "dstpo": 7, "term": [7, 16], "cudamemcpyhosttohost": 7, "cudamemcpyhosttodevic": 7, "cudamemcpydevicetohost": 7, "cudamemcpydevicetodevic": 7, "cudamemcpydefault": 7, "disregard": 7, "implic": 7, "silent": 7, "fact": 7, "cudamemcpy2darraytoarrai": 7, "make_cudapo": 7, "cudamemcpy3dpeerparm": 7, "lie": 7, "cudachannelformatkindnv12": 7, "cudaarraygetmemoryrequir": 7, "cudaarraymemoryrequir": 7, "cudamipmappedarraygetmemoryrequir": 7, "cudaarraygetsparseproperti": 7, "cudaarraysparsepropertiessinglemiptail": 7, "cudamipmappedarraygetlevel": 7, "cudamipmappedarraygetsparseproperti": 7, "cudaarraysparseproperti": 7, "cudamemcpykind": 7, "ote_sync": 7, "dpitch": 7, "spitch": 7, "woffset": 7, "hoffset": 7, "cudaarray_const_t": 7, "woffsetdst": 7, "hoffsetdst": 7, "woffsetsrc": 7, "hoffsetsrc": 7, "cudamemset3dasync": 7, "narrow": 7, "secondarili": 7, "shorter": 7, "cudacpudeviceid": 7, "cudamemadvisesetreadmostli": 7, "cudamemadvisesetpreferredloc": 7, "cudamemadvisesetaccessedbi": 7, "cudamemadvise_v2": 7, "cudamemprefetchasync_v2": 7, "cudamemloc": 7, "cudamemlocationtypedevic": 7, "cudamemlocationtypehost": 7, "cudamemlocationtypehostnuma": 7, "cudamemlocationtypehostnumacurr": 7, "cudamemoryadvis": 7, "cudamemadviceunsetreadmostli": 7, "cudamemadvicereadmostli": 7, "cudamemadviseunsetpreferredloc": 7, "cudamemadvicesetaccessedbi": 7, "cudamemadviseunsetaccessedbi": 7, "cudamemrangeattribut": 7, 
"cudamemrangeattributereadmostli": 7, "cudamemrangeattributepreferredloc": 7, "cudainvaliddeviceid": 7, "cudamemrangeattributeaccessedbi": 7, "cudamemrangeattributelastprefetchloc": 7, "applicaton": 7, "cudamemrangeattributepreferredlocationtyp": 7, "cudamemlocationtyp": 7, "cudamemlocationtypeinvalid": 7, "cudamemrangeattributepreferredlocationid": 7, "cudamemrangeattributelastprefetchlocationtyp": 7, "cudamemrangeattributelastprefetchlocationid": 7, "xsz": 7, "ysz": 7, "cudapo": 7, "cudaerroroutofmemori": 7, "cudamempoolattr": 7, "cudamempoolattrreleasethreshold": 7, "cudamempoolreusefolloweventdepend": 7, "cudamempoolreuseallowopportunist": 7, "cudamempoolreuseallowinternaldepend": 7, "cudamempoolattrreservedmemhigh": 7, "cudamempoolattrusedmemhigh": 7, "cudamempoolattrreservedmemcurr": 7, "cudamempoolattrusedmemcurr": 7, "desclist": 7, "cudamemaccessdesc": 7, "cudamempoolgetaccess": 7, "cudamemaccessflag": 7, "cudamempoolprop": 7, "cudamemhandletypefabr": 7, "cudamempoolexporttoshareablehandl": 7, "cudamemhandletypenon": 7, "cudamemallocationhandletyp": 7, "cudamempoolimportfromshareablehandl": 7, "cudamempoolexportpoint": 7, "cudamempoolimportpoint": 7, "cudamempoolptrexportdata": 7, "exportdata": 7, "cudapointerattribut": 7, "cudamemorytypeunregist": 7, "cudamemorytypehost": 7, "cudamemorytypedevic": 7, "cudamemorytypemanag": 7, "vanish": 7, "devicepoint": 7, "alia": 7, "hostpoint": 7, "peerdevic": 7, "cudaerrorpeeraccessalreadyen": 7, "cudaerrorpeeraccessnoten": 7, "cudagldevicelist": 7, "cudagldevicelistal": 7, "cudagldevicelistcurrentfram": 7, "cudagldevicelistnextfram": 7, "cudaerrorinvalidgraphicscontext": 7, "cudagraphicsresourcegetmappedpoint": 7, "cudagraphicsregisterflagsnon": 7, "cudagraphicsregisterflagsreadonli": 7, "cudagraphicsregisterflagswritediscard": 7, "cudagraphicsregisterflagssurfaceloadstor": 7, "cudagraphicsregisterflagstexturegath": 7, "cudagraphicsresourc": 7, "cudavdpausetvdpaudevic": [7, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudagraphicsmapflagsnon": 7, "cudagraphicsmapflagsreadonli": 7, "cudagraphicsmapflagswritediscard": 7, "cudagraphicsresourcegetmappedeglfram": [7, 12], "cudaeglstreamconnect": 7, "cudaeglresourcelocationflag": 7, "cudaeglresourcelocationvidmem": 7, "cudaeglfram": 7, "cudaeglframetypepitch": 7, "cudaeglplanedesc": 7, "cudaeventcreatefromeglsync": [7, 12], "flage": 7, "cudagraphicsd3d9registerresourc": 7, "cudagraphicsd3d10registerresourc": 7, "cudagraphicsd3d11registerresourc": 7, "cudagetchanneldesc": 7, "cudacreatechanneldesc": 7, "cudaresourcedesc": 7, "cudatexturedesc": 7, "cudaresourceviewdesc": 7, "cudaresourcetypearrai": 7, "cudaresourcetypemipmappedarrai": 7, "normalizedcoord": 7, "cudaresourcetypelinear": 7, "cudaresourcetypepitch2d": 7, "cudatextureaddressmod": 7, "cudaaddressmodewrap": 7, "cudaaddressmodemirror": 7, "cudaaddressmodeclamp": 7, "cudatexturefiltermod": 7, "readmod": 7, "cudatexturereadmod": 7, "cudareadmodenormalizedfloat": 7, "cudaaddressmodebord": 7, "anistropi": 7, "disabletrilinearoptim": 7, "seamlesscubemap": 7, "cudafiltermodepoint": 7, "cudafiltermodelinear": 7, "runtimevers": 7, "sole": 7, "getlocalruntimevers": [7, 20, 21, 26], "cudagraphcr": 7, "cudagraphaddchildgraphnod": 7, "cudagraphaddemptynod": 7, "cudagraphaddkernelnod": 7, "cudagraphaddhostnod": 7, "cudagraphaddmemcpynod": 7, "cudagraphaddmemsetnod": 7, "cudagraphinstanti": 7, "cudagraphgetnod": 7, "cudagraphgetrootnod": 7, "cudagraphgetedg": 7, "cudagraphclon": 7, "pdepend": 7, "cudakernelnodeparam": 7, 
"pnodeparam": 7, "pgraphnod": 7, "griddim": 7, "blockdim": [7, 9], "sharedmem": 7, "cudagraphaddnod": 7, "cudagraphkernelnodegetparam": 7, "cudagraphdestroynod": 7, "cudagraphnodesetparam": 7, "cudagraphkernelnodecopyattribut": 7, "hsrc": 7, "hdst": 7, "cudakernelnodeattrid": 7, "cudaerrorinvalidcontext": 7, "cudagraphkernelnodegetattribut": 7, "cudakernelnodeattrvalu": 7, "cudagraphkernelnodesetattribut": 7, "pcopyparam": 7, "cudagraphaddmemcpynodetosymbol": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudagraphaddmemcpynodefromsymbol": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudagraphaddmemcpynode1d": 7, "cudagraphmemcpynodegetparam": 7, "cudagraphmemcpynodesetparam": 7, "cudagraphmemcpynodesetparams1d": 7, "cudagraphmemcpynodesetparamstosymbol": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudagraphmemcpynodesetparamsfromsymbol": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudamemsetparam": 7, "pmemsetparam": 7, "cudagraphmemsetnodegetparam": 7, "cudagraphmemsetnodesetparam": 7, "cudahostnodeparam": 7, "cudagraphhostnodegetparam": 7, "cudagraphhostnodesetparam": 7, "cudagraphchildgraphnodegetgraph": 7, "cudagraphnodefindinclon": 7, "cudagraphaddeventrecordnod": 7, "cudagraphaddeventwaitnod": 7, "cudagrapheventrecordnodegetev": 7, "cudagrapheventrecordnodesetev": 7, "cudagrapheventwaitnodegetev": 7, "cudagrapheventwaitnodesetev": 7, "cudagraphaddexternalsemaphoressignalnod": 7, "cudaexternalsemaphoresignalnodeparam": 7, "cudagraphexternalsemaphoressignalnodegetparam": 7, "cudagraphexternalsemaphoressignalnodesetparam": 7, "cudagraphexecexternalsemaphoressignalnodesetparam": 7, "cudagraphaddexternalsemaphoreswaitnod": 7, "cudaexternalsemaphorewaitnodeparam": 7, "cudagraphexternalsemaphoreswaitnodegetparam": 7, "cudagraphexternalsemaphoreswaitnodesetparam": 7, "cudagraphexecexternalsemaphoreswaitnodesetparam": 7, "cudagraphaddmemallocnod": 7, "cudamemallocnodeparam": 7, "cudagraphaddmemfreenod": 7, "cudamemfreeasync": 7, "cudamemfre": 7, "cudagraphinstantiateflagautofreeonlaunch": 7, "cudaerrorcudartunload": 7, "cudagraphmemallocnodegetparam": 7, "cudadevicegraphmemtrim": 7, "cudadevicegetgraphmemattribut": 7, "cudadevicesetgraphmemattribut": 7, "cudagraphmemfreenodegetparam": 7, "cudagraphmemattributetyp": 7, "cudagraphmemattrusedmemcurr": 7, "cudagraphmemattrusedmemhigh": 7, "cudagraphmemattrreservedmemcurr": 7, "cudagraphmemattrreservedmemhigh": 7, "pgraphclon": 7, "originalnod": 7, "clonedgraph": 7, "pclonednod": 7, "pnode": 7, "cudagraphnodegettyp": 7, "ptype": 7, "cudagraphnodetyp": 7, "cudagraphnodegetdepend": 7, "cudagraphnodegetdependentnod": 7, "pnumrootnod": 7, "prootnod": 7, "cudagraphadddepend": 7, "cudagraphremovedepend": 7, "cudagraphgetedges_v2": 7, "losst": 7, "pnumdepend": 7, "cudagraphnodegetdependencies_v2": 7, "pnumdependentnod": 7, "pdependentnod": 7, "cudagraphnodegetdependentnodes_v2": 7, "pfrom": 7, "pto": 7, "cudagraphadddependencies_v2": 7, "cudagraphremovedependencies_v2": 7, "pgraphexec": 7, "cudagraphinstantiateflagdevicelaunch": 7, "cudagraphinstantiateflagusenodeprior": 7, "cudagraphexecdestroi": 7, "cudagraphinstantiatewithflag": 7, "cudagraphupload": 7, "cudagraphinstantiatewithparam": 7, "cudagraphinstantiateparam": 7, "cudagraphinstantiateflagupload": 7, "uploadstream": 7, "errnode_out": 7, "cudagraphinstantiateerror": 7, "cudagraphinstantiateinvalidstructur": 7, 
"cudagraphinstantiatenodeoperationnotsupport": 7, "cudagraphinstantiatemultipledevicesnotsupport": 7, "cudagraphinstantiatesuccess": 7, "cudagraphexecgetflag": 7, "cudagraphexeckernelnodesetparam": 7, "cudagraphexecnodesetparam": 7, "cudagraphexecmemcpynodesetparam": 7, "cudagraphexecmemsetnodesetparam": 7, "cudagraphexechostnodesetparam": 7, "cudagraphexecchildgraphnodesetparam": 7, "cudagraphexeceventrecordnodesetev": 7, "cudagraphexeceventwaitnodesetev": 7, "cudagraphexecupd": 7, "cudagraphexecmemcpynodesetparamstosymbol": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudagraphexecmemcpynodesetparamsfromsymbol": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "cudagraphexecmemcpynodesetparams1d": 7, "cudagraphnodeseten": 7, "cudagraphnodegeten": 7, "cudagraphexecupdateerrortopologychang": 7, "cudagraphexecupdateerror": 7, "cudagraphexecupdateerrornodetypechang": 7, "cudagraphexecupdateerrorfunctionchang": 7, "cudagraphexecupdateerrorunsupportedfunctionchang": 7, "cudagraphexecupdateerrorparameterschang": 7, "cudagraphexecupdateerrorattributeschang": 7, "cudagraphexecupdateerrornotsupport": 7, "cudagraphexecupdatesuccess": 7, "cudaerrorgraphexecupdatefailur": 7, "cudagraphexecupdateresultinfo": 7, "cudagraphdebugdotprint": 7, "cudagraphdebugdotflag": 7, "cudauserobjectcr": 7, "cudauserobjectnodestructorsync": 7, "cudauserobject_t": 7, "cudauserobjectretain": 7, "cudauserobjectreleas": 7, "cudagraphretainuserobject": 7, "cudagraphreleaseuserobject": 7, "cudagraphuserobjectmov": 7, "cudagraphnodeparam": 7, "cudagraphaddnode_v2": 7, "cudagraphnodetypememalloc": 7, "cudagraphnodetypememfre": 7, "cudagraphconditionalhandlecr": 7, "cudagraphcondassigndefault": 7, "cudagraphconditionalhandl": 7, "funcptr": 7, "driverstatu": 7, "cudadriverentrypointqueryresult": 7, "cudadriverentrypointsuccess": 7, "cudadriverentrypointsymbolnotfound": 7, "cudadriverentrypointversionnotsuffic": 7, "cudart_vers": 7, "cudaenabledefault": 7, "cudaenableperthreaddefaultstream": 7, "cudaenablelegacystream": 7, "cudagetdriverentrypointbyvers": 7, "custom": 7, "style": 7, "built": [7, 9], "relationship": 7, "perspect": 7, "synonym": 7, "tear": 7, "cudad3d9setdirect3ddevic": 7, "cudad3d10setdirect3ddevic": 7, "cudad3d11setdirect3ddevic": 7, "cudaglsetgldevic": [7, 12], "substanti": 7, "strongli": 7, "cudaerrorincompatibledrivercontext": 7, "travel": 7, "interchang": 7, "cudafunction_t": 7, "cudagetkernel": 7, "entryfuncaddr": 7, "kernelptr": 7, "cudakernel_t": 7, "cudaeglplanedesc_st": 7, "channeldesc": 7, "cudaeglframe_st": 7, "typedefstructcudaeglplanedesc_st": 7, "unsignedintwidth": 7, "unsignedintheight": 7, "unsignedintdepth": 7, "unsignedintpitch": 7, "unsignedintnumchannel": 7, "structcudachannelformatdescchanneldesc": 7, "unsignedintreserv": 7, "planedesc": 7, "cudaeglframetyp": 7, "cudaeglcolorformat": 7, "anon_struct0": 7, "cudamemcpynodeparam": 7, "cudamemsetparamsv2": 7, "cudaaccessproperti": 7, "cudahostnodeparamsv2": 7, "cudaresourcetyp": 7, "anon_union0": 7, "cudaresourceviewformat": 7, "cudamemorytyp": 7, "constsizebyt": 7, "localsizebyt": 7, "numreg": 7, "ptxversion": 7, "binaryvers": 7, "cachemodeca": 7, "maxdynamicsharedsizebyt": 7, "preferredshmemcarveout": 7, "clusterdimmustbeset": 7, "requiredclusterwidth": 7, "requiredclusterheight": 7, "requiredclusterdepth": 7, "nonportableclustersizeallow": 7, "cudamemallocationtypepin": 7, "cudamemallocationtyp": 7, "cudamemhandletypewin32": 7, "tranfer": 7, 
"cudamemallocnodeparamsv2": 7, "cudamemfreenodeparam": 7, "cudauuid_t": 7, "luiddevicenodemask": 7, "oppos": 7, "cudacomputemod": 7, "clusterlaunch": 7, "cudaipceventhandle_st": 7, "cudaipcmemhandle_st": 7, "cudamemfabrichandle_st": 7, "anon_union1": 7, "dim3": 7, "cudakernelnodeparamsv2": 7, "cudaexternalsemaphoresignalnodeparamsv2": 7, "cudaexternalsemaphorewaitnodeparamsv2": 7, "cudaconditionalnodeparam": 7, "cudagraphconditionalnodetyp": 7, "cudachildgraphnodeparam": 7, "cudaeventrecordnodeparam": 7, "cudaeventwaitnodeparam": 7, "cudagraphedgedata_st": 7, "cudagraphkernelnodeportdefault": 7, "cudagraphkernelnodeportprogrammat": 7, "cudagraphkernelnodeportlaunchcomplet": 7, "cudagraphdependencytyp": 7, "cudagraphinstantiateparams_st": 7, "cudagraphinstantiateresult": 7, "cudagraphexecupdateresultinfo_st": 7, "cudagraphexecupdateresult": 7, "cudagraphkernelnodeupd": 7, "cudagraphdevicenode_t": 7, "updatedata": 7, "cudagraphkernelnodefield": 7, "anon_union8": 7, "cudalaunchmemsyncdomainmap_st": 7, "cudalaunchmemsyncdomainremot": 7, "cudalaunchmemsyncdomainmap": 7, "cudalaunchattributememsyncdomainmap": 7, "cudadevattrmemsyncdomaincount": 7, "cudalaunchattributevalu": 7, "cudalaunchattribut": 7, "cudalaunchattributeaccesspolicywindow": 7, "cudalaunchattributecoop": 7, "cudalaunchattributesynchronizationpolici": 7, "cudasynchronizationpolici": 7, "cudalaunchattributeclusterdimens": 7, "anon_struct20": 7, "cudalaunchattributeclusterschedulingpolicyprefer": 7, "cudalaunchattributeprogrammaticstreamseri": 7, "cudalaunchattributeprogrammaticev": 7, "cudalaunchattributeprior": 7, "cudalaunchattributememsyncdomain": 7, "cudalaunchattributelaunchcompletionev": 7, "anon_struct22": 7, "cudalaunchattributedeviceupdatablekernelnod": 7, "anon_struct23": 7, "cudalaunchattributepreferredsharedmemorycarveout": 7, "cudalaunchattribute_st": 7, "cudalaunchattributeid": 7, "cudaasyncnotificationinfo": 7, "cudaasyncnotificationtyp": 7, "cudaeglframetypearrai": 7, "cudaeglresourcelocationsysmem": 7, "cudaeglcolorformatyuv420planar": 7, "cudaeglcolorformatyuv420semiplanar": 7, "cudaeglcolorformatyuv422planar": 7, "cudaeglcolorformatyuv422semiplanar": 7, "cudaeglcolorformatargb": 7, "cudaeglcolorformatrgba": 7, "cudaeglcolorformatl": 7, "cudaeglcolorformatr": 7, "cudaeglcolorformatyuv444planar": 7, "cudaeglcolorformatyuv444semiplanar": 7, "cudaeglcolorformatyuyv422": 7, "cudaeglcolorformatuyvy422": 7, "cudaeglcolorformatabgr": 7, "cudaeglcolorformatbgra": 7, "cudaeglcolorformata": 7, "cudaeglcolorformatrg": 7, "cudaeglcolorformatayuv": 7, "cudaeglcolorformatyvu444semiplanar": 7, "cudaeglcolorformatyvu422semiplanar": 7, "cudaeglcolorformatyvu420semiplanar": 7, "cudaeglcolorformaty10v10u10_444semiplanar": 7, "cudaeglcolorformaty10v10u10_420semiplanar": 7, "cudaeglcolorformaty12v12u12_444semiplanar": 7, "cudaeglcolorformaty12v12u12_420semiplanar": 7, "cudaeglcolorformatvyuy_": 7, "cudaeglcolorformatuyvy_": 7, "cudaeglcolorformatyuyv_": 7, "cudaeglcolorformatyvyu_": 7, "cudaeglcolorformatyuva_": 7, "cudaeglcolorformatayuv_": 7, "cudaeglcolorformatyuv444planar_": 7, "cudaeglcolorformatyuv422planar_": 7, "cudaeglcolorformatyuv420planar_": 7, "cudaeglcolorformatyuv444semiplanar_": 7, "cudaeglcolorformatyuv422semiplanar_": 7, "cudaeglcolorformatyuv420semiplanar_": 7, "cudaeglcolorformatyvu444planar_": 7, "cudaeglcolorformatyvu422planar_": 7, "cudaeglcolorformatyvu420planar_": 7, "cudaeglcolorformatyvu444semiplanar_": 7, "cudaeglcolorformatyvu422semiplanar_": 7, "cudaeglcolorformatyvu420semiplanar_": 7, 
"cudaeglcolorformatbayerrggb": 7, "cudaeglcolorformatbayerbggr": 7, "cudaeglcolorformatbayergrbg": 7, "cudaeglcolorformatbayergbrg": 7, "cudaeglcolorformatbayer10rggb": 7, "cudaeglcolorformatbayer10bggr": 7, "cudaeglcolorformatbayer10grbg": 7, "cudaeglcolorformatbayer10gbrg": 7, "cudaeglcolorformatbayer12rggb": 7, "cudaeglcolorformatbayer12bggr": 7, "cudaeglcolorformatbayer12grbg": 7, "cudaeglcolorformatbayer12gbrg": 7, "cudaeglcolorformatbayer14rggb": 7, "cudaeglcolorformatbayer14bggr": 7, "cudaeglcolorformatbayer14grbg": 7, "cudaeglcolorformatbayer14gbrg": 7, "cudaeglcolorformatbayer20rggb": 7, "cudaeglcolorformatbayer20bggr": 7, "cudaeglcolorformatbayer20grbg": 7, "cudaeglcolorformatbayer20gbrg": 7, "cudaeglcolorformatyvu444planar": 7, "cudaeglcolorformatyvu422planar": 7, "cudaeglcolorformatyvu420planar": 7, "cudaeglcolorformatbayerisprggb": 7, "cudaeglcolorformatbayerispbggr": 7, "cudaeglcolorformatbayerispgrbg": 7, "cudaeglcolorformatbayerispgbrg": 7, "cudaeglcolorformatbayerbccr": 7, "cudaeglcolorformatbayerrccb": 7, "cudaeglcolorformatbayercrbc": 7, "cudaeglcolorformatbayercbrc": 7, "cudaeglcolorformatbayer10cccc": 7, "cudaeglcolorformatbayer12bccr": 7, "cudaeglcolorformatbayer12rccb": 7, "cudaeglcolorformatbayer12crbc": 7, "cudaeglcolorformatbayer12cbrc": 7, "cudaeglcolorformatbayer12cccc": 7, "cudaeglcolorformati": 7, "cudaeglcolorformatyuv420semiplanar_2020": 7, "cudaeglcolorformatyvu420semiplanar_2020": 7, "cudaeglcolorformatyuv420planar_2020": 7, "cudaeglcolorformatyvu420planar_2020": 7, "cudaeglcolorformatyuv420semiplanar_709": 7, "cudaeglcolorformatyvu420semiplanar_709": 7, "cudaeglcolorformatyuv420planar_709": 7, "cudaeglcolorformatyvu420planar_709": 7, "cudaeglcolorformaty10v10u10_420semiplanar_709": 7, "cudaeglcolorformaty10v10u10_420semiplanar_2020": 7, "cudaeglcolorformaty10v10u10_422semiplanar_2020": 7, "cudaeglcolorformaty10v10u10_422semiplanar": 7, "cudaeglcolorformaty10v10u10_422semiplanar_709": 7, "cudaeglcolorformaty_": 7, "cudaeglcolorformaty_709_": 7, "cudaeglcolorformaty10_": 7, "cudaeglcolorformaty10_709_": 7, "cudaeglcolorformaty12_": 7, "cudaeglcolorformaty12_709_": 7, "cudaeglcolorformatyuva": 7, "cudaeglcolorformatyvyu": 7, "cudaeglcolorformatvyui": 7, "cudaeglcolorformaty10v10u10_420semiplanar_": 7, "cudaeglcolorformaty10v10u10_420semiplanar_709_": 7, "cudaeglcolorformaty10v10u10_444semiplanar_": 7, "cudaeglcolorformaty10v10u10_444semiplanar_709_": 7, "cudaeglcolorformaty12v12u12_420semiplanar_": 7, "cudaeglcolorformaty12v12u12_420semiplanar_709_": 7, "cudaeglcolorformaty12v12u12_444semiplanar_": 7, "cudaeglcolorformaty12v12u12_444semiplanar_709_": 7, "cudaerrorprofilerdis": 7, "cudaerrorprofilernotiniti": 7, "cudaerrorprofileralreadystart": 7, "cudaerrorprofileralreadystop": 7, "cudaerrorinvalidhostpoint": 7, "cudagettexturealignmentoffset": [7, 11, 12], "cudaerroraddressofconst": 7, "forbidden": 7, "cudaerrortexturefetchfail": 7, "cudaerrortexturenotbound": 7, "cudaerrorsynchronizationerror": 7, "cudaerrormixeddeviceexecut": 7, "cudaerrornotyetimpl": 7, "cudaerrormemoryvaluetoolarg": 7, "cudaerrorstublibrari": 7, "cudaerrorcallrequiresnewerdriv": 7, "cudaerrorinvalidsurfac": 7, "cudaerrorduplicatevariablenam": 7, "cudaerrorduplicatetexturenam": 7, "cudaerrorduplicatesurfacenam": 7, "cudaconfigurecal": 7, "cudaerrorpriorlaunchfailur": 7, "cudaerrorlaunchmaxdepthexceed": 7, "cudaerrorlaunchfilescopedtex": 7, "cudaerrorlaunchfilescopedsurf": 7, "proce": 7, "proper": 7, "cudaerrordevicenotlicens": 7, "cudaerrorsoftwarevaliditynotestablish": 7, "self": 7, 
"startup": 7, "cudaerrorinvalidkernelimag": 7, "cudaerrorarrayismap": 7, "cudaerroralreadymap": 7, "cudaerroralreadyacquir": 7, "cudaerrornotmap": 7, "cudaerrornotmappedasarrai": 7, "cudaerrornotmappedaspoint": 7, "cudaerroreccuncorrect": 7, "cudaerrordevicealreadyinus": 7, "cudaerrorpeeraccessunsupport": 7, "cudaerrornvlinkuncorrect": 7, "cudaerrorunsupportedexecaffin": 7, "cudaerrorunsupporteddevsidesync": 7, "cudaerrorinvalidsourc": 7, "cudaerrorfilenotfound": 7, "cudaerrorsharedobjectsymbolnotfound": 7, "cudaerrorsharedobjectinitfail": 7, "cudaerrorsymbolnotfound": 7, "cudaerrorillegaladdress": 7, "cudaerrorlaunchincompatibletextur": 7, "cudaerrorcontextisdestroi": 7, "cudaerrorassert": 7, "cudaenablepeeraccess": 7, "cudaerrorhardwarestackerror": 7, "cudaerrorillegalinstruct": 7, "cudaerrormisalignedaddress": 7, "cudaerrorinvalidaddressspac": 7, "cudaerrorinvalidpc": 7, "cudaerrorcooperativelaunchtoolarg": 7, "cudaerrorsystemnotreadi": 7, "cudaerrorsystemdrivermismatch": 7, "cudaerrorcompatnotsupportedondevic": 7, "cudaerrormpsconnectionfail": 7, "cudaerrormpsrpcfailur": 7, "cudaerrormpsservernotreadi": 7, "cudaerrormpsmaxclientsreach": 7, "cudaerrormpsmaxconnectionsreach": 7, "cudaerrormpsclienttermin": 7, "cudaerrorcdpnotsupport": 7, "cudaerrorcdpversionmismatch": 7, "cudaerrorstreamcaptureunsupport": 7, "cudaerrorstreamcaptureinvalid": 7, "cudaerrorstreamcapturemerg": 7, "cudaerrorstreamcaptureunmatch": 7, "cudaerrorstreamcaptureisol": 7, "cudaerrorcapturedev": 7, "cudaerrorexternaldevic": 7, "cudaerrorinvalidclusters": 7, "cudaerrorfunctionnotload": 7, "cudaerrorinvalidresourcetyp": 7, "cudaerrorinvalidresourceconfigur": 7, "cudaerrorapifailurebas": 7, "10000": 7, "cudachannelformatkindnon": 7, "cudachannelformatkindunsignednormalized8x1": 7, "cudachannelformatkindunsignednormalized8x2": 7, "cudachannelformatkindunsignednormalized8x4": 7, "cudachannelformatkindunsignednormalized16x1": 7, "cudachannelformatkindunsignednormalized16x2": 7, "cudachannelformatkindunsignednormalized16x4": 7, "cudachannelformatkindsignednormalized8x1": 7, "cudachannelformatkindsignednormalized8x2": 7, "cudachannelformatkindsignednormalized8x4": 7, "cudachannelformatkindsignednormalized16x1": 7, "cudachannelformatkindsignednormalized16x2": 7, "cudachannelformatkindsignednormalized16x4": 7, "cudachannelformatkindunsignedblockcompressed1": 7, "cudachannelformatkindunsignedblockcompressed1srgb": 7, "cudachannelformatkindunsignedblockcompressed2": 7, "cudachannelformatkindunsignedblockcompressed2srgb": 7, "cudachannelformatkindunsignedblockcompressed3": 7, "cudachannelformatkindunsignedblockcompressed3srgb": 7, "cudachannelformatkindunsignedblockcompressed4": 7, "cudachannelformatkindsignedblockcompressed4": 7, "cudachannelformatkindunsignedblockcompressed5": 7, "cudachannelformatkindsignedblockcompressed5": 7, "cudachannelformatkindunsignedblockcompressed6h": 7, "cudachannelformatkindsignedblockcompressed6h": 7, "cudachannelformatkindunsignedblockcompressed7": 7, "cudachannelformatkindunsignedblockcompressed7srgb": 7, "cudaaccesspropertynorm": 7, "cudaaccesspropertystream": 7, "cudaaccesspropertypersist": 7, "cudasyncpolicyauto": 7, "cudasyncpolicyspin": 7, "cudasyncpolicyyield": 7, "cudasyncpolicyblockingsync": 7, "cudaclusterschedulingpolicydefault": 7, "cudaclusterschedulingpolicyspread": 7, "cudaclusterschedulingpolicyloadbalanc": 7, "cudastreamupdatecapturedependenciesflag": 7, "cudauserobjectflag": 7, "cudauserobjectretainflag": 7, "cudagraphicsregisterflag": 7, "cudagraphicsmapflag": 7, 
"cudagraphicscubefacepositivex": 7, "cudagraphicscubefacenegativex": 7, "cudagraphicscubefacepositivei": 7, "cudagraphicscubefacenegativei": 7, "cudagraphicscubefacepositivez": 7, "cudagraphicscubefacenegativez": 7, "cudaresviewformatnon": 7, "cudaresviewformatunsignedchar1": 7, "cudaresviewformatunsignedchar2": 7, "cudaresviewformatunsignedchar4": 7, "cudaresviewformatsignedchar1": 7, "cudaresviewformatsignedchar2": 7, "cudaresviewformatsignedchar4": 7, "cudaresviewformatunsignedshort1": 7, "cudaresviewformatunsignedshort2": 7, "cudaresviewformatunsignedshort4": 7, "cudaresviewformatsignedshort1": 7, "cudaresviewformatsignedshort2": 7, "cudaresviewformatsignedshort4": 7, "cudaresviewformatunsignedint1": 7, "cudaresviewformatunsignedint2": 7, "cudaresviewformatunsignedint4": 7, "cudaresviewformatsignedint1": 7, "cudaresviewformatsignedint2": 7, "cudaresviewformatsignedint4": 7, "cudaresviewformathalf1": 7, "cudaresviewformathalf2": 7, "cudaresviewformathalf4": 7, "cudaresviewformatfloat1": 7, "cudaresviewformatfloat2": 7, "cudaresviewformatfloat4": 7, "cudaresviewformatunsignedblockcompressed1": 7, "cudaresviewformatunsignedblockcompressed2": 7, "cudaresviewformatunsignedblockcompressed3": 7, "cudaresviewformatunsignedblockcompressed4": 7, "cudaresviewformatsignedblockcompressed4": 7, "cudaresviewformatunsignedblockcompressed5": 7, "cudaresviewformatsignedblockcompressed5": 7, "cudaresviewformatunsignedblockcompressed6h": 7, "cudaresviewformatsignedblockcompressed6h": 7, "cudaresviewformatunsignedblockcompressed7": 7, "cudafuncattributeclusterdimmustbeset": 7, "cudafuncattributemax": 7, "cudasharedmemconfig": 7, "cudasharedmembanksizedefault": 7, "cudasharedmembanksizefourbyt": 7, "cudasharedmembanksizeeightbyt": 7, "cudasharedcarveout": 7, "cudasharedmemcarveoutdefault": 7, "cudasharedmemcarveoutmaxshar": 7, "cudasharedmemcarveoutmaxl1": 7, "cudacomputemodeexclus": 7, "occassion": 7, "cudamemadviseunsetreadmostli": 7, "cudaflushgpudirectrdmawritesoptionhost": 7, "cudaflushgpudirectrdmawritesoptionmemop": 7, "cudagpudirectrdmawritesorderingnon": 7, "cudaflushgpudirectrdmawrit": 7, "cudagpudirectrdmawritesorderingown": 7, "cudagpudirectrdmawritesorderingalldevic": 7, "cudaflushgpudirectrdmawritestoown": 7, "cudaflushgpudirectrdmawritestoalldevic": 7, "cudaflushgpudirectrdmawritestargetcurrentdevic": 7, "cudadevattrsurfacealign": 7, "cudadevattrasyncenginecount": 7, "cudadevattrmaxtexture2dgatherwidth": 7, "cudadevattrmaxtexture2dgatherheight": 7, "cudadevattrpcidomainid": 7, "cudadevattrreserved92": 7, "cudadevattrreserved93": 7, "cudadevattrreserved94": 7, "chip": 7, "cudadevattrtimelinesemaphoreinteropsupport": 7, "cudadevattrmaxtimelinesemaphoreinteropsupport": 7, "cudadevattrclusterlaunch": 7, "cudadevattrreserved122": 7, "cudadevattrreserved123": 7, "cudadevattrreserved124": 7, "cudadevattrreserved127": 7, "cudadevattrreserved128": 7, "cudadevattrreserved129": 7, "cudadevattrreserved132": 7, "cudadevattrmpsen": 7, "cudadevattrhostnumaid": 7, "cudadevattrd3d12cigsupport": 7, "cudadevattrmax": 7, "cudamemaccessflagsprotnon": 7, "cudamemaccessflagsprotread": 7, "cudamemaccessflagsprotreadwrit": 7, "cudamemallocationtypeinvalid": 7, "cudamemallocationtypemax": 7, "cudamemhandletypeposixfiledescriptor": 7, "cudamemhandletypewin32kmt": 7, "cudamemfabrichandle_t": 7, "cudacgscop": 7, "cudacgscopeinvalid": 7, "cudacgscopegrid": 7, "grid_group": 7, "cudacgscopemultigrid": 7, "multi_grid_group": 7, "cudagraphconditionalhandleflag": 7, "cudagraphcondtypeif": 7, "cudagraphcondtypewhil": 7, 
"cudagraphnodetypekernel": 7, "cudagraphnodetypememcpi": 7, "cudagraphnodetypememset": 7, "cudagraphnodetypehost": 7, "cudagraphnodetypegraph": 7, "cudagraphnodetypeempti": 7, "cudagraphnodetypewaitev": 7, "cudagraphnodetypeeventrecord": 7, "cudagraphnodetypeextsemaphoresign": 7, "cudagraphnodetypeextsemaphorewait": 7, "cudagraphnodetypecondit": 7, "cudagraphnodetypecount": 7, "cudagraphdependencytypedefault": 7, "cudagraphdependencytypeprogrammat": 7, "cudagraphkernelnodefieldinvalid": 7, "cudagraphkernelnodefieldgriddim": 7, "cudagraphkernelnodefieldparam": 7, "cudagraphkernelnodefielden": 7, "cudagetdriverentrypointflag": 7, "cudaapigetdriverentrypoint": 7, "wasn": 7, "great": 7, "cudagraphdebugdotflagsverbos": 7, "cudagraphdebugdotflagskernelnodeparam": 7, "cudagraphdebugdotflagsmemcpynodeparam": 7, "cudagraphdebugdotflagsmemsetnodeparam": 7, "cudagraphdebugdotflagshostnodeparam": 7, "cudagraphdebugdotflagseventnodeparam": 7, "cudagraphdebugdotflagsextsemassignalnodeparam": 7, "cudagraphdebugdotflagsextsemaswaitnodeparam": 7, "cudagraphdebugdotflagskernelnodeattribut": 7, "cudagraphdebugdotflagshandl": 7, "cudagraphdebugdotflagsconditionalnodeparam": 7, "cudagraphinstantiateflag": 7, "cudalaunchmemsyncdomaindefault": 7, "cudalaunchattributeignor": 7, "cudadevicenumaconfignon": 7, "cudadevicenumaconfignumanod": 7, "cudaasyncnotificationtypeoverbudget": 7, "cudasurfaceboundarymod": 7, "cudaboundarymodezero": 7, "cudaboundarymodeclamp": 7, "cudaboundarymodetrap": 7, "trap": 7, "cudasurfaceformatmod": 7, "cudaformatmodeforc": 7, "cudaformatmodeauto": 7, "auto": 7, "cudareadmodeelementtyp": 7, "cudaasyncnotificationinfo_t": 7, "cuda_egl_max_plan": 7, "cudapeeraccessdefault": 7, "cudadeviceschedulemask": 7, "cudadevicemask": 7, "tell": 7, "cudacooperativelaunchmultidevicenopresync": 7, "cudacooperativelaunchmultidevicenopostsync": 7, "cuda_ipc_handle_s": 7, "cudastreamattributeaccesspolicywindow": 7, "cudastreamattributesynchronizationpolici": 7, "cudastreamattributememsyncdomainmap": 7, "cudastreamattributememsyncdomain": 7, "cudastreamattributeprior": 7, "cudakernelnodeattributeaccesspolicywindow": 7, "cudakernelnodeattributecoop": 7, "cudakernelnodeattributeprior": 7, "cudakernelnodeattributeclusterdimens": 7, "cudakernelnodeattributeclusterschedulingpolicyprefer": 7, "cudakernelnodeattributememsyncdomainmap": 7, "cudakernelnodeattributememsyncdomain": 7, "cudakernelnodeattributepreferredsharedmemorycarveout": 7, "cudakernelnodeattributedeviceupdatablekernelnod": 7, "cudasurfacetype1d": 7, "cudasurfacetype2d": 7, "cudasurfacetype3d": 7, "cudasurfacetypecubemap": 7, "cudasurfacetype1dlay": 7, "241": 7, "cudasurfacetype2dlay": 7, "242": 7, "cudasurfacetypecubemaplay": 7, "252": 7, "cudatexturetype1d": 7, "cudatexturetype2d": 7, "cudatexturetype3d": 7, "cudatexturetypecubemap": 7, "cudatexturetype1dlay": 7, "cudatexturetype2dlay": 7, "cudatexturetypecubemaplay": 7, "wrapper": [8, 11, 12], "simplifi": 8, "popular": 8, "scienc": [8, 9], "analyt": [8, 9], "deep": [8, 9], "ecosystem": [8, 9], "coverag": [8, 9], "uniform": 8, "hpc": 8, "ai": 8, "numba": 8, "anaconda": 8, "easi": [8, 9], "increasingli": 8, "sophist": 8, "jargon": 8, "world": 8, "rapid": 8, "cupi": 8, "numpi": [8, 9], "scipi": 8, "network": 8, "footprint": 8, "lighter": 8, "mainten": 8, "wheel": 8, "benefit": 8, "foundat": [8, 9], "unison": 8, "compos": 8, "solv": 8, "matthew": 9, "nice": 9, "plai": 9, "role": 9, "massiv": 9, "deliv": 9, "simplif": 9, "importantli": 9, "understand": 9, "isa": 9, "saxpi": 9, "thing": 9, "np": 9, 
"fundament": 9, "practic": 9, "_cudageterrorenum": 9, "isinst": 9, "elif": 9, "runtimeerror": 9, "checkcudaerror": 9, "len": 9, "tripl": 9, "quot": 9, "easier": 9, "introduct": 9, "__global__": 9, "tid": 9, "blockidx": 9, "threadidx": 9, "ahead": 9, "rememb": 9, "arch_arg": 9, "compute_": 9, "str": 9, "cu": 9, "ptxsize": 9, "analog": 9, "uncommon": 9, "ctype": 9, "prepar": 9, "num_thread": 9, "num_block": 9, "dtype": 9, "float32": 9, "uint32": 9, "buffers": 9, "items": 9, "hx": 9, "random": 9, "rand": 9, "astyp": 9, "hy": 9, "hout": 9, "transform": 9, "movement": 9, "concept": 9, "xx": 9, "dxclass": 9, "dyclass": 9, "doutclass": 9, "prep": 9, "grab": 9, "intuit": 9, "dx": 9, "uint64": 9, "dy": 9, "dout": 9, "halt": 9, "hz": 9, "allclos": 9, "valueerror": 9, "toler": 9, "verif": 9, "nearli": 9, "nsight": 9, "nsy": 9, "stat": 9, "352\u00b5": 9, "1076m": 9, "1080m": 9, "insight": 9, "fig": 9, "screenshot": 9, "overal": 9, "hightlight": 10, "august": [11, 29], "2021": [11, 12], "ea": 11, "cudagettexturerefer": [11, 12], "cudagetsurfacerefer": [11, 12], "cudabindtextur": [11, 12], "cudabindtexture2d": [11, 12], "cudabindtexturetoarrai": [11, 12], "cudabindtexturetomipmappedarrai": [11, 12], "cudaunbindtextur": [11, 12], "cudabindsurfacetoarrai": [11, 12], "cudagetfuncbysymbol": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "octob": [12, 17, 20, 21, 26, 30], "ga": 12, "cudaprofileriniti": 12, "gl": [12, 13], "cuwglgetdevic": 12, "cuglctxcreat": 12, "cuglinit": 12, "cuglregisterbufferobject": 12, "cuglmapbufferobject": 12, "cuglunmapbufferobject": 12, "cuglunregisterbufferobject": 12, "cuglsetbufferobjectmapflag": 12, "cuglmapbufferobjectasync": 12, "cuglunmapbufferobjectasync": 12, "cudawglgetdevic": 12, "cudaglregisterbufferobject": 12, "cudaglmapbufferobject": 12, "cudaglunmapbufferobject": 12, "cudaglunregisterbufferobject": 12, "cudaglsetbufferobjectmapflag": 12, "cudaglmapbufferobjectasync": 12, "cudaglunmapbufferobjectasync": 12, "janurai": 13, "2022": [13, 14, 15, 16, 17, 18, 22], "relax": [13, 16], "ptd": 13, "cuda_python_cuda_per_thread_default_stream": 13, "explan": 13, "underlin": 13, "waitparam": 13, "value64": 13, "march": [14, 27], "decomposit": 14, "wsl": 14, "june": [16, 24], "propag": 16, "ctk": [16, 17, 18], "mobil": [16, 17], "workaround": [16, 17], "gitlab": 16, "cudaprofil": 16, "cuda_runtim": 16, "pars": 17, "variat": 17, "novemb": 18, "2023": [19, 20, 23, 24, 26], "libcuda": 19, "modern": 20, "2024": [21, 25, 27, 28, 29, 30], "libcudart": 21, "preper": [21, 30], "metapackag": [21, 30], "decemb": 22, "rebas": [22, 23, 24, 26, 27, 28, 29], "mr28": 22, "mr35": 22, "februari": 23, "drop": [23, 26], "nogil": 24, "pypars": 24, "januari": 25, "hard": 26, "ppc64": 26, "cudafuncgetnam": [26, 27, 28, 29, 30], "cudafuncgetparaminfo": [27, 28, 29, 30], "pywin32": 29}, "objects": {"cuda.bindings.driver": [[5, 0, 1, "", "CUCoredumpGenerationFlags"], [5, 1, 1, "", "CUDA_ARRAY3D_2DARRAY"], [5, 1, 1, "", "CUDA_ARRAY3D_COLOR_ATTACHMENT"], [5, 1, 1, "", "CUDA_ARRAY3D_CUBEMAP"], [5, 1, 1, "", "CUDA_ARRAY3D_DEFERRED_MAPPING"], [5, 1, 1, "", "CUDA_ARRAY3D_DEPTH_TEXTURE"], [5, 0, 1, "", "CUDA_ARRAY3D_DESCRIPTOR"], [5, 0, 1, "", "CUDA_ARRAY3D_DESCRIPTOR_st"], [5, 0, 1, "", "CUDA_ARRAY3D_DESCRIPTOR_v2"], [5, 1, 1, "", "CUDA_ARRAY3D_LAYERED"], [5, 1, 1, "", "CUDA_ARRAY3D_SPARSE"], [5, 1, 1, "", "CUDA_ARRAY3D_SURFACE_LDST"], [5, 1, 1, "", "CUDA_ARRAY3D_TEXTURE_GATHER"], [5, 1, 1, "", "CUDA_ARRAY3D_VIDEO_ENCODE_DECODE"], [5, 0, 1, "", "CUDA_ARRAY_DESCRIPTOR"], [5, 
0, 1, "", "CUDA_ARRAY_DESCRIPTOR_st"], [5, 0, 1, "", "CUDA_ARRAY_DESCRIPTOR_v2"], [5, 0, 1, "", "CUDA_ARRAY_MEMORY_REQUIREMENTS"], [5, 0, 1, "", "CUDA_ARRAY_MEMORY_REQUIREMENTS_st"], [5, 0, 1, "", "CUDA_ARRAY_MEMORY_REQUIREMENTS_v1"], [5, 0, 1, "", "CUDA_ARRAY_SPARSE_PROPERTIES"], [5, 0, 1, "", "CUDA_ARRAY_SPARSE_PROPERTIES_st"], [5, 0, 1, "", "CUDA_ARRAY_SPARSE_PROPERTIES_v1"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_CHILD_GRAPH_NODE_PARAMS"], [5, 0, 1, "", "CUDA_CHILD_GRAPH_NODE_PARAMS_st"], [5, 0, 1, "id0", "CUDA_CONDITIONAL_NODE_PARAMS"], [5, 1, 1, "", "CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC"], [5, 1, 1, "", "CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC"], [5, 1, 1, "", "CUDA_EGL_INFINITE_TIMEOUT"], [5, 0, 1, "", "CUDA_EVENT_RECORD_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EVENT_RECORD_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EVENT_WAIT_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EVENT_WAIT_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_BUFFER_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1"], [5, 1, 1, "", "CUDA_EXTERNAL_MEMORY_DEDICATED"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_HANDLE_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1"], [5, 1, 1, "", "CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st"], [5, 0, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1"], [5, 1, 1, "", "CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_GRAPH_INSTANTIATE_PARAMS"], [5, 0, 1, "", "CUDA_GRAPH_INSTANTIATE_PARAMS_st"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_HOST_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v3"], [5, 0, 1, "", "CUDA_KERNEL_NODE_PARAMS_v3_st"], [5, 0, 1, "", 
"CUDA_LAUNCH_PARAMS"], [5, 0, 1, "", "CUDA_LAUNCH_PARAMS_st"], [5, 0, 1, "", "CUDA_LAUNCH_PARAMS_v1"], [5, 0, 1, "", "CUDA_MEMCPY2D"], [5, 0, 1, "", "CUDA_MEMCPY2D_st"], [5, 0, 1, "", "CUDA_MEMCPY2D_v2"], [5, 0, 1, "", "CUDA_MEMCPY3D"], [5, 0, 1, "", "CUDA_MEMCPY3D_PEER"], [5, 0, 1, "", "CUDA_MEMCPY3D_PEER_st"], [5, 0, 1, "", "CUDA_MEMCPY3D_PEER_v1"], [5, 0, 1, "", "CUDA_MEMCPY3D_st"], [5, 0, 1, "", "CUDA_MEMCPY3D_v2"], [5, 0, 1, "", "CUDA_MEMCPY_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEMCPY_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_st"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_MEMSET_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v1"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v1_st"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v2"], [5, 0, 1, "", "CUDA_MEM_ALLOC_NODE_PARAMS_v2_st"], [5, 0, 1, "", "CUDA_MEM_FREE_NODE_PARAMS"], [5, 0, 1, "", "CUDA_MEM_FREE_NODE_PARAMS_st"], [5, 1, 1, "", "CUDA_NVSCISYNC_ATTR_SIGNAL"], [5, 1, 1, "", "CUDA_NVSCISYNC_ATTR_WAIT"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st"], [5, 0, 1, "", "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1"], [5, 0, 1, "", "CUDA_RESOURCE_DESC"], [5, 0, 1, "", "CUDA_RESOURCE_DESC_st"], [5, 0, 1, "", "CUDA_RESOURCE_DESC_v1"], [5, 0, 1, "", "CUDA_RESOURCE_VIEW_DESC"], [5, 0, 1, "", "CUDA_RESOURCE_VIEW_DESC_st"], [5, 0, 1, "", "CUDA_RESOURCE_VIEW_DESC_v1"], [5, 0, 1, "", "CUDA_TEXTURE_DESC"], [5, 0, 1, "", "CUDA_TEXTURE_DESC_st"], [5, 0, 1, "", "CUDA_TEXTURE_DESC_v1"], [5, 1, 1, "", "CUDA_VERSION"], [5, 0, 1, "", "CUGLDeviceList"], [5, 0, 1, "", "CUGPUDirectRDMAWritesOrdering"], [5, 1, 1, "", "CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL"], [5, 1, 1, "", "CU_COMPUTE_ACCELERATED_TARGET_BASE"], [5, 1, 1, "", "CU_DEVICE_CPU"], [5, 1, 1, "", "CU_DEVICE_INVALID"], [5, 1, 1, "", "CU_GRAPH_COND_ASSIGN_DEFAULT"], [5, 1, 1, "", "CU_GRAPH_KERNEL_NODE_PORT_DEFAULT"], [5, 1, 1, "", "CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER"], [5, 1, 1, "", "CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC"], [5, 1, 1, "", "CU_IPC_HANDLE_SIZE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT"], [5, 1, 1, "", "CU_KERNEL_NODE_ATTRIBUTE_PRIORITY"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_POINTER"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_SIZE"], [5, 1, 1, "", "CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT"], [5, 1, 1, "", "CU_LAUNCH_PARAM_END"], [5, 1, 1, "", "CU_LAUNCH_PARAM_END_AS_INT"], [5, 1, 1, "", "CU_MEMHOSTALLOC_DEVICEMAP"], [5, 1, 1, "", "CU_MEMHOSTALLOC_PORTABLE"], [5, 1, 1, "", "CU_MEMHOSTALLOC_WRITECOMBINED"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_DEVICEMAP"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_IOMEMORY"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_PORTABLE"], [5, 1, 1, "", "CU_MEMHOSTREGISTER_READ_ONLY"], [5, 1, 1, "", "CU_MEM_CREATE_USAGE_TILE_POOL"], [5, 1, 1, "", 
"CU_PARAM_TR_DEFAULT"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_PRIORITY"], [5, 1, 1, "", "CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY"], [5, 1, 1, "", "CU_STREAM_LEGACY"], [5, 1, 1, "", "CU_STREAM_PER_THREAD"], [5, 1, 1, "", "CU_TENSOR_MAP_NUM_QWORDS"], [5, 1, 1, "", "CU_TRSA_OVERRIDE_FORMAT"], [5, 1, 1, "", "CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION"], [5, 1, 1, "", "CU_TRSF_NORMALIZED_COORDINATES"], [5, 1, 1, "", "CU_TRSF_READ_AS_INTEGER"], [5, 1, 1, "", "CU_TRSF_SEAMLESS_CUBEMAP"], [5, 1, 1, "", "CU_TRSF_SRGB"], [5, 0, 1, "", "CUaccessPolicyWindow"], [5, 0, 1, "", "CUaccessPolicyWindow_st"], [5, 0, 1, "", "CUaccessPolicyWindow_v1"], [5, 0, 1, "", "CUaccessProperty"], [5, 0, 1, "", "CUaddress_mode"], [5, 0, 1, "", "CUarray"], [5, 0, 1, "", "CUarrayMapInfo"], [5, 0, 1, "", "CUarrayMapInfo_st"], [5, 0, 1, "", "CUarrayMapInfo_v1"], [5, 0, 1, "", "CUarraySparseSubresourceType"], [5, 0, 1, "", "CUarray_cubemap_face"], [5, 0, 1, "", "CUarray_format"], [5, 0, 1, "", "CUasyncCallback"], [5, 0, 1, "", "CUasyncCallbackHandle"], [5, 0, 1, "", "CUasyncNotificationInfo"], [5, 0, 1, "", "CUasyncNotificationInfo_st"], [5, 0, 1, "", "CUasyncNotificationType"], [5, 0, 1, "", "CUcigDataType"], [5, 0, 1, "", "CUclusterSchedulingPolicy"], [5, 0, 1, "", "CUcomputemode"], [5, 0, 1, "", "CUcontext"], [5, 0, 1, "", "CUcoredumpSettings"], [5, 0, 1, "", "CUctxCigParam"], [5, 0, 1, "", "CUctxCigParam_st"], [5, 0, 1, "", "CUctxCreateParams"], [5, 0, 1, "", "CUctxCreateParams_st"], [5, 0, 1, "", "CUctx_flags"], [5, 0, 1, "", "CUdevResource"], [5, 0, 1, "", "CUdevResourceDesc"], [5, 0, 1, "", "CUdevResourceType"], [5, 0, 1, "", "CUdevResource_st"], [5, 0, 1, "id73", "CUdevSmResource"], [5, 0, 1, "", "CUdevSmResourceSplit_flags"], [5, 0, 1, "", "CUdevSmResource_st"], [5, 0, 1, "", "CUdevice"], [5, 0, 1, "", "CUdeviceNumaConfig"], [5, 0, 1, "", "CUdevice_P2PAttribute"], [5, 0, 1, "", "CUdevice_attribute"], [5, 0, 1, "", "CUdevice_v1"], [5, 0, 1, "", "CUdeviceptr"], [5, 0, 1, "", "CUdeviceptr_v2"], [5, 0, 1, "", "CUdevprop"], [5, 0, 1, "", "CUdevprop_st"], [5, 0, 1, "", "CUdevprop_v1"], [5, 0, 1, "", "CUdriverProcAddressQueryResult"], [5, 0, 1, "", "CUdriverProcAddress_flags"], [5, 0, 1, "", "CUeglColorFormat"], [5, 0, 1, "", "CUeglFrame"], [5, 0, 1, "", "CUeglFrameType"], [5, 0, 1, "", "CUeglFrame_st"], [5, 0, 1, "", "CUeglFrame_v1"], [5, 0, 1, "", "CUeglResourceLocationFlags"], [5, 0, 1, "", "CUeglStreamConnection"], [5, 0, 1, "", "CUevent"], [5, 0, 1, "", "CUevent_flags"], [5, 0, 1, "", "CUevent_record_flags"], [5, 0, 1, "", "CUevent_sched_flags"], [5, 0, 1, "", "CUevent_wait_flags"], [5, 0, 1, "", "CUexecAffinityParam"], [5, 0, 1, "", "CUexecAffinityParam_st"], [5, 0, 1, "", "CUexecAffinityParam_v1"], [5, 0, 1, "", "CUexecAffinitySmCount"], [5, 0, 1, "", "CUexecAffinitySmCount_st"], [5, 0, 1, "", "CUexecAffinitySmCount_v1"], [5, 0, 1, "", "CUexecAffinityType"], [5, 0, 1, "", "CUexternalMemory"], [5, 0, 1, "", "CUexternalMemoryHandleType"], [5, 0, 1, "", "CUexternalSemaphore"], [5, 0, 1, "", "CUexternalSemaphoreHandleType"], [5, 0, 1, "", "CUfilter_mode"], [5, 0, 1, "", "CUflushGPUDirectRDMAWritesOptions"], [5, 0, 1, "", "CUflushGPUDirectRDMAWritesScope"], [5, 0, 1, "", "CUflushGPUDirectRDMAWritesTarget"], [5, 0, 1, "", "CUfunc_cache"], [5, 0, 1, "", "CUfunction"], [5, 0, 1, "", "CUfunctionLoadingState"], [5, 0, 1, "", "CUfunction_attribute"], [5, 0, 1, "", 
"CUgraph"], [5, 0, 1, "", "CUgraphConditionalHandle"], [5, 0, 1, "", "CUgraphConditionalNodeType"], [5, 0, 1, "", "CUgraphDebugDot_flags"], [5, 0, 1, "", "CUgraphDependencyType"], [5, 0, 1, "", "CUgraphDeviceNode"], [5, 0, 1, "", "CUgraphEdgeData"], [5, 0, 1, "", "CUgraphEdgeData_st"], [5, 0, 1, "", "CUgraphExec"], [5, 0, 1, "", "CUgraphExecUpdateResult"], [5, 0, 1, "", "CUgraphExecUpdateResultInfo"], [5, 0, 1, "", "CUgraphExecUpdateResultInfo_st"], [5, 0, 1, "", "CUgraphExecUpdateResultInfo_v1"], [5, 0, 1, "", "CUgraphInstantiateResult"], [5, 0, 1, "", "CUgraphInstantiate_flags"], [5, 0, 1, "", "CUgraphMem_attribute"], [5, 0, 1, "", "CUgraphNode"], [5, 0, 1, "", "CUgraphNodeParams"], [5, 0, 1, "", "CUgraphNodeParams_st"], [5, 0, 1, "", "CUgraphNodeType"], [5, 0, 1, "", "CUgraphicsMapResourceFlags"], [5, 0, 1, "", "CUgraphicsRegisterFlags"], [5, 0, 1, "", "CUgraphicsResource"], [5, 0, 1, "", "CUgreenCtx"], [5, 0, 1, "", "CUgreenCtxCreate_flags"], [5, 0, 1, "", "CUhostFn"], [5, 0, 1, "", "CUipcEventHandle"], [5, 0, 1, "", "CUipcEventHandle_st"], [5, 0, 1, "", "CUipcEventHandle_v1"], [5, 0, 1, "", "CUipcMemHandle"], [5, 0, 1, "", "CUipcMemHandle_st"], [5, 0, 1, "", "CUipcMemHandle_v1"], [5, 0, 1, "", "CUipcMem_flags"], [5, 0, 1, "", "CUjitInputType"], [5, 0, 1, "", "CUjit_cacheMode"], [5, 0, 1, "", "CUjit_fallback"], [5, 0, 1, "", "CUjit_option"], [5, 0, 1, "", "CUjit_target"], [5, 0, 1, "", "CUkernel"], [5, 0, 1, "", "CUkernelNodeAttrID"], [5, 0, 1, "", "CUkernelNodeAttrValue"], [5, 0, 1, "", "CUkernelNodeAttrValue_v1"], [5, 0, 1, "", "CUlaunchAttribute"], [5, 0, 1, "", "CUlaunchAttributeID"], [5, 0, 1, "", "CUlaunchAttributeValue"], [5, 0, 1, "", "CUlaunchAttributeValue_union"], [5, 0, 1, "", "CUlaunchAttribute_st"], [5, 0, 1, "", "CUlaunchConfig"], [5, 0, 1, "", "CUlaunchConfig_st"], [5, 0, 1, "", "CUlaunchMemSyncDomain"], [5, 0, 1, "", "CUlaunchMemSyncDomainMap"], [5, 0, 1, "", "CUlaunchMemSyncDomainMap_st"], [5, 0, 1, "", "CUlibrary"], [5, 0, 1, "", "CUlibraryHostUniversalFunctionAndDataTable"], [5, 0, 1, "", "CUlibraryHostUniversalFunctionAndDataTable_st"], [5, 0, 1, "", "CUlibraryOption"], [5, 0, 1, "", "CUlimit"], [5, 0, 1, "", "CUlinkState"], [5, 0, 1, "", "CUmemAccessDesc"], [5, 0, 1, "", "CUmemAccessDesc_st"], [5, 0, 1, "", "CUmemAccessDesc_v1"], [5, 0, 1, "", "CUmemAccess_flags"], [5, 0, 1, "", "CUmemAllocationCompType"], [5, 0, 1, "", "CUmemAllocationGranularity_flags"], [5, 0, 1, "", "CUmemAllocationHandleType"], [5, 0, 1, "", "CUmemAllocationProp"], [5, 0, 1, "", "CUmemAllocationProp_st"], [5, 0, 1, "", "CUmemAllocationProp_v1"], [5, 0, 1, "", "CUmemAllocationType"], [5, 0, 1, "", "CUmemAttach_flags"], [5, 0, 1, "", "CUmemFabricHandle"], [5, 0, 1, "", "CUmemFabricHandle_st"], [5, 0, 1, "", "CUmemFabricHandle_v1"], [5, 0, 1, "", "CUmemGenericAllocationHandle"], [5, 0, 1, "", "CUmemGenericAllocationHandle_v1"], [5, 0, 1, "", "CUmemHandleType"], [5, 0, 1, "", "CUmemLocation"], [5, 0, 1, "", "CUmemLocationType"], [5, 0, 1, "", "CUmemLocation_st"], [5, 0, 1, "", "CUmemLocation_v1"], [5, 0, 1, "", "CUmemOperationType"], [5, 0, 1, "", "CUmemPoolProps"], [5, 0, 1, "", "CUmemPoolProps_st"], [5, 0, 1, "", "CUmemPoolProps_v1"], [5, 0, 1, "", "CUmemPoolPtrExportData"], [5, 0, 1, "", "CUmemPoolPtrExportData_st"], [5, 0, 1, "", "CUmemPoolPtrExportData_v1"], [5, 0, 1, "", "CUmemPool_attribute"], [5, 0, 1, "", "CUmemRangeHandleType"], [5, 0, 1, "", "CUmem_advise"], [5, 0, 1, "", "CUmem_range_attribute"], [5, 0, 1, "", "CUmemoryPool"], [5, 0, 1, "", "CUmemorytype"], [5, 0, 1, "", 
"CUmipmappedArray"], [5, 0, 1, "", "CUmodule"], [5, 0, 1, "", "CUmoduleLoadingMode"], [5, 0, 1, "", "CUmulticastGranularity_flags"], [5, 0, 1, "", "CUmulticastObjectProp"], [5, 0, 1, "", "CUmulticastObjectProp_st"], [5, 0, 1, "", "CUmulticastObjectProp_v1"], [5, 0, 1, "", "CUoccupancyB2DSize"], [5, 0, 1, "", "CUoccupancy_flags"], [5, 0, 1, "", "CUpointer_attribute"], [5, 0, 1, "", "CUresourceViewFormat"], [5, 0, 1, "", "CUresourcetype"], [5, 0, 1, "", "CUresult"], [5, 0, 1, "", "CUshared_carveout"], [5, 0, 1, "", "CUsharedconfig"], [5, 0, 1, "", "CUstream"], [5, 0, 1, "", "CUstreamAttrID"], [5, 0, 1, "", "CUstreamAttrValue"], [5, 0, 1, "", "CUstreamAttrValue_v1"], [5, 0, 1, "", "CUstreamBatchMemOpParams"], [5, 0, 1, "", "CUstreamBatchMemOpParams_union"], [5, 0, 1, "", "CUstreamBatchMemOpParams_v1"], [5, 0, 1, "", "CUstreamBatchMemOpType"], [5, 0, 1, "", "CUstreamCallback"], [5, 0, 1, "", "CUstreamCaptureMode"], [5, 0, 1, "", "CUstreamCaptureStatus"], [5, 0, 1, "", "CUstreamMemoryBarrier_flags"], [5, 0, 1, "", "CUstreamUpdateCaptureDependencies_flags"], [5, 0, 1, "", "CUstreamWaitValue_flags"], [5, 0, 1, "", "CUstreamWriteValue_flags"], [5, 0, 1, "", "CUstream_flags"], [5, 0, 1, "", "CUsurfObject"], [5, 0, 1, "", "CUsurfObject_v1"], [5, 0, 1, "", "CUsurfref"], [5, 0, 1, "", "CUsynchronizationPolicy"], [5, 0, 1, "", "CUtensorMap"], [5, 0, 1, "", "CUtensorMapDataType"], [5, 0, 1, "", "CUtensorMapFloatOOBfill"], [5, 0, 1, "", "CUtensorMapInterleave"], [5, 0, 1, "", "CUtensorMapL2promotion"], [5, 0, 1, "", "CUtensorMapSwizzle"], [5, 0, 1, "", "CUtensorMap_st"], [5, 0, 1, "", "CUtexObject"], [5, 0, 1, "", "CUtexObject_v1"], [5, 0, 1, "", "CUtexref"], [5, 0, 1, "", "CUuserObject"], [5, 0, 1, "", "CUuserObjectRetain_flags"], [5, 0, 1, "", "CUuserObject_flags"], [5, 0, 1, "", "CUuuid"], [5, 0, 1, "", "CUuuid_st"], [5, 1, 1, "", "MAX_PLANES"], [5, 1, 1, "", "RESOURCE_ABI_EXTERNAL_BYTES"], [5, 1, 1, "", "RESOURCE_ABI_VERSION"], [5, 0, 1, "", "cl_context_flags"], [5, 0, 1, "", "cl_event_flags"], [5, 3, 1, "", "cuArray3DCreate"], [5, 3, 1, "", "cuArray3DGetDescriptor"], [5, 3, 1, "", "cuArrayCreate"], [5, 3, 1, "", "cuArrayDestroy"], [5, 3, 1, "", "cuArrayGetDescriptor"], [5, 3, 1, "", "cuArrayGetMemoryRequirements"], [5, 3, 1, "", "cuArrayGetPlane"], [5, 3, 1, "", "cuArrayGetSparseProperties"], [5, 3, 1, "", "cuCoredumpGetAttribute"], [5, 3, 1, "", "cuCoredumpGetAttributeGlobal"], [5, 3, 1, "", "cuCoredumpSetAttribute"], [5, 3, 1, "", "cuCoredumpSetAttributeGlobal"], [5, 3, 1, "", "cuCtxCreate"], [5, 3, 1, "", "cuCtxCreate_v3"], [5, 3, 1, "", "cuCtxCreate_v4"], [5, 3, 1, "", "cuCtxDestroy"], [5, 3, 1, "", "cuCtxDisablePeerAccess"], [5, 3, 1, "", "cuCtxEnablePeerAccess"], [5, 3, 1, "", "cuCtxFromGreenCtx"], [5, 3, 1, "", "cuCtxGetApiVersion"], [5, 3, 1, "", "cuCtxGetCacheConfig"], [5, 3, 1, "", "cuCtxGetCurrent"], [5, 3, 1, "", "cuCtxGetDevResource"], [5, 3, 1, "", "cuCtxGetDevice"], [5, 3, 1, "", "cuCtxGetExecAffinity"], [5, 3, 1, "", "cuCtxGetFlags"], [5, 3, 1, "", "cuCtxGetId"], [5, 3, 1, "", "cuCtxGetLimit"], [5, 3, 1, "", "cuCtxGetStreamPriorityRange"], [5, 3, 1, "", "cuCtxPopCurrent"], [5, 3, 1, "", "cuCtxPushCurrent"], [5, 3, 1, "", "cuCtxRecordEvent"], [5, 3, 1, "", "cuCtxResetPersistingL2Cache"], [5, 3, 1, "", "cuCtxSetCacheConfig"], [5, 3, 1, "", "cuCtxSetCurrent"], [5, 3, 1, "", "cuCtxSetFlags"], [5, 3, 1, "", "cuCtxSetLimit"], [5, 3, 1, "", "cuCtxSynchronize"], [5, 3, 1, "", "cuCtxWaitEvent"], [5, 3, 1, "", "cuDestroyExternalMemory"], [5, 3, 1, "", "cuDestroyExternalSemaphore"], [5, 3, 1, 
"", "cuDevResourceGenerateDesc"], [5, 3, 1, "", "cuDevSmResourceSplitByCount"], [5, 3, 1, "", "cuDeviceCanAccessPeer"], [5, 3, 1, "", "cuDeviceGet"], [5, 3, 1, "", "cuDeviceGetAttribute"], [5, 3, 1, "", "cuDeviceGetByPCIBusId"], [5, 3, 1, "", "cuDeviceGetCount"], [5, 3, 1, "", "cuDeviceGetDefaultMemPool"], [5, 3, 1, "", "cuDeviceGetDevResource"], [5, 3, 1, "", "cuDeviceGetExecAffinitySupport"], [5, 3, 1, "", "cuDeviceGetGraphMemAttribute"], [5, 3, 1, "", "cuDeviceGetLuid"], [5, 3, 1, "", "cuDeviceGetMemPool"], [5, 3, 1, "", "cuDeviceGetName"], [5, 3, 1, "", "cuDeviceGetNvSciSyncAttributes"], [5, 3, 1, "", "cuDeviceGetP2PAttribute"], [5, 3, 1, "", "cuDeviceGetPCIBusId"], [5, 3, 1, "", "cuDeviceGetTexture1DLinearMaxWidth"], [5, 3, 1, "", "cuDeviceGetUuid"], [5, 3, 1, "", "cuDeviceGetUuid_v2"], [5, 3, 1, "", "cuDeviceGraphMemTrim"], [5, 3, 1, "", "cuDevicePrimaryCtxGetState"], [5, 3, 1, "", "cuDevicePrimaryCtxRelease"], [5, 3, 1, "", "cuDevicePrimaryCtxReset"], [5, 3, 1, "", "cuDevicePrimaryCtxRetain"], [5, 3, 1, "", "cuDevicePrimaryCtxSetFlags"], [5, 3, 1, "", "cuDeviceRegisterAsyncNotification"], [5, 3, 1, "", "cuDeviceSetGraphMemAttribute"], [5, 3, 1, "", "cuDeviceSetMemPool"], [5, 3, 1, "", "cuDeviceTotalMem"], [5, 3, 1, "", "cuDeviceUnregisterAsyncNotification"], [5, 3, 1, "", "cuDriverGetVersion"], [5, 3, 1, "", "cuEGLStreamConsumerAcquireFrame"], [5, 3, 1, "", "cuEGLStreamConsumerConnect"], [5, 3, 1, "", "cuEGLStreamConsumerConnectWithFlags"], [5, 3, 1, "", "cuEGLStreamConsumerDisconnect"], [5, 3, 1, "", "cuEGLStreamConsumerReleaseFrame"], [5, 3, 1, "", "cuEGLStreamProducerConnect"], [5, 3, 1, "", "cuEGLStreamProducerDisconnect"], [5, 3, 1, "", "cuEGLStreamProducerPresentFrame"], [5, 3, 1, "", "cuEGLStreamProducerReturnFrame"], [5, 3, 1, "", "cuEventCreate"], [5, 3, 1, "", "cuEventCreateFromEGLSync"], [5, 3, 1, "", "cuEventDestroy"], [5, 3, 1, "", "cuEventElapsedTime"], [5, 3, 1, "", "cuEventQuery"], [5, 3, 1, "", "cuEventRecord"], [5, 3, 1, "", "cuEventRecordWithFlags"], [5, 3, 1, "", "cuEventSynchronize"], [5, 3, 1, "", "cuExternalMemoryGetMappedBuffer"], [5, 3, 1, "", "cuExternalMemoryGetMappedMipmappedArray"], [5, 3, 1, "", "cuFlushGPUDirectRDMAWrites"], [5, 3, 1, "", "cuFuncGetAttribute"], [5, 3, 1, "", "cuFuncGetModule"], [5, 3, 1, "", "cuFuncGetName"], [5, 3, 1, "", "cuFuncGetParamInfo"], [5, 3, 1, "", "cuFuncIsLoaded"], [5, 3, 1, "", "cuFuncLoad"], [5, 3, 1, "", "cuFuncSetAttribute"], [5, 3, 1, "", "cuFuncSetCacheConfig"], [5, 3, 1, "", "cuGLGetDevices"], [5, 3, 1, "", "cuGetErrorName"], [5, 3, 1, "", "cuGetErrorString"], [5, 3, 1, "", "cuGetProcAddress"], [5, 3, 1, "", "cuGraphAddBatchMemOpNode"], [5, 3, 1, "", "cuGraphAddChildGraphNode"], [5, 3, 1, "", "cuGraphAddDependencies"], [5, 3, 1, "", "cuGraphAddDependencies_v2"], [5, 3, 1, "", "cuGraphAddEmptyNode"], [5, 3, 1, "", "cuGraphAddEventRecordNode"], [5, 3, 1, "", "cuGraphAddEventWaitNode"], [5, 3, 1, "", "cuGraphAddExternalSemaphoresSignalNode"], [5, 3, 1, "", "cuGraphAddExternalSemaphoresWaitNode"], [5, 3, 1, "", "cuGraphAddHostNode"], [5, 3, 1, "", "cuGraphAddKernelNode"], [5, 3, 1, "", "cuGraphAddMemAllocNode"], [5, 3, 1, "", "cuGraphAddMemFreeNode"], [5, 3, 1, "", "cuGraphAddMemcpyNode"], [5, 3, 1, "", "cuGraphAddMemsetNode"], [5, 3, 1, "", "cuGraphAddNode"], [5, 3, 1, "", "cuGraphAddNode_v2"], [5, 3, 1, "", "cuGraphBatchMemOpNodeGetParams"], [5, 3, 1, "", "cuGraphBatchMemOpNodeSetParams"], [5, 3, 1, "", "cuGraphChildGraphNodeGetGraph"], [5, 3, 1, "", "cuGraphClone"], [5, 3, 1, "", "cuGraphConditionalHandleCreate"], 
[5, 3, 1, "", "cuGraphCreate"], [5, 3, 1, "", "cuGraphDebugDotPrint"], [5, 3, 1, "", "cuGraphDestroy"], [5, 3, 1, "", "cuGraphDestroyNode"], [5, 3, 1, "", "cuGraphEventRecordNodeGetEvent"], [5, 3, 1, "", "cuGraphEventRecordNodeSetEvent"], [5, 3, 1, "", "cuGraphEventWaitNodeGetEvent"], [5, 3, 1, "", "cuGraphEventWaitNodeSetEvent"], [5, 3, 1, "", "cuGraphExecBatchMemOpNodeSetParams"], [5, 3, 1, "", "cuGraphExecChildGraphNodeSetParams"], [5, 3, 1, "", "cuGraphExecDestroy"], [5, 3, 1, "", "cuGraphExecEventRecordNodeSetEvent"], [5, 3, 1, "", "cuGraphExecEventWaitNodeSetEvent"], [5, 3, 1, "", "cuGraphExecExternalSemaphoresSignalNodeSetParams"], [5, 3, 1, "", "cuGraphExecExternalSemaphoresWaitNodeSetParams"], [5, 3, 1, "", "cuGraphExecGetFlags"], [5, 3, 1, "", "cuGraphExecHostNodeSetParams"], [5, 3, 1, "", "cuGraphExecKernelNodeSetParams"], [5, 3, 1, "", "cuGraphExecMemcpyNodeSetParams"], [5, 3, 1, "", "cuGraphExecMemsetNodeSetParams"], [5, 3, 1, "", "cuGraphExecNodeSetParams"], [5, 3, 1, "", "cuGraphExecUpdate"], [5, 3, 1, "", "cuGraphExternalSemaphoresSignalNodeGetParams"], [5, 3, 1, "", "cuGraphExternalSemaphoresSignalNodeSetParams"], [5, 3, 1, "", "cuGraphExternalSemaphoresWaitNodeGetParams"], [5, 3, 1, "", "cuGraphExternalSemaphoresWaitNodeSetParams"], [5, 3, 1, "", "cuGraphGetEdges"], [5, 3, 1, "", "cuGraphGetEdges_v2"], [5, 3, 1, "", "cuGraphGetNodes"], [5, 3, 1, "", "cuGraphGetRootNodes"], [5, 3, 1, "", "cuGraphHostNodeGetParams"], [5, 3, 1, "", "cuGraphHostNodeSetParams"], [5, 3, 1, "", "cuGraphInstantiate"], [5, 3, 1, "", "cuGraphInstantiateWithParams"], [5, 3, 1, "", "cuGraphKernelNodeCopyAttributes"], [5, 3, 1, "", "cuGraphKernelNodeGetAttribute"], [5, 3, 1, "", "cuGraphKernelNodeGetParams"], [5, 3, 1, "", "cuGraphKernelNodeSetAttribute"], [5, 3, 1, "", "cuGraphKernelNodeSetParams"], [5, 3, 1, "", "cuGraphLaunch"], [5, 3, 1, "", "cuGraphMemAllocNodeGetParams"], [5, 3, 1, "", "cuGraphMemFreeNodeGetParams"], [5, 3, 1, "", "cuGraphMemcpyNodeGetParams"], [5, 3, 1, "", "cuGraphMemcpyNodeSetParams"], [5, 3, 1, "", "cuGraphMemsetNodeGetParams"], [5, 3, 1, "", "cuGraphMemsetNodeSetParams"], [5, 3, 1, "", "cuGraphNodeFindInClone"], [5, 3, 1, "", "cuGraphNodeGetDependencies"], [5, 3, 1, "", "cuGraphNodeGetDependencies_v2"], [5, 3, 1, "", "cuGraphNodeGetDependentNodes"], [5, 3, 1, "", "cuGraphNodeGetDependentNodes_v2"], [5, 3, 1, "", "cuGraphNodeGetEnabled"], [5, 3, 1, "", "cuGraphNodeGetType"], [5, 3, 1, "", "cuGraphNodeSetEnabled"], [5, 3, 1, "", "cuGraphNodeSetParams"], [5, 3, 1, "", "cuGraphReleaseUserObject"], [5, 3, 1, "", "cuGraphRemoveDependencies"], [5, 3, 1, "", "cuGraphRemoveDependencies_v2"], [5, 3, 1, "", "cuGraphRetainUserObject"], [5, 3, 1, "", "cuGraphUpload"], [5, 3, 1, "", "cuGraphicsEGLRegisterImage"], [5, 3, 1, "", "cuGraphicsGLRegisterBuffer"], [5, 3, 1, "", "cuGraphicsGLRegisterImage"], [5, 3, 1, "", "cuGraphicsMapResources"], [5, 3, 1, "", "cuGraphicsResourceGetMappedEglFrame"], [5, 3, 1, "", "cuGraphicsResourceGetMappedMipmappedArray"], [5, 3, 1, "", "cuGraphicsResourceGetMappedPointer"], [5, 3, 1, "", "cuGraphicsResourceSetMapFlags"], [5, 3, 1, "", "cuGraphicsSubResourceGetMappedArray"], [5, 3, 1, "", "cuGraphicsUnmapResources"], [5, 3, 1, "", "cuGraphicsUnregisterResource"], [5, 3, 1, "", "cuGraphicsVDPAURegisterOutputSurface"], [5, 3, 1, "", "cuGraphicsVDPAURegisterVideoSurface"], [5, 3, 1, "", "cuGreenCtxCreate"], [5, 3, 1, "", "cuGreenCtxDestroy"], [5, 3, 1, "", "cuGreenCtxGetDevResource"], [5, 3, 1, "", "cuGreenCtxRecordEvent"], [5, 3, 1, "", 
"cuGreenCtxStreamCreate"], [5, 3, 1, "", "cuGreenCtxWaitEvent"], [5, 3, 1, "", "cuImportExternalMemory"], [5, 3, 1, "", "cuImportExternalSemaphore"], [5, 3, 1, "", "cuInit"], [5, 3, 1, "", "cuIpcCloseMemHandle"], [5, 3, 1, "", "cuIpcGetEventHandle"], [5, 3, 1, "", "cuIpcGetMemHandle"], [5, 3, 1, "", "cuIpcOpenEventHandle"], [5, 3, 1, "", "cuIpcOpenMemHandle"], [5, 3, 1, "", "cuKernelGetAttribute"], [5, 3, 1, "", "cuKernelGetFunction"], [5, 3, 1, "", "cuKernelGetLibrary"], [5, 3, 1, "", "cuKernelGetName"], [5, 3, 1, "", "cuKernelGetParamInfo"], [5, 3, 1, "", "cuKernelSetAttribute"], [5, 3, 1, "", "cuKernelSetCacheConfig"], [5, 3, 1, "", "cuLaunchCooperativeKernel"], [5, 3, 1, "", "cuLaunchCooperativeKernelMultiDevice"], [5, 3, 1, "", "cuLaunchHostFunc"], [5, 3, 1, "", "cuLaunchKernel"], [5, 3, 1, "", "cuLaunchKernelEx"], [5, 3, 1, "", "cuLibraryEnumerateKernels"], [5, 3, 1, "", "cuLibraryGetGlobal"], [5, 3, 1, "", "cuLibraryGetKernel"], [5, 3, 1, "", "cuLibraryGetKernelCount"], [5, 3, 1, "", "cuLibraryGetManaged"], [5, 3, 1, "", "cuLibraryGetModule"], [5, 3, 1, "", "cuLibraryGetUnifiedFunction"], [5, 3, 1, "", "cuLibraryLoadData"], [5, 3, 1, "", "cuLibraryLoadFromFile"], [5, 3, 1, "", "cuLibraryUnload"], [5, 3, 1, "", "cuLinkAddData"], [5, 3, 1, "", "cuLinkAddFile"], [5, 3, 1, "", "cuLinkComplete"], [5, 3, 1, "", "cuLinkCreate"], [5, 3, 1, "", "cuLinkDestroy"], [5, 3, 1, "", "cuMemAddressFree"], [5, 3, 1, "", "cuMemAddressReserve"], [5, 3, 1, "", "cuMemAdvise"], [5, 3, 1, "", "cuMemAdvise_v2"], [5, 3, 1, "", "cuMemAlloc"], [5, 3, 1, "", "cuMemAllocAsync"], [5, 3, 1, "", "cuMemAllocFromPoolAsync"], [5, 3, 1, "", "cuMemAllocHost"], [5, 3, 1, "", "cuMemAllocManaged"], [5, 3, 1, "", "cuMemAllocPitch"], [5, 3, 1, "", "cuMemCreate"], [5, 3, 1, "", "cuMemExportToShareableHandle"], [5, 3, 1, "", "cuMemFree"], [5, 3, 1, "", "cuMemFreeAsync"], [5, 3, 1, "", "cuMemFreeHost"], [5, 3, 1, "", "cuMemGetAccess"], [5, 3, 1, "", "cuMemGetAddressRange"], [5, 3, 1, "", "cuMemGetAllocationGranularity"], [5, 3, 1, "", "cuMemGetAllocationPropertiesFromHandle"], [5, 3, 1, "", "cuMemGetHandleForAddressRange"], [5, 3, 1, "", "cuMemGetInfo"], [5, 3, 1, "", "cuMemHostAlloc"], [5, 3, 1, "", "cuMemHostGetDevicePointer"], [5, 3, 1, "", "cuMemHostGetFlags"], [5, 3, 1, "", "cuMemHostRegister"], [5, 3, 1, "", "cuMemHostUnregister"], [5, 3, 1, "", "cuMemImportFromShareableHandle"], [5, 3, 1, "", "cuMemMap"], [5, 3, 1, "", "cuMemMapArrayAsync"], [5, 3, 1, "", "cuMemPoolCreate"], [5, 3, 1, "", "cuMemPoolDestroy"], [5, 3, 1, "", "cuMemPoolExportPointer"], [5, 3, 1, "", "cuMemPoolExportToShareableHandle"], [5, 3, 1, "", "cuMemPoolGetAccess"], [5, 3, 1, "", "cuMemPoolGetAttribute"], [5, 3, 1, "", "cuMemPoolImportFromShareableHandle"], [5, 3, 1, "", "cuMemPoolImportPointer"], [5, 3, 1, "", "cuMemPoolSetAccess"], [5, 3, 1, "", "cuMemPoolSetAttribute"], [5, 3, 1, "", "cuMemPoolTrimTo"], [5, 3, 1, "", "cuMemPrefetchAsync"], [5, 3, 1, "", "cuMemPrefetchAsync_v2"], [5, 3, 1, "", "cuMemRangeGetAttribute"], [5, 3, 1, "", "cuMemRangeGetAttributes"], [5, 3, 1, "", "cuMemRelease"], [5, 3, 1, "", "cuMemRetainAllocationHandle"], [5, 3, 1, "", "cuMemSetAccess"], [5, 3, 1, "", "cuMemUnmap"], [5, 3, 1, "", "cuMemcpy"], [5, 3, 1, "", "cuMemcpy2D"], [5, 3, 1, "", "cuMemcpy2DAsync"], [5, 3, 1, "", "cuMemcpy2DUnaligned"], [5, 3, 1, "", "cuMemcpy3D"], [5, 3, 1, "", "cuMemcpy3DAsync"], [5, 3, 1, "", "cuMemcpy3DPeer"], [5, 3, 1, "", "cuMemcpy3DPeerAsync"], [5, 3, 1, "", "cuMemcpyAsync"], [5, 3, 1, "", "cuMemcpyAtoA"], [5, 3, 1, "", "cuMemcpyAtoD"], [5, 
3, 1, "", "cuMemcpyAtoH"], [5, 3, 1, "", "cuMemcpyAtoHAsync"], [5, 3, 1, "", "cuMemcpyDtoA"], [5, 3, 1, "", "cuMemcpyDtoD"], [5, 3, 1, "", "cuMemcpyDtoDAsync"], [5, 3, 1, "", "cuMemcpyDtoH"], [5, 3, 1, "", "cuMemcpyDtoHAsync"], [5, 3, 1, "", "cuMemcpyHtoA"], [5, 3, 1, "", "cuMemcpyHtoAAsync"], [5, 3, 1, "", "cuMemcpyHtoD"], [5, 3, 1, "", "cuMemcpyHtoDAsync"], [5, 3, 1, "", "cuMemcpyPeer"], [5, 3, 1, "", "cuMemcpyPeerAsync"], [5, 3, 1, "", "cuMemsetD16"], [5, 3, 1, "", "cuMemsetD16Async"], [5, 3, 1, "", "cuMemsetD2D16"], [5, 3, 1, "", "cuMemsetD2D16Async"], [5, 3, 1, "", "cuMemsetD2D32"], [5, 3, 1, "", "cuMemsetD2D32Async"], [5, 3, 1, "", "cuMemsetD2D8"], [5, 3, 1, "", "cuMemsetD2D8Async"], [5, 3, 1, "", "cuMemsetD32"], [5, 3, 1, "", "cuMemsetD32Async"], [5, 3, 1, "", "cuMemsetD8"], [5, 3, 1, "", "cuMemsetD8Async"], [5, 3, 1, "", "cuMipmappedArrayCreate"], [5, 3, 1, "", "cuMipmappedArrayDestroy"], [5, 3, 1, "", "cuMipmappedArrayGetLevel"], [5, 3, 1, "", "cuMipmappedArrayGetMemoryRequirements"], [5, 3, 1, "", "cuMipmappedArrayGetSparseProperties"], [5, 3, 1, "", "cuModuleEnumerateFunctions"], [5, 3, 1, "", "cuModuleGetFunction"], [5, 3, 1, "", "cuModuleGetFunctionCount"], [5, 3, 1, "", "cuModuleGetGlobal"], [5, 3, 1, "", "cuModuleGetLoadingMode"], [5, 3, 1, "", "cuModuleLoad"], [5, 3, 1, "", "cuModuleLoadData"], [5, 3, 1, "", "cuModuleLoadDataEx"], [5, 3, 1, "", "cuModuleLoadFatBinary"], [5, 3, 1, "", "cuModuleUnload"], [5, 3, 1, "", "cuMulticastAddDevice"], [5, 3, 1, "", "cuMulticastBindAddr"], [5, 3, 1, "", "cuMulticastBindMem"], [5, 3, 1, "", "cuMulticastCreate"], [5, 3, 1, "", "cuMulticastGetGranularity"], [5, 3, 1, "", "cuMulticastUnbind"], [5, 3, 1, "", "cuOccupancyAvailableDynamicSMemPerBlock"], [5, 3, 1, "", "cuOccupancyMaxActiveBlocksPerMultiprocessor"], [5, 3, 1, "", "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags"], [5, 3, 1, "", "cuOccupancyMaxActiveClusters"], [5, 3, 1, "", "cuOccupancyMaxPotentialBlockSize"], [5, 3, 1, "", "cuOccupancyMaxPotentialBlockSizeWithFlags"], [5, 3, 1, "", "cuOccupancyMaxPotentialClusterSize"], [5, 3, 1, "", "cuPointerGetAttribute"], [5, 3, 1, "", "cuPointerGetAttributes"], [5, 3, 1, "", "cuPointerSetAttribute"], [5, 3, 1, "", "cuProfilerStart"], [5, 3, 1, "", "cuProfilerStop"], [5, 3, 1, "", "cuSignalExternalSemaphoresAsync"], [5, 3, 1, "", "cuStreamAddCallback"], [5, 3, 1, "", "cuStreamAttachMemAsync"], [5, 3, 1, "", "cuStreamBatchMemOp"], [5, 3, 1, "", "cuStreamBeginCapture"], [5, 3, 1, "", "cuStreamBeginCaptureToGraph"], [5, 3, 1, "", "cuStreamCopyAttributes"], [5, 3, 1, "", "cuStreamCreate"], [5, 3, 1, "", "cuStreamCreateWithPriority"], [5, 3, 1, "", "cuStreamDestroy"], [5, 3, 1, "", "cuStreamEndCapture"], [5, 3, 1, "", "cuStreamGetAttribute"], [5, 3, 1, "", "cuStreamGetCaptureInfo"], [5, 3, 1, "", "cuStreamGetCaptureInfo_v3"], [5, 3, 1, "", "cuStreamGetCtx"], [5, 3, 1, "", "cuStreamGetCtx_v2"], [5, 3, 1, "", "cuStreamGetFlags"], [5, 3, 1, "", "cuStreamGetGreenCtx"], [5, 3, 1, "", "cuStreamGetId"], [5, 3, 1, "", "cuStreamGetPriority"], [5, 3, 1, "", "cuStreamIsCapturing"], [5, 3, 1, "", "cuStreamQuery"], [5, 3, 1, "", "cuStreamSetAttribute"], [5, 3, 1, "", "cuStreamSynchronize"], [5, 3, 1, "", "cuStreamUpdateCaptureDependencies"], [5, 3, 1, "", "cuStreamUpdateCaptureDependencies_v2"], [5, 3, 1, "", "cuStreamWaitEvent"], [5, 3, 1, "", "cuStreamWaitValue32"], [5, 3, 1, "", "cuStreamWaitValue64"], [5, 3, 1, "", "cuStreamWriteValue32"], [5, 3, 1, "", "cuStreamWriteValue64"], [5, 3, 1, "", "cuSurfObjectCreate"], [5, 3, 1, "", 
"cuSurfObjectDestroy"], [5, 3, 1, "", "cuSurfObjectGetResourceDesc"], [5, 3, 1, "", "cuTensorMapEncodeIm2col"], [5, 3, 1, "", "cuTensorMapEncodeTiled"], [5, 3, 1, "", "cuTensorMapReplaceAddress"], [5, 3, 1, "", "cuTexObjectCreate"], [5, 3, 1, "", "cuTexObjectDestroy"], [5, 3, 1, "", "cuTexObjectGetResourceDesc"], [5, 3, 1, "", "cuTexObjectGetResourceViewDesc"], [5, 3, 1, "", "cuTexObjectGetTextureDesc"], [5, 3, 1, "", "cuThreadExchangeStreamCaptureMode"], [5, 3, 1, "", "cuUserObjectCreate"], [5, 3, 1, "", "cuUserObjectRelease"], [5, 3, 1, "", "cuUserObjectRetain"], [5, 3, 1, "", "cuVDPAUCtxCreate"], [5, 3, 1, "", "cuVDPAUGetDevice"], [5, 3, 1, "", "cuWaitExternalSemaphoresAsync"]], "cuda.bindings.driver.CUCoredumpGenerationFlags": [[5, 1, 1, "", "CU_COREDUMP_DEFAULT_FLAGS"], [5, 1, 1, "", "CU_COREDUMP_LIGHTWEIGHT_FLAGS"], [5, 1, 1, "", "CU_COREDUMP_SKIP_ABORT"], [5, 1, 1, "", "CU_COREDUMP_SKIP_CONSTBANK_MEMORY"], [5, 1, 1, "", "CU_COREDUMP_SKIP_GLOBAL_MEMORY"], [5, 1, 1, "", "CU_COREDUMP_SKIP_LOCAL_MEMORY"], [5, 1, 1, "", "CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES"], [5, 1, 1, "", "CU_COREDUMP_SKIP_SHARED_MEMORY"]], "cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Flags"], [5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_st": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Flags"], [5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_v2": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Flags"], [5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR": [[5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_st": [[5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_v2": [[5, 1, 1, "", "Format"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "NumChannels"], [5, 1, 1, "", "Width"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS": [[5, 1, 1, "", "alignment"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_st": [[5, 1, 1, "", "alignment"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1": [[5, 1, 1, "", "alignment"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "miptailFirstLevel"], [5, 1, 1, "", "miptailSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "tileExtent"]], "cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "miptailFirstLevel"], [5, 1, 1, "", "miptailSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "tileExtent"]], "cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "miptailFirstLevel"], [5, 1, 1, "", "miptailSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "tileExtent"]], 
"cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st": [[5, 1, 1, "", "count"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "paramArray"]], "cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"]], "cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"]], "cuda.bindings.driver.CUDA_CONDITIONAL_NODE_PARAMS": [[5, 1, 1, "id13", "ctx"], [5, 2, 1, "id14", "getPtr"], [5, 1, 1, "id9", "handle"], [5, 1, 1, "id12", "phGraph_out"], [5, 1, 1, "id11", "size"], [5, 1, 1, "id10", "type"]], "cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS_st": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS_st": [[5, 1, 1, "", "event"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "size"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC": [[5, 1, 1, "", "arrayDesc"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numLevels"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st": [[5, 1, 1, "", "arrayDesc"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numLevels"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1": [[5, 1, 1, "", "arrayDesc"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numLevels"], [5, 1, 1, "", "offset"], [5, 1, 1, "", 
"reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handle"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "params"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st": [[5, 1, 1, "", "extSemArray"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExtSems"], [5, 1, 1, "", "paramsArray"]], "cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hErrNode_out"], [5, 1, 1, "", "hUploadStream"], [5, 1, 1, "", "result_out"]], 
"cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hErrNode_out"], [5, 1, 1, "", "hUploadStream"], [5, 1, 1, "", "result_out"]], "cuda.bindings.driver.CUDA_HOST_NODE_PARAMS": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_st": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v1": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2_st": [[5, 1, 1, "", "fn"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "userData"]], "cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v1": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "ctx"], [5, 1, 1, "", "extra"], [5, 1, 1, "", "func"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "kern"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], 
"cuda.bindings.driver.CUDA_LAUNCH_PARAMS": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "function"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_LAUNCH_PARAMS_st": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "function"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_LAUNCH_PARAMS_v1": [[5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 1, 1, "", "function"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "kernelParams"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUDA_MEMCPY2D": [[5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"]], "cuda.bindings.driver.CUDA_MEMCPY2D_st": [[5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"]], "cuda.bindings.driver.CUDA_MEMCPY2D_v2": [[5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"]], "cuda.bindings.driver.CUDA_MEMCPY3D": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.bindings.driver.CUDA_MEMCPY3D_PEER": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstContext"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", 
"dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcContext"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.bindings.driver.CUDA_MEMCPY3D_PEER_st": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstContext"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcContext"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.bindings.driver.CUDA_MEMCPY3D_PEER_v1": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstContext"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcContext"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.bindings.driver.CUDA_MEMCPY3D_st": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.bindings.driver.CUDA_MEMCPY3D_v2": [[5, 1, 1, "", "Depth"], [5, 1, 1, "", "Height"], [5, 1, 1, "", "WidthInBytes"], [5, 1, 1, "", "dstArray"], [5, 1, 1, "", "dstDevice"], [5, 1, 1, "", "dstHeight"], [5, 1, 1, "", "dstHost"], [5, 1, 1, "", "dstLOD"], [5, 1, 1, "", "dstMemoryType"], [5, 1, 1, "", "dstPitch"], [5, 1, 1, "", "dstXInBytes"], [5, 1, 1, "", "dstY"], [5, 1, 1, "", "dstZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "srcArray"], [5, 1, 1, "", "srcDevice"], [5, 1, 1, "", "srcHeight"], [5, 1, 1, "", "srcHost"], [5, 1, 1, "", "srcLOD"], [5, 1, 1, "", "srcMemoryType"], [5, 1, 1, "", "srcPitch"], [5, 1, 1, "", "srcXInBytes"], [5, 1, 1, "", "srcY"], [5, 1, 1, "", "srcZ"]], "cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS": 
[[5, 1, 1, "", "copyCtx"], [5, 1, 1, "", "copyParams"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS_st": [[5, 1, 1, "", "copyCtx"], [5, 1, 1, "", "copyParams"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS": [[5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_st": [[5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v1": [[5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2": [[5, 1, 1, "", "ctx"], [5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2_st": [[5, 1, 1, "", "ctx"], [5, 1, 1, "", "dst"], [5, 1, 1, "", "elementSize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "value"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st": [[5, 1, 1, "", "accessDescCount"], [5, 1, 1, "", "accessDescs"], [5, 1, 1, "", "bytesize"], [5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "poolProps"]], "cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS": [[5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS_st": [[5, 1, 1, "", "dptr"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS": [[5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE"]], "cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "p2pToken"], [5, 1, 1, "", "vaSpaceToken"]], "cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "p2pToken"], [5, 1, 1, "", "vaSpaceToken"]], "cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "p2pToken"], [5, 1, 1, "", "vaSpaceToken"]], "cuda.bindings.driver.CUDA_RESOURCE_DESC": [[5, 1, 1, "", "flags"], 
[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "res"], [5, 1, 1, "", "resType"]], "cuda.bindings.driver.CUDA_RESOURCE_DESC_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "res"], [5, 1, 1, "", "resType"]], "cuda.bindings.driver.CUDA_RESOURCE_DESC_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "res"], [5, 1, 1, "", "resType"]], "cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC": [[5, 1, 1, "", "depth"], [5, 1, 1, "", "firstLayer"], [5, 1, 1, "", "firstMipmapLevel"], [5, 1, 1, "", "format"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "lastLayer"], [5, 1, 1, "", "lastMipmapLevel"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_st": [[5, 1, 1, "", "depth"], [5, 1, 1, "", "firstLayer"], [5, 1, 1, "", "firstMipmapLevel"], [5, 1, 1, "", "format"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "lastLayer"], [5, 1, 1, "", "lastMipmapLevel"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_v1": [[5, 1, 1, "", "depth"], [5, 1, 1, "", "firstLayer"], [5, 1, 1, "", "firstMipmapLevel"], [5, 1, 1, "", "format"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "lastLayer"], [5, 1, 1, "", "lastMipmapLevel"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUDA_TEXTURE_DESC": [[5, 1, 1, "", "addressMode"], [5, 1, 1, "", "borderColor"], [5, 1, 1, "", "filterMode"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxAnisotropy"], [5, 1, 1, "", "maxMipmapLevelClamp"], [5, 1, 1, "", "minMipmapLevelClamp"], [5, 1, 1, "", "mipmapFilterMode"], [5, 1, 1, "", "mipmapLevelBias"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_TEXTURE_DESC_st": [[5, 1, 1, "", "addressMode"], [5, 1, 1, "", "borderColor"], [5, 1, 1, "", "filterMode"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxAnisotropy"], [5, 1, 1, "", "maxMipmapLevelClamp"], [5, 1, 1, "", "minMipmapLevelClamp"], [5, 1, 1, "", "mipmapFilterMode"], [5, 1, 1, "", "mipmapLevelBias"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUDA_TEXTURE_DESC_v1": [[5, 1, 1, "", "addressMode"], [5, 1, 1, "", "borderColor"], [5, 1, 1, "", "filterMode"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxAnisotropy"], [5, 1, 1, "", "maxMipmapLevelClamp"], [5, 1, 1, "", "minMipmapLevelClamp"], [5, 1, 1, "", "mipmapFilterMode"], [5, 1, 1, "", "mipmapLevelBias"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUGLDeviceList": [[5, 1, 1, "", "CU_GL_DEVICE_LIST_ALL"], [5, 1, 1, "", "CU_GL_DEVICE_LIST_CURRENT_FRAME"], [5, 1, 1, "", "CU_GL_DEVICE_LIST_NEXT_FRAME"]], "cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering": [[5, 1, 1, "", "CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES"], [5, 1, 1, "", "CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE"], [5, 1, 1, "", "CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER"]], "cuda.bindings.driver.CUaccessPolicyWindow": [[5, 1, 1, "", "base_ptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hitProp"], [5, 1, 1, "", "hitRatio"], [5, 1, 1, "", "missProp"], [5, 1, 1, "", "num_bytes"]], "cuda.bindings.driver.CUaccessPolicyWindow_st": [[5, 1, 1, "", "base_ptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hitProp"], [5, 1, 1, "", "hitRatio"], [5, 1, 1, "", "missProp"], [5, 1, 1, "", "num_bytes"]], "cuda.bindings.driver.CUaccessPolicyWindow_v1": [[5, 1, 1, "", "base_ptr"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "hitProp"], [5, 1, 1, "", "hitRatio"], [5, 1, 1, "", "missProp"], [5, 1, 1, "", "num_bytes"]], 
"cuda.bindings.driver.CUaccessProperty": [[5, 1, 1, "", "CU_ACCESS_PROPERTY_NORMAL"], [5, 1, 1, "", "CU_ACCESS_PROPERTY_PERSISTING"], [5, 1, 1, "", "CU_ACCESS_PROPERTY_STREAMING"]], "cuda.bindings.driver.CUaddress_mode": [[5, 1, 1, "", "CU_TR_ADDRESS_MODE_BORDER"], [5, 1, 1, "", "CU_TR_ADDRESS_MODE_CLAMP"], [5, 1, 1, "", "CU_TR_ADDRESS_MODE_MIRROR"], [5, 1, 1, "", "CU_TR_ADDRESS_MODE_WRAP"]], "cuda.bindings.driver.CUarray": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUarrayMapInfo": [[5, 1, 1, "", "deviceBitMask"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memHandle"], [5, 1, 1, "", "memHandleType"], [5, 1, 1, "", "memOperationType"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "resource"], [5, 1, 1, "", "resourceType"], [5, 1, 1, "", "subresource"], [5, 1, 1, "", "subresourceType"]], "cuda.bindings.driver.CUarrayMapInfo_st": [[5, 1, 1, "", "deviceBitMask"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memHandle"], [5, 1, 1, "", "memHandleType"], [5, 1, 1, "", "memOperationType"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "resource"], [5, 1, 1, "", "resourceType"], [5, 1, 1, "", "subresource"], [5, 1, 1, "", "subresourceType"]], "cuda.bindings.driver.CUarrayMapInfo_v1": [[5, 1, 1, "", "deviceBitMask"], [5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memHandle"], [5, 1, 1, "", "memHandleType"], [5, 1, 1, "", "memOperationType"], [5, 1, 1, "", "offset"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "resource"], [5, 1, 1, "", "resourceType"], [5, 1, 1, "", "subresource"], [5, 1, 1, "", "subresourceType"]], "cuda.bindings.driver.CUarraySparseSubresourceType": [[5, 1, 1, "", "CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL"], [5, 1, 1, "", "CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL"]], "cuda.bindings.driver.CUarray_cubemap_face": [[5, 1, 1, "", "CU_CUBEMAP_FACE_NEGATIVE_X"], [5, 1, 1, "", "CU_CUBEMAP_FACE_NEGATIVE_Y"], [5, 1, 1, "", "CU_CUBEMAP_FACE_NEGATIVE_Z"], [5, 1, 1, "", "CU_CUBEMAP_FACE_POSITIVE_X"], [5, 1, 1, "", "CU_CUBEMAP_FACE_POSITIVE_Y"], [5, 1, 1, "", "CU_CUBEMAP_FACE_POSITIVE_Z"]], "cuda.bindings.driver.CUarray_format": [[5, 1, 1, "", "CU_AD_FORMAT_AYUV"], [5, 1, 1, "", "CU_AD_FORMAT_BC1_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC1_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_BC2_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC2_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_BC3_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC3_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_BC4_SNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC4_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC5_SNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC5_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC6H_SF16"], [5, 1, 1, "", "CU_AD_FORMAT_BC6H_UF16"], [5, 1, 1, "", "CU_AD_FORMAT_BC7_UNORM"], [5, 1, 1, "", "CU_AD_FORMAT_BC7_UNORM_SRGB"], [5, 1, 1, "", "CU_AD_FORMAT_FLOAT"], [5, 1, 1, "", "CU_AD_FORMAT_HALF"], [5, 1, 1, "", "CU_AD_FORMAT_MAX"], [5, 1, 1, "", "CU_AD_FORMAT_NV12"], [5, 1, 1, "", "CU_AD_FORMAT_NV16"], [5, 1, 1, "", "CU_AD_FORMAT_P010"], [5, 1, 1, "", "CU_AD_FORMAT_P016"], [5, 1, 1, "", "CU_AD_FORMAT_P210"], [5, 1, 1, "", "CU_AD_FORMAT_P216"], [5, 1, 1, "", "CU_AD_FORMAT_SIGNED_INT16"], [5, 1, 1, "", "CU_AD_FORMAT_SIGNED_INT32"], [5, 1, 1, "", "CU_AD_FORMAT_SIGNED_INT8"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT16X1"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT16X2"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT16X4"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT8X1"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT8X2"], [5, 1, 1, "", "CU_AD_FORMAT_SNORM_INT8X4"], [5, 1, 1, "", 
"CU_AD_FORMAT_UNORM_INT16X1"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT16X2"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT16X4"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT8X1"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT8X2"], [5, 1, 1, "", "CU_AD_FORMAT_UNORM_INT8X4"], [5, 1, 1, "", "CU_AD_FORMAT_UNSIGNED_INT16"], [5, 1, 1, "", "CU_AD_FORMAT_UNSIGNED_INT32"], [5, 1, 1, "", "CU_AD_FORMAT_UNSIGNED_INT8"], [5, 1, 1, "", "CU_AD_FORMAT_Y210"], [5, 1, 1, "", "CU_AD_FORMAT_Y216"], [5, 1, 1, "", "CU_AD_FORMAT_Y410"], [5, 1, 1, "", "CU_AD_FORMAT_Y416"], [5, 1, 1, "", "CU_AD_FORMAT_Y444_PLANAR10"], [5, 1, 1, "", "CU_AD_FORMAT_Y444_PLANAR8"], [5, 1, 1, "", "CU_AD_FORMAT_YUY2"]], "cuda.bindings.driver.CUasyncCallback": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUasyncCallbackHandle": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUasyncNotificationInfo": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "info"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUasyncNotificationInfo_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "info"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUasyncNotificationType": [[5, 1, 1, "", "CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET"]], "cuda.bindings.driver.CUcigDataType": [[5, 1, 1, "", "CIG_DATA_TYPE_D3D12_COMMAND_QUEUE"]], "cuda.bindings.driver.CUclusterSchedulingPolicy": [[5, 1, 1, "", "CU_CLUSTER_SCHEDULING_POLICY_DEFAULT"], [5, 1, 1, "", "CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING"], [5, 1, 1, "", "CU_CLUSTER_SCHEDULING_POLICY_SPREAD"]], "cuda.bindings.driver.CUcomputemode": [[5, 1, 1, "", "CU_COMPUTEMODE_DEFAULT"], [5, 1, 1, "", "CU_COMPUTEMODE_EXCLUSIVE_PROCESS"], [5, 1, 1, "", "CU_COMPUTEMODE_PROHIBITED"]], "cuda.bindings.driver.CUcontext": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUcoredumpSettings": [[5, 1, 1, "", "CU_COREDUMP_ENABLE_ON_EXCEPTION"], [5, 1, 1, "", "CU_COREDUMP_ENABLE_USER_TRIGGER"], [5, 1, 1, "", "CU_COREDUMP_FILE"], [5, 1, 1, "", "CU_COREDUMP_GENERATION_FLAGS"], [5, 1, 1, "", "CU_COREDUMP_LIGHTWEIGHT"], [5, 1, 1, "", "CU_COREDUMP_MAX"], [5, 1, 1, "", "CU_COREDUMP_PIPE"], [5, 1, 1, "", "CU_COREDUMP_TRIGGER_HOST"]], "cuda.bindings.driver.CUctxCigParam": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sharedData"], [5, 1, 1, "", "sharedDataType"]], "cuda.bindings.driver.CUctxCigParam_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sharedData"], [5, 1, 1, "", "sharedDataType"]], "cuda.bindings.driver.CUctxCreateParams": [[5, 1, 1, "", "cigParams"], [5, 1, 1, "", "execAffinityParams"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExecAffinityParams"]], "cuda.bindings.driver.CUctxCreateParams_st": [[5, 1, 1, "", "cigParams"], [5, 1, 1, "", "execAffinityParams"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "numExecAffinityParams"]], "cuda.bindings.driver.CUctx_flags": [[5, 1, 1, "", "CU_CTX_BLOCKING_SYNC"], [5, 1, 1, "", "CU_CTX_COREDUMP_ENABLE"], [5, 1, 1, "", "CU_CTX_FLAGS_MASK"], [5, 1, 1, "", "CU_CTX_LMEM_RESIZE_TO_MAX"], [5, 1, 1, "", "CU_CTX_MAP_HOST"], [5, 1, 1, "", "CU_CTX_SCHED_AUTO"], [5, 1, 1, "", "CU_CTX_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "CU_CTX_SCHED_MASK"], [5, 1, 1, "", "CU_CTX_SCHED_SPIN"], [5, 1, 1, "", "CU_CTX_SCHED_YIELD"], [5, 1, 1, "", "CU_CTX_SYNC_MEMOPS"], [5, 1, 1, "", "CU_CTX_USER_COREDUMP_ENABLE"]], "cuda.bindings.driver.CUdevResource": [[5, 1, 1, "", "_internal_padding"], [5, 1, 1, "", "_oversize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sm"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUdevResourceDesc": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUdevResourceType": [[5, 1, 1, "", "CU_DEV_RESOURCE_TYPE_INVALID"], [5, 1, 1, "", 
"CU_DEV_RESOURCE_TYPE_SM"]], "cuda.bindings.driver.CUdevResource_st": [[5, 1, 1, "", "_internal_padding"], [5, 1, 1, "", "_oversize"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "sm"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUdevSmResource": [[5, 2, 1, "id75", "getPtr"], [5, 1, 1, "id74", "smCount"]], "cuda.bindings.driver.CUdevSmResourceSplit_flags": [[5, 1, 1, "", "CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING"], [5, 1, 1, "", "CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE"]], "cuda.bindings.driver.CUdevSmResource_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "smCount"]], "cuda.bindings.driver.CUdevice": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUdeviceNumaConfig": [[5, 1, 1, "", "CU_DEVICE_NUMA_CONFIG_NONE"], [5, 1, 1, "", "CU_DEVICE_NUMA_CONFIG_NUMA_NODE"]], "cuda.bindings.driver.CUdevice_P2PAttribute": [[5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK"]], "cuda.bindings.driver.CUdevice_attribute": [[5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CLOCK_RATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_MODE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_ECC_ENABLED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_GPU_OVERLAP"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED"], [5, 1, 1, "", 
"CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_INTEGRATED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH"], [5, 1, 1, "", 
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_PITCH"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MPS_ENABLED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_NUMA_CONFIG"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_NUMA_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PCI_BUS_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TCC_DRIVER"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED"], [5, 1, 1, "", "CU_DEVICE_ATTRIBUTE_WARP_SIZE"]], "cuda.bindings.driver.CUdevice_v1": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUdeviceptr": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUdeviceptr_v2": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUdevprop": [[5, 1, 1, "", "SIMDWidth"], [5, 1, 1, "", "clockRate"], [5, 2, 1, "", "getPtr"], [5, 1, 1, 
"", "maxGridSize"], [5, 1, 1, "", "maxThreadsDim"], [5, 1, 1, "", "maxThreadsPerBlock"], [5, 1, 1, "", "memPitch"], [5, 1, 1, "", "regsPerBlock"], [5, 1, 1, "", "sharedMemPerBlock"], [5, 1, 1, "", "textureAlign"], [5, 1, 1, "", "totalConstantMemory"]], "cuda.bindings.driver.CUdevprop_st": [[5, 1, 1, "", "SIMDWidth"], [5, 1, 1, "", "clockRate"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxGridSize"], [5, 1, 1, "", "maxThreadsDim"], [5, 1, 1, "", "maxThreadsPerBlock"], [5, 1, 1, "", "memPitch"], [5, 1, 1, "", "regsPerBlock"], [5, 1, 1, "", "sharedMemPerBlock"], [5, 1, 1, "", "textureAlign"], [5, 1, 1, "", "totalConstantMemory"]], "cuda.bindings.driver.CUdevprop_v1": [[5, 1, 1, "", "SIMDWidth"], [5, 1, 1, "", "clockRate"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "maxGridSize"], [5, 1, 1, "", "maxThreadsDim"], [5, 1, 1, "", "maxThreadsPerBlock"], [5, 1, 1, "", "memPitch"], [5, 1, 1, "", "regsPerBlock"], [5, 1, 1, "", "sharedMemPerBlock"], [5, 1, 1, "", "textureAlign"], [5, 1, 1, "", "totalConstantMemory"]], "cuda.bindings.driver.CUdriverProcAddressQueryResult": [[5, 1, 1, "", "CU_GET_PROC_ADDRESS_SUCCESS"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT"]], "cuda.bindings.driver.CUdriverProcAddress_flags": [[5, 1, 1, "", "CU_GET_PROC_ADDRESS_DEFAULT"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_LEGACY_STREAM"], [5, 1, 1, "", "CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM"]], "cuda.bindings.driver.CUeglColorFormat": [[5, 1, 1, "", "CU_EGL_COLOR_FORMAT_A"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_ABGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_ARGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_AYUV"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_AYUV_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_CCCC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER10_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_BCCR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_CBRC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_CCCC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_CRBC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_RCCB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER12_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER14_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER20_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_BCCR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_CBRC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_CRBC"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_RCCB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BAYER_RGGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BGR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_BGRA"], [5, 1, 1, "", 
"CU_EGL_COLOR_FORMAT_L"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_MAX"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_R"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_RG"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_RGB"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_RGBA"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_UYVY_422"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_UYVY_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_VYUY"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_VYUY_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y10_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y12_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUVA"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUVA_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUV_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUYV_422"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YUYV_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU422_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER"], [5, 1, 1, "", 
"CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_PLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVYU"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_YVYU_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y_709_ER"], [5, 1, 1, "", "CU_EGL_COLOR_FORMAT_Y_ER"]], "cuda.bindings.driver.CUeglFrame": [[5, 1, 1, "", "cuFormat"], [5, 1, 1, "", "depth"], [5, 1, 1, "", "eglColorFormat"], [5, 1, 1, "", "frame"], [5, 1, 1, "", "frameType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "numChannels"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "planeCount"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUeglFrameType": [[5, 1, 1, "", "CU_EGL_FRAME_TYPE_ARRAY"], [5, 1, 1, "", "CU_EGL_FRAME_TYPE_PITCH"]], "cuda.bindings.driver.CUeglFrame_st": [[5, 1, 1, "", "cuFormat"], [5, 1, 1, "", "depth"], [5, 1, 1, "", "eglColorFormat"], [5, 1, 1, "", "frame"], [5, 1, 1, "", "frameType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "numChannels"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "planeCount"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUeglFrame_v1": [[5, 1, 1, "", "cuFormat"], [5, 1, 1, "", "depth"], [5, 1, 1, "", "eglColorFormat"], [5, 1, 1, "", "frame"], [5, 1, 1, "", "frameType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "height"], [5, 1, 1, "", "numChannels"], [5, 1, 1, "", "pitch"], [5, 1, 1, "", "planeCount"], [5, 1, 1, "", "width"]], "cuda.bindings.driver.CUeglResourceLocationFlags": [[5, 1, 1, "", "CU_EGL_RESOURCE_LOCATION_SYSMEM"], [5, 1, 1, "", "CU_EGL_RESOURCE_LOCATION_VIDMEM"]], "cuda.bindings.driver.CUeglStreamConnection": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUevent": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUevent_flags": [[5, 1, 1, "", "CU_EVENT_BLOCKING_SYNC"], [5, 1, 1, "", "CU_EVENT_DEFAULT"], [5, 1, 1, "", "CU_EVENT_DISABLE_TIMING"], [5, 1, 1, "", "CU_EVENT_INTERPROCESS"]], "cuda.bindings.driver.CUevent_record_flags": [[5, 1, 1, "", "CU_EVENT_RECORD_DEFAULT"], [5, 1, 1, "", "CU_EVENT_RECORD_EXTERNAL"]], "cuda.bindings.driver.CUevent_sched_flags": [[5, 1, 1, "", "CU_EVENT_SCHED_AUTO"], [5, 1, 1, "", "CU_EVENT_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "CU_EVENT_SCHED_SPIN"], [5, 1, 1, "", "CU_EVENT_SCHED_YIELD"]], "cuda.bindings.driver.CUevent_wait_flags": [[5, 1, 1, "", "CU_EVENT_WAIT_DEFAULT"], [5, 1, 1, "", "CU_EVENT_WAIT_EXTERNAL"]], "cuda.bindings.driver.CUexecAffinityParam": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "param"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUexecAffinityParam_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "param"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUexecAffinityParam_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "param"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUexecAffinitySmCount": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "val"]], "cuda.bindings.driver.CUexecAffinitySmCount_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "val"]], "cuda.bindings.driver.CUexecAffinitySmCount_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "val"]], "cuda.bindings.driver.CUexecAffinityType": [[5, 1, 1, "", "CU_EXEC_AFFINITY_TYPE_MAX"], [5, 1, 1, "", "CU_EXEC_AFFINITY_TYPE_SM_COUNT"]], "cuda.bindings.driver.CUexternalMemory": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUexternalMemoryHandleType": [[5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE"], [5, 1, 
1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32"], [5, 1, 1, "", "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT"]], "cuda.bindings.driver.CUexternalSemaphore": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUexternalSemaphoreHandleType": [[5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD"], [5, 1, 1, "", "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32"]], "cuda.bindings.driver.CUfilter_mode": [[5, 1, 1, "", "CU_TR_FILTER_MODE_LINEAR"], [5, 1, 1, "", "CU_TR_FILTER_MODE_POINT"]], "cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions": [[5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST"], [5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS"]], "cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope": [[5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES"], [5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER"]], "cuda.bindings.driver.CUflushGPUDirectRDMAWritesTarget": [[5, 1, 1, "", "CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX"]], "cuda.bindings.driver.CUfunc_cache": [[5, 1, 1, "", "CU_FUNC_CACHE_PREFER_EQUAL"], [5, 1, 1, "", "CU_FUNC_CACHE_PREFER_L1"], [5, 1, 1, "", "CU_FUNC_CACHE_PREFER_NONE"], [5, 1, 1, "", "CU_FUNC_CACHE_PREFER_SHARED"]], "cuda.bindings.driver.CUfunction": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUfunctionLoadingState": [[5, 1, 1, "", "CU_FUNCTION_LOADING_STATE_LOADED"], [5, 1, 1, "", "CU_FUNCTION_LOADING_STATE_MAX"], [5, 1, 1, "", "CU_FUNCTION_LOADING_STATE_UNLOADED"]], "cuda.bindings.driver.CUfunction_attribute": [[5, 1, 1, "", "CU_FUNC_ATTRIBUTE_BINARY_VERSION"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CACHE_MODE_CA"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_MAX"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_NUM_REGS"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_PTX_VERSION"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH"], [5, 1, 1, "", "CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES"]], "cuda.bindings.driver.CUgraph": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUgraphConditionalHandle": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUgraphConditionalNodeType": [[5, 1, 1, 
"", "CU_GRAPH_COND_TYPE_IF"], [5, 1, 1, "", "CU_GRAPH_COND_TYPE_WHILE"]], "cuda.bindings.driver.CUgraphDebugDot_flags": [[5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES"], [5, 1, 1, "", "CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE"]], "cuda.bindings.driver.CUgraphDependencyType": [[5, 1, 1, "", "CU_GRAPH_DEPENDENCY_TYPE_DEFAULT"], [5, 1, 1, "", "CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC"]], "cuda.bindings.driver.CUgraphDeviceNode": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUgraphEdgeData": [[5, 1, 1, "", "from_port"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "to_port"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUgraphEdgeData_st": [[5, 1, 1, "", "from_port"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "to_port"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUgraphExec": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUgraphExecUpdateResult": [[5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE"], [5, 1, 1, "", "CU_GRAPH_EXEC_UPDATE_SUCCESS"]], "cuda.bindings.driver.CUgraphExecUpdateResultInfo": [[5, 1, 1, "", "errorFromNode"], [5, 1, 1, "", "errorNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "result"]], "cuda.bindings.driver.CUgraphExecUpdateResultInfo_st": [[5, 1, 1, "", "errorFromNode"], [5, 1, 1, "", "errorNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "result"]], "cuda.bindings.driver.CUgraphExecUpdateResultInfo_v1": [[5, 1, 1, "", "errorFromNode"], [5, 1, 1, "", "errorNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "result"]], "cuda.bindings.driver.CUgraphInstantiateResult": [[5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_ERROR"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_SUCCESS"]], "cuda.bindings.driver.CUgraphInstantiate_flags": [[5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD"], [5, 1, 1, "", "CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY"]], "cuda.bindings.driver.CUgraphMem_attribute": [[5, 1, 1, "", 
"CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT"], [5, 1, 1, "", "CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH"], [5, 1, 1, "", "CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT"], [5, 1, 1, "", "CU_GRAPH_MEM_ATTR_USED_MEM_HIGH"]], "cuda.bindings.driver.CUgraphNode": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUgraphNodeParams": [[5, 1, 1, "", "alloc"], [5, 1, 1, "", "conditional"], [5, 1, 1, "", "eventRecord"], [5, 1, 1, "", "eventWait"], [5, 1, 1, "", "extSemSignal"], [5, 1, 1, "", "extSemWait"], [5, 1, 1, "", "free"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"], [5, 1, 1, "", "host"], [5, 1, 1, "", "kernel"], [5, 1, 1, "", "memOp"], [5, 1, 1, "", "memcpy"], [5, 1, 1, "", "memset"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "reserved2"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUgraphNodeParams_st": [[5, 1, 1, "", "alloc"], [5, 1, 1, "", "conditional"], [5, 1, 1, "", "eventRecord"], [5, 1, 1, "", "eventWait"], [5, 1, 1, "", "extSemSignal"], [5, 1, 1, "", "extSemWait"], [5, 1, 1, "", "free"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "graph"], [5, 1, 1, "", "host"], [5, 1, 1, "", "kernel"], [5, 1, 1, "", "memOp"], [5, 1, 1, "", "memcpy"], [5, 1, 1, "", "memset"], [5, 1, 1, "", "reserved0"], [5, 1, 1, "", "reserved1"], [5, 1, 1, "", "reserved2"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUgraphNodeType": [[5, 1, 1, "", "CU_GRAPH_NODE_TYPE_BATCH_MEM_OP"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_CONDITIONAL"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EMPTY"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EVENT_RECORD"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_GRAPH"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_HOST"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_KERNEL"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEMCPY"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEMSET"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEM_ALLOC"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_MEM_FREE"], [5, 1, 1, "", "CU_GRAPH_NODE_TYPE_WAIT_EVENT"]], "cuda.bindings.driver.CUgraphicsMapResourceFlags": [[5, 1, 1, "", "CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE"], [5, 1, 1, "", "CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY"], [5, 1, 1, "", "CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD"]], "cuda.bindings.driver.CUgraphicsRegisterFlags": [[5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_NONE"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER"], [5, 1, 1, "", "CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD"]], "cuda.bindings.driver.CUgraphicsResource": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUgreenCtx": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUgreenCtxCreate_flags": [[5, 1, 1, "", "CU_GREEN_CTX_DEFAULT_STREAM"]], "cuda.bindings.driver.CUhostFn": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUipcEventHandle": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUipcEventHandle_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUipcEventHandle_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUipcMemHandle": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUipcMemHandle_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUipcMemHandle_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUipcMem_flags": [[5, 1, 1, "", "CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS"]], 
"cuda.bindings.driver.CUjitInputType": [[5, 1, 1, "", "CU_JIT_INPUT_CUBIN"], [5, 1, 1, "", "CU_JIT_INPUT_FATBINARY"], [5, 1, 1, "", "CU_JIT_INPUT_LIBRARY"], [5, 1, 1, "", "CU_JIT_INPUT_NVVM"], [5, 1, 1, "", "CU_JIT_INPUT_OBJECT"], [5, 1, 1, "", "CU_JIT_INPUT_PTX"], [5, 1, 1, "", "CU_JIT_NUM_INPUT_TYPES"]], "cuda.bindings.driver.CUjit_cacheMode": [[5, 1, 1, "", "CU_JIT_CACHE_OPTION_CA"], [5, 1, 1, "", "CU_JIT_CACHE_OPTION_CG"], [5, 1, 1, "", "CU_JIT_CACHE_OPTION_NONE"]], "cuda.bindings.driver.CUjit_fallback": [[5, 1, 1, "", "CU_PREFER_BINARY"], [5, 1, 1, "", "CU_PREFER_PTX"]], "cuda.bindings.driver.CUjit_option": [[5, 1, 1, "", "CU_JIT_CACHE_MODE"], [5, 1, 1, "", "CU_JIT_ERROR_LOG_BUFFER"], [5, 1, 1, "", "CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES"], [5, 1, 1, "", "CU_JIT_FALLBACK_STRATEGY"], [5, 1, 1, "", "CU_JIT_FAST_COMPILE"], [5, 1, 1, "", "CU_JIT_FMA"], [5, 1, 1, "", "CU_JIT_FTZ"], [5, 1, 1, "", "CU_JIT_GENERATE_DEBUG_INFO"], [5, 1, 1, "", "CU_JIT_GENERATE_LINE_INFO"], [5, 1, 1, "", "CU_JIT_GLOBAL_SYMBOL_ADDRESSES"], [5, 1, 1, "", "CU_JIT_GLOBAL_SYMBOL_COUNT"], [5, 1, 1, "", "CU_JIT_GLOBAL_SYMBOL_NAMES"], [5, 1, 1, "", "CU_JIT_INFO_LOG_BUFFER"], [5, 1, 1, "", "CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES"], [5, 1, 1, "", "CU_JIT_LOG_VERBOSE"], [5, 1, 1, "", "CU_JIT_LTO"], [5, 1, 1, "", "CU_JIT_MAX_REGISTERS"], [5, 1, 1, "", "CU_JIT_MAX_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_JIT_MIN_CTA_PER_SM"], [5, 1, 1, "", "CU_JIT_NEW_SM3X_OPT"], [5, 1, 1, "", "CU_JIT_NUM_OPTIONS"], [5, 1, 1, "", "CU_JIT_OPTIMIZATION_LEVEL"], [5, 1, 1, "", "CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES"], [5, 1, 1, "", "CU_JIT_OVERRIDE_DIRECTIVE_VALUES"], [5, 1, 1, "", "CU_JIT_POSITION_INDEPENDENT_CODE"], [5, 1, 1, "", "CU_JIT_PREC_DIV"], [5, 1, 1, "", "CU_JIT_PREC_SQRT"], [5, 1, 1, "", "CU_JIT_REFERENCED_KERNEL_COUNT"], [5, 1, 1, "", "CU_JIT_REFERENCED_KERNEL_NAMES"], [5, 1, 1, "", "CU_JIT_REFERENCED_VARIABLE_COUNT"], [5, 1, 1, "", "CU_JIT_REFERENCED_VARIABLE_NAMES"], [5, 1, 1, "", "CU_JIT_TARGET"], [5, 1, 1, "", "CU_JIT_TARGET_FROM_CUCONTEXT"], [5, 1, 1, "", "CU_JIT_THREADS_PER_BLOCK"], [5, 1, 1, "", "CU_JIT_WALL_TIME"]], "cuda.bindings.driver.CUjit_target": [[5, 1, 1, "", "CU_TARGET_COMPUTE_30"], [5, 1, 1, "", "CU_TARGET_COMPUTE_32"], [5, 1, 1, "", "CU_TARGET_COMPUTE_35"], [5, 1, 1, "", "CU_TARGET_COMPUTE_37"], [5, 1, 1, "", "CU_TARGET_COMPUTE_50"], [5, 1, 1, "", "CU_TARGET_COMPUTE_52"], [5, 1, 1, "", "CU_TARGET_COMPUTE_53"], [5, 1, 1, "", "CU_TARGET_COMPUTE_60"], [5, 1, 1, "", "CU_TARGET_COMPUTE_61"], [5, 1, 1, "", "CU_TARGET_COMPUTE_62"], [5, 1, 1, "", "CU_TARGET_COMPUTE_70"], [5, 1, 1, "", "CU_TARGET_COMPUTE_72"], [5, 1, 1, "", "CU_TARGET_COMPUTE_75"], [5, 1, 1, "", "CU_TARGET_COMPUTE_80"], [5, 1, 1, "", "CU_TARGET_COMPUTE_86"], [5, 1, 1, "", "CU_TARGET_COMPUTE_87"], [5, 1, 1, "", "CU_TARGET_COMPUTE_89"], [5, 1, 1, "", "CU_TARGET_COMPUTE_90"], [5, 1, 1, "", "CU_TARGET_COMPUTE_90A"]], "cuda.bindings.driver.CUkernel": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUkernelNodeAttrValue": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], 
"cuda.bindings.driver.CUkernelNodeAttrValue_v1": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.bindings.driver.CUlaunchAttribute": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "value"]], "cuda.bindings.driver.CUlaunchAttributeID": [[5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_COOPERATIVE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_IGNORE"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PRIORITY"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION"], [5, 1, 1, "", "CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY"]], "cuda.bindings.driver.CUlaunchAttributeValue": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.bindings.driver.CUlaunchAttributeValue_union": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.bindings.driver.CUlaunchAttribute_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "value"]], "cuda.bindings.driver.CUlaunchConfig": [[5, 1, 1, "", "attrs"], [5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "numAttrs"], [5, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.driver.CUlaunchConfig_st": [[5, 1, 1, "", "attrs"], [5, 1, 1, "", "blockDimX"], [5, 1, 1, "", "blockDimY"], [5, 1, 1, "", "blockDimZ"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "gridDimX"], [5, 1, 1, "", "gridDimY"], [5, 1, 1, "", "gridDimZ"], [5, 1, 1, "", "hStream"], [5, 1, 1, "", "numAttrs"], [5, 1, 1, "", "sharedMemBytes"]], 
"cuda.bindings.driver.CUlaunchMemSyncDomain": [[5, 1, 1, "", "CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT"], [5, 1, 1, "", "CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE"]], "cuda.bindings.driver.CUlaunchMemSyncDomainMap": [[5, 1, 1, "", "default_"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "remote"]], "cuda.bindings.driver.CUlaunchMemSyncDomainMap_st": [[5, 1, 1, "", "default_"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "remote"]], "cuda.bindings.driver.CUlibrary": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable": [[5, 1, 1, "", "dataTable"], [5, 1, 1, "", "dataWindowSize"], [5, 1, 1, "", "functionTable"], [5, 1, 1, "", "functionWindowSize"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable_st": [[5, 1, 1, "", "dataTable"], [5, 1, 1, "", "dataWindowSize"], [5, 1, 1, "", "functionTable"], [5, 1, 1, "", "functionWindowSize"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUlibraryOption": [[5, 1, 1, "", "CU_LIBRARY_BINARY_IS_PRESERVED"], [5, 1, 1, "", "CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE"], [5, 1, 1, "", "CU_LIBRARY_NUM_OPTIONS"]], "cuda.bindings.driver.CUlimit": [[5, 1, 1, "", "CU_LIMIT_CIG_ENABLED"], [5, 1, 1, "", "CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED"], [5, 1, 1, "", "CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT"], [5, 1, 1, "", "CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH"], [5, 1, 1, "", "CU_LIMIT_MALLOC_HEAP_SIZE"], [5, 1, 1, "", "CU_LIMIT_MAX"], [5, 1, 1, "", "CU_LIMIT_MAX_L2_FETCH_GRANULARITY"], [5, 1, 1, "", "CU_LIMIT_PERSISTING_L2_CACHE_SIZE"], [5, 1, 1, "", "CU_LIMIT_PRINTF_FIFO_SIZE"], [5, 1, 1, "", "CU_LIMIT_SHMEM_SIZE"], [5, 1, 1, "", "CU_LIMIT_STACK_SIZE"]], "cuda.bindings.driver.CUlinkState": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmemAccessDesc": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"]], "cuda.bindings.driver.CUmemAccessDesc_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"]], "cuda.bindings.driver.CUmemAccessDesc_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"]], "cuda.bindings.driver.CUmemAccess_flags": [[5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_MAX"], [5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_NONE"], [5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_READ"], [5, 1, 1, "", "CU_MEM_ACCESS_FLAGS_PROT_READWRITE"]], "cuda.bindings.driver.CUmemAllocationCompType": [[5, 1, 1, "", "CU_MEM_ALLOCATION_COMP_GENERIC"], [5, 1, 1, "", "CU_MEM_ALLOCATION_COMP_NONE"]], "cuda.bindings.driver.CUmemAllocationGranularity_flags": [[5, 1, 1, "", "CU_MEM_ALLOC_GRANULARITY_MINIMUM"], [5, 1, 1, "", "CU_MEM_ALLOC_GRANULARITY_RECOMMENDED"]], "cuda.bindings.driver.CUmemAllocationHandleType": [[5, 1, 1, "", "CU_MEM_HANDLE_TYPE_FABRIC"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_MAX"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_NONE"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_WIN32"], [5, 1, 1, "", "CU_MEM_HANDLE_TYPE_WIN32_KMT"]], "cuda.bindings.driver.CUmemAllocationProp": [[5, 1, 1, "", "allocFlags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"], [5, 1, 1, "", "requestedHandleTypes"], [5, 1, 1, "", "type"], [5, 1, 1, "", "win32HandleMetaData"]], "cuda.bindings.driver.CUmemAllocationProp_st": [[5, 1, 1, "", "allocFlags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"], [5, 1, 1, "", "requestedHandleTypes"], [5, 1, 1, "", "type"], [5, 1, 1, "", "win32HandleMetaData"]], "cuda.bindings.driver.CUmemAllocationProp_v1": [[5, 1, 1, "", "allocFlags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "location"], [5, 1, 1, 
"", "requestedHandleTypes"], [5, 1, 1, "", "type"], [5, 1, 1, "", "win32HandleMetaData"]], "cuda.bindings.driver.CUmemAllocationType": [[5, 1, 1, "", "CU_MEM_ALLOCATION_TYPE_INVALID"], [5, 1, 1, "", "CU_MEM_ALLOCATION_TYPE_MAX"], [5, 1, 1, "", "CU_MEM_ALLOCATION_TYPE_PINNED"]], "cuda.bindings.driver.CUmemAttach_flags": [[5, 1, 1, "", "CU_MEM_ATTACH_GLOBAL"], [5, 1, 1, "", "CU_MEM_ATTACH_HOST"], [5, 1, 1, "", "CU_MEM_ATTACH_SINGLE"]], "cuda.bindings.driver.CUmemFabricHandle": [[5, 1, 1, "", "data"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmemFabricHandle_st": [[5, 1, 1, "", "data"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmemFabricHandle_v1": [[5, 1, 1, "", "data"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmemGenericAllocationHandle": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmemGenericAllocationHandle_v1": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmemHandleType": [[5, 1, 1, "", "CU_MEM_HANDLE_TYPE_GENERIC"]], "cuda.bindings.driver.CUmemLocation": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUmemLocationType": [[5, 1, 1, "", "CU_MEM_LOCATION_TYPE_DEVICE"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_HOST"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_HOST_NUMA"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_INVALID"], [5, 1, 1, "", "CU_MEM_LOCATION_TYPE_MAX"]], "cuda.bindings.driver.CUmemLocation_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUmemLocation_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "id"], [5, 1, 1, "", "type"]], "cuda.bindings.driver.CUmemOperationType": [[5, 1, 1, "", "CU_MEM_OPERATION_TYPE_MAP"], [5, 1, 1, "", "CU_MEM_OPERATION_TYPE_UNMAP"]], "cuda.bindings.driver.CUmemPoolProps": [[5, 1, 1, "", "allocType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "location"], [5, 1, 1, "", "maxSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "usage"], [5, 1, 1, "", "win32SecurityAttributes"]], "cuda.bindings.driver.CUmemPoolProps_st": [[5, 1, 1, "", "allocType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "location"], [5, 1, 1, "", "maxSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "usage"], [5, 1, 1, "", "win32SecurityAttributes"]], "cuda.bindings.driver.CUmemPoolProps_v1": [[5, 1, 1, "", "allocType"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "location"], [5, 1, 1, "", "maxSize"], [5, 1, 1, "", "reserved"], [5, 1, 1, "", "usage"], [5, 1, 1, "", "win32SecurityAttributes"]], "cuda.bindings.driver.CUmemPoolPtrExportData": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUmemPoolPtrExportData_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUmemPoolPtrExportData_v1": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "reserved"]], "cuda.bindings.driver.CUmemPool_attribute": [[5, 1, 1, "", "CU_MEMPOOL_ATTR_RELEASE_THRESHOLD"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_USED_MEM_CURRENT"], [5, 1, 1, "", "CU_MEMPOOL_ATTR_USED_MEM_HIGH"]], "cuda.bindings.driver.CUmemRangeHandleType": [[5, 1, 1, "", "CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD"], [5, 1, 1, "", "CU_MEM_RANGE_HANDLE_TYPE_MAX"]], 
"cuda.bindings.driver.CUmem_advise": [[5, 1, 1, "", "CU_MEM_ADVISE_SET_ACCESSED_BY"], [5, 1, 1, "", "CU_MEM_ADVISE_SET_PREFERRED_LOCATION"], [5, 1, 1, "", "CU_MEM_ADVISE_SET_READ_MOSTLY"], [5, 1, 1, "", "CU_MEM_ADVISE_UNSET_ACCESSED_BY"], [5, 1, 1, "", "CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION"], [5, 1, 1, "", "CU_MEM_ADVISE_UNSET_READ_MOSTLY"]], "cuda.bindings.driver.CUmem_range_attribute": [[5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE"], [5, 1, 1, "", "CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY"]], "cuda.bindings.driver.CUmemoryPool": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmemorytype": [[5, 1, 1, "", "CU_MEMORYTYPE_ARRAY"], [5, 1, 1, "", "CU_MEMORYTYPE_DEVICE"], [5, 1, 1, "", "CU_MEMORYTYPE_HOST"], [5, 1, 1, "", "CU_MEMORYTYPE_UNIFIED"]], "cuda.bindings.driver.CUmipmappedArray": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmodule": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUmoduleLoadingMode": [[5, 1, 1, "", "CU_MODULE_EAGER_LOADING"], [5, 1, 1, "", "CU_MODULE_LAZY_LOADING"]], "cuda.bindings.driver.CUmulticastGranularity_flags": [[5, 1, 1, "", "CU_MULTICAST_GRANULARITY_MINIMUM"], [5, 1, 1, "", "CU_MULTICAST_GRANULARITY_RECOMMENDED"]], "cuda.bindings.driver.CUmulticastObjectProp": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "numDevices"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUmulticastObjectProp_st": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "numDevices"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUmulticastObjectProp_v1": [[5, 1, 1, "", "flags"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "handleTypes"], [5, 1, 1, "", "numDevices"], [5, 1, 1, "", "size"]], "cuda.bindings.driver.CUoccupancyB2DSize": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUoccupancy_flags": [[5, 1, 1, "", "CU_OCCUPANCY_DEFAULT"], [5, 1, 1, "", "CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE"]], "cuda.bindings.driver.CUpointer_attribute": [[5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ACCESS_FLAGS"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_BUFFER_ID"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_CONTEXT"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_DEVICE_POINTER"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_HOST_POINTER"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_IS_MANAGED"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MAPPED"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MAPPING_SIZE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MEMORY_TYPE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_P2P_TOKENS"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_RANGE_SIZE"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_RANGE_START_ADDR"], [5, 1, 1, "", "CU_POINTER_ATTRIBUTE_SYNC_MEMOPS"]], "cuda.bindings.driver.CUresourceViewFormat": [[5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_1X16"], [5, 1, 1, "", 
"CU_RES_VIEW_FORMAT_FLOAT_1X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_2X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_2X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_4X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_FLOAT_4X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_NONE"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SIGNED_BC4"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SIGNED_BC5"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SIGNED_BC6H"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_1X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_1X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_1X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_2X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_2X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_2X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_4X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_4X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_SINT_4X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_1X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_1X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_1X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_2X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_2X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_2X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_4X16"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_4X32"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UINT_4X8"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC1"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC2"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC3"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC4"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC5"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC6H"], [5, 1, 1, "", "CU_RES_VIEW_FORMAT_UNSIGNED_BC7"]], "cuda.bindings.driver.CUresourcetype": [[5, 1, 1, "", "CU_RESOURCE_TYPE_ARRAY"], [5, 1, 1, "", "CU_RESOURCE_TYPE_LINEAR"], [5, 1, 1, "", "CU_RESOURCE_TYPE_MIPMAPPED_ARRAY"], [5, 1, 1, "", "CU_RESOURCE_TYPE_PITCH2D"]], "cuda.bindings.driver.CUresult": [[5, 1, 1, "", "CUDA_ERROR_ALREADY_ACQUIRED"], [5, 1, 1, "", "CUDA_ERROR_ALREADY_MAPPED"], [5, 1, 1, "", "CUDA_ERROR_ARRAY_IS_MAPPED"], [5, 1, 1, "", "CUDA_ERROR_ASSERT"], [5, 1, 1, "", "CUDA_ERROR_CAPTURED_EVENT"], [5, 1, 1, "", "CUDA_ERROR_CDP_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_CDP_VERSION_MISMATCH"], [5, 1, 1, "", "CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_CONTEXT_ALREADY_CURRENT"], [5, 1, 1, "", "CUDA_ERROR_CONTEXT_ALREADY_IN_USE"], [5, 1, 1, "", "CUDA_ERROR_CONTEXT_IS_DESTROYED"], [5, 1, 1, "", "CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE"], [5, 1, 1, "", "CUDA_ERROR_DEINITIALIZED"], [5, 1, 1, "", "CUDA_ERROR_DEVICE_NOT_LICENSED"], [5, 1, 1, "", "CUDA_ERROR_DEVICE_UNAVAILABLE"], [5, 1, 1, "", "CUDA_ERROR_ECC_UNCORRECTABLE"], [5, 1, 1, "", "CUDA_ERROR_EXTERNAL_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_FILE_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_FUNCTION_NOT_LOADED"], [5, 1, 1, "", "CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE"], [5, 1, 1, "", "CUDA_ERROR_HARDWARE_STACK_ERROR"], [5, 1, 1, "", "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED"], [5, 1, 1, "", "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED"], [5, 1, 1, "", "CUDA_ERROR_ILLEGAL_ADDRESS"], [5, 1, 1, "", "CUDA_ERROR_ILLEGAL_INSTRUCTION"], [5, 1, 1, "", "CUDA_ERROR_ILLEGAL_STATE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_ADDRESS_SPACE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_CLUSTER_SIZE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_CONTEXT"], [5, 1, 1, "", "CUDA_ERROR_INVALID_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_GRAPHICS_CONTEXT"], [5, 1, 1, "", "CUDA_ERROR_INVALID_HANDLE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_IMAGE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_PC"], [5, 1, 1, "", "CUDA_ERROR_INVALID_PTX"], [5, 1, 1, "", 
"CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION"], [5, 1, 1, "", "CUDA_ERROR_INVALID_RESOURCE_TYPE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_SOURCE"], [5, 1, 1, "", "CUDA_ERROR_INVALID_VALUE"], [5, 1, 1, "", "CUDA_ERROR_JIT_COMPILATION_DISABLED"], [5, 1, 1, "", "CUDA_ERROR_JIT_COMPILER_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_FAILED"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES"], [5, 1, 1, "", "CUDA_ERROR_LAUNCH_TIMEOUT"], [5, 1, 1, "", "CUDA_ERROR_LOSSY_QUERY"], [5, 1, 1, "", "CUDA_ERROR_MAP_FAILED"], [5, 1, 1, "", "CUDA_ERROR_MISALIGNED_ADDRESS"], [5, 1, 1, "", "CUDA_ERROR_MPS_CLIENT_TERMINATED"], [5, 1, 1, "", "CUDA_ERROR_MPS_CONNECTION_FAILED"], [5, 1, 1, "", "CUDA_ERROR_MPS_MAX_CLIENTS_REACHED"], [5, 1, 1, "", "CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED"], [5, 1, 1, "", "CUDA_ERROR_MPS_RPC_FAILURE"], [5, 1, 1, "", "CUDA_ERROR_MPS_SERVER_NOT_READY"], [5, 1, 1, "", "CUDA_ERROR_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_NOT_INITIALIZED"], [5, 1, 1, "", "CUDA_ERROR_NOT_MAPPED"], [5, 1, 1, "", "CUDA_ERROR_NOT_MAPPED_AS_ARRAY"], [5, 1, 1, "", "CUDA_ERROR_NOT_MAPPED_AS_POINTER"], [5, 1, 1, "", "CUDA_ERROR_NOT_PERMITTED"], [5, 1, 1, "", "CUDA_ERROR_NOT_READY"], [5, 1, 1, "", "CUDA_ERROR_NOT_SUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_NO_BINARY_FOR_GPU"], [5, 1, 1, "", "CUDA_ERROR_NO_DEVICE"], [5, 1, 1, "", "CUDA_ERROR_NVLINK_UNCORRECTABLE"], [5, 1, 1, "", "CUDA_ERROR_OPERATING_SYSTEM"], [5, 1, 1, "", "CUDA_ERROR_OUT_OF_MEMORY"], [5, 1, 1, "", "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED"], [5, 1, 1, "", "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED"], [5, 1, 1, "", "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE"], [5, 1, 1, "", "CUDA_ERROR_PROFILER_ALREADY_STARTED"], [5, 1, 1, "", "CUDA_ERROR_PROFILER_ALREADY_STOPPED"], [5, 1, 1, "", "CUDA_ERROR_PROFILER_DISABLED"], [5, 1, 1, "", "CUDA_ERROR_PROFILER_NOT_INITIALIZED"], [5, 1, 1, "", "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"], [5, 1, 1, "", "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_IMPLICIT"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_INVALIDATED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_ISOLATION"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_MERGE"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_UNJOINED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_UNMATCHED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED"], [5, 1, 1, "", "CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD"], [5, 1, 1, "", "CUDA_ERROR_STUB_LIBRARY"], [5, 1, 1, "", "CUDA_ERROR_SYSTEM_DRIVER_MISMATCH"], [5, 1, 1, "", "CUDA_ERROR_SYSTEM_NOT_READY"], [5, 1, 1, "", "CUDA_ERROR_TIMEOUT"], [5, 1, 1, "", "CUDA_ERROR_TOO_MANY_PEERS"], [5, 1, 1, "", "CUDA_ERROR_UNKNOWN"], [5, 1, 1, "", "CUDA_ERROR_UNMAP_FAILED"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_LIMIT"], [5, 1, 1, "", "CUDA_ERROR_UNSUPPORTED_PTX_VERSION"], [5, 1, 1, "", "CUDA_SUCCESS"]], "cuda.bindings.driver.CUshared_carveout": [[5, 1, 1, "", "CU_SHAREDMEM_CARVEOUT_DEFAULT"], [5, 1, 1, "", "CU_SHAREDMEM_CARVEOUT_MAX_L1"], [5, 1, 1, "", "CU_SHAREDMEM_CARVEOUT_MAX_SHARED"]], "cuda.bindings.driver.CUsharedconfig": [[5, 1, 1, "", "CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE"], [5, 1, 1, "", "CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE"], [5, 1, 1, "", "CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE"]], "cuda.bindings.driver.CUstream": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUstreamAttrValue": [[5, 1, 1, "", 
"accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.bindings.driver.CUstreamAttrValue_v1": [[5, 1, 1, "", "accessPolicyWindow"], [5, 1, 1, "", "clusterDim"], [5, 1, 1, "", "clusterSchedulingPolicyPreference"], [5, 1, 1, "", "cooperative"], [5, 1, 1, "", "deviceUpdatableKernelNode"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "launchCompletionEvent"], [5, 1, 1, "", "memSyncDomain"], [5, 1, 1, "", "memSyncDomainMap"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "priority"], [5, 1, 1, "", "programmaticEvent"], [5, 1, 1, "", "programmaticStreamSerializationAllowed"], [5, 1, 1, "", "sharedMemCarveout"], [5, 1, 1, "", "syncPolicy"]], "cuda.bindings.driver.CUstreamBatchMemOpParams": [[5, 1, 1, "", "flushRemoteWrites"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memoryBarrier"], [5, 1, 1, "", "operation"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "waitValue"], [5, 1, 1, "", "writeValue"]], "cuda.bindings.driver.CUstreamBatchMemOpParams_union": [[5, 1, 1, "", "flushRemoteWrites"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memoryBarrier"], [5, 1, 1, "", "operation"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "waitValue"], [5, 1, 1, "", "writeValue"]], "cuda.bindings.driver.CUstreamBatchMemOpParams_v1": [[5, 1, 1, "", "flushRemoteWrites"], [5, 2, 1, "", "getPtr"], [5, 1, 1, "", "memoryBarrier"], [5, 1, 1, "", "operation"], [5, 1, 1, "", "pad"], [5, 1, 1, "", "waitValue"], [5, 1, 1, "", "writeValue"]], "cuda.bindings.driver.CUstreamBatchMemOpType": [[5, 1, 1, "", "CU_STREAM_MEM_OP_BARRIER"], [5, 1, 1, "", "CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WAIT_VALUE_32"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WAIT_VALUE_64"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WRITE_VALUE_32"], [5, 1, 1, "", "CU_STREAM_MEM_OP_WRITE_VALUE_64"]], "cuda.bindings.driver.CUstreamCallback": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUstreamCaptureMode": [[5, 1, 1, "", "CU_STREAM_CAPTURE_MODE_GLOBAL"], [5, 1, 1, "", "CU_STREAM_CAPTURE_MODE_RELAXED"], [5, 1, 1, "", "CU_STREAM_CAPTURE_MODE_THREAD_LOCAL"]], "cuda.bindings.driver.CUstreamCaptureStatus": [[5, 1, 1, "", "CU_STREAM_CAPTURE_STATUS_ACTIVE"], [5, 1, 1, "", "CU_STREAM_CAPTURE_STATUS_INVALIDATED"], [5, 1, 1, "", "CU_STREAM_CAPTURE_STATUS_NONE"]], "cuda.bindings.driver.CUstreamMemoryBarrier_flags": [[5, 1, 1, "", "CU_STREAM_MEMORY_BARRIER_TYPE_GPU"], [5, 1, 1, "", "CU_STREAM_MEMORY_BARRIER_TYPE_SYS"]], "cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags": [[5, 1, 1, "", "CU_STREAM_ADD_CAPTURE_DEPENDENCIES"], [5, 1, 1, "", "CU_STREAM_SET_CAPTURE_DEPENDENCIES"]], "cuda.bindings.driver.CUstreamWaitValue_flags": [[5, 1, 1, "", "CU_STREAM_WAIT_VALUE_AND"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_EQ"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_FLUSH"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_GEQ"], [5, 1, 1, "", "CU_STREAM_WAIT_VALUE_NOR"]], "cuda.bindings.driver.CUstreamWriteValue_flags": [[5, 1, 1, "", "CU_STREAM_WRITE_VALUE_DEFAULT"], [5, 1, 1, "", "CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER"]], "cuda.bindings.driver.CUstream_flags": [[5, 1, 1, "", "CU_STREAM_DEFAULT"], [5, 1, 1, "", "CU_STREAM_NON_BLOCKING"]], 
"cuda.bindings.driver.CUsurfObject": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUsurfObject_v1": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUsurfref": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUsynchronizationPolicy": [[5, 1, 1, "", "CU_SYNC_POLICY_AUTO"], [5, 1, 1, "", "CU_SYNC_POLICY_BLOCKING_SYNC"], [5, 1, 1, "", "CU_SYNC_POLICY_SPIN"], [5, 1, 1, "", "CU_SYNC_POLICY_YIELD"]], "cuda.bindings.driver.CUtensorMap": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "opaque"]], "cuda.bindings.driver.CUtensorMapDataType": [[5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_BFLOAT16"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT16"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_FLOAT64"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_INT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_INT64"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_TFLOAT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT16"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT32"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT64"], [5, 1, 1, "", "CU_TENSOR_MAP_DATA_TYPE_UINT8"]], "cuda.bindings.driver.CUtensorMapFloatOOBfill": [[5, 1, 1, "", "CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA"], [5, 1, 1, "", "CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE"]], "cuda.bindings.driver.CUtensorMapInterleave": [[5, 1, 1, "", "CU_TENSOR_MAP_INTERLEAVE_16B"], [5, 1, 1, "", "CU_TENSOR_MAP_INTERLEAVE_32B"], [5, 1, 1, "", "CU_TENSOR_MAP_INTERLEAVE_NONE"]], "cuda.bindings.driver.CUtensorMapL2promotion": [[5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_L2_128B"], [5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_L2_256B"], [5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_L2_64B"], [5, 1, 1, "", "CU_TENSOR_MAP_L2_PROMOTION_NONE"]], "cuda.bindings.driver.CUtensorMapSwizzle": [[5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_128B"], [5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_32B"], [5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_64B"], [5, 1, 1, "", "CU_TENSOR_MAP_SWIZZLE_NONE"]], "cuda.bindings.driver.CUtensorMap_st": [[5, 2, 1, "", "getPtr"], [5, 1, 1, "", "opaque"]], "cuda.bindings.driver.CUtexObject": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUtexObject_v1": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUtexref": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUuserObject": [[5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUuserObjectRetain_flags": [[5, 1, 1, "", "CU_GRAPH_USER_OBJECT_MOVE"]], "cuda.bindings.driver.CUuserObject_flags": [[5, 1, 1, "", "CU_USER_OBJECT_NO_DESTRUCTOR_SYNC"]], "cuda.bindings.driver.CUuuid": [[5, 1, 1, "", "bytes"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.CUuuid_st": [[5, 1, 1, "", "bytes"], [5, 2, 1, "", "getPtr"]], "cuda.bindings.driver.cl_context_flags": [[5, 1, 1, "", "NVCL_CTX_SCHED_AUTO"], [5, 1, 1, "", "NVCL_CTX_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "NVCL_CTX_SCHED_SPIN"], [5, 1, 1, "", "NVCL_CTX_SCHED_YIELD"]], "cuda.bindings.driver.cl_event_flags": [[5, 1, 1, "", "NVCL_EVENT_SCHED_AUTO"], [5, 1, 1, "", "NVCL_EVENT_SCHED_BLOCKING_SYNC"], [5, 1, 1, "", "NVCL_EVENT_SCHED_SPIN"], [5, 1, 1, "", "NVCL_EVENT_SCHED_YIELD"]], "cuda.bindings.nvrtc": [[6, 3, 1, "", "nvrtcAddNameExpression"], [6, 3, 1, "", "nvrtcCompileProgram"], [6, 3, 1, "", "nvrtcCreateProgram"], [6, 3, 1, "", "nvrtcDestroyProgram"], [6, 3, 1, "", "nvrtcGetCUBIN"], [6, 3, 1, "", "nvrtcGetCUBINSize"], [6, 3, 1, "", "nvrtcGetErrorString"], [6, 3, 1, "", "nvrtcGetLTOIR"], [6, 3, 1, "", "nvrtcGetLTOIRSize"], [6, 3, 1, "", "nvrtcGetLoweredName"], [6, 3, 1, "", 
"nvrtcGetNVVM"], [6, 3, 1, "", "nvrtcGetNVVMSize"], [6, 3, 1, "", "nvrtcGetNumSupportedArchs"], [6, 3, 1, "", "nvrtcGetOptiXIR"], [6, 3, 1, "", "nvrtcGetOptiXIRSize"], [6, 3, 1, "", "nvrtcGetPTX"], [6, 3, 1, "", "nvrtcGetPTXSize"], [6, 3, 1, "", "nvrtcGetProgramLog"], [6, 3, 1, "", "nvrtcGetProgramLogSize"], [6, 3, 1, "", "nvrtcGetSupportedArchs"], [6, 0, 1, "", "nvrtcProgram"], [6, 0, 1, "", "nvrtcResult"], [6, 3, 1, "", "nvrtcVersion"]], "cuda.bindings.nvrtc.nvrtcProgram": [[6, 2, 1, "", "getPtr"]], "cuda.bindings.nvrtc.nvrtcResult": [[6, 1, 1, "", "NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"], [6, 1, 1, "", "NVRTC_ERROR_COMPILATION"], [6, 1, 1, "", "NVRTC_ERROR_INTERNAL_ERROR"], [6, 1, 1, "", "NVRTC_ERROR_INVALID_INPUT"], [6, 1, 1, "", "NVRTC_ERROR_INVALID_OPTION"], [6, 1, 1, "", "NVRTC_ERROR_INVALID_PROGRAM"], [6, 1, 1, "", "NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID"], [6, 1, 1, "", "NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION"], [6, 1, 1, "", "NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION"], [6, 1, 1, "", "NVRTC_ERROR_OUT_OF_MEMORY"], [6, 1, 1, "", "NVRTC_ERROR_PROGRAM_CREATION_FAILURE"], [6, 1, 1, "", "NVRTC_ERROR_TIME_FILE_WRITE_FAILED"], [6, 1, 1, "", "NVRTC_SUCCESS"]], "cuda.bindings.runtime": [[7, 1, 1, "", "CUDA_EGL_MAX_PLANES"], [7, 1, 1, "", "CUDA_IPC_HANDLE_SIZE"], [7, 0, 1, "", "CUuuid"], [7, 0, 1, "", "CUuuid_st"], [7, 0, 1, "", "cudaAccessPolicyWindow"], [7, 0, 1, "", "cudaAccessProperty"], [7, 1, 1, "", "cudaArrayColorAttachment"], [7, 1, 1, "", "cudaArrayCubemap"], [7, 1, 1, "", "cudaArrayDefault"], [7, 1, 1, "", "cudaArrayDeferredMapping"], [7, 3, 1, "", "cudaArrayGetInfo"], [7, 3, 1, "", "cudaArrayGetMemoryRequirements"], [7, 3, 1, "", "cudaArrayGetPlane"], [7, 3, 1, "", "cudaArrayGetSparseProperties"], [7, 1, 1, "", "cudaArrayLayered"], [7, 0, 1, "", "cudaArrayMemoryRequirements"], [7, 1, 1, "", "cudaArraySparse"], [7, 0, 1, "", "cudaArraySparseProperties"], [7, 1, 1, "", "cudaArraySparsePropertiesSingleMipTail"], [7, 1, 1, "", "cudaArraySurfaceLoadStore"], [7, 1, 1, "", "cudaArrayTextureGather"], [7, 0, 1, "", "cudaArray_const_t"], [7, 0, 1, "", "cudaArray_t"], [7, 0, 1, "", "cudaAsyncCallback"], [7, 0, 1, "", "cudaAsyncCallbackHandle_t"], [7, 0, 1, "", "cudaAsyncNotificationInfo"], [7, 0, 1, "", "cudaAsyncNotificationInfo_t"], [7, 0, 1, "", "cudaAsyncNotificationType"], [7, 0, 1, "", "cudaCGScope"], [7, 0, 1, "", "cudaChannelFormatDesc"], [7, 0, 1, "", "cudaChannelFormatKind"], [7, 0, 1, "", "cudaChildGraphNodeParams"], [7, 3, 1, "", "cudaChooseDevice"], [7, 0, 1, "", "cudaClusterSchedulingPolicy"], [7, 0, 1, "", "cudaComputeMode"], [7, 0, 1, "", "cudaConditionalNodeParams"], [7, 1, 1, "", "cudaCooperativeLaunchMultiDeviceNoPostSync"], [7, 1, 1, "", "cudaCooperativeLaunchMultiDeviceNoPreSync"], [7, 1, 1, "", "cudaCpuDeviceId"], [7, 3, 1, "", "cudaCreateChannelDesc"], [7, 3, 1, "", "cudaCreateSurfaceObject"], [7, 3, 1, "", "cudaCreateTextureObject"], [7, 3, 1, "", "cudaCtxResetPersistingL2Cache"], [7, 3, 1, "", "cudaDestroyExternalMemory"], [7, 3, 1, "", "cudaDestroyExternalSemaphore"], [7, 3, 1, "", "cudaDestroySurfaceObject"], [7, 3, 1, "", "cudaDestroyTextureObject"], [7, 0, 1, "", "cudaDeviceAttr"], [7, 1, 1, "", "cudaDeviceBlockingSync"], [7, 3, 1, "", "cudaDeviceCanAccessPeer"], [7, 3, 1, "", "cudaDeviceDisablePeerAccess"], [7, 3, 1, "", "cudaDeviceEnablePeerAccess"], [7, 3, 1, "", "cudaDeviceFlushGPUDirectRDMAWrites"], [7, 3, 1, "", "cudaDeviceGetAttribute"], [7, 3, 1, "", "cudaDeviceGetByPCIBusId"], [7, 3, 1, "", "cudaDeviceGetCacheConfig"], [7, 3, 1, "", 
"cudaDeviceGetDefaultMemPool"], [7, 3, 1, "", "cudaDeviceGetGraphMemAttribute"], [7, 3, 1, "", "cudaDeviceGetLimit"], [7, 3, 1, "", "cudaDeviceGetMemPool"], [7, 3, 1, "", "cudaDeviceGetNvSciSyncAttributes"], [7, 3, 1, "", "cudaDeviceGetP2PAttribute"], [7, 3, 1, "", "cudaDeviceGetPCIBusId"], [7, 3, 1, "", "cudaDeviceGetStreamPriorityRange"], [7, 3, 1, "", "cudaDeviceGetTexture1DLinearMaxWidth"], [7, 3, 1, "", "cudaDeviceGraphMemTrim"], [7, 1, 1, "", "cudaDeviceLmemResizeToMax"], [7, 1, 1, "", "cudaDeviceMapHost"], [7, 1, 1, "", "cudaDeviceMask"], [7, 0, 1, "", "cudaDeviceNumaConfig"], [7, 0, 1, "", "cudaDeviceP2PAttr"], [7, 0, 1, "", "cudaDeviceProp"], [7, 3, 1, "", "cudaDeviceRegisterAsyncNotification"], [7, 3, 1, "", "cudaDeviceReset"], [7, 1, 1, "", "cudaDeviceScheduleAuto"], [7, 1, 1, "", "cudaDeviceScheduleBlockingSync"], [7, 1, 1, "", "cudaDeviceScheduleMask"], [7, 1, 1, "", "cudaDeviceScheduleSpin"], [7, 1, 1, "", "cudaDeviceScheduleYield"], [7, 3, 1, "", "cudaDeviceSetCacheConfig"], [7, 3, 1, "", "cudaDeviceSetGraphMemAttribute"], [7, 3, 1, "", "cudaDeviceSetLimit"], [7, 3, 1, "", "cudaDeviceSetMemPool"], [7, 1, 1, "", "cudaDeviceSyncMemops"], [7, 3, 1, "", "cudaDeviceSynchronize"], [7, 3, 1, "", "cudaDeviceUnregisterAsyncNotification"], [7, 0, 1, "", "cudaDriverEntryPointQueryResult"], [7, 3, 1, "", "cudaDriverGetVersion"], [7, 3, 1, "", "cudaEGLStreamConsumerAcquireFrame"], [7, 3, 1, "", "cudaEGLStreamConsumerConnect"], [7, 3, 1, "", "cudaEGLStreamConsumerConnectWithFlags"], [7, 3, 1, "", "cudaEGLStreamConsumerDisconnect"], [7, 3, 1, "", "cudaEGLStreamConsumerReleaseFrame"], [7, 3, 1, "", "cudaEGLStreamProducerConnect"], [7, 3, 1, "", "cudaEGLStreamProducerDisconnect"], [7, 3, 1, "", "cudaEGLStreamProducerPresentFrame"], [7, 3, 1, "", "cudaEGLStreamProducerReturnFrame"], [7, 0, 1, "", "cudaEglColorFormat"], [7, 0, 1, "", "cudaEglFrame"], [7, 0, 1, "", "cudaEglFrameType"], [7, 0, 1, "", "cudaEglFrame_st"], [7, 0, 1, "", "cudaEglPlaneDesc"], [7, 0, 1, "", "cudaEglPlaneDesc_st"], [7, 0, 1, "", "cudaEglResourceLocationFlags"], [7, 0, 1, "", "cudaEglStreamConnection"], [7, 0, 1, "", "cudaError_t"], [7, 1, 1, "", "cudaEventBlockingSync"], [7, 3, 1, "", "cudaEventCreate"], [7, 3, 1, "", "cudaEventCreateFromEGLSync"], [7, 3, 1, "", "cudaEventCreateWithFlags"], [7, 1, 1, "", "cudaEventDefault"], [7, 3, 1, "", "cudaEventDestroy"], [7, 1, 1, "", "cudaEventDisableTiming"], [7, 3, 1, "", "cudaEventElapsedTime"], [7, 1, 1, "", "cudaEventInterprocess"], [7, 3, 1, "", "cudaEventQuery"], [7, 3, 1, "", "cudaEventRecord"], [7, 1, 1, "", "cudaEventRecordDefault"], [7, 1, 1, "", "cudaEventRecordExternal"], [7, 0, 1, "", "cudaEventRecordNodeParams"], [7, 3, 1, "", "cudaEventRecordWithFlags"], [7, 3, 1, "", "cudaEventSynchronize"], [7, 1, 1, "", "cudaEventWaitDefault"], [7, 1, 1, "", "cudaEventWaitExternal"], [7, 0, 1, "", "cudaEventWaitNodeParams"], [7, 0, 1, "", "cudaEvent_t"], [7, 0, 1, "", "cudaExtent"], [7, 0, 1, "", "cudaExternalMemoryBufferDesc"], [7, 1, 1, "", "cudaExternalMemoryDedicated"], [7, 3, 1, "", "cudaExternalMemoryGetMappedBuffer"], [7, 3, 1, "", "cudaExternalMemoryGetMappedMipmappedArray"], [7, 0, 1, "", "cudaExternalMemoryHandleDesc"], [7, 0, 1, "", "cudaExternalMemoryHandleType"], [7, 0, 1, "", "cudaExternalMemoryMipmappedArrayDesc"], [7, 0, 1, "", "cudaExternalMemory_t"], [7, 0, 1, "", "cudaExternalSemaphoreHandleDesc"], [7, 0, 1, "", "cudaExternalSemaphoreHandleType"], [7, 0, 1, "", "cudaExternalSemaphoreSignalNodeParams"], [7, 0, 1, "", "cudaExternalSemaphoreSignalNodeParamsV2"], 
[7, 0, 1, "", "cudaExternalSemaphoreSignalParams"], [7, 1, 1, "", "cudaExternalSemaphoreSignalSkipNvSciBufMemSync"], [7, 0, 1, "", "cudaExternalSemaphoreWaitNodeParams"], [7, 0, 1, "", "cudaExternalSemaphoreWaitNodeParamsV2"], [7, 0, 1, "", "cudaExternalSemaphoreWaitParams"], [7, 1, 1, "", "cudaExternalSemaphoreWaitSkipNvSciBufMemSync"], [7, 0, 1, "", "cudaExternalSemaphore_t"], [7, 0, 1, "", "cudaFlushGPUDirectRDMAWritesOptions"], [7, 0, 1, "", "cudaFlushGPUDirectRDMAWritesScope"], [7, 0, 1, "", "cudaFlushGPUDirectRDMAWritesTarget"], [7, 3, 1, "", "cudaFree"], [7, 3, 1, "", "cudaFreeArray"], [7, 3, 1, "", "cudaFreeAsync"], [7, 3, 1, "", "cudaFreeHost"], [7, 3, 1, "", "cudaFreeMipmappedArray"], [7, 0, 1, "", "cudaFuncAttribute"], [7, 0, 1, "", "cudaFuncAttributes"], [7, 0, 1, "", "cudaFuncCache"], [7, 3, 1, "", "cudaFuncGetAttributes"], [7, 3, 1, "", "cudaFuncSetAttribute"], [7, 3, 1, "", "cudaFuncSetCacheConfig"], [7, 0, 1, "", "cudaFunction_t"], [7, 0, 1, "", "cudaGLDeviceList"], [7, 3, 1, "", "cudaGLGetDevices"], [7, 0, 1, "", "cudaGPUDirectRDMAWritesOrdering"], [7, 3, 1, "", "cudaGetChannelDesc"], [7, 3, 1, "", "cudaGetDevice"], [7, 3, 1, "", "cudaGetDeviceCount"], [7, 3, 1, "", "cudaGetDeviceFlags"], [7, 3, 1, "", "cudaGetDeviceProperties"], [7, 3, 1, "", "cudaGetDriverEntryPoint"], [7, 3, 1, "", "cudaGetDriverEntryPointByVersion"], [7, 0, 1, "", "cudaGetDriverEntryPointFlags"], [7, 3, 1, "", "cudaGetErrorName"], [7, 3, 1, "", "cudaGetErrorString"], [7, 3, 1, "", "cudaGetKernel"], [7, 3, 1, "", "cudaGetLastError"], [7, 3, 1, "", "cudaGetMipmappedArrayLevel"], [7, 3, 1, "", "cudaGetSurfaceObjectResourceDesc"], [7, 3, 1, "", "cudaGetTextureObjectResourceDesc"], [7, 3, 1, "", "cudaGetTextureObjectResourceViewDesc"], [7, 3, 1, "", "cudaGetTextureObjectTextureDesc"], [7, 3, 1, "", "cudaGraphAddChildGraphNode"], [7, 3, 1, "", "cudaGraphAddDependencies"], [7, 3, 1, "", "cudaGraphAddDependencies_v2"], [7, 3, 1, "", "cudaGraphAddEmptyNode"], [7, 3, 1, "", "cudaGraphAddEventRecordNode"], [7, 3, 1, "", "cudaGraphAddEventWaitNode"], [7, 3, 1, "", "cudaGraphAddExternalSemaphoresSignalNode"], [7, 3, 1, "", "cudaGraphAddExternalSemaphoresWaitNode"], [7, 3, 1, "", "cudaGraphAddHostNode"], [7, 3, 1, "", "cudaGraphAddKernelNode"], [7, 3, 1, "", "cudaGraphAddMemAllocNode"], [7, 3, 1, "", "cudaGraphAddMemFreeNode"], [7, 3, 1, "", "cudaGraphAddMemcpyNode"], [7, 3, 1, "", "cudaGraphAddMemcpyNode1D"], [7, 3, 1, "", "cudaGraphAddMemsetNode"], [7, 3, 1, "", "cudaGraphAddNode"], [7, 3, 1, "", "cudaGraphAddNode_v2"], [7, 3, 1, "", "cudaGraphChildGraphNodeGetGraph"], [7, 3, 1, "", "cudaGraphClone"], [7, 0, 1, "", "cudaGraphConditionalHandle"], [7, 3, 1, "", "cudaGraphConditionalHandleCreate"], [7, 0, 1, "", "cudaGraphConditionalHandleFlags"], [7, 0, 1, "", "cudaGraphConditionalNodeType"], [7, 3, 1, "", "cudaGraphCreate"], [7, 0, 1, "", "cudaGraphDebugDotFlags"], [7, 3, 1, "", "cudaGraphDebugDotPrint"], [7, 0, 1, "", "cudaGraphDependencyType"], [7, 3, 1, "", "cudaGraphDestroy"], [7, 3, 1, "", "cudaGraphDestroyNode"], [7, 0, 1, "", "cudaGraphDeviceNode_t"], [7, 0, 1, "", "cudaGraphEdgeData"], [7, 0, 1, "", "cudaGraphEdgeData_st"], [7, 3, 1, "", "cudaGraphEventRecordNodeGetEvent"], [7, 3, 1, "", "cudaGraphEventRecordNodeSetEvent"], [7, 3, 1, "", "cudaGraphEventWaitNodeGetEvent"], [7, 3, 1, "", "cudaGraphEventWaitNodeSetEvent"], [7, 3, 1, "", "cudaGraphExecChildGraphNodeSetParams"], [7, 3, 1, "", "cudaGraphExecDestroy"], [7, 3, 1, "", "cudaGraphExecEventRecordNodeSetEvent"], [7, 3, 1, "", 
"cudaGraphExecEventWaitNodeSetEvent"], [7, 3, 1, "", "cudaGraphExecExternalSemaphoresSignalNodeSetParams"], [7, 3, 1, "", "cudaGraphExecExternalSemaphoresWaitNodeSetParams"], [7, 3, 1, "", "cudaGraphExecGetFlags"], [7, 3, 1, "", "cudaGraphExecHostNodeSetParams"], [7, 3, 1, "", "cudaGraphExecKernelNodeSetParams"], [7, 3, 1, "", "cudaGraphExecMemcpyNodeSetParams"], [7, 3, 1, "", "cudaGraphExecMemcpyNodeSetParams1D"], [7, 3, 1, "", "cudaGraphExecMemsetNodeSetParams"], [7, 3, 1, "", "cudaGraphExecNodeSetParams"], [7, 3, 1, "", "cudaGraphExecUpdate"], [7, 0, 1, "", "cudaGraphExecUpdateResult"], [7, 0, 1, "", "cudaGraphExecUpdateResultInfo"], [7, 0, 1, "", "cudaGraphExecUpdateResultInfo_st"], [7, 0, 1, "", "cudaGraphExec_t"], [7, 3, 1, "", "cudaGraphExternalSemaphoresSignalNodeGetParams"], [7, 3, 1, "", "cudaGraphExternalSemaphoresSignalNodeSetParams"], [7, 3, 1, "", "cudaGraphExternalSemaphoresWaitNodeGetParams"], [7, 3, 1, "", "cudaGraphExternalSemaphoresWaitNodeSetParams"], [7, 3, 1, "", "cudaGraphGetEdges"], [7, 3, 1, "", "cudaGraphGetEdges_v2"], [7, 3, 1, "", "cudaGraphGetNodes"], [7, 3, 1, "", "cudaGraphGetRootNodes"], [7, 3, 1, "", "cudaGraphHostNodeGetParams"], [7, 3, 1, "", "cudaGraphHostNodeSetParams"], [7, 3, 1, "", "cudaGraphInstantiate"], [7, 0, 1, "", "cudaGraphInstantiateFlags"], [7, 0, 1, "", "cudaGraphInstantiateParams"], [7, 0, 1, "", "cudaGraphInstantiateParams_st"], [7, 0, 1, "", "cudaGraphInstantiateResult"], [7, 3, 1, "", "cudaGraphInstantiateWithFlags"], [7, 3, 1, "", "cudaGraphInstantiateWithParams"], [7, 3, 1, "", "cudaGraphKernelNodeCopyAttributes"], [7, 0, 1, "", "cudaGraphKernelNodeField"], [7, 3, 1, "", "cudaGraphKernelNodeGetAttribute"], [7, 3, 1, "", "cudaGraphKernelNodeGetParams"], [7, 1, 1, "", "cudaGraphKernelNodePortDefault"], [7, 1, 1, "", "cudaGraphKernelNodePortLaunchCompletion"], [7, 1, 1, "", "cudaGraphKernelNodePortProgrammatic"], [7, 3, 1, "", "cudaGraphKernelNodeSetAttribute"], [7, 3, 1, "", "cudaGraphKernelNodeSetParams"], [7, 0, 1, "", "cudaGraphKernelNodeUpdate"], [7, 3, 1, "", "cudaGraphLaunch"], [7, 3, 1, "", "cudaGraphMemAllocNodeGetParams"], [7, 0, 1, "", "cudaGraphMemAttributeType"], [7, 3, 1, "", "cudaGraphMemFreeNodeGetParams"], [7, 3, 1, "", "cudaGraphMemcpyNodeGetParams"], [7, 3, 1, "", "cudaGraphMemcpyNodeSetParams"], [7, 3, 1, "", "cudaGraphMemcpyNodeSetParams1D"], [7, 3, 1, "", "cudaGraphMemsetNodeGetParams"], [7, 3, 1, "", "cudaGraphMemsetNodeSetParams"], [7, 3, 1, "", "cudaGraphNodeFindInClone"], [7, 3, 1, "", "cudaGraphNodeGetDependencies"], [7, 3, 1, "", "cudaGraphNodeGetDependencies_v2"], [7, 3, 1, "", "cudaGraphNodeGetDependentNodes"], [7, 3, 1, "", "cudaGraphNodeGetDependentNodes_v2"], [7, 3, 1, "", "cudaGraphNodeGetEnabled"], [7, 3, 1, "", "cudaGraphNodeGetType"], [7, 0, 1, "", "cudaGraphNodeParams"], [7, 3, 1, "", "cudaGraphNodeSetEnabled"], [7, 3, 1, "", "cudaGraphNodeSetParams"], [7, 0, 1, "", "cudaGraphNodeType"], [7, 0, 1, "", "cudaGraphNode_t"], [7, 3, 1, "", "cudaGraphReleaseUserObject"], [7, 3, 1, "", "cudaGraphRemoveDependencies"], [7, 3, 1, "", "cudaGraphRemoveDependencies_v2"], [7, 3, 1, "", "cudaGraphRetainUserObject"], [7, 3, 1, "", "cudaGraphUpload"], [7, 0, 1, "", "cudaGraph_t"], [7, 0, 1, "", "cudaGraphicsCubeFace"], [7, 3, 1, "", "cudaGraphicsEGLRegisterImage"], [7, 3, 1, "", "cudaGraphicsGLRegisterBuffer"], [7, 3, 1, "", "cudaGraphicsGLRegisterImage"], [7, 0, 1, "", "cudaGraphicsMapFlags"], [7, 3, 1, "", "cudaGraphicsMapResources"], [7, 0, 1, "", "cudaGraphicsRegisterFlags"], [7, 3, 1, "", 
"cudaGraphicsResourceGetMappedEglFrame"], [7, 3, 1, "", "cudaGraphicsResourceGetMappedMipmappedArray"], [7, 3, 1, "", "cudaGraphicsResourceGetMappedPointer"], [7, 3, 1, "", "cudaGraphicsResourceSetMapFlags"], [7, 0, 1, "", "cudaGraphicsResource_t"], [7, 3, 1, "", "cudaGraphicsSubResourceGetMappedArray"], [7, 3, 1, "", "cudaGraphicsUnmapResources"], [7, 3, 1, "", "cudaGraphicsUnregisterResource"], [7, 3, 1, "", "cudaGraphicsVDPAURegisterOutputSurface"], [7, 3, 1, "", "cudaGraphicsVDPAURegisterVideoSurface"], [7, 3, 1, "", "cudaHostAlloc"], [7, 1, 1, "", "cudaHostAllocDefault"], [7, 1, 1, "", "cudaHostAllocMapped"], [7, 1, 1, "", "cudaHostAllocPortable"], [7, 1, 1, "", "cudaHostAllocWriteCombined"], [7, 0, 1, "", "cudaHostFn_t"], [7, 3, 1, "", "cudaHostGetDevicePointer"], [7, 3, 1, "", "cudaHostGetFlags"], [7, 0, 1, "", "cudaHostNodeParams"], [7, 0, 1, "", "cudaHostNodeParamsV2"], [7, 3, 1, "", "cudaHostRegister"], [7, 1, 1, "", "cudaHostRegisterDefault"], [7, 1, 1, "", "cudaHostRegisterIoMemory"], [7, 1, 1, "", "cudaHostRegisterMapped"], [7, 1, 1, "", "cudaHostRegisterPortable"], [7, 1, 1, "", "cudaHostRegisterReadOnly"], [7, 3, 1, "", "cudaHostUnregister"], [7, 3, 1, "", "cudaImportExternalMemory"], [7, 3, 1, "", "cudaImportExternalSemaphore"], [7, 3, 1, "", "cudaInitDevice"], [7, 1, 1, "", "cudaInitDeviceFlagsAreValid"], [7, 1, 1, "", "cudaInvalidDeviceId"], [7, 3, 1, "", "cudaIpcCloseMemHandle"], [7, 0, 1, "", "cudaIpcEventHandle_st"], [7, 0, 1, "", "cudaIpcEventHandle_t"], [7, 3, 1, "", "cudaIpcGetEventHandle"], [7, 3, 1, "", "cudaIpcGetMemHandle"], [7, 0, 1, "", "cudaIpcMemHandle_st"], [7, 0, 1, "", "cudaIpcMemHandle_t"], [7, 1, 1, "", "cudaIpcMemLazyEnablePeerAccess"], [7, 3, 1, "", "cudaIpcOpenEventHandle"], [7, 3, 1, "", "cudaIpcOpenMemHandle"], [7, 1, 1, "", "cudaKernelNodeAttrID"], [7, 1, 1, "", "cudaKernelNodeAttrValue"], [7, 1, 1, "", "cudaKernelNodeAttributeAccessPolicyWindow"], [7, 1, 1, "", "cudaKernelNodeAttributeClusterDimension"], [7, 1, 1, "", "cudaKernelNodeAttributeClusterSchedulingPolicyPreference"], [7, 1, 1, "", "cudaKernelNodeAttributeCooperative"], [7, 1, 1, "", "cudaKernelNodeAttributeDeviceUpdatableKernelNode"], [7, 1, 1, "", "cudaKernelNodeAttributeMemSyncDomain"], [7, 1, 1, "", "cudaKernelNodeAttributeMemSyncDomainMap"], [7, 1, 1, "", "cudaKernelNodeAttributePreferredSharedMemoryCarveout"], [7, 1, 1, "", "cudaKernelNodeAttributePriority"], [7, 0, 1, "", "cudaKernelNodeParams"], [7, 0, 1, "", "cudaKernelNodeParamsV2"], [7, 0, 1, "", "cudaKernel_t"], [7, 0, 1, "", "cudaLaunchAttribute"], [7, 0, 1, "", "cudaLaunchAttributeID"], [7, 0, 1, "id0", "cudaLaunchAttributeValue"], [7, 0, 1, "", "cudaLaunchAttribute_st"], [7, 3, 1, "", "cudaLaunchHostFunc"], [7, 0, 1, "", "cudaLaunchMemSyncDomain"], [7, 0, 1, "", "cudaLaunchMemSyncDomainMap"], [7, 0, 1, "", "cudaLaunchMemSyncDomainMap_st"], [7, 0, 1, "", "cudaLimit"], [7, 3, 1, "", "cudaMalloc"], [7, 3, 1, "", "cudaMalloc3D"], [7, 3, 1, "", "cudaMalloc3DArray"], [7, 3, 1, "", "cudaMallocArray"], [7, 3, 1, "", "cudaMallocAsync"], [7, 3, 1, "", "cudaMallocFromPoolAsync"], [7, 3, 1, "", "cudaMallocHost"], [7, 3, 1, "", "cudaMallocManaged"], [7, 3, 1, "", "cudaMallocMipmappedArray"], [7, 3, 1, "", "cudaMallocPitch"], [7, 0, 1, "", "cudaMemAccessDesc"], [7, 0, 1, "", "cudaMemAccessFlags"], [7, 3, 1, "", "cudaMemAdvise"], [7, 3, 1, "", "cudaMemAdvise_v2"], [7, 0, 1, "", "cudaMemAllocNodeParams"], [7, 0, 1, "", "cudaMemAllocNodeParamsV2"], [7, 0, 1, "", "cudaMemAllocationHandleType"], [7, 0, 1, "", "cudaMemAllocationType"], [7, 
1, 1, "", "cudaMemAttachGlobal"], [7, 1, 1, "", "cudaMemAttachHost"], [7, 1, 1, "", "cudaMemAttachSingle"], [7, 0, 1, "", "cudaMemFabricHandle_st"], [7, 0, 1, "", "cudaMemFabricHandle_t"], [7, 0, 1, "", "cudaMemFreeNodeParams"], [7, 3, 1, "", "cudaMemGetInfo"], [7, 0, 1, "", "cudaMemLocation"], [7, 0, 1, "", "cudaMemLocationType"], [7, 0, 1, "", "cudaMemPoolAttr"], [7, 3, 1, "", "cudaMemPoolCreate"], [7, 3, 1, "", "cudaMemPoolDestroy"], [7, 3, 1, "", "cudaMemPoolExportPointer"], [7, 3, 1, "", "cudaMemPoolExportToShareableHandle"], [7, 3, 1, "", "cudaMemPoolGetAccess"], [7, 3, 1, "", "cudaMemPoolGetAttribute"], [7, 3, 1, "", "cudaMemPoolImportFromShareableHandle"], [7, 3, 1, "", "cudaMemPoolImportPointer"], [7, 0, 1, "", "cudaMemPoolProps"], [7, 0, 1, "", "cudaMemPoolPtrExportData"], [7, 3, 1, "", "cudaMemPoolSetAccess"], [7, 3, 1, "", "cudaMemPoolSetAttribute"], [7, 3, 1, "", "cudaMemPoolTrimTo"], [7, 0, 1, "", "cudaMemPool_t"], [7, 3, 1, "", "cudaMemPrefetchAsync"], [7, 3, 1, "", "cudaMemPrefetchAsync_v2"], [7, 0, 1, "", "cudaMemRangeAttribute"], [7, 3, 1, "", "cudaMemRangeGetAttribute"], [7, 3, 1, "", "cudaMemRangeGetAttributes"], [7, 3, 1, "", "cudaMemcpy"], [7, 3, 1, "", "cudaMemcpy2D"], [7, 3, 1, "", "cudaMemcpy2DArrayToArray"], [7, 3, 1, "", "cudaMemcpy2DAsync"], [7, 3, 1, "", "cudaMemcpy2DFromArray"], [7, 3, 1, "", "cudaMemcpy2DFromArrayAsync"], [7, 3, 1, "", "cudaMemcpy2DToArray"], [7, 3, 1, "", "cudaMemcpy2DToArrayAsync"], [7, 3, 1, "", "cudaMemcpy3D"], [7, 3, 1, "", "cudaMemcpy3DAsync"], [7, 0, 1, "", "cudaMemcpy3DParms"], [7, 3, 1, "", "cudaMemcpy3DPeer"], [7, 3, 1, "", "cudaMemcpy3DPeerAsync"], [7, 0, 1, "", "cudaMemcpy3DPeerParms"], [7, 3, 1, "", "cudaMemcpyAsync"], [7, 0, 1, "", "cudaMemcpyKind"], [7, 0, 1, "", "cudaMemcpyNodeParams"], [7, 3, 1, "", "cudaMemcpyPeer"], [7, 3, 1, "", "cudaMemcpyPeerAsync"], [7, 0, 1, "", "cudaMemoryAdvise"], [7, 0, 1, "", "cudaMemoryType"], [7, 3, 1, "", "cudaMemset"], [7, 3, 1, "", "cudaMemset2D"], [7, 3, 1, "", "cudaMemset2DAsync"], [7, 3, 1, "", "cudaMemset3D"], [7, 3, 1, "", "cudaMemset3DAsync"], [7, 3, 1, "", "cudaMemsetAsync"], [7, 0, 1, "", "cudaMemsetParams"], [7, 0, 1, "", "cudaMemsetParamsV2"], [7, 3, 1, "", "cudaMipmappedArrayGetMemoryRequirements"], [7, 3, 1, "", "cudaMipmappedArrayGetSparseProperties"], [7, 0, 1, "", "cudaMipmappedArray_const_t"], [7, 0, 1, "", "cudaMipmappedArray_t"], [7, 1, 1, "", "cudaNvSciSyncAttrSignal"], [7, 1, 1, "", "cudaNvSciSyncAttrWait"], [7, 3, 1, "", "cudaOccupancyAvailableDynamicSMemPerBlock"], [7, 1, 1, "", "cudaOccupancyDefault"], [7, 1, 1, "", "cudaOccupancyDisableCachingOverride"], [7, 3, 1, "", "cudaOccupancyMaxActiveBlocksPerMultiprocessor"], [7, 3, 1, "", "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags"], [7, 3, 1, "", "cudaPeekAtLastError"], [7, 1, 1, "", "cudaPeerAccessDefault"], [7, 0, 1, "", "cudaPitchedPtr"], [7, 0, 1, "", "cudaPointerAttributes"], [7, 3, 1, "", "cudaPointerGetAttributes"], [7, 0, 1, "", "cudaPos"], [7, 3, 1, "", "cudaProfilerStart"], [7, 3, 1, "", "cudaProfilerStop"], [7, 0, 1, "", "cudaResourceDesc"], [7, 0, 1, "", "cudaResourceType"], [7, 0, 1, "", "cudaResourceViewDesc"], [7, 0, 1, "", "cudaResourceViewFormat"], [7, 3, 1, "", "cudaRuntimeGetVersion"], [7, 3, 1, "", "cudaSetDevice"], [7, 3, 1, "", "cudaSetDeviceFlags"], [7, 0, 1, "", "cudaSharedCarveout"], [7, 0, 1, "", "cudaSharedMemConfig"], [7, 3, 1, "", "cudaSignalExternalSemaphoresAsync"], [7, 3, 1, "", "cudaStreamAddCallback"], [7, 3, 1, "", "cudaStreamAttachMemAsync"], [7, 1, 1, "", "cudaStreamAttrID"], 
[7, 1, 1, "", "cudaStreamAttrValue"], [7, 1, 1, "", "cudaStreamAttributeAccessPolicyWindow"], [7, 1, 1, "", "cudaStreamAttributeMemSyncDomain"], [7, 1, 1, "", "cudaStreamAttributeMemSyncDomainMap"], [7, 1, 1, "", "cudaStreamAttributePriority"], [7, 1, 1, "", "cudaStreamAttributeSynchronizationPolicy"], [7, 3, 1, "", "cudaStreamBeginCapture"], [7, 3, 1, "", "cudaStreamBeginCaptureToGraph"], [7, 0, 1, "", "cudaStreamCallback_t"], [7, 0, 1, "", "cudaStreamCaptureMode"], [7, 0, 1, "", "cudaStreamCaptureStatus"], [7, 3, 1, "", "cudaStreamCopyAttributes"], [7, 3, 1, "", "cudaStreamCreate"], [7, 3, 1, "", "cudaStreamCreateWithFlags"], [7, 3, 1, "", "cudaStreamCreateWithPriority"], [7, 1, 1, "", "cudaStreamDefault"], [7, 3, 1, "", "cudaStreamDestroy"], [7, 3, 1, "", "cudaStreamEndCapture"], [7, 3, 1, "", "cudaStreamGetAttribute"], [7, 3, 1, "", "cudaStreamGetCaptureInfo"], [7, 3, 1, "", "cudaStreamGetCaptureInfo_v3"], [7, 3, 1, "", "cudaStreamGetFlags"], [7, 3, 1, "", "cudaStreamGetId"], [7, 3, 1, "", "cudaStreamGetPriority"], [7, 3, 1, "", "cudaStreamIsCapturing"], [7, 1, 1, "", "cudaStreamLegacy"], [7, 1, 1, "", "cudaStreamNonBlocking"], [7, 1, 1, "", "cudaStreamPerThread"], [7, 3, 1, "", "cudaStreamQuery"], [7, 3, 1, "", "cudaStreamSetAttribute"], [7, 3, 1, "", "cudaStreamSynchronize"], [7, 3, 1, "", "cudaStreamUpdateCaptureDependencies"], [7, 0, 1, "", "cudaStreamUpdateCaptureDependenciesFlags"], [7, 3, 1, "", "cudaStreamUpdateCaptureDependencies_v2"], [7, 3, 1, "", "cudaStreamWaitEvent"], [7, 0, 1, "", "cudaStream_t"], [7, 0, 1, "", "cudaSurfaceBoundaryMode"], [7, 0, 1, "", "cudaSurfaceFormatMode"], [7, 0, 1, "", "cudaSurfaceObject_t"], [7, 1, 1, "", "cudaSurfaceType1D"], [7, 1, 1, "", "cudaSurfaceType1DLayered"], [7, 1, 1, "", "cudaSurfaceType2D"], [7, 1, 1, "", "cudaSurfaceType2DLayered"], [7, 1, 1, "", "cudaSurfaceType3D"], [7, 1, 1, "", "cudaSurfaceTypeCubemap"], [7, 1, 1, "", "cudaSurfaceTypeCubemapLayered"], [7, 0, 1, "", "cudaSynchronizationPolicy"], [7, 0, 1, "", "cudaTextureAddressMode"], [7, 0, 1, "", "cudaTextureDesc"], [7, 0, 1, "", "cudaTextureFilterMode"], [7, 0, 1, "", "cudaTextureObject_t"], [7, 0, 1, "", "cudaTextureReadMode"], [7, 1, 1, "", "cudaTextureType1D"], [7, 1, 1, "", "cudaTextureType1DLayered"], [7, 1, 1, "", "cudaTextureType2D"], [7, 1, 1, "", "cudaTextureType2DLayered"], [7, 1, 1, "", "cudaTextureType3D"], [7, 1, 1, "", "cudaTextureTypeCubemap"], [7, 1, 1, "", "cudaTextureTypeCubemapLayered"], [7, 3, 1, "", "cudaThreadExchangeStreamCaptureMode"], [7, 0, 1, "", "cudaUUID_t"], [7, 3, 1, "", "cudaUserObjectCreate"], [7, 0, 1, "", "cudaUserObjectFlags"], [7, 3, 1, "", "cudaUserObjectRelease"], [7, 3, 1, "", "cudaUserObjectRetain"], [7, 0, 1, "", "cudaUserObjectRetainFlags"], [7, 0, 1, "", "cudaUserObject_t"], [7, 3, 1, "", "cudaVDPAUGetDevice"], [7, 3, 1, "", "cudaVDPAUSetVDPAUDevice"], [7, 3, 1, "", "cudaWaitExternalSemaphoresAsync"], [7, 3, 1, "", "getLocalRuntimeVersion"], [7, 3, 1, "", "make_cudaExtent"], [7, 3, 1, "", "make_cudaPitchedPtr"], [7, 3, 1, "", "make_cudaPos"]], "cuda.bindings.runtime.CUuuid": [[7, 1, 1, "", "bytes"], [7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.CUuuid_st": [[7, 1, 1, "", "bytes"], [7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaAccessPolicyWindow": [[7, 1, 1, "", "base_ptr"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "hitProp"], [7, 1, 1, "", "hitRatio"], [7, 1, 1, "", "missProp"], [7, 1, 1, "", "num_bytes"]], "cuda.bindings.runtime.cudaAccessProperty": [[7, 1, 1, "", "cudaAccessPropertyNormal"], [7, 1, 1, "", 
"cudaAccessPropertyPersisting"], [7, 1, 1, "", "cudaAccessPropertyStreaming"]], "cuda.bindings.runtime.cudaArrayMemoryRequirements": [[7, 1, 1, "", "alignment"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "size"]], "cuda.bindings.runtime.cudaArraySparseProperties": [[7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "miptailFirstLevel"], [7, 1, 1, "", "miptailSize"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "tileExtent"]], "cuda.bindings.runtime.cudaArray_const_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaArray_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaAsyncCallback": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaAsyncCallbackHandle_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaAsyncNotificationInfo": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "info"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaAsyncNotificationInfo_t": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "info"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaAsyncNotificationType": [[7, 1, 1, "", "cudaAsyncNotificationTypeOverBudget"]], "cuda.bindings.runtime.cudaCGScope": [[7, 1, 1, "", "cudaCGScopeGrid"], [7, 1, 1, "", "cudaCGScopeInvalid"], [7, 1, 1, "", "cudaCGScopeMultiGrid"]], "cuda.bindings.runtime.cudaChannelFormatDesc": [[7, 1, 1, "", "f"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "w"], [7, 1, 1, "", "x"], [7, 1, 1, "", "y"], [7, 1, 1, "", "z"]], "cuda.bindings.runtime.cudaChannelFormatKind": [[7, 1, 1, "", "cudaChannelFormatKindFloat"], [7, 1, 1, "", "cudaChannelFormatKindNV12"], [7, 1, 1, "", "cudaChannelFormatKindNone"], [7, 1, 1, "", "cudaChannelFormatKindSigned"], [7, 1, 1, "", "cudaChannelFormatKindSignedBlockCompressed4"], [7, 1, 1, "", "cudaChannelFormatKindSignedBlockCompressed5"], [7, 1, 1, "", "cudaChannelFormatKindSignedBlockCompressed6H"], [7, 1, 1, "", "cudaChannelFormatKindSignedNormalized16X1"], [7, 1, 1, "", "cudaChannelFormatKindSignedNormalized16X2"], [7, 1, 1, "", "cudaChannelFormatKindSignedNormalized16X4"], [7, 1, 1, "", "cudaChannelFormatKindSignedNormalized8X1"], [7, 1, 1, "", "cudaChannelFormatKindSignedNormalized8X2"], [7, 1, 1, "", "cudaChannelFormatKindSignedNormalized8X4"], [7, 1, 1, "", "cudaChannelFormatKindUnsigned"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed1"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed1SRGB"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed2"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed2SRGB"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed3"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed3SRGB"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed4"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed5"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed6H"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed7"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedBlockCompressed7SRGB"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized16X1"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized16X2"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized16X4"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized8X1"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized8X2"], [7, 1, 1, "", "cudaChannelFormatKindUnsignedNormalized8X4"]], "cuda.bindings.runtime.cudaChildGraphNodeParams": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "graph"]], "cuda.bindings.runtime.cudaClusterSchedulingPolicy": [[7, 1, 1, "", "cudaClusterSchedulingPolicyDefault"], [7, 1, 1, 
"", "cudaClusterSchedulingPolicyLoadBalancing"], [7, 1, 1, "", "cudaClusterSchedulingPolicySpread"]], "cuda.bindings.runtime.cudaComputeMode": [[7, 1, 1, "", "cudaComputeModeDefault"], [7, 1, 1, "", "cudaComputeModeExclusive"], [7, 1, 1, "", "cudaComputeModeExclusiveProcess"], [7, 1, 1, "", "cudaComputeModeProhibited"]], "cuda.bindings.runtime.cudaConditionalNodeParams": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "handle"], [7, 1, 1, "", "phGraph_out"], [7, 1, 1, "", "size"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaDeviceAttr": [[7, 1, 1, "", "cudaDevAttrAsyncEngineCount"], [7, 1, 1, "", "cudaDevAttrCanFlushRemoteWrites"], [7, 1, 1, "", "cudaDevAttrCanMapHostMemory"], [7, 1, 1, "", "cudaDevAttrCanUseHostPointerForRegisteredMem"], [7, 1, 1, "", "cudaDevAttrClockRate"], [7, 1, 1, "", "cudaDevAttrClusterLaunch"], [7, 1, 1, "", "cudaDevAttrComputeCapabilityMajor"], [7, 1, 1, "", "cudaDevAttrComputeCapabilityMinor"], [7, 1, 1, "", "cudaDevAttrComputeMode"], [7, 1, 1, "", "cudaDevAttrComputePreemptionSupported"], [7, 1, 1, "", "cudaDevAttrConcurrentKernels"], [7, 1, 1, "", "cudaDevAttrConcurrentManagedAccess"], [7, 1, 1, "", "cudaDevAttrCooperativeLaunch"], [7, 1, 1, "", "cudaDevAttrCooperativeMultiDeviceLaunch"], [7, 1, 1, "", "cudaDevAttrD3D12CigSupported"], [7, 1, 1, "", "cudaDevAttrDeferredMappingCudaArraySupported"], [7, 1, 1, "", "cudaDevAttrDirectManagedMemAccessFromHost"], [7, 1, 1, "", "cudaDevAttrEccEnabled"], [7, 1, 1, "", "cudaDevAttrGPUDirectRDMAFlushWritesOptions"], [7, 1, 1, "", "cudaDevAttrGPUDirectRDMASupported"], [7, 1, 1, "", "cudaDevAttrGPUDirectRDMAWritesOrdering"], [7, 1, 1, "", "cudaDevAttrGlobalL1CacheSupported"], [7, 1, 1, "", "cudaDevAttrGlobalMemoryBusWidth"], [7, 1, 1, "", "cudaDevAttrGpuOverlap"], [7, 1, 1, "", "cudaDevAttrHostNativeAtomicSupported"], [7, 1, 1, "", "cudaDevAttrHostNumaId"], [7, 1, 1, "", "cudaDevAttrHostRegisterReadOnlySupported"], [7, 1, 1, "", "cudaDevAttrHostRegisterSupported"], [7, 1, 1, "", "cudaDevAttrIntegrated"], [7, 1, 1, "", "cudaDevAttrIpcEventSupport"], [7, 1, 1, "", "cudaDevAttrIsMultiGpuBoard"], [7, 1, 1, "", "cudaDevAttrKernelExecTimeout"], [7, 1, 1, "", "cudaDevAttrL2CacheSize"], [7, 1, 1, "", "cudaDevAttrLocalL1CacheSupported"], [7, 1, 1, "", "cudaDevAttrManagedMemory"], [7, 1, 1, "", "cudaDevAttrMax"], [7, 1, 1, "", "cudaDevAttrMaxAccessPolicyWindowSize"], [7, 1, 1, "", "cudaDevAttrMaxBlockDimX"], [7, 1, 1, "", "cudaDevAttrMaxBlockDimY"], [7, 1, 1, "", "cudaDevAttrMaxBlockDimZ"], [7, 1, 1, "", "cudaDevAttrMaxBlocksPerMultiprocessor"], [7, 1, 1, "", "cudaDevAttrMaxGridDimX"], [7, 1, 1, "", "cudaDevAttrMaxGridDimY"], [7, 1, 1, "", "cudaDevAttrMaxGridDimZ"], [7, 1, 1, "", "cudaDevAttrMaxPersistingL2CacheSize"], [7, 1, 1, "", "cudaDevAttrMaxPitch"], [7, 1, 1, "", "cudaDevAttrMaxRegistersPerBlock"], [7, 1, 1, "", "cudaDevAttrMaxRegistersPerMultiprocessor"], [7, 1, 1, "", "cudaDevAttrMaxSharedMemoryPerBlock"], [7, 1, 1, "", "cudaDevAttrMaxSharedMemoryPerBlockOptin"], [7, 1, 1, "", "cudaDevAttrMaxSharedMemoryPerMultiprocessor"], [7, 1, 1, "", "cudaDevAttrMaxSurface1DLayeredLayers"], [7, 1, 1, "", "cudaDevAttrMaxSurface1DLayeredWidth"], [7, 1, 1, "", "cudaDevAttrMaxSurface1DWidth"], [7, 1, 1, "", "cudaDevAttrMaxSurface2DHeight"], [7, 1, 1, "", "cudaDevAttrMaxSurface2DLayeredHeight"], [7, 1, 1, "", "cudaDevAttrMaxSurface2DLayeredLayers"], [7, 1, 1, "", "cudaDevAttrMaxSurface2DLayeredWidth"], [7, 1, 1, "", "cudaDevAttrMaxSurface2DWidth"], [7, 1, 1, "", "cudaDevAttrMaxSurface3DDepth"], [7, 1, 1, "", "cudaDevAttrMaxSurface3DHeight"], 
[7, 1, 1, "", "cudaDevAttrMaxSurface3DWidth"], [7, 1, 1, "", "cudaDevAttrMaxSurfaceCubemapLayeredLayers"], [7, 1, 1, "", "cudaDevAttrMaxSurfaceCubemapLayeredWidth"], [7, 1, 1, "", "cudaDevAttrMaxSurfaceCubemapWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture1DLayeredLayers"], [7, 1, 1, "", "cudaDevAttrMaxTexture1DLayeredWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture1DLinearWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture1DMipmappedWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture1DWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DGatherHeight"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DGatherWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DHeight"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DLayeredHeight"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DLayeredLayers"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DLayeredWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DLinearHeight"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DLinearPitch"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DLinearWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DMipmappedHeight"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DMipmappedWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture2DWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture3DDepth"], [7, 1, 1, "", "cudaDevAttrMaxTexture3DDepthAlt"], [7, 1, 1, "", "cudaDevAttrMaxTexture3DHeight"], [7, 1, 1, "", "cudaDevAttrMaxTexture3DHeightAlt"], [7, 1, 1, "", "cudaDevAttrMaxTexture3DWidth"], [7, 1, 1, "", "cudaDevAttrMaxTexture3DWidthAlt"], [7, 1, 1, "", "cudaDevAttrMaxTextureCubemapLayeredLayers"], [7, 1, 1, "", "cudaDevAttrMaxTextureCubemapLayeredWidth"], [7, 1, 1, "", "cudaDevAttrMaxTextureCubemapWidth"], [7, 1, 1, "", "cudaDevAttrMaxThreadsPerBlock"], [7, 1, 1, "", "cudaDevAttrMaxThreadsPerMultiProcessor"], [7, 1, 1, "", "cudaDevAttrMaxTimelineSemaphoreInteropSupported"], [7, 1, 1, "", "cudaDevAttrMemSyncDomainCount"], [7, 1, 1, "", "cudaDevAttrMemoryClockRate"], [7, 1, 1, "", "cudaDevAttrMemoryPoolSupportedHandleTypes"], [7, 1, 1, "", "cudaDevAttrMemoryPoolsSupported"], [7, 1, 1, "", "cudaDevAttrMpsEnabled"], [7, 1, 1, "", "cudaDevAttrMultiGpuBoardGroupID"], [7, 1, 1, "", "cudaDevAttrMultiProcessorCount"], [7, 1, 1, "", "cudaDevAttrNumaConfig"], [7, 1, 1, "", "cudaDevAttrNumaId"], [7, 1, 1, "", "cudaDevAttrPageableMemoryAccess"], [7, 1, 1, "", "cudaDevAttrPageableMemoryAccessUsesHostPageTables"], [7, 1, 1, "", "cudaDevAttrPciBusId"], [7, 1, 1, "", "cudaDevAttrPciDeviceId"], [7, 1, 1, "", "cudaDevAttrPciDomainId"], [7, 1, 1, "", "cudaDevAttrReserved122"], [7, 1, 1, "", "cudaDevAttrReserved123"], [7, 1, 1, "", "cudaDevAttrReserved124"], [7, 1, 1, "", "cudaDevAttrReserved127"], [7, 1, 1, "", "cudaDevAttrReserved128"], [7, 1, 1, "", "cudaDevAttrReserved129"], [7, 1, 1, "", "cudaDevAttrReserved132"], [7, 1, 1, "", "cudaDevAttrReserved92"], [7, 1, 1, "", "cudaDevAttrReserved93"], [7, 1, 1, "", "cudaDevAttrReserved94"], [7, 1, 1, "", "cudaDevAttrReservedSharedMemoryPerBlock"], [7, 1, 1, "", "cudaDevAttrSingleToDoublePrecisionPerfRatio"], [7, 1, 1, "", "cudaDevAttrSparseCudaArraySupported"], [7, 1, 1, "", "cudaDevAttrStreamPrioritiesSupported"], [7, 1, 1, "", "cudaDevAttrSurfaceAlignment"], [7, 1, 1, "", "cudaDevAttrTccDriver"], [7, 1, 1, "", "cudaDevAttrTextureAlignment"], [7, 1, 1, "", "cudaDevAttrTexturePitchAlignment"], [7, 1, 1, "", "cudaDevAttrTimelineSemaphoreInteropSupported"], [7, 1, 1, "", "cudaDevAttrTotalConstantMemory"], [7, 1, 1, "", "cudaDevAttrUnifiedAddressing"], [7, 1, 1, "", "cudaDevAttrWarpSize"]], "cuda.bindings.runtime.cudaDeviceNumaConfig": [[7, 1, 1, "", "cudaDeviceNumaConfigNone"], [7, 1, 1, "", 
"cudaDeviceNumaConfigNumaNode"]], "cuda.bindings.runtime.cudaDeviceP2PAttr": [[7, 1, 1, "", "cudaDevP2PAttrAccessSupported"], [7, 1, 1, "", "cudaDevP2PAttrCudaArrayAccessSupported"], [7, 1, 1, "", "cudaDevP2PAttrNativeAtomicSupported"], [7, 1, 1, "", "cudaDevP2PAttrPerformanceRank"]], "cuda.bindings.runtime.cudaDeviceProp": [[7, 1, 1, "", "ECCEnabled"], [7, 1, 1, "", "accessPolicyMaxWindowSize"], [7, 1, 1, "", "asyncEngineCount"], [7, 1, 1, "", "canMapHostMemory"], [7, 1, 1, "", "canUseHostPointerForRegisteredMem"], [7, 1, 1, "", "clockRate"], [7, 1, 1, "", "clusterLaunch"], [7, 1, 1, "", "computeMode"], [7, 1, 1, "", "computePreemptionSupported"], [7, 1, 1, "", "concurrentKernels"], [7, 1, 1, "", "concurrentManagedAccess"], [7, 1, 1, "", "cooperativeLaunch"], [7, 1, 1, "", "cooperativeMultiDeviceLaunch"], [7, 1, 1, "", "deferredMappingCudaArraySupported"], [7, 1, 1, "", "deviceOverlap"], [7, 1, 1, "", "directManagedMemAccessFromHost"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "globalL1CacheSupported"], [7, 1, 1, "", "gpuDirectRDMAFlushWritesOptions"], [7, 1, 1, "", "gpuDirectRDMASupported"], [7, 1, 1, "", "gpuDirectRDMAWritesOrdering"], [7, 1, 1, "", "hostNativeAtomicSupported"], [7, 1, 1, "", "hostRegisterReadOnlySupported"], [7, 1, 1, "", "hostRegisterSupported"], [7, 1, 1, "", "integrated"], [7, 1, 1, "", "ipcEventSupported"], [7, 1, 1, "", "isMultiGpuBoard"], [7, 1, 1, "", "kernelExecTimeoutEnabled"], [7, 1, 1, "", "l2CacheSize"], [7, 1, 1, "", "localL1CacheSupported"], [7, 1, 1, "", "luid"], [7, 1, 1, "", "luidDeviceNodeMask"], [7, 1, 1, "", "major"], [7, 1, 1, "", "managedMemory"], [7, 1, 1, "", "maxBlocksPerMultiProcessor"], [7, 1, 1, "", "maxGridSize"], [7, 1, 1, "", "maxSurface1D"], [7, 1, 1, "", "maxSurface1DLayered"], [7, 1, 1, "", "maxSurface2D"], [7, 1, 1, "", "maxSurface2DLayered"], [7, 1, 1, "", "maxSurface3D"], [7, 1, 1, "", "maxSurfaceCubemap"], [7, 1, 1, "", "maxSurfaceCubemapLayered"], [7, 1, 1, "", "maxTexture1D"], [7, 1, 1, "", "maxTexture1DLayered"], [7, 1, 1, "", "maxTexture1DLinear"], [7, 1, 1, "", "maxTexture1DMipmap"], [7, 1, 1, "", "maxTexture2D"], [7, 1, 1, "", "maxTexture2DGather"], [7, 1, 1, "", "maxTexture2DLayered"], [7, 1, 1, "", "maxTexture2DLinear"], [7, 1, 1, "", "maxTexture2DMipmap"], [7, 1, 1, "", "maxTexture3D"], [7, 1, 1, "", "maxTexture3DAlt"], [7, 1, 1, "", "maxTextureCubemap"], [7, 1, 1, "", "maxTextureCubemapLayered"], [7, 1, 1, "", "maxThreadsDim"], [7, 1, 1, "", "maxThreadsPerBlock"], [7, 1, 1, "", "maxThreadsPerMultiProcessor"], [7, 1, 1, "", "memPitch"], [7, 1, 1, "", "memoryBusWidth"], [7, 1, 1, "", "memoryClockRate"], [7, 1, 1, "", "memoryPoolSupportedHandleTypes"], [7, 1, 1, "", "memoryPoolsSupported"], [7, 1, 1, "", "minor"], [7, 1, 1, "", "multiGpuBoardGroupID"], [7, 1, 1, "", "multiProcessorCount"], [7, 1, 1, "", "name"], [7, 1, 1, "", "pageableMemoryAccess"], [7, 1, 1, "", "pageableMemoryAccessUsesHostPageTables"], [7, 1, 1, "", "pciBusID"], [7, 1, 1, "", "pciDeviceID"], [7, 1, 1, "", "pciDomainID"], [7, 1, 1, "", "persistingL2CacheMaxSize"], [7, 1, 1, "", "regsPerBlock"], [7, 1, 1, "", "regsPerMultiprocessor"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "reserved1"], [7, 1, 1, "", "reserved2"], [7, 1, 1, "", "reservedSharedMemPerBlock"], [7, 1, 1, "", "sharedMemPerBlock"], [7, 1, 1, "", "sharedMemPerBlockOptin"], [7, 1, 1, "", "sharedMemPerMultiprocessor"], [7, 1, 1, "", "singleToDoublePrecisionPerfRatio"], [7, 1, 1, "", "sparseCudaArraySupported"], [7, 1, 1, "", "streamPrioritiesSupported"], [7, 1, 1, "", "surfaceAlignment"], [7, 1, 1, 
"", "tccDriver"], [7, 1, 1, "", "textureAlignment"], [7, 1, 1, "", "texturePitchAlignment"], [7, 1, 1, "", "timelineSemaphoreInteropSupported"], [7, 1, 1, "", "totalConstMem"], [7, 1, 1, "", "totalGlobalMem"], [7, 1, 1, "", "unifiedAddressing"], [7, 1, 1, "", "unifiedFunctionPointers"], [7, 1, 1, "", "uuid"], [7, 1, 1, "", "warpSize"]], "cuda.bindings.runtime.cudaDriverEntryPointQueryResult": [[7, 1, 1, "", "cudaDriverEntryPointSuccess"], [7, 1, 1, "", "cudaDriverEntryPointSymbolNotFound"], [7, 1, 1, "", "cudaDriverEntryPointVersionNotSufficent"]], "cuda.bindings.runtime.cudaEglColorFormat": [[7, 1, 1, "", "cudaEglColorFormatA"], [7, 1, 1, "", "cudaEglColorFormatABGR"], [7, 1, 1, "", "cudaEglColorFormatARGB"], [7, 1, 1, "", "cudaEglColorFormatAYUV"], [7, 1, 1, "", "cudaEglColorFormatAYUV_ER"], [7, 1, 1, "", "cudaEglColorFormatBGRA"], [7, 1, 1, "", "cudaEglColorFormatBayer10BGGR"], [7, 1, 1, "", "cudaEglColorFormatBayer10CCCC"], [7, 1, 1, "", "cudaEglColorFormatBayer10GBRG"], [7, 1, 1, "", "cudaEglColorFormatBayer10GRBG"], [7, 1, 1, "", "cudaEglColorFormatBayer10RGGB"], [7, 1, 1, "", "cudaEglColorFormatBayer12BCCR"], [7, 1, 1, "", "cudaEglColorFormatBayer12BGGR"], [7, 1, 1, "", "cudaEglColorFormatBayer12CBRC"], [7, 1, 1, "", "cudaEglColorFormatBayer12CCCC"], [7, 1, 1, "", "cudaEglColorFormatBayer12CRBC"], [7, 1, 1, "", "cudaEglColorFormatBayer12GBRG"], [7, 1, 1, "", "cudaEglColorFormatBayer12GRBG"], [7, 1, 1, "", "cudaEglColorFormatBayer12RCCB"], [7, 1, 1, "", "cudaEglColorFormatBayer12RGGB"], [7, 1, 1, "", "cudaEglColorFormatBayer14BGGR"], [7, 1, 1, "", "cudaEglColorFormatBayer14GBRG"], [7, 1, 1, "", "cudaEglColorFormatBayer14GRBG"], [7, 1, 1, "", "cudaEglColorFormatBayer14RGGB"], [7, 1, 1, "", "cudaEglColorFormatBayer20BGGR"], [7, 1, 1, "", "cudaEglColorFormatBayer20GBRG"], [7, 1, 1, "", "cudaEglColorFormatBayer20GRBG"], [7, 1, 1, "", "cudaEglColorFormatBayer20RGGB"], [7, 1, 1, "", "cudaEglColorFormatBayerBCCR"], [7, 1, 1, "", "cudaEglColorFormatBayerBGGR"], [7, 1, 1, "", "cudaEglColorFormatBayerCBRC"], [7, 1, 1, "", "cudaEglColorFormatBayerCRBC"], [7, 1, 1, "", "cudaEglColorFormatBayerGBRG"], [7, 1, 1, "", "cudaEglColorFormatBayerGRBG"], [7, 1, 1, "", "cudaEglColorFormatBayerIspBGGR"], [7, 1, 1, "", "cudaEglColorFormatBayerIspGBRG"], [7, 1, 1, "", "cudaEglColorFormatBayerIspGRBG"], [7, 1, 1, "", "cudaEglColorFormatBayerIspRGGB"], [7, 1, 1, "", "cudaEglColorFormatBayerRCCB"], [7, 1, 1, "", "cudaEglColorFormatBayerRGGB"], [7, 1, 1, "", "cudaEglColorFormatL"], [7, 1, 1, "", "cudaEglColorFormatR"], [7, 1, 1, "", "cudaEglColorFormatRG"], [7, 1, 1, "", "cudaEglColorFormatRGBA"], [7, 1, 1, "", "cudaEglColorFormatUYVY422"], [7, 1, 1, "", "cudaEglColorFormatUYVY_ER"], [7, 1, 1, "", "cudaEglColorFormatVYUY"], [7, 1, 1, "", "cudaEglColorFormatVYUY_ER"], [7, 1, 1, "", "cudaEglColorFormatY"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_2020"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_709"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_420SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_422SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_422SemiPlanar_2020"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_422SemiPlanar_709"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_444SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER"], [7, 1, 1, "", "cudaEglColorFormatY10V10U10_444SemiPlanar_ER"], [7, 1, 1, "", 
"cudaEglColorFormatY10_709_ER"], [7, 1, 1, "", "cudaEglColorFormatY10_ER"], [7, 1, 1, "", "cudaEglColorFormatY12V12U12_420SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER"], [7, 1, 1, "", "cudaEglColorFormatY12V12U12_420SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatY12V12U12_444SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER"], [7, 1, 1, "", "cudaEglColorFormatY12V12U12_444SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatY12_709_ER"], [7, 1, 1, "", "cudaEglColorFormatY12_ER"], [7, 1, 1, "", "cudaEglColorFormatYUV420Planar"], [7, 1, 1, "", "cudaEglColorFormatYUV420Planar_2020"], [7, 1, 1, "", "cudaEglColorFormatYUV420Planar_709"], [7, 1, 1, "", "cudaEglColorFormatYUV420Planar_ER"], [7, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar_2020"], [7, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar_709"], [7, 1, 1, "", "cudaEglColorFormatYUV420SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatYUV422Planar"], [7, 1, 1, "", "cudaEglColorFormatYUV422Planar_ER"], [7, 1, 1, "", "cudaEglColorFormatYUV422SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatYUV422SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatYUV444Planar"], [7, 1, 1, "", "cudaEglColorFormatYUV444Planar_ER"], [7, 1, 1, "", "cudaEglColorFormatYUV444SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatYUV444SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatYUVA"], [7, 1, 1, "", "cudaEglColorFormatYUVA_ER"], [7, 1, 1, "", "cudaEglColorFormatYUYV422"], [7, 1, 1, "", "cudaEglColorFormatYUYV_ER"], [7, 1, 1, "", "cudaEglColorFormatYVU420Planar"], [7, 1, 1, "", "cudaEglColorFormatYVU420Planar_2020"], [7, 1, 1, "", "cudaEglColorFormatYVU420Planar_709"], [7, 1, 1, "", "cudaEglColorFormatYVU420Planar_ER"], [7, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar_2020"], [7, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar_709"], [7, 1, 1, "", "cudaEglColorFormatYVU420SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatYVU422Planar"], [7, 1, 1, "", "cudaEglColorFormatYVU422Planar_ER"], [7, 1, 1, "", "cudaEglColorFormatYVU422SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatYVU422SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatYVU444Planar"], [7, 1, 1, "", "cudaEglColorFormatYVU444Planar_ER"], [7, 1, 1, "", "cudaEglColorFormatYVU444SemiPlanar"], [7, 1, 1, "", "cudaEglColorFormatYVU444SemiPlanar_ER"], [7, 1, 1, "", "cudaEglColorFormatYVYU"], [7, 1, 1, "", "cudaEglColorFormatYVYU_ER"], [7, 1, 1, "", "cudaEglColorFormatY_709_ER"], [7, 1, 1, "", "cudaEglColorFormatY_ER"]], "cuda.bindings.runtime.cudaEglFrame": [[7, 1, 1, "", "eglColorFormat"], [7, 1, 1, "", "frame"], [7, 1, 1, "", "frameType"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "planeCount"], [7, 1, 1, "", "planeDesc"]], "cuda.bindings.runtime.cudaEglFrameType": [[7, 1, 1, "", "cudaEglFrameTypeArray"], [7, 1, 1, "", "cudaEglFrameTypePitch"]], "cuda.bindings.runtime.cudaEglFrame_st": [[7, 1, 1, "", "eglColorFormat"], [7, 1, 1, "", "frame"], [7, 1, 1, "", "frameType"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "planeCount"], [7, 1, 1, "", "planeDesc"]], "cuda.bindings.runtime.cudaEglPlaneDesc": [[7, 1, 1, "", "channelDesc"], [7, 1, 1, "", "depth"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "height"], [7, 1, 1, "", "numChannels"], [7, 1, 1, "", "pitch"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "width"]], "cuda.bindings.runtime.cudaEglPlaneDesc_st": [[7, 1, 1, "", "channelDesc"], [7, 1, 1, "", "depth"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "height"], [7, 1, 1, "", 
"numChannels"], [7, 1, 1, "", "pitch"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "width"]], "cuda.bindings.runtime.cudaEglResourceLocationFlags": [[7, 1, 1, "", "cudaEglResourceLocationSysmem"], [7, 1, 1, "", "cudaEglResourceLocationVidmem"]], "cuda.bindings.runtime.cudaEglStreamConnection": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaError_t": [[7, 1, 1, "", "cudaErrorAddressOfConstant"], [7, 1, 1, "", "cudaErrorAlreadyAcquired"], [7, 1, 1, "", "cudaErrorAlreadyMapped"], [7, 1, 1, "", "cudaErrorApiFailureBase"], [7, 1, 1, "", "cudaErrorArrayIsMapped"], [7, 1, 1, "", "cudaErrorAssert"], [7, 1, 1, "", "cudaErrorCallRequiresNewerDriver"], [7, 1, 1, "", "cudaErrorCapturedEvent"], [7, 1, 1, "", "cudaErrorCdpNotSupported"], [7, 1, 1, "", "cudaErrorCdpVersionMismatch"], [7, 1, 1, "", "cudaErrorCompatNotSupportedOnDevice"], [7, 1, 1, "", "cudaErrorContextIsDestroyed"], [7, 1, 1, "", "cudaErrorCooperativeLaunchTooLarge"], [7, 1, 1, "", "cudaErrorCudartUnloading"], [7, 1, 1, "", "cudaErrorDeviceAlreadyInUse"], [7, 1, 1, "", "cudaErrorDeviceNotLicensed"], [7, 1, 1, "", "cudaErrorDeviceUninitialized"], [7, 1, 1, "", "cudaErrorDevicesUnavailable"], [7, 1, 1, "", "cudaErrorDuplicateSurfaceName"], [7, 1, 1, "", "cudaErrorDuplicateTextureName"], [7, 1, 1, "", "cudaErrorDuplicateVariableName"], [7, 1, 1, "", "cudaErrorECCUncorrectable"], [7, 1, 1, "", "cudaErrorExternalDevice"], [7, 1, 1, "", "cudaErrorFileNotFound"], [7, 1, 1, "", "cudaErrorFunctionNotLoaded"], [7, 1, 1, "", "cudaErrorGraphExecUpdateFailure"], [7, 1, 1, "", "cudaErrorHardwareStackError"], [7, 1, 1, "", "cudaErrorHostMemoryAlreadyRegistered"], [7, 1, 1, "", "cudaErrorHostMemoryNotRegistered"], [7, 1, 1, "", "cudaErrorIllegalAddress"], [7, 1, 1, "", "cudaErrorIllegalInstruction"], [7, 1, 1, "", "cudaErrorIllegalState"], [7, 1, 1, "", "cudaErrorIncompatibleDriverContext"], [7, 1, 1, "", "cudaErrorInitializationError"], [7, 1, 1, "", "cudaErrorInsufficientDriver"], [7, 1, 1, "", "cudaErrorInvalidAddressSpace"], [7, 1, 1, "", "cudaErrorInvalidChannelDescriptor"], [7, 1, 1, "", "cudaErrorInvalidClusterSize"], [7, 1, 1, "", "cudaErrorInvalidConfiguration"], [7, 1, 1, "", "cudaErrorInvalidDevice"], [7, 1, 1, "", "cudaErrorInvalidDeviceFunction"], [7, 1, 1, "", "cudaErrorInvalidDevicePointer"], [7, 1, 1, "", "cudaErrorInvalidFilterSetting"], [7, 1, 1, "", "cudaErrorInvalidGraphicsContext"], [7, 1, 1, "", "cudaErrorInvalidHostPointer"], [7, 1, 1, "", "cudaErrorInvalidKernelImage"], [7, 1, 1, "", "cudaErrorInvalidMemcpyDirection"], [7, 1, 1, "", "cudaErrorInvalidNormSetting"], [7, 1, 1, "", "cudaErrorInvalidPc"], [7, 1, 1, "", "cudaErrorInvalidPitchValue"], [7, 1, 1, "", "cudaErrorInvalidPtx"], [7, 1, 1, "", "cudaErrorInvalidResourceConfiguration"], [7, 1, 1, "", "cudaErrorInvalidResourceHandle"], [7, 1, 1, "", "cudaErrorInvalidResourceType"], [7, 1, 1, "", "cudaErrorInvalidSource"], [7, 1, 1, "", "cudaErrorInvalidSurface"], [7, 1, 1, "", "cudaErrorInvalidSymbol"], [7, 1, 1, "", "cudaErrorInvalidTexture"], [7, 1, 1, "", "cudaErrorInvalidTextureBinding"], [7, 1, 1, "", "cudaErrorInvalidValue"], [7, 1, 1, "", "cudaErrorJitCompilationDisabled"], [7, 1, 1, "", "cudaErrorJitCompilerNotFound"], [7, 1, 1, "", "cudaErrorLaunchFailure"], [7, 1, 1, "", "cudaErrorLaunchFileScopedSurf"], [7, 1, 1, "", "cudaErrorLaunchFileScopedTex"], [7, 1, 1, "", "cudaErrorLaunchIncompatibleTexturing"], [7, 1, 1, "", "cudaErrorLaunchMaxDepthExceeded"], [7, 1, 1, "", "cudaErrorLaunchOutOfResources"], [7, 1, 1, "", "cudaErrorLaunchPendingCountExceeded"], [7, 1, 1, "", 
"cudaErrorLaunchTimeout"], [7, 1, 1, "", "cudaErrorLossyQuery"], [7, 1, 1, "", "cudaErrorMapBufferObjectFailed"], [7, 1, 1, "", "cudaErrorMemoryAllocation"], [7, 1, 1, "", "cudaErrorMemoryValueTooLarge"], [7, 1, 1, "", "cudaErrorMisalignedAddress"], [7, 1, 1, "", "cudaErrorMissingConfiguration"], [7, 1, 1, "", "cudaErrorMixedDeviceExecution"], [7, 1, 1, "", "cudaErrorMpsClientTerminated"], [7, 1, 1, "", "cudaErrorMpsConnectionFailed"], [7, 1, 1, "", "cudaErrorMpsMaxClientsReached"], [7, 1, 1, "", "cudaErrorMpsMaxConnectionsReached"], [7, 1, 1, "", "cudaErrorMpsRpcFailure"], [7, 1, 1, "", "cudaErrorMpsServerNotReady"], [7, 1, 1, "", "cudaErrorNoDevice"], [7, 1, 1, "", "cudaErrorNoKernelImageForDevice"], [7, 1, 1, "", "cudaErrorNotMapped"], [7, 1, 1, "", "cudaErrorNotMappedAsArray"], [7, 1, 1, "", "cudaErrorNotMappedAsPointer"], [7, 1, 1, "", "cudaErrorNotPermitted"], [7, 1, 1, "", "cudaErrorNotReady"], [7, 1, 1, "", "cudaErrorNotSupported"], [7, 1, 1, "", "cudaErrorNotYetImplemented"], [7, 1, 1, "", "cudaErrorNvlinkUncorrectable"], [7, 1, 1, "", "cudaErrorOperatingSystem"], [7, 1, 1, "", "cudaErrorPeerAccessAlreadyEnabled"], [7, 1, 1, "", "cudaErrorPeerAccessNotEnabled"], [7, 1, 1, "", "cudaErrorPeerAccessUnsupported"], [7, 1, 1, "", "cudaErrorPriorLaunchFailure"], [7, 1, 1, "", "cudaErrorProfilerAlreadyStarted"], [7, 1, 1, "", "cudaErrorProfilerAlreadyStopped"], [7, 1, 1, "", "cudaErrorProfilerDisabled"], [7, 1, 1, "", "cudaErrorProfilerNotInitialized"], [7, 1, 1, "", "cudaErrorSetOnActiveProcess"], [7, 1, 1, "", "cudaErrorSharedObjectInitFailed"], [7, 1, 1, "", "cudaErrorSharedObjectSymbolNotFound"], [7, 1, 1, "", "cudaErrorSoftwareValidityNotEstablished"], [7, 1, 1, "", "cudaErrorStartupFailure"], [7, 1, 1, "", "cudaErrorStreamCaptureImplicit"], [7, 1, 1, "", "cudaErrorStreamCaptureInvalidated"], [7, 1, 1, "", "cudaErrorStreamCaptureIsolation"], [7, 1, 1, "", "cudaErrorStreamCaptureMerge"], [7, 1, 1, "", "cudaErrorStreamCaptureUnjoined"], [7, 1, 1, "", "cudaErrorStreamCaptureUnmatched"], [7, 1, 1, "", "cudaErrorStreamCaptureUnsupported"], [7, 1, 1, "", "cudaErrorStreamCaptureWrongThread"], [7, 1, 1, "", "cudaErrorStubLibrary"], [7, 1, 1, "", "cudaErrorSymbolNotFound"], [7, 1, 1, "", "cudaErrorSyncDepthExceeded"], [7, 1, 1, "", "cudaErrorSynchronizationError"], [7, 1, 1, "", "cudaErrorSystemDriverMismatch"], [7, 1, 1, "", "cudaErrorSystemNotReady"], [7, 1, 1, "", "cudaErrorTextureFetchFailed"], [7, 1, 1, "", "cudaErrorTextureNotBound"], [7, 1, 1, "", "cudaErrorTimeout"], [7, 1, 1, "", "cudaErrorTooManyPeers"], [7, 1, 1, "", "cudaErrorUnknown"], [7, 1, 1, "", "cudaErrorUnmapBufferObjectFailed"], [7, 1, 1, "", "cudaErrorUnsupportedDevSideSync"], [7, 1, 1, "", "cudaErrorUnsupportedExecAffinity"], [7, 1, 1, "", "cudaErrorUnsupportedLimit"], [7, 1, 1, "", "cudaErrorUnsupportedPtxVersion"], [7, 1, 1, "", "cudaSuccess"]], "cuda.bindings.runtime.cudaEventRecordNodeParams": [[7, 1, 1, "", "event"], [7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaEventWaitNodeParams": [[7, 1, 1, "", "event"], [7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaEvent_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaExtent": [[7, 1, 1, "", "depth"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "height"], [7, 1, 1, "", "width"]], "cuda.bindings.runtime.cudaExternalMemoryBufferDesc": [[7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "offset"], [7, 1, 1, "", "size"]], "cuda.bindings.runtime.cudaExternalMemoryHandleDesc": [[7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "handle"], 
[7, 1, 1, "", "size"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaExternalMemoryHandleType": [[7, 1, 1, "", "cudaExternalMemoryHandleTypeD3D11Resource"], [7, 1, 1, "", "cudaExternalMemoryHandleTypeD3D11ResourceKmt"], [7, 1, 1, "", "cudaExternalMemoryHandleTypeD3D12Heap"], [7, 1, 1, "", "cudaExternalMemoryHandleTypeD3D12Resource"], [7, 1, 1, "", "cudaExternalMemoryHandleTypeNvSciBuf"], [7, 1, 1, "", "cudaExternalMemoryHandleTypeOpaqueFd"], [7, 1, 1, "", "cudaExternalMemoryHandleTypeOpaqueWin32"], [7, 1, 1, "", "cudaExternalMemoryHandleTypeOpaqueWin32Kmt"]], "cuda.bindings.runtime.cudaExternalMemoryMipmappedArrayDesc": [[7, 1, 1, "", "extent"], [7, 1, 1, "", "flags"], [7, 1, 1, "", "formatDesc"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "numLevels"], [7, 1, 1, "", "offset"]], "cuda.bindings.runtime.cudaExternalMemory_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaExternalSemaphoreHandleDesc": [[7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "handle"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaExternalSemaphoreHandleType": [[7, 1, 1, "", "cudaExternalSemaphoreHandleTypeD3D11Fence"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeD3D12Fence"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeKeyedMutex"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeKeyedMutexKmt"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeNvSciSync"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeOpaqueFd"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeOpaqueWin32"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd"], [7, 1, 1, "", "cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32"]], "cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParams": [[7, 1, 1, "", "extSemArray"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "numExtSems"], [7, 1, 1, "", "paramsArray"]], "cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParamsV2": [[7, 1, 1, "", "extSemArray"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "numExtSems"], [7, 1, 1, "", "paramsArray"]], "cuda.bindings.runtime.cudaExternalSemaphoreSignalParams": [[7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "params"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParams": [[7, 1, 1, "", "extSemArray"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "numExtSems"], [7, 1, 1, "", "paramsArray"]], "cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParamsV2": [[7, 1, 1, "", "extSemArray"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "numExtSems"], [7, 1, 1, "", "paramsArray"]], "cuda.bindings.runtime.cudaExternalSemaphoreWaitParams": [[7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "params"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaExternalSemaphore_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions": [[7, 1, 1, "", "cudaFlushGPUDirectRDMAWritesOptionHost"], [7, 1, 1, "", "cudaFlushGPUDirectRDMAWritesOptionMemOps"]], "cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope": [[7, 1, 1, "", "cudaFlushGPUDirectRDMAWritesToAllDevices"], [7, 1, 1, "", "cudaFlushGPUDirectRDMAWritesToOwner"]], "cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesTarget": [[7, 1, 1, "", "cudaFlushGPUDirectRDMAWritesTargetCurrentDevice"]], "cuda.bindings.runtime.cudaFuncAttribute": [[7, 1, 1, "", "cudaFuncAttributeClusterDimMustBeSet"], [7, 1, 1, "", "cudaFuncAttributeClusterSchedulingPolicyPreference"], [7, 1, 1, "", "cudaFuncAttributeMax"], [7, 1, 1, "", 
"cudaFuncAttributeMaxDynamicSharedMemorySize"], [7, 1, 1, "", "cudaFuncAttributeNonPortableClusterSizeAllowed"], [7, 1, 1, "", "cudaFuncAttributePreferredSharedMemoryCarveout"], [7, 1, 1, "", "cudaFuncAttributeRequiredClusterDepth"], [7, 1, 1, "", "cudaFuncAttributeRequiredClusterHeight"], [7, 1, 1, "", "cudaFuncAttributeRequiredClusterWidth"]], "cuda.bindings.runtime.cudaFuncAttributes": [[7, 1, 1, "", "binaryVersion"], [7, 1, 1, "", "cacheModeCA"], [7, 1, 1, "", "clusterDimMustBeSet"], [7, 1, 1, "", "clusterSchedulingPolicyPreference"], [7, 1, 1, "", "constSizeBytes"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "localSizeBytes"], [7, 1, 1, "", "maxDynamicSharedSizeBytes"], [7, 1, 1, "", "maxThreadsPerBlock"], [7, 1, 1, "", "nonPortableClusterSizeAllowed"], [7, 1, 1, "", "numRegs"], [7, 1, 1, "", "preferredShmemCarveout"], [7, 1, 1, "", "ptxVersion"], [7, 1, 1, "", "requiredClusterDepth"], [7, 1, 1, "", "requiredClusterHeight"], [7, 1, 1, "", "requiredClusterWidth"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "sharedSizeBytes"]], "cuda.bindings.runtime.cudaFuncCache": [[7, 1, 1, "", "cudaFuncCachePreferEqual"], [7, 1, 1, "", "cudaFuncCachePreferL1"], [7, 1, 1, "", "cudaFuncCachePreferNone"], [7, 1, 1, "", "cudaFuncCachePreferShared"]], "cuda.bindings.runtime.cudaFunction_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaGLDeviceList": [[7, 1, 1, "", "cudaGLDeviceListAll"], [7, 1, 1, "", "cudaGLDeviceListCurrentFrame"], [7, 1, 1, "", "cudaGLDeviceListNextFrame"]], "cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering": [[7, 1, 1, "", "cudaGPUDirectRDMAWritesOrderingAllDevices"], [7, 1, 1, "", "cudaGPUDirectRDMAWritesOrderingNone"], [7, 1, 1, "", "cudaGPUDirectRDMAWritesOrderingOwner"]], "cuda.bindings.runtime.cudaGetDriverEntryPointFlags": [[7, 1, 1, "", "cudaEnableDefault"], [7, 1, 1, "", "cudaEnableLegacyStream"], [7, 1, 1, "", "cudaEnablePerThreadDefaultStream"]], "cuda.bindings.runtime.cudaGraphConditionalHandle": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaGraphConditionalHandleFlags": [[7, 1, 1, "", "cudaGraphCondAssignDefault"]], "cuda.bindings.runtime.cudaGraphConditionalNodeType": [[7, 1, 1, "", "cudaGraphCondTypeIf"], [7, 1, 1, "", "cudaGraphCondTypeWhile"]], "cuda.bindings.runtime.cudaGraphDebugDotFlags": [[7, 1, 1, "", "cudaGraphDebugDotFlagsConditionalNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsEventNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsExtSemasSignalNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsExtSemasWaitNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsHandles"], [7, 1, 1, "", "cudaGraphDebugDotFlagsHostNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsKernelNodeAttributes"], [7, 1, 1, "", "cudaGraphDebugDotFlagsKernelNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsMemcpyNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsMemsetNodeParams"], [7, 1, 1, "", "cudaGraphDebugDotFlagsVerbose"]], "cuda.bindings.runtime.cudaGraphDependencyType": [[7, 1, 1, "", "cudaGraphDependencyTypeDefault"], [7, 1, 1, "", "cudaGraphDependencyTypeProgrammatic"]], "cuda.bindings.runtime.cudaGraphDeviceNode_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaGraphEdgeData": [[7, 1, 1, "", "from_port"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "to_port"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaGraphEdgeData_st": [[7, 1, 1, "", "from_port"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "to_port"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaGraphExecUpdateResult": [[7, 1, 1, "", 
"cudaGraphExecUpdateError"], [7, 1, 1, "", "cudaGraphExecUpdateErrorAttributesChanged"], [7, 1, 1, "", "cudaGraphExecUpdateErrorFunctionChanged"], [7, 1, 1, "", "cudaGraphExecUpdateErrorNodeTypeChanged"], [7, 1, 1, "", "cudaGraphExecUpdateErrorNotSupported"], [7, 1, 1, "", "cudaGraphExecUpdateErrorParametersChanged"], [7, 1, 1, "", "cudaGraphExecUpdateErrorTopologyChanged"], [7, 1, 1, "", "cudaGraphExecUpdateErrorUnsupportedFunctionChange"], [7, 1, 1, "", "cudaGraphExecUpdateSuccess"]], "cuda.bindings.runtime.cudaGraphExecUpdateResultInfo": [[7, 1, 1, "", "errorFromNode"], [7, 1, 1, "", "errorNode"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "result"]], "cuda.bindings.runtime.cudaGraphExecUpdateResultInfo_st": [[7, 1, 1, "", "errorFromNode"], [7, 1, 1, "", "errorNode"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "result"]], "cuda.bindings.runtime.cudaGraphExec_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaGraphInstantiateFlags": [[7, 1, 1, "", "cudaGraphInstantiateFlagAutoFreeOnLaunch"], [7, 1, 1, "", "cudaGraphInstantiateFlagDeviceLaunch"], [7, 1, 1, "", "cudaGraphInstantiateFlagUpload"], [7, 1, 1, "", "cudaGraphInstantiateFlagUseNodePriority"]], "cuda.bindings.runtime.cudaGraphInstantiateParams": [[7, 1, 1, "", "errNode_out"], [7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "result_out"], [7, 1, 1, "", "uploadStream"]], "cuda.bindings.runtime.cudaGraphInstantiateParams_st": [[7, 1, 1, "", "errNode_out"], [7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "result_out"], [7, 1, 1, "", "uploadStream"]], "cuda.bindings.runtime.cudaGraphInstantiateResult": [[7, 1, 1, "", "cudaGraphInstantiateError"], [7, 1, 1, "", "cudaGraphInstantiateInvalidStructure"], [7, 1, 1, "", "cudaGraphInstantiateMultipleDevicesNotSupported"], [7, 1, 1, "", "cudaGraphInstantiateNodeOperationNotSupported"], [7, 1, 1, "", "cudaGraphInstantiateSuccess"]], "cuda.bindings.runtime.cudaGraphKernelNodeField": [[7, 1, 1, "", "cudaGraphKernelNodeFieldEnabled"], [7, 1, 1, "", "cudaGraphKernelNodeFieldGridDim"], [7, 1, 1, "", "cudaGraphKernelNodeFieldInvalid"], [7, 1, 1, "", "cudaGraphKernelNodeFieldParam"]], "cuda.bindings.runtime.cudaGraphKernelNodeUpdate": [[7, 1, 1, "", "field"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "node"], [7, 1, 1, "", "updateData"]], "cuda.bindings.runtime.cudaGraphMemAttributeType": [[7, 1, 1, "", "cudaGraphMemAttrReservedMemCurrent"], [7, 1, 1, "", "cudaGraphMemAttrReservedMemHigh"], [7, 1, 1, "", "cudaGraphMemAttrUsedMemCurrent"], [7, 1, 1, "", "cudaGraphMemAttrUsedMemHigh"]], "cuda.bindings.runtime.cudaGraphNodeParams": [[7, 1, 1, "", "alloc"], [7, 1, 1, "", "conditional"], [7, 1, 1, "", "eventRecord"], [7, 1, 1, "", "eventWait"], [7, 1, 1, "", "extSemSignal"], [7, 1, 1, "", "extSemWait"], [7, 1, 1, "", "free"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "graph"], [7, 1, 1, "", "host"], [7, 1, 1, "", "kernel"], [7, 1, 1, "", "memcpy"], [7, 1, 1, "", "memset"], [7, 1, 1, "", "reserved0"], [7, 1, 1, "", "reserved1"], [7, 1, 1, "", "reserved2"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaGraphNodeType": [[7, 1, 1, "", "cudaGraphNodeTypeConditional"], [7, 1, 1, "", "cudaGraphNodeTypeCount"], [7, 1, 1, "", "cudaGraphNodeTypeEmpty"], [7, 1, 1, "", "cudaGraphNodeTypeEventRecord"], [7, 1, 1, "", "cudaGraphNodeTypeExtSemaphoreSignal"], [7, 1, 1, "", "cudaGraphNodeTypeExtSemaphoreWait"], [7, 1, 1, "", "cudaGraphNodeTypeGraph"], [7, 1, 1, "", "cudaGraphNodeTypeHost"], [7, 1, 1, "", "cudaGraphNodeTypeKernel"], [7, 1, 1, "", "cudaGraphNodeTypeMemAlloc"], [7, 1, 1, "", 
"cudaGraphNodeTypeMemFree"], [7, 1, 1, "", "cudaGraphNodeTypeMemcpy"], [7, 1, 1, "", "cudaGraphNodeTypeMemset"], [7, 1, 1, "", "cudaGraphNodeTypeWaitEvent"]], "cuda.bindings.runtime.cudaGraphNode_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaGraph_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaGraphicsCubeFace": [[7, 1, 1, "", "cudaGraphicsCubeFaceNegativeX"], [7, 1, 1, "", "cudaGraphicsCubeFaceNegativeY"], [7, 1, 1, "", "cudaGraphicsCubeFaceNegativeZ"], [7, 1, 1, "", "cudaGraphicsCubeFacePositiveX"], [7, 1, 1, "", "cudaGraphicsCubeFacePositiveY"], [7, 1, 1, "", "cudaGraphicsCubeFacePositiveZ"]], "cuda.bindings.runtime.cudaGraphicsMapFlags": [[7, 1, 1, "", "cudaGraphicsMapFlagsNone"], [7, 1, 1, "", "cudaGraphicsMapFlagsReadOnly"], [7, 1, 1, "", "cudaGraphicsMapFlagsWriteDiscard"]], "cuda.bindings.runtime.cudaGraphicsRegisterFlags": [[7, 1, 1, "", "cudaGraphicsRegisterFlagsNone"], [7, 1, 1, "", "cudaGraphicsRegisterFlagsReadOnly"], [7, 1, 1, "", "cudaGraphicsRegisterFlagsSurfaceLoadStore"], [7, 1, 1, "", "cudaGraphicsRegisterFlagsTextureGather"], [7, 1, 1, "", "cudaGraphicsRegisterFlagsWriteDiscard"]], "cuda.bindings.runtime.cudaGraphicsResource_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaHostFn_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaHostNodeParams": [[7, 1, 1, "", "fn"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "userData"]], "cuda.bindings.runtime.cudaHostNodeParamsV2": [[7, 1, 1, "", "fn"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "userData"]], "cuda.bindings.runtime.cudaIpcEventHandle_st": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaIpcEventHandle_t": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaIpcMemHandle_st": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaIpcMemHandle_t": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaKernelNodeParams": [[7, 1, 1, "", "blockDim"], [7, 1, 1, "", "extra"], [7, 1, 1, "", "func"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "gridDim"], [7, 1, 1, "", "kernelParams"], [7, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.runtime.cudaKernelNodeParamsV2": [[7, 1, 1, "", "blockDim"], [7, 1, 1, "", "extra"], [7, 1, 1, "", "func"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "gridDim"], [7, 1, 1, "", "kernelParams"], [7, 1, 1, "", "sharedMemBytes"]], "cuda.bindings.runtime.cudaKernel_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaLaunchAttribute": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "id"], [7, 1, 1, "", "val"]], "cuda.bindings.runtime.cudaLaunchAttributeID": [[7, 1, 1, "", "cudaLaunchAttributeAccessPolicyWindow"], [7, 1, 1, "", "cudaLaunchAttributeClusterDimension"], [7, 1, 1, "", "cudaLaunchAttributeClusterSchedulingPolicyPreference"], [7, 1, 1, "", "cudaLaunchAttributeCooperative"], [7, 1, 1, "", "cudaLaunchAttributeDeviceUpdatableKernelNode"], [7, 1, 1, "", "cudaLaunchAttributeIgnore"], [7, 1, 1, "", "cudaLaunchAttributeLaunchCompletionEvent"], [7, 1, 1, "", "cudaLaunchAttributeMemSyncDomain"], [7, 1, 1, "", "cudaLaunchAttributeMemSyncDomainMap"], [7, 1, 1, "", "cudaLaunchAttributePreferredSharedMemoryCarveout"], [7, 1, 1, "", "cudaLaunchAttributePriority"], [7, 1, 1, "", "cudaLaunchAttributeProgrammaticEvent"], [7, 1, 1, "", "cudaLaunchAttributeProgrammaticStreamSerialization"], [7, 1, 1, "", "cudaLaunchAttributeSynchronizationPolicy"]], "cuda.bindings.runtime.cudaLaunchAttributeValue": [[7, 1, 1, "id28", "accessPolicyWindow"], [7, 1, 1, "id31", "clusterDim"], [7, 1, 
1, "id32", "clusterSchedulingPolicyPreference"], [7, 1, 1, "id29", "cooperative"], [7, 1, 1, "id39", "deviceUpdatableKernelNode"], [7, 2, 1, "id41", "getPtr"], [7, 1, 1, "id38", "launchCompletionEvent"], [7, 1, 1, "id37", "memSyncDomain"], [7, 1, 1, "id36", "memSyncDomainMap"], [7, 1, 1, "id27", "pad"], [7, 1, 1, "id35", "priority"], [7, 1, 1, "id34", "programmaticEvent"], [7, 1, 1, "id33", "programmaticStreamSerializationAllowed"], [7, 1, 1, "id40", "sharedMemCarveout"], [7, 1, 1, "id30", "syncPolicy"]], "cuda.bindings.runtime.cudaLaunchAttribute_st": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "id"], [7, 1, 1, "", "val"]], "cuda.bindings.runtime.cudaLaunchMemSyncDomain": [[7, 1, 1, "", "cudaLaunchMemSyncDomainDefault"], [7, 1, 1, "", "cudaLaunchMemSyncDomainRemote"]], "cuda.bindings.runtime.cudaLaunchMemSyncDomainMap": [[7, 1, 1, "", "default_"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "remote"]], "cuda.bindings.runtime.cudaLaunchMemSyncDomainMap_st": [[7, 1, 1, "", "default_"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "remote"]], "cuda.bindings.runtime.cudaLimit": [[7, 1, 1, "", "cudaLimitDevRuntimePendingLaunchCount"], [7, 1, 1, "", "cudaLimitDevRuntimeSyncDepth"], [7, 1, 1, "", "cudaLimitMallocHeapSize"], [7, 1, 1, "", "cudaLimitMaxL2FetchGranularity"], [7, 1, 1, "", "cudaLimitPersistingL2CacheSize"], [7, 1, 1, "", "cudaLimitPrintfFifoSize"], [7, 1, 1, "", "cudaLimitStackSize"]], "cuda.bindings.runtime.cudaMemAccessDesc": [[7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "location"]], "cuda.bindings.runtime.cudaMemAccessFlags": [[7, 1, 1, "", "cudaMemAccessFlagsProtNone"], [7, 1, 1, "", "cudaMemAccessFlagsProtRead"], [7, 1, 1, "", "cudaMemAccessFlagsProtReadWrite"]], "cuda.bindings.runtime.cudaMemAllocNodeParams": [[7, 1, 1, "", "accessDescCount"], [7, 1, 1, "", "accessDescs"], [7, 1, 1, "", "bytesize"], [7, 1, 1, "", "dptr"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "poolProps"]], "cuda.bindings.runtime.cudaMemAllocNodeParamsV2": [[7, 1, 1, "", "accessDescCount"], [7, 1, 1, "", "accessDescs"], [7, 1, 1, "", "bytesize"], [7, 1, 1, "", "dptr"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "poolProps"]], "cuda.bindings.runtime.cudaMemAllocationHandleType": [[7, 1, 1, "", "cudaMemHandleTypeFabric"], [7, 1, 1, "", "cudaMemHandleTypeNone"], [7, 1, 1, "", "cudaMemHandleTypePosixFileDescriptor"], [7, 1, 1, "", "cudaMemHandleTypeWin32"], [7, 1, 1, "", "cudaMemHandleTypeWin32Kmt"]], "cuda.bindings.runtime.cudaMemAllocationType": [[7, 1, 1, "", "cudaMemAllocationTypeInvalid"], [7, 1, 1, "", "cudaMemAllocationTypeMax"], [7, 1, 1, "", "cudaMemAllocationTypePinned"]], "cuda.bindings.runtime.cudaMemFabricHandle_st": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaMemFabricHandle_t": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaMemFreeNodeParams": [[7, 1, 1, "", "dptr"], [7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaMemLocation": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "id"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaMemLocationType": [[7, 1, 1, "", "cudaMemLocationTypeDevice"], [7, 1, 1, "", "cudaMemLocationTypeHost"], [7, 1, 1, "", "cudaMemLocationTypeHostNuma"], [7, 1, 1, "", "cudaMemLocationTypeHostNumaCurrent"], [7, 1, 1, "", "cudaMemLocationTypeInvalid"]], "cuda.bindings.runtime.cudaMemPoolAttr": [[7, 1, 1, "", "cudaMemPoolAttrReleaseThreshold"], [7, 1, 1, "", "cudaMemPoolAttrReservedMemCurrent"], [7, 1, 1, "", "cudaMemPoolAttrReservedMemHigh"], [7, 1, 1, "", "cudaMemPoolAttrUsedMemCurrent"], [7, 1, 1, "", 
"cudaMemPoolAttrUsedMemHigh"], [7, 1, 1, "", "cudaMemPoolReuseAllowInternalDependencies"], [7, 1, 1, "", "cudaMemPoolReuseAllowOpportunistic"], [7, 1, 1, "", "cudaMemPoolReuseFollowEventDependencies"]], "cuda.bindings.runtime.cudaMemPoolProps": [[7, 1, 1, "", "allocType"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "handleTypes"], [7, 1, 1, "", "location"], [7, 1, 1, "", "maxSize"], [7, 1, 1, "", "reserved"], [7, 1, 1, "", "usage"], [7, 1, 1, "", "win32SecurityAttributes"]], "cuda.bindings.runtime.cudaMemPoolPtrExportData": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaMemPool_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaMemRangeAttribute": [[7, 1, 1, "", "cudaMemRangeAttributeAccessedBy"], [7, 1, 1, "", "cudaMemRangeAttributeLastPrefetchLocation"], [7, 1, 1, "", "cudaMemRangeAttributeLastPrefetchLocationId"], [7, 1, 1, "", "cudaMemRangeAttributeLastPrefetchLocationType"], [7, 1, 1, "", "cudaMemRangeAttributePreferredLocation"], [7, 1, 1, "", "cudaMemRangeAttributePreferredLocationId"], [7, 1, 1, "", "cudaMemRangeAttributePreferredLocationType"], [7, 1, 1, "", "cudaMemRangeAttributeReadMostly"]], "cuda.bindings.runtime.cudaMemcpy3DParms": [[7, 1, 1, "", "dstArray"], [7, 1, 1, "", "dstPos"], [7, 1, 1, "", "dstPtr"], [7, 1, 1, "", "extent"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "kind"], [7, 1, 1, "", "srcArray"], [7, 1, 1, "", "srcPos"], [7, 1, 1, "", "srcPtr"]], "cuda.bindings.runtime.cudaMemcpy3DPeerParms": [[7, 1, 1, "", "dstArray"], [7, 1, 1, "", "dstDevice"], [7, 1, 1, "", "dstPos"], [7, 1, 1, "", "dstPtr"], [7, 1, 1, "", "extent"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "srcArray"], [7, 1, 1, "", "srcDevice"], [7, 1, 1, "", "srcPos"], [7, 1, 1, "", "srcPtr"]], "cuda.bindings.runtime.cudaMemcpyKind": [[7, 1, 1, "", "cudaMemcpyDefault"], [7, 1, 1, "", "cudaMemcpyDeviceToDevice"], [7, 1, 1, "", "cudaMemcpyDeviceToHost"], [7, 1, 1, "", "cudaMemcpyHostToDevice"], [7, 1, 1, "", "cudaMemcpyHostToHost"]], "cuda.bindings.runtime.cudaMemcpyNodeParams": [[7, 1, 1, "", "copyParams"], [7, 1, 1, "", "flags"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "reserved"]], "cuda.bindings.runtime.cudaMemoryAdvise": [[7, 1, 1, "", "cudaMemAdviseSetAccessedBy"], [7, 1, 1, "", "cudaMemAdviseSetPreferredLocation"], [7, 1, 1, "", "cudaMemAdviseSetReadMostly"], [7, 1, 1, "", "cudaMemAdviseUnsetAccessedBy"], [7, 1, 1, "", "cudaMemAdviseUnsetPreferredLocation"], [7, 1, 1, "", "cudaMemAdviseUnsetReadMostly"]], "cuda.bindings.runtime.cudaMemoryType": [[7, 1, 1, "", "cudaMemoryTypeDevice"], [7, 1, 1, "", "cudaMemoryTypeHost"], [7, 1, 1, "", "cudaMemoryTypeManaged"], [7, 1, 1, "", "cudaMemoryTypeUnregistered"]], "cuda.bindings.runtime.cudaMemsetParams": [[7, 1, 1, "", "dst"], [7, 1, 1, "", "elementSize"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "height"], [7, 1, 1, "", "pitch"], [7, 1, 1, "", "value"], [7, 1, 1, "", "width"]], "cuda.bindings.runtime.cudaMemsetParamsV2": [[7, 1, 1, "", "dst"], [7, 1, 1, "", "elementSize"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "height"], [7, 1, 1, "", "pitch"], [7, 1, 1, "", "value"], [7, 1, 1, "", "width"]], "cuda.bindings.runtime.cudaMipmappedArray_const_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaMipmappedArray_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaPitchedPtr": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "pitch"], [7, 1, 1, "", "ptr"], [7, 1, 1, "", "xsize"], [7, 1, 1, "", "ysize"]], "cuda.bindings.runtime.cudaPointerAttributes": [[7, 1, 1, "", "device"], [7, 1, 1, "", "devicePointer"], [7, 2, 1, "", "getPtr"], 
[7, 1, 1, "", "hostPointer"], [7, 1, 1, "", "type"]], "cuda.bindings.runtime.cudaPos": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "x"], [7, 1, 1, "", "y"], [7, 1, 1, "", "z"]], "cuda.bindings.runtime.cudaResourceDesc": [[7, 2, 1, "", "getPtr"], [7, 1, 1, "", "res"], [7, 1, 1, "", "resType"]], "cuda.bindings.runtime.cudaResourceType": [[7, 1, 1, "", "cudaResourceTypeArray"], [7, 1, 1, "", "cudaResourceTypeLinear"], [7, 1, 1, "", "cudaResourceTypeMipmappedArray"], [7, 1, 1, "", "cudaResourceTypePitch2D"]], "cuda.bindings.runtime.cudaResourceViewDesc": [[7, 1, 1, "", "depth"], [7, 1, 1, "", "firstLayer"], [7, 1, 1, "", "firstMipmapLevel"], [7, 1, 1, "", "format"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "height"], [7, 1, 1, "", "lastLayer"], [7, 1, 1, "", "lastMipmapLevel"], [7, 1, 1, "", "width"]], "cuda.bindings.runtime.cudaResourceViewFormat": [[7, 1, 1, "", "cudaResViewFormatFloat1"], [7, 1, 1, "", "cudaResViewFormatFloat2"], [7, 1, 1, "", "cudaResViewFormatFloat4"], [7, 1, 1, "", "cudaResViewFormatHalf1"], [7, 1, 1, "", "cudaResViewFormatHalf2"], [7, 1, 1, "", "cudaResViewFormatHalf4"], [7, 1, 1, "", "cudaResViewFormatNone"], [7, 1, 1, "", "cudaResViewFormatSignedBlockCompressed4"], [7, 1, 1, "", "cudaResViewFormatSignedBlockCompressed5"], [7, 1, 1, "", "cudaResViewFormatSignedBlockCompressed6H"], [7, 1, 1, "", "cudaResViewFormatSignedChar1"], [7, 1, 1, "", "cudaResViewFormatSignedChar2"], [7, 1, 1, "", "cudaResViewFormatSignedChar4"], [7, 1, 1, "", "cudaResViewFormatSignedInt1"], [7, 1, 1, "", "cudaResViewFormatSignedInt2"], [7, 1, 1, "", "cudaResViewFormatSignedInt4"], [7, 1, 1, "", "cudaResViewFormatSignedShort1"], [7, 1, 1, "", "cudaResViewFormatSignedShort2"], [7, 1, 1, "", "cudaResViewFormatSignedShort4"], [7, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed1"], [7, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed2"], [7, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed3"], [7, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed4"], [7, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed5"], [7, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed6H"], [7, 1, 1, "", "cudaResViewFormatUnsignedBlockCompressed7"], [7, 1, 1, "", "cudaResViewFormatUnsignedChar1"], [7, 1, 1, "", "cudaResViewFormatUnsignedChar2"], [7, 1, 1, "", "cudaResViewFormatUnsignedChar4"], [7, 1, 1, "", "cudaResViewFormatUnsignedInt1"], [7, 1, 1, "", "cudaResViewFormatUnsignedInt2"], [7, 1, 1, "", "cudaResViewFormatUnsignedInt4"], [7, 1, 1, "", "cudaResViewFormatUnsignedShort1"], [7, 1, 1, "", "cudaResViewFormatUnsignedShort2"], [7, 1, 1, "", "cudaResViewFormatUnsignedShort4"]], "cuda.bindings.runtime.cudaSharedCarveout": [[7, 1, 1, "", "cudaSharedmemCarveoutDefault"], [7, 1, 1, "", "cudaSharedmemCarveoutMaxL1"], [7, 1, 1, "", "cudaSharedmemCarveoutMaxShared"]], "cuda.bindings.runtime.cudaSharedMemConfig": [[7, 1, 1, "", "cudaSharedMemBankSizeDefault"], [7, 1, 1, "", "cudaSharedMemBankSizeEightByte"], [7, 1, 1, "", "cudaSharedMemBankSizeFourByte"]], "cuda.bindings.runtime.cudaStreamCallback_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaStreamCaptureMode": [[7, 1, 1, "", "cudaStreamCaptureModeGlobal"], [7, 1, 1, "", "cudaStreamCaptureModeRelaxed"], [7, 1, 1, "", "cudaStreamCaptureModeThreadLocal"]], "cuda.bindings.runtime.cudaStreamCaptureStatus": [[7, 1, 1, "", "cudaStreamCaptureStatusActive"], [7, 1, 1, "", "cudaStreamCaptureStatusInvalidated"], [7, 1, 1, "", "cudaStreamCaptureStatusNone"]], "cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags": [[7, 1, 1, "", 
"cudaStreamAddCaptureDependencies"], [7, 1, 1, "", "cudaStreamSetCaptureDependencies"]], "cuda.bindings.runtime.cudaStream_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaSurfaceBoundaryMode": [[7, 1, 1, "", "cudaBoundaryModeClamp"], [7, 1, 1, "", "cudaBoundaryModeTrap"], [7, 1, 1, "", "cudaBoundaryModeZero"]], "cuda.bindings.runtime.cudaSurfaceFormatMode": [[7, 1, 1, "", "cudaFormatModeAuto"], [7, 1, 1, "", "cudaFormatModeForced"]], "cuda.bindings.runtime.cudaSurfaceObject_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaSynchronizationPolicy": [[7, 1, 1, "", "cudaSyncPolicyAuto"], [7, 1, 1, "", "cudaSyncPolicyBlockingSync"], [7, 1, 1, "", "cudaSyncPolicySpin"], [7, 1, 1, "", "cudaSyncPolicyYield"]], "cuda.bindings.runtime.cudaTextureAddressMode": [[7, 1, 1, "", "cudaAddressModeBorder"], [7, 1, 1, "", "cudaAddressModeClamp"], [7, 1, 1, "", "cudaAddressModeMirror"], [7, 1, 1, "", "cudaAddressModeWrap"]], "cuda.bindings.runtime.cudaTextureDesc": [[7, 1, 1, "", "addressMode"], [7, 1, 1, "", "borderColor"], [7, 1, 1, "", "disableTrilinearOptimization"], [7, 1, 1, "", "filterMode"], [7, 2, 1, "", "getPtr"], [7, 1, 1, "", "maxAnisotropy"], [7, 1, 1, "", "maxMipmapLevelClamp"], [7, 1, 1, "", "minMipmapLevelClamp"], [7, 1, 1, "", "mipmapFilterMode"], [7, 1, 1, "", "mipmapLevelBias"], [7, 1, 1, "", "normalizedCoords"], [7, 1, 1, "", "readMode"], [7, 1, 1, "", "sRGB"], [7, 1, 1, "", "seamlessCubemap"]], "cuda.bindings.runtime.cudaTextureFilterMode": [[7, 1, 1, "", "cudaFilterModeLinear"], [7, 1, 1, "", "cudaFilterModePoint"]], "cuda.bindings.runtime.cudaTextureObject_t": [[7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaTextureReadMode": [[7, 1, 1, "", "cudaReadModeElementType"], [7, 1, 1, "", "cudaReadModeNormalizedFloat"]], "cuda.bindings.runtime.cudaUUID_t": [[7, 1, 1, "", "bytes"], [7, 2, 1, "", "getPtr"]], "cuda.bindings.runtime.cudaUserObjectFlags": [[7, 1, 1, "", "cudaUserObjectNoDestructorSync"]], "cuda.bindings.runtime.cudaUserObjectRetainFlags": [[7, 1, 1, "", "cudaGraphUserObjectMove"]], "cuda.bindings.runtime.cudaUserObject_t": [[7, 2, 1, "", "getPtr"]]}, "objtypes": {"0": "py:class", "1": "py:attribute", "2": "py:method", "3": "py:function"}, "objnames": {"0": ["py", "class", "Python class"], "1": ["py", "attribute", "Python attribute"], "2": ["py", "method", "Python method"], "3": ["py", "function", "Python function"]}, "titleterms": {"cuda": [0, 3, 5, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "python": [0, 3, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "api": [0, 5, 7], "refer": 0, "captionhold": 0, "code": 1, "conduct": 1, "overview": [1, 9], "our": 1, "pledg": 1, "standard": 1, "respons": 1, "scope": 1, "enforc": 1, "attribut": [1, 5], "contribut": 2, "manual": 3, "content": 3, "indic": 3, "tabl": 3, "instal": 4, "runtim": [4, 7], "requir": 4, "from": 4, "pypi": 4, "conda": 4, "sourc": [4, 16, 17], "build": [4, 16, 17], "In": 4, "place": 4, "develop": 4, "doc": 4, "publish": 4, "driver": [5, 7], "data": [5, 7], "type": [5, 7], "us": [5, 7], "error": [5, 6, 7], "handl": [5, 6, 7], "initi": 5, "version": [5, 7], "manag": [5, 7], "devic": [5, 7], "primari": 5, "context": 5, "modul": 5, "librari": 5, "memori": [5, 7], "virtual": 5, "stream": [5, 7, 13], "order": [5, 7], "alloc": [5, 7], "multicast": 5, "object": [5, 7], "unifi": [5, 7], "address": [5, 7], "event": [5, 7], "extern": [5, 7], "resourc": [5, 7], "interoper": [5, 7, 13], "oper": 5, "execut": [5, 7], "control": 
[5, 7], "graph": [5, 7], "occup": [5, 7], "textur": [5, 7], "surfac": [5, 7], "tensor": 5, "map": 5, "peer": [5, 7], "access": [5, 7], "graphic": [5, 7], "entri": [5, 7], "point": [5, 7], "coredump": 5, "green": 5, "egl": [5, 7], "opengl": [5, 7], "profil": [5, 7], "vdpau": [5, 7], "nvrtc": 6, "gener": 6, "inform": 6, "queri": 6, "compil": 6, "support": [6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "option": 6, "direct3d": 7, "9": 7, "10": 7, "11": [7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], "c": 7, "routin": 7, "interact": 7, "motiv": 8, "what": 8, "why": 8, "workflow": 9, "perform": 9, "kernel": 9, "applic": 9, "comparison": 9, "futur": 9, "releas": [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "note": [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "4": [11, 21, 27], "0": [11, 12, 13, 15, 17, 22, 23, 24, 26, 27, 28, 29], "hightlight": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "limit": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "function": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "Not": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "thi": [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], "5": [12, 28], "6": [13, 14, 29, 30], "default": 13, "primit": 13, "1": [14, 16, 18, 23, 25, 30], "7": [15, 16], "8": [17, 18, 19, 20, 21], "2": [19, 24, 25], "3": [20, 26], "12": [22, 23, 24, 25, 26, 27, 28, 29, 30]}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 6, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 56}}) \ No newline at end of file diff --git a/docs_src/source/api.rst b/docs_src/source/api.rst index aec35f5e..4dcac337 100644 --- a/docs_src/source/api.rst +++ b/docs_src/source/api.rst @@ -6,6 +6,6 @@ CUDA Python API Reference :maxdepth: 3 :caption: CaptionHolder: - module/cuda - module/cudart + module/driver + module/runtime module/nvrtc diff --git a/docs_src/source/conf.py b/docs_src/source/conf.py index 173f753c..62de0de1 100644 --- a/docs_src/source/conf.py +++ b/docs_src/source/conf.py @@ -22,7 +22,7 @@ author = 'NVIDIA' # The full version, including alpha/beta/rc tags -release = '12.6.0' +release = '12.6.1' # -- General configuration --------------------------------------------------- diff --git a/docs_src/source/module/cuda.rst b/docs_src/source/module/cuda.rst deleted file mode 100644 index 8aa8c8fc..00000000 --- a/docs_src/source/module/cuda.rst +++ /dev/null @@ -1,6792 +0,0 @@ ----- -cuda ----- - -Data types used by CUDA driver ------------------------------- - - - -.. autoclass:: cuda.cuda.CUuuid_st -.. autoclass:: cuda.cuda.CUmemFabricHandle_st -.. autoclass:: cuda.cuda.CUipcEventHandle_st -.. autoclass:: cuda.cuda.CUipcMemHandle_st -.. autoclass:: cuda.cuda.CUstreamBatchMemOpParams_union -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUasyncNotificationInfo_st -.. autoclass:: cuda.cuda.CUdevprop_st -.. autoclass:: cuda.cuda.CUaccessPolicyWindow_st -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2_st -.. 
autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3_st -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_CONDITIONAL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUgraphEdgeData_st -.. autoclass:: cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS_st -.. autoclass:: cuda.cuda.CUlaunchMemSyncDomainMap_st -.. autoclass:: cuda.cuda.CUlaunchAttributeValue_union -.. autoclass:: cuda.cuda.CUlaunchAttribute_st -.. autoclass:: cuda.cuda.CUlaunchConfig_st -.. autoclass:: cuda.cuda.CUexecAffinitySmCount_st -.. autoclass:: cuda.cuda.CUexecAffinityParam_st -.. autoclass:: cuda.cuda.CUctxCigParam_st -.. autoclass:: cuda.cuda.CUctxCreateParams_st -.. autoclass:: cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY2D_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_PEER_st -.. autoclass:: cuda.cuda.CUDA_MEMCPY_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_ARRAY_DESCRIPTOR_st -.. autoclass:: cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_st -.. autoclass:: cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_st -.. autoclass:: cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_st -.. autoclass:: cuda.cuda.CUDA_RESOURCE_DESC_st -.. autoclass:: cuda.cuda.CUDA_TEXTURE_DESC_st -.. autoclass:: cuda.cuda.CUDA_RESOURCE_VIEW_DESC_st -.. autoclass:: cuda.cuda.CUtensorMap_st -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st -.. autoclass:: cuda.cuda.CUDA_LAUNCH_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUarrayMapInfo_st -.. autoclass:: cuda.cuda.CUmemLocation_st -.. autoclass:: cuda.cuda.CUmemAllocationProp_st -.. autoclass:: cuda.cuda.CUmulticastObjectProp_st -.. autoclass:: cuda.cuda.CUmemAccessDesc_st -.. autoclass:: cuda.cuda.CUgraphExecUpdateResultInfo_st -.. autoclass:: cuda.cuda.CUmemPoolProps_st -.. autoclass:: cuda.cuda.CUmemPoolPtrExportData_st -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st -.. autoclass:: cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS_st -.. autoclass:: cuda.cuda.CUgraphNodeParams_st -.. autoclass:: cuda.cuda.CUeglFrame_st -.. autoclass:: cuda.cuda.CUipcMem_flags - - .. autoattribute:: cuda.cuda.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS - - - Automatically enable peer access between remote devices as needed - -.. autoclass:: cuda.cuda.CUmemAttach_flags - - .. autoattribute:: cuda.cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL - - - Memory can be accessed by any stream on any device - - - .. 
autoattribute:: cuda.cuda.CUmemAttach_flags.CU_MEM_ATTACH_HOST - - - Memory cannot be accessed by any stream on any device - - - .. autoattribute:: cuda.cuda.CUmemAttach_flags.CU_MEM_ATTACH_SINGLE - - - Memory can only be accessed by a single stream on the associated device - -.. autoclass:: cuda.cuda.CUctx_flags - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling [Deprecated] - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SCHED_MASK - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_MAP_HOST - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_LMEM_RESIZE_TO_MAX - - - Keep local memory allocation after launch - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_COREDUMP_ENABLE - - - Trigger coredumps from exceptions in this context - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_USER_COREDUMP_ENABLE - - - Enable user pipe to trigger coredumps in this context - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_SYNC_MEMOPS - - - Ensure synchronous memory operations on this context will synchronize - - - .. autoattribute:: cuda.cuda.CUctx_flags.CU_CTX_FLAGS_MASK - -.. autoclass:: cuda.cuda.CUevent_sched_flags - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.CUevent_sched_flags.CU_EVENT_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - -.. autoclass:: cuda.cuda.cl_event_flags - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.cl_event_flags.NVCL_EVENT_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - -.. autoclass:: cuda.cuda.cl_context_flags - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_AUTO - - - Automatic scheduling - - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_SPIN - - - Set spin as default scheduling - - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_YIELD - - - Set yield as default scheduling - - - .. autoattribute:: cuda.cuda.cl_context_flags.NVCL_CTX_SCHED_BLOCKING_SYNC - - - Set blocking synchronization as default scheduling - -.. autoclass:: cuda.cuda.CUstream_flags - - .. autoattribute:: cuda.cuda.CUstream_flags.CU_STREAM_DEFAULT - - - Default stream flag - - - .. autoattribute:: cuda.cuda.CUstream_flags.CU_STREAM_NON_BLOCKING - - - Stream does not synchronize with stream 0 (the NULL stream) - -.. autoclass:: cuda.cuda.CUevent_flags - - .. autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_DEFAULT - - - Default event flag - - - .. 
autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_BLOCKING_SYNC - - - Event uses blocking synchronization - - - .. autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_DISABLE_TIMING - - - Event will not record timing data - - - .. autoattribute:: cuda.cuda.CUevent_flags.CU_EVENT_INTERPROCESS - - - Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set - -.. autoclass:: cuda.cuda.CUevent_record_flags - - .. autoattribute:: cuda.cuda.CUevent_record_flags.CU_EVENT_RECORD_DEFAULT - - - Default event record flag - - - .. autoattribute:: cuda.cuda.CUevent_record_flags.CU_EVENT_RECORD_EXTERNAL - - - When using stream capture, create an event record node instead of the default behavior. This flag is invalid when used outside of capture. - -.. autoclass:: cuda.cuda.CUevent_wait_flags - - .. autoattribute:: cuda.cuda.CUevent_wait_flags.CU_EVENT_WAIT_DEFAULT - - - Default event wait flag - - - .. autoattribute:: cuda.cuda.CUevent_wait_flags.CU_EVENT_WAIT_EXTERNAL - - - When using stream capture, create an event wait node instead of the default behavior. This flag is invalid when used outside of capture. - -.. autoclass:: cuda.cuda.CUstreamWaitValue_flags - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ - - - Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit values). Note this is a cyclic comparison which ignores wraparound. (Default behavior.) - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_EQ - - - Wait until *addr == value. - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_AND - - - Wait until (*addr & value) != 0. - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_NOR - - - Wait until ~(*addr | value) != 0. Support for this operation can be queried with :py:obj:`~.cuDeviceGetAttribute()` and :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR`. - - - .. autoattribute:: cuda.cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_FLUSH - - - Follow the wait operation with a flush of outstanding remote writes. This means that, if a remote write operation is guaranteed to have reached the device before the wait can be satisfied, that write is guaranteed to be visible to downstream device work. The device is permitted to reorder remote writes internally. For example, this flag would be required if two remote writes arrive in a defined order, the wait is satisfied by the second write, and downstream work needs to observe the first write. Support for this operation is restricted to selected platforms and can be queried with :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES`. - -.. autoclass:: cuda.cuda.CUstreamWriteValue_flags - - .. autoattribute:: cuda.cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT - - - Default behavior - - - .. autoattribute:: cuda.cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER - - - Permits the write to be reordered with writes which were issued before it, as a performance optimization. Normally, :py:obj:`~.cuStreamWriteValue32` will provide a memory fence before the write, which has similar semantics to __threadfence_system() but is scoped to the stream rather than a CUDA thread. This flag is not supported in the v2 API. - -.. autoclass:: cuda.cuda.CUstreamBatchMemOpType - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 - - - Represents a :py:obj:`~.cuStreamWaitValue32` operation - - - .. 
autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_32 - - - Represents a :py:obj:`~.cuStreamWriteValue32` operation - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_64 - - - Represents a :py:obj:`~.cuStreamWaitValue64` operation - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_64 - - - Represents a :py:obj:`~.cuStreamWriteValue64` operation - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_BARRIER - - - Insert a memory barrier of the specified type - - - .. autoattribute:: cuda.cuda.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES - - - This has the same effect as :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH`, but as a standalone operation. - -.. autoclass:: cuda.cuda.CUstreamMemoryBarrier_flags - - .. autoattribute:: cuda.cuda.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_SYS - - - System-wide memory barrier. - - - .. autoattribute:: cuda.cuda.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_GPU - - - Limit memory barrier scope to the GPU. - -.. autoclass:: cuda.cuda.CUoccupancy_flags - - .. autoattribute:: cuda.cuda.CUoccupancy_flags.CU_OCCUPANCY_DEFAULT - - - Default behavior - - - .. autoattribute:: cuda.cuda.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE - - - Assume global caching is enabled and cannot be automatically turned off - -.. autoclass:: cuda.cuda.CUstreamUpdateCaptureDependencies_flags - - .. autoattribute:: cuda.cuda.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_ADD_CAPTURE_DEPENDENCIES - - - Add new nodes to the dependency set - - - .. autoattribute:: cuda.cuda.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_SET_CAPTURE_DEPENDENCIES - - - Replace the dependency set with the new nodes - -.. autoclass:: cuda.cuda.CUasyncNotificationType - - .. autoattribute:: cuda.cuda.CUasyncNotificationType.CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET - -.. autoclass:: cuda.cuda.CUarray_format - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT8 - - - Unsigned 8-bit integers - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT16 - - - Unsigned 16-bit integers - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT32 - - - Unsigned 32-bit integers - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SIGNED_INT8 - - - Signed 8-bit integers - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SIGNED_INT16 - - - Signed 16-bit integers - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SIGNED_INT32 - - - Signed 32-bit integers - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_HALF - - - 16-bit floating point - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_FLOAT - - - 32-bit floating point - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_NV12 - - - 8-bit YUV planar format, with 4:2:0 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT8X1 - - - 1 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT8X2 - - - 2 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT8X4 - - - 4 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT16X1 - - - 1 channel unsigned 16-bit normalized integer - - - .. 
autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT16X2 - - - 2 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_UNORM_INT16X4 - - - 4 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT8X1 - - - 1 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT8X2 - - - 2 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT8X4 - - - 4 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT16X1 - - - 1 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT16X2 - - - 2 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_SNORM_INT16X4 - - - 4 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC1_UNORM - - - 4 channel unsigned normalized block-compressed (BC1 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC1_UNORM_SRGB - - - 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC2_UNORM - - - 4 channel unsigned normalized block-compressed (BC2 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC2_UNORM_SRGB - - - 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC3_UNORM - - - 4 channel unsigned normalized block-compressed (BC3 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC3_UNORM_SRGB - - - 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC4_UNORM - - - 1 channel unsigned normalized block-compressed (BC4 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC4_SNORM - - - 1 channel signed normalized block-compressed (BC4 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC5_UNORM - - - 2 channel unsigned normalized block-compressed (BC5 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC5_SNORM - - - 2 channel signed normalized block-compressed (BC5 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC6H_UF16 - - - 3 channel unsigned half-float block-compressed (BC6H compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC6H_SF16 - - - 3 channel signed half-float block-compressed (BC6H compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC7_UNORM - - - 4 channel unsigned normalized block-compressed (BC7 compression) format - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_BC7_UNORM_SRGB - - - 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P010 - - - 10-bit YUV planar format, with 4:2:0 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P016 - - - 16-bit YUV planar format, with 4:2:0 sampling - - - .. 
autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_NV16 - - - 8-bit YUV planar format, with 4:2:2 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P210 - - - 10-bit YUV planar format, with 4:2:2 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_P216 - - - 16-bit YUV planar format, with 4:2:2 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_YUY2 - - - 2 channel, 8-bit YUV packed planar format, with 4:2:2 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y210 - - - 2 channel, 10-bit YUV packed planar format, with 4:2:2 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y216 - - - 2 channel, 16-bit YUV packed planar format, with 4:2:2 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_AYUV - - - 4 channel, 8-bit YUV packed planar format, with 4:4:4 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y410 - - - 10-bit YUV packed planar format, with 4:4:4 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y416 - - - 4 channel, 12-bit YUV packed planar format, with 4:4:4 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y444_PLANAR8 - - - 3 channel 8-bit YUV planar format, with 4:4:4 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_Y444_PLANAR10 - - - 3 channel 10-bit YUV planar format, with 4:4:4 sampling - - - .. autoattribute:: cuda.cuda.CUarray_format.CU_AD_FORMAT_MAX - -.. autoclass:: cuda.cuda.CUaddress_mode - - .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_WRAP - - - Wrapping address mode - - - .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP - - - Clamp to edge address mode - - - .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_MIRROR - - - Mirror address mode - - - .. autoattribute:: cuda.cuda.CUaddress_mode.CU_TR_ADDRESS_MODE_BORDER - - - Border address mode - -.. autoclass:: cuda.cuda.CUfilter_mode - - .. autoattribute:: cuda.cuda.CUfilter_mode.CU_TR_FILTER_MODE_POINT - - - Point filter mode - - - .. autoattribute:: cuda.cuda.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR - - - Linear filter mode - -.. autoclass:: cuda.cuda.CUdevice_attribute - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK - - - Maximum number of threads per block - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X - - - Maximum block dimension X - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y - - - Maximum block dimension Y - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z - - - Maximum block dimension Z - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X - - - Maximum grid dimension X - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y - - - Maximum grid dimension Y - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z - - - Maximum grid dimension Z - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK - - - Maximum shared memory available per block in bytes - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK - - - Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK - - - .. 
autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY - - - Memory available on device for constant variables in a CUDA C kernel in bytes - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_WARP_SIZE - - - Warp size in threads - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PITCH - - - Maximum pitch in bytes allowed by memory copies - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK - - - Maximum number of 32-bit registers available per block - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK - - - Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLOCK_RATE - - - Typical clock frequency in kilohertz - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT - - - Alignment requirement for textures - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP - - - Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT - - - Number of multiprocessors on device - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT - - - Specifies whether there is a run time limit on kernels - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_INTEGRATED - - - Device is integrated with host memory - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY - - - Device can map host memory into CUDA address space - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE - - - Compute mode (See :py:obj:`~.CUcomputemode` for details) - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH - - - Maximum 1D texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH - - - Maximum 2D texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT - - - Maximum 2D texture height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH - - - Maximum 3D texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT - - - Maximum 3D texture height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH - - - Maximum 3D texture depth - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH - - - Maximum 2D layered texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT - - - Maximum 2D layered texture height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS - - - Maximum layers in a 2D layered texture - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH - - - Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT - - - Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT - - - .. 
autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES - - - Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT - - - Alignment requirement for surfaces - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS - - - Device can possibly execute multiple kernels concurrently - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ECC_ENABLED - - - Device has ECC support enabled - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID - - - PCI bus ID of the device - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID - - - PCI device ID of the device - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TCC_DRIVER - - - Device is using TCC driver model - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE - - - Peak memory clock frequency in kilohertz - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH - - - Global memory bus width in bits - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE - - - Size of L2 cache in bytes - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR - - - Maximum resident threads per multiprocessor - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT - - - Number of asynchronous engines - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING - - - Device shares a unified address space with the host - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH - - - Maximum 1D layered texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS - - - Maximum layers in a 1D layered texture - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER - - - Deprecated, do not use. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH - - - Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT - - - Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE - - - Alternate maximum 3D texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE - - - Alternate maximum 3D texture height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE - - - Alternate maximum 3D texture depth - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID - - - PCI domain ID of the device - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT - - - Pitch alignment requirement for textures - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH - - - Maximum cubemap texture width/height - - - .. 
autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH - - - Maximum cubemap layered texture width/height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS - - - Maximum layers in a cubemap layered texture - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH - - - Maximum 1D surface width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH - - - Maximum 2D surface width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT - - - Maximum 2D surface height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH - - - Maximum 3D surface width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT - - - Maximum 3D surface height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH - - - Maximum 3D surface depth - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH - - - Maximum 1D layered surface width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS - - - Maximum layers in a 1D layered surface - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH - - - Maximum 2D layered surface width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT - - - Maximum 2D layered surface height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS - - - Maximum layers in a 2D layered surface - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH - - - Maximum cubemap surface width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH - - - Maximum cubemap layered surface width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS - - - Maximum layers in a cubemap layered surface - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH - - - Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or :py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth()` instead. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH - - - Maximum 2D linear texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT - - - Maximum 2D linear texture height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH - - - Maximum 2D linear texture pitch in bytes - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH - - - Maximum mipmapped 2D texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT - - - Maximum mipmapped 2D texture height - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR - - - Major compute capability version number - - - .. 
autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR - - - Minor compute capability version number - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH - - - Maximum mipmapped 1D texture width - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED - - - Device supports stream priorities - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED - - - Device supports caching globals in L1 - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED - - - Device supports caching locals in L1 - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR - - - Maximum shared memory available per multiprocessor in bytes - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR - - - Maximum number of 32-bit registers available per multiprocessor - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY - - - Device can allocate managed memory on this system - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD - - - Device is on a multi-GPU board - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID - - - Unique id for a group of devices on the same multi-GPU board - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED - - - Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware) - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO - - - Ratio of single precision performance (in floating-point operations per second) to double precision performance - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS - - - Device supports coherently accessing pageable memory without calling cudaHostRegister on it - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS - - - Device can coherently access managed memory concurrently with the CPU - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED - - - Device supports compute preemption. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM - - - Device can access host registered memory at the same virtual address as the CPU - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 - - - Deprecated, along with v1 MemOps API, :py:obj:`~.cuStreamBatchMemOp` and related APIs are supported. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 - - - Deprecated, along with v1 MemOps API, 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related APIs. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 - - - Deprecated, along with v1 MemOps API, :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH - - - Device supports launching cooperative kernels via :py:obj:`~.cuLaunchCooperativeKernel` - - - .. 
autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH - - - Deprecated, :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` is deprecated. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN - - - Maximum optin shared memory per block - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES - - - The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. See :py:obj:`~.Stream Memory Operations` for additional details. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED - - - Device supports host memory registration via :py:obj:`~.cudaHostRegister`. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES - - - Device accesses pageable memory via the host's page tables. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST - - - The host can directly access managed memory on the device without migration. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED - - - Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED - - - Device supports virtual memory management APIs like :py:obj:`~.cuMemAddressReserve`, :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemMap` and related APIs - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED - - - Device supports exporting memory to a posix file descriptor with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED - - - Device supports exporting memory to a Win32 NT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED - - - Device supports exporting memory to a Win32 KMT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR - - - Maximum number of blocks per multiprocessor - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED - - - Device supports compression of memory - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE - - - Maximum L2 persisting lines capacity setting in bytes. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE - - - Maximum value of :py:obj:`~.CUaccessPolicyWindow.num_bytes`. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED - - - Device supports specifying the GPUDirect RDMA flag with :py:obj:`~.cuMemCreate` - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK - - - Shared memory reserved by CUDA driver per block in bytes - - - .. 
autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED - - - Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED - - - Device supports using the :py:obj:`~.cuMemHostRegister` flag :py:obj:`~.CU_MEMHOSTERGISTER_READ_ONLY` to register memory that must be mapped as read-only to the GPU - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED - - - External timeline semaphore interop is supported on the device - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED - - - Device supports using the :py:obj:`~.cuMemAllocAsync` and :py:obj:`~.cuMemPool` family of APIs - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED - - - Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS - - - The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the :py:obj:`~.CUflushGPUDirectRDMAWritesOptions` enum - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING - - - GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.CUGPUDirectRDMAWritesOrdering` for the numerical values returned here. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES - - - Handle types supported with mempool based IPC - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH - - - Indicates device supports cluster launch - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED - - - Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS - - - 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related MemOp APIs. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR - - - :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported by MemOp APIs. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED - - - Device supports buffer sharing with dma_buf mechanism. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED - - - Device supports IPC Events. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT - - - Number of memory domains the device supports. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED - - - Device supports accessing memory using Tensor Map. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED - - - Device supports exporting memory to a fabric handle with :py:obj:`~.cuMemExportToShareableHandle()` or requested with :py:obj:`~.cuMemCreate()` - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS - - - Device supports unified function pointers. - - - .. 
autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG - - - NUMA configuration of a device: value is of type :py:obj:`~.CUdeviceNumaConfig` enum - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_ID - - - NUMA node ID of the GPU memory - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED - - - Device supports switch multicast and reduction operations. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MPS_ENABLED - - - Indicates if contexts created on this device will be shared via MPS - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID - - - NUMA ID of the host node closest to the device. Returns -1 when the system does not support NUMA. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED - - - Device supports CIG with D3D12. - - - .. autoattribute:: cuda.cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX - -.. autoclass:: cuda.cuda.CUpointer_attribute - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_CONTEXT - - - The :py:obj:`~.CUcontext` on which a pointer was allocated or registered - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE - - - The :py:obj:`~.CUmemorytype` describing the physical location of a pointer - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_POINTER - - - The address at which a pointer's memory may be accessed on the device - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_HOST_POINTER - - - The address at which a pointer's memory may be accessed on the host - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_P2P_TOKENS - - - A pair of tokens for use with the nv-p2p.h Linux kernel interface - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS - - - Synchronize every synchronous memory operation initiated on this region - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_BUFFER_ID - - - A process-wide unique ID for an allocated memory region - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_MANAGED - - - Indicates if the pointer points to managed memory - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL - - - A device ordinal of a device on which a pointer was allocated or registered - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE - - - 1 if this pointer maps to an allocation that is suitable for :py:obj:`~.cudaIpcGetMemHandle`, 0 otherwise - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR - - - Starting address for this requested pointer - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_SIZE - - - Size of the address range for this requested pointer - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPED - - - 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES - - - Bitmask of allowed :py:obj:`~.CUmemAllocationHandleType` for this allocation - - -
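For illustration, a minimal sketch of reading one of these pointer attributes back from the driver, assuming the ``cuda-python`` bindings and a device that allows context creation (error handling is elided)::

    from cuda import cuda

    err, = cuda.cuInit(0)
    err, dev = cuda.cuDeviceGet(0)
    err, ctx = cuda.cuCtxCreate(0, dev)

    # Allocate device memory, then ask the driver what kind of pointer it is.
    err, dptr = cuda.cuMemAlloc(1 << 20)
    err, memtype = cuda.cuPointerGetAttribute(
        cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, dptr)
    print(memtype)  # expected to correspond to CU_MEMORYTYPE_DEVICE

    err, = cuda.cuMemFree(dptr)
    err, = cuda.cuCtxDestroy(ctx)

.. 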
autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE - - - 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS - - - Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE - - - Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL. - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_SIZE - - - Size of the actual underlying mapping that the pointer belongs to - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR - - - The start address of the mapping that the pointer belongs to - - - .. autoattribute:: cuda.cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID - - - A process-wide unique id corresponding to the physical allocation the pointer belongs to - -.. autoclass:: cuda.cuda.CUfunction_attribute - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK - - - The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES - - - The size in bytes of statically-allocated shared memory required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES - - - The size in bytes of user-allocated constant memory required by this function. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES - - - The size in bytes of local memory used by each thread of this function. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS - - - The number of registers used by each thread of this function. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PTX_VERSION - - - The PTX virtual architecture version for which the function was compiled. This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_BINARY_VERSION - - - The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA - - - The attribute to indicate whether the function has been compiled with the user-specified option "-Xptxas --dlcm=ca" set. - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES - - - The maximum size in bytes of dynamically-allocated shared memory that can be used by this function. If the user-specified dynamic shared memory size is larger than this value, the launch will fail. 
See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT - - - On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. Refer to :py:obj:`~.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR`. This is only a hint, and the driver can choose a different ratio if required to execute the function. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET - - - If this attribute is set, the kernel must launch with a valid cluster size specified. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH - - - The required cluster width in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. - - - - If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT - - - The required cluster height in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. - - - - If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH - - - The required cluster depth in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. - - - - If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED - - - Whether the function can be launched with non-portable cluster size. 1 is allowed, 0 is disallowed. A non-portable cluster size may only function on the specific SKUs the program is tested on. The launch might fail if the program is run on a different hardware platform. - - - - The CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking whether the desired size can be launched on the current device. - - - - Portable Cluster Size - - - - A portable cluster size is guaranteed to be functional on all compute capabilities higher than the target compute capability. The portable cluster size for sm_90 is 8 blocks per cluster. This value may increase for future compute capabilities. - - - - The specific hardware unit may support higher cluster sizes that are not guaranteed to be portable. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE - - - The block scheduling policy of a function. The value type is CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. 
See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` - - - .. autoattribute:: cuda.cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX - -.. autoclass:: cuda.cuda.CUfunc_cache - - .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_NONE - - - no preference for shared memory or L1 (default) - - - .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED - - - prefer larger shared memory and smaller L1 cache - - - .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_L1 - - - prefer larger L1 cache and smaller shared memory - - - .. autoattribute:: cuda.cuda.CUfunc_cache.CU_FUNC_CACHE_PREFER_EQUAL - - - prefer equal-sized L1 cache and shared memory - -.. autoclass:: cuda.cuda.CUsharedconfig - - .. autoattribute:: cuda.cuda.CUsharedconfig.CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE - - - set default shared memory bank size - - - .. autoattribute:: cuda.cuda.CUsharedconfig.CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE - - - set shared memory bank width to four bytes - - - .. autoattribute:: cuda.cuda.CUsharedconfig.CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE - - - set shared memory bank width to eight bytes - -.. autoclass:: cuda.cuda.CUshared_carveout - - .. autoattribute:: cuda.cuda.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_DEFAULT - - - No preference for shared memory or L1 (default) - - - .. autoattribute:: cuda.cuda.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_SHARED - - - Prefer maximum available shared memory, minimum L1 cache - - - .. autoattribute:: cuda.cuda.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_L1 - - - Prefer maximum available L1 cache, minimum shared memory - -.. autoclass:: cuda.cuda.CUmemorytype - - .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_HOST - - - Host memory - - - .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE - - - Device memory - - - .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_ARRAY - - - Array memory - - - .. autoattribute:: cuda.cuda.CUmemorytype.CU_MEMORYTYPE_UNIFIED - - - Unified device or host memory - -.. autoclass:: cuda.cuda.CUcomputemode - - .. autoattribute:: cuda.cuda.CUcomputemode.CU_COMPUTEMODE_DEFAULT - - - Default compute mode (Multiple contexts allowed per device) - - - .. autoattribute:: cuda.cuda.CUcomputemode.CU_COMPUTEMODE_PROHIBITED - - - Compute-prohibited mode (No contexts can be created on this device at this time) - - - .. autoattribute:: cuda.cuda.CUcomputemode.CU_COMPUTEMODE_EXCLUSIVE_PROCESS - - - Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) - -.. autoclass:: cuda.cuda.CUmem_advise - - .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY - - - Data will mostly be read and only occasionally be written to - - - .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_UNSET_READ_MOSTLY - - - Undo the effect of :py:obj:`~.CU_MEM_ADVISE_SET_READ_MOSTLY` - - - .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_SET_PREFERRED_LOCATION - - - Set the preferred location for the data as the specified device - - - .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION - - - Clear the preferred location for the data - - - .. autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_SET_ACCESSED_BY - - - Data will be accessed by the specified device, so prevent page faults as much as possible - - -
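As a sketch of how these advice values are applied in practice, assuming the ``cuda-python`` bindings, an active context, and a device with managed-memory support (error handling is elided)::

    from cuda import cuda

    N = 1 << 20
    # Managed memory is accessible from both host and device.
    err, dptr = cuda.cuMemAllocManaged(
        N, cuda.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL)

    # Hint that the range is mostly read, so the driver may keep
    # read-only copies of its pages on multiple processors.
    err, = cuda.cuMemAdvise(
        dptr, N, cuda.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, 0)

    err, = cuda.cuMemFree(dptr)

.. 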
autoattribute:: cuda.cuda.CUmem_advise.CU_MEM_ADVISE_UNSET_ACCESSED_BY - - - Let the Unified Memory subsystem decide on the page faulting policy for the specified device - -.. autoclass:: cuda.cuda.CUmem_range_attribute - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY - - - Whether the range will mostly be read and only occasionally be written to - - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION - - - The preferred location of the range - - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY - - - Memory range has :py:obj:`~.CU_MEM_ADVISE_SET_ACCESSED_BY` set for specified device - - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION - - - The last location to which the range was prefetched - - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE - - - The preferred location type of the range - - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID - - - The preferred location id of the range - - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE - - - The last location type to which the range was prefetched - - - .. autoattribute:: cuda.cuda.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID - - - The last location id to which the range was prefetched - -.. autoclass:: cuda.cuda.CUjit_option - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_MAX_REGISTERS - - - Max number of registers that a thread may use. - - Option type: unsigned int - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_THREADS_PER_BLOCK - - - IN: Specifies minimum number of threads per block to target compilation for - - OUT: Returns the number of threads the compiler actually targeted. This restricts the resource utilization of the compiler (e.g. max registers) such that a block with the given number of threads should be able to launch based on register limitations. Note, this option does not currently take into account any other resource limitations, such as shared memory utilization. - - Cannot be combined with :py:obj:`~.CU_JIT_TARGET`. - - Option type: unsigned int - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_WALL_TIME - - - Overwrites the option value with the total wall clock time, in milliseconds, spent in the compiler and linker - - Option type: float - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_INFO_LOG_BUFFER - - - Pointer to a buffer in which to print any log messages that are informational in nature (the buffer size is specified via option :py:obj:`~.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES`) - - Option type: char * - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES - - - IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator) - - OUT: Amount of log buffer filled with messages - - Option type: unsigned int - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_ERROR_LOG_BUFFER - - - Pointer to a buffer in which to print any log messages that reflect errors (the buffer size is specified via option :py:obj:`~.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES`) - - Option type: char * - - Applies to: compiler and linker - - - .. 
autoattribute:: cuda.cuda.CUjit_option.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES - - - IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator) - - OUT: Amount of log buffer filled with messages - - Option type: unsigned int - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_OPTIMIZATION_LEVEL - - - Level of optimizations to apply to generated code (0 - 4), with 4 being the default and highest level of optimizations. - - Option type: unsigned int - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_TARGET_FROM_CUCONTEXT - - - No option value required. Determines the target based on the current attached context (default) - - Option type: No option value needed - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_TARGET - - - Target is chosen based on supplied :py:obj:`~.CUjit_target`. Cannot be combined with :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`. - - Option type: unsigned int for enumerated type :py:obj:`~.CUjit_target` - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FALLBACK_STRATEGY - - - Specifies choice of fallback strategy if a matching cubin is not found. Choice is based on supplied :py:obj:`~.CUjit_fallback`. This option cannot be used with cuLink* APIs as the linker requires exact matches. - - Option type: unsigned int for enumerated type :py:obj:`~.CUjit_fallback` - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GENERATE_DEBUG_INFO - - - Specifies whether to create debug information in output (-g) (0: false, default) - - Option type: int - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_LOG_VERBOSE - - - Generate verbose log messages (0: false, default) - - Option type: int - - Applies to: compiler and linker - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GENERATE_LINE_INFO - - - Generate line number information (-lineinfo) (0: false, default) - - Option type: int - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_CACHE_MODE - - - Specifies whether to enable caching explicitly (-dlcm) - - Choice is based on supplied :py:obj:`~.CUjit_cacheMode_enum`. - - Option type: unsigned int for enumerated type :py:obj:`~.CUjit_cacheMode_enum` - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_NEW_SM3X_OPT - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FAST_COMPILE - - - This JIT option is used for internal purposes only. - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GLOBAL_SYMBOL_NAMES - - - Array of device symbol names that will be relocated to the corresponding host addresses stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES`. - - Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries. - - When loading a device module, the driver will relocate all encountered unresolved symbols to the host addresses. - - It is only allowed to register symbols that correspond to unresolved global variables. - - It is illegal to register the same device symbol at multiple addresses. - - Option type: const char ** - - Applies to: dynamic linker only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GLOBAL_SYMBOL_ADDRESSES - - - Array of host addresses that will be used to relocate corresponding device symbols stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES`. - - Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries. 
- - Option type: void ** - - Applies to: dynamic linker only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_GLOBAL_SYMBOL_COUNT - - - Number of entries in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES` and :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES` arrays. - - Option type: unsigned int - - Applies to: dynamic linker only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_LTO - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FTZ - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_PREC_DIV - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_PREC_SQRT - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_FMA - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_KERNEL_NAMES - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_KERNEL_COUNT - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_VARIABLE_NAMES - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_REFERENCED_VARIABLE_COUNT - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_POSITION_INDEPENDENT_CODE - - - Generate position independent code (0: false) - - Option type: int - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_MIN_CTA_PER_SM - - - This option hints to the JIT compiler the minimum number of CTAs from the kernel’s grid to be mapped to an SM. This option is ignored when used together with :py:obj:`~.CU_JIT_MAX_REGISTERS` or :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`. Optimizations based on this option need :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` to be specified as well. For kernels already using PTX directive .minnctapersm, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive. Option type: unsigned int - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_MAX_THREADS_PER_BLOCK - - - Maximum number of threads in a thread block, computed as the product of the maximum extent specified for each dimension of the block. This limit is guaranteed not to be exceeded in any invocation of the kernel. Exceeding the maximum number of threads results in a runtime error or kernel launch failure. For kernels already using PTX directive .maxntid, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive. Option type: int - - Applies to: compiler only - - -
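These options are consumed by the driver's JIT when PTX (here produced with NVRTC) is loaded at runtime; :py:obj:`~.cuModuleLoadDataEx` is the entry point that accepts them. A hedged sketch with an empty option list, so the JIT falls back to its defaults (assuming ``cuda-python``, ``numpy``, and an active context; error handling is elided)::

    import numpy as np
    from cuda import cuda, nvrtc

    src = b'extern "C" __global__ void noop() {}'
    err, prog = nvrtc.nvrtcCreateProgram(src, b"noop.cu", 0, [], [])
    err, = nvrtc.nvrtcCompileProgram(prog, 1, [b"--gpu-architecture=compute_70"])

    err, ptx_size = nvrtc.nvrtcGetPTXSize(prog)
    ptx = b" " * ptx_size
    err, = nvrtc.nvrtcGetPTX(prog, ptx)

    # No explicit options: the JIT derives the target from the current
    # context, as with CU_JIT_TARGET_FROM_CUCONTEXT.
    err, module = cuda.cuModuleLoadDataEx(np.char.array(ptx), 0, [], [])

.. 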
autoattribute:: cuda.cuda.CUjit_option.CU_JIT_OVERRIDE_DIRECTIVE_VALUES - - - This option lets the values specified using :py:obj:`~.CU_JIT_MAX_REGISTERS`, :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`, :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` and :py:obj:`~.CU_JIT_MIN_CTA_PER_SM` take precedence over any PTX directives. (0: Disable, default; 1: Enable) Option type: int - - Applies to: compiler only - - - .. autoattribute:: cuda.cuda.CUjit_option.CU_JIT_NUM_OPTIONS - -.. autoclass:: cuda.cuda.CUjit_target - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_30 - - - Compute device class 3.0 - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_32 - - - Compute device class 3.2 - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_35 - - - Compute device class 3.5 - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_37 - - - Compute device class 3.7 - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_50 - - - Compute device class 5.0 - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_52 - - - Compute device class 5.2 - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_53 - - - Compute device class 5.3 - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_60 - - - Compute device class 6.0. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_61 - - - Compute device class 6.1. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_62 - - - Compute device class 6.2. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_70 - - - Compute device class 7.0. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_72 - - - Compute device class 7.2. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_75 - - - Compute device class 7.5. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_80 - - - Compute device class 8.0. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_86 - - - Compute device class 8.6. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_87 - - - Compute device class 8.7. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_89 - - - Compute device class 8.9. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_90 - - - Compute device class 9.0. - - - .. autoattribute:: cuda.cuda.CUjit_target.CU_TARGET_COMPUTE_90A - - - Compute device class 9.0 with accelerated features. - -.. autoclass:: cuda.cuda.CUjit_fallback - - .. autoattribute:: cuda.cuda.CUjit_fallback.CU_PREFER_PTX - - - Prefer to compile ptx if exact binary match not found - - - .. autoattribute:: cuda.cuda.CUjit_fallback.CU_PREFER_BINARY - - - Prefer to fall back to compatible binary code if exact match not found - -.. autoclass:: cuda.cuda.CUjit_cacheMode - - .. autoattribute:: cuda.cuda.CUjit_cacheMode.CU_JIT_CACHE_OPTION_NONE - - - Compile with no -dlcm flag specified - - - .. autoattribute:: cuda.cuda.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CG - - - Compile with L1 cache disabled - - - .. autoattribute:: cuda.cuda.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CA - - - Compile with L1 cache enabled - -.. autoclass:: cuda.cuda.CUjitInputType - - .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_CUBIN - - - Compiled device-class-specific device code - - Applicable options: none - - - .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_PTX - - - PTX source code - - Applicable options: PTX compiler options - - - .. 
autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_FATBINARY - - - Bundle of multiple cubins and/or PTX of some device code - - Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` - - - .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_OBJECT - - - Host object with embedded device code - - Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` - - - .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_LIBRARY - - - Archive of host objects with embedded device code - - Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` - - - .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_INPUT_NVVM - - - [Deprecated] - - - - Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 - - - .. autoattribute:: cuda.cuda.CUjitInputType.CU_JIT_NUM_INPUT_TYPES - -.. autoclass:: cuda.cuda.CUgraphicsRegisterFlags - - .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_NONE - - - .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY - - - .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD - - - .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST - - - .. autoattribute:: cuda.cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER - -.. autoclass:: cuda.cuda.CUgraphicsMapResourceFlags - - .. autoattribute:: cuda.cuda.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE - - - .. autoattribute:: cuda.cuda.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY - - - .. autoattribute:: cuda.cuda.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD - -.. autoclass:: cuda.cuda.CUarray_cubemap_face - - .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_X - - - Positive X face of cubemap - - - .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_X - - - Negative X face of cubemap - - - .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Y - - - Positive Y face of cubemap - - - .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Y - - - Negative Y face of cubemap - - - .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Z - - - Positive Z face of cubemap - - - .. autoattribute:: cuda.cuda.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Z - - - Negative Z face of cubemap - -.. autoclass:: cuda.cuda.CUlimit - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_STACK_SIZE - - - GPU thread stack size - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE - - - GPU printf FIFO size - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_MALLOC_HEAP_SIZE - - - GPU malloc heap size - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH - - - GPU device runtime launch synchronize depth - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT - - - GPU device runtime pending launch count - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_MAX_L2_FETCH_GRANULARITY - - - A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_PERSISTING_L2_CACHE_SIZE - - - A size in bytes for L2 persisting lines cache size - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_SHMEM_SIZE - - - A maximum size in bytes of shared memory available to CUDA kernels on a CIG context. 
Can only be queried, cannot be set - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_CIG_ENABLED - - - A non-zero value indicates this CUDA context is a CIG-enabled context. Can only be queried, cannot be set - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED - - - When set to a non-zero value, CUDA will fail to launch a kernel on a CIG context, instead of using the fallback path, if the kernel uses more shared memory than available - - - .. autoattribute:: cuda.cuda.CUlimit.CU_LIMIT_MAX - -.. autoclass:: cuda.cuda.CUresourcetype - - .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_ARRAY - - - Array resource - - - .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY - - - Mipmapped array resource - - - .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_LINEAR - - - Linear resource - - - .. autoattribute:: cuda.cuda.CUresourcetype.CU_RESOURCE_TYPE_PITCH2D - - - Pitch 2D resource - -.. autoclass:: cuda.cuda.CUaccessProperty - - .. autoattribute:: cuda.cuda.CUaccessProperty.CU_ACCESS_PROPERTY_NORMAL - - - Normal cache persistence. - - - .. autoattribute:: cuda.cuda.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING - - - Streaming access is less likely to persist in cache. - - - .. autoattribute:: cuda.cuda.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING - - - Persisting access is more likely to persist in cache. - -.. autoclass:: cuda.cuda.CUgraphConditionalNodeType - - .. autoattribute:: cuda.cuda.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_IF - - - Conditional 'if' Node. Body executed once if condition value is non-zero. - - - .. autoattribute:: cuda.cuda.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_WHILE - - - Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. - -.. autoclass:: cuda.cuda.CUgraphNodeType - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_KERNEL - - - GPU kernel node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMCPY - - - Memcpy node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMSET - - - Memset node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_HOST - - - Host (executable) node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_GRAPH - - - Node which executes an embedded graph - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EMPTY - - - Empty (no-op) node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_WAIT_EVENT - - - External event wait node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD - - - External event record node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL - - - External semaphore signal node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT - - - External semaphore wait node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_ALLOC - - - Memory Allocation Node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_FREE - - - Memory Free Node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_BATCH_MEM_OP - - - Batch MemOp Node - - - .. autoattribute:: cuda.cuda.CUgraphNodeType.CU_GRAPH_NODE_TYPE_CONDITIONAL - - - Conditional Node. May be used to implement a conditional execution path or loop - - inside of a graph. 
The graph(s) contained within the body of the conditional node - - can be selectively executed or iterated upon based on the value of a conditional - - variable. - - - - Handles must be created in advance of creating the node - - using :py:obj:`~.cuGraphConditionalHandleCreate`. - - - - The following restrictions apply to graphs which contain conditional nodes: - - The graph cannot be used in a child node. - - Only one instantiation of the graph may exist at any point in time. - - The graph cannot be cloned. - - - - To set the control value, supply a default value when creating the handle and/or - - call :py:obj:`~.cudaGraphSetConditional` from device code. - -.. autoclass:: cuda.cuda.CUgraphDependencyType - - .. autoattribute:: cuda.cuda.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_DEFAULT - - - This is an ordinary dependency. - - - .. autoattribute:: cuda.cuda.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC - - - This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC` or :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER` outgoing port. - -.. autoclass:: cuda.cuda.CUgraphInstantiateResult - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_SUCCESS - - - Instantiation succeeded - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_ERROR - - - Instantiation failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE - - - Instantiation failed due to invalid structure, such as cycles - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED - - - Instantiation for device launch failed because the graph contained an unsupported operation - - - .. autoattribute:: cuda.cuda.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED - - - Instantiation for device launch failed due to the nodes belonging to different contexts - -.. autoclass:: cuda.cuda.CUsynchronizationPolicy - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_AUTO - - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_SPIN - - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_YIELD - - - .. autoattribute:: cuda.cuda.CUsynchronizationPolicy.CU_SYNC_POLICY_BLOCKING_SYNC - -.. autoclass:: cuda.cuda.CUclusterSchedulingPolicy - - .. autoattribute:: cuda.cuda.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_DEFAULT - - - the default policy - - - .. autoattribute:: cuda.cuda.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_SPREAD - - - spread the blocks within a cluster to the SMs - - - .. autoattribute:: cuda.cuda.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING - - - allow the hardware to load-balance the blocks in a cluster to the SMs - -.. autoclass:: cuda.cuda.CUlaunchMemSyncDomain - - .. autoattribute:: cuda.cuda.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT - - - Launch kernels in the default domain - - - .. autoattribute:: cuda.cuda.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE - - - Launch kernels in the remote domain - -.. autoclass:: cuda.cuda.CUlaunchAttributeID - - .. 
autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_IGNORE - - - Ignored entry, for convenient composition - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.accessPolicyWindow`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_COOPERATIVE - - - Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.cooperative`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY - - - Valid for streams. See :py:obj:`~.CUlaunchAttributeValue.syncPolicy`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION - - - Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterDim`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE - - - Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterSchedulingPolicyPreference`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION - - - Valid for launches. Setting :py:obj:`~.CUlaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT - - - Valid for launches. Set :py:obj:`~.CUlaunchAttributeValue.programmaticEvent` to record the event. The event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cuEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cuEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.priority`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomainMap`. - - -
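For illustration, a hedged sketch of attaching one of these attributes to a launch through a :py:obj:`~.CUlaunchConfig`, assuming the ``cuda-python`` bindings plus an already-loaded ``kernel`` (:py:obj:`~.CUfunction`), a ``stream``, and packed ``args``, all hypothetical here::

    from cuda import cuda

    # Request a 2x1x1 thread-block cluster for this launch.
    attr = cuda.CUlaunchAttribute()
    attr.id = cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION
    attr.value.clusterDim.x = 2
    attr.value.clusterDim.y = 1
    attr.value.clusterDim.z = 1

    config = cuda.CUlaunchConfig()
    config.gridDimX = 4  # the grid must be a whole number of clusters
    config.gridDimY = config.gridDimZ = 1
    config.blockDimX = 128
    config.blockDimY = config.blockDimZ = 1
    config.hStream = stream
    config.attrs = [attr]
    config.numAttrs = 1

    # err, = cuda.cuLaunchKernelEx(config, kernel, args, 0)

.. 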
autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN - - - Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomain`. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT - - - Valid for launches. Set :py:obj:`~.CUlaunchAttributeValue.launchCompletionEvent` to record the event. - - Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. - - A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE - - - Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. - - :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`. - - Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cuGraphDestroyNode`. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cuGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cuGraphExecUpdate`. - - If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cuGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. - - - .. autoattribute:: cuda.cuda.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT - - - Valid for launches. 
On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.CUlaunchAttributeValue.sharedMemCarveout` to a percentage between 0 and 100 signals the CUDA driver to set the shared memory carveout preference, in percent of the total shared memory for that kernel launch. This attribute takes precedence over :py:obj:`~.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT`. This is only a hint, and the CUDA driver can choose a different configuration if required for the launch. - -.. autoclass:: cuda.cuda.CUstreamCaptureStatus - - .. autoattribute:: cuda.cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_NONE - - - Stream is not capturing - - - .. autoattribute:: cuda.cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE - - - Stream is actively capturing - - - .. autoattribute:: cuda.cuda.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_INVALIDATED - - - Stream is part of a capture sequence that has been invalidated, but not terminated - -.. autoclass:: cuda.cuda.CUstreamCaptureMode - - .. autoattribute:: cuda.cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL - - - .. autoattribute:: cuda.cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_THREAD_LOCAL - - - .. autoattribute:: cuda.cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_RELAXED - -.. autoclass:: cuda.cuda.CUdriverProcAddress_flags - - .. autoattribute:: cuda.cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT - - - Default search mode for driver symbols. - - - .. autoattribute:: cuda.cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_LEGACY_STREAM - - - Search for legacy versions of driver symbols. - - - .. autoattribute:: cuda.cuda.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM - - - Search for per-thread versions of driver symbols. - -.. autoclass:: cuda.cuda.CUdriverProcAddressQueryResult - - .. autoattribute:: cuda.cuda.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS - - - Symbol was successfully found - - - .. autoattribute:: cuda.cuda.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND - - - Symbol was not found in search - - - .. autoattribute:: cuda.cuda.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT - - - Symbol was found but the version supplied was not sufficient - -.. autoclass:: cuda.cuda.CUexecAffinityType - - .. autoattribute:: cuda.cuda.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT - - - Create a context with limited SMs. - - - .. autoattribute:: cuda.cuda.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_MAX - -.. autoclass:: cuda.cuda.CUcigDataType - - .. autoattribute:: cuda.cuda.CUcigDataType.CIG_DATA_TYPE_D3D12_COMMAND_QUEUE - -.. autoclass:: cuda.cuda.CUlibraryOption - - .. autoattribute:: cuda.cuda.CUlibraryOption.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE - - - .. autoattribute:: cuda.cuda.CUlibraryOption.CU_LIBRARY_BINARY_IS_PRESERVED - - - Specifies that the argument `code` passed to :py:obj:`~.cuLibraryLoadData()` will be preserved. Specifying this option will let the driver know that `code` can be accessed at any point until :py:obj:`~.cuLibraryUnload()`. The default behavior is for the driver to allocate and maintain its own copy of `code`. Note that this is only a memory usage optimization hint and the driver can choose to ignore it if required. Specifying this option with :py:obj:`~.cuLibraryLoadFromFile()` is invalid and will return :py:obj:`~.CUDA_ERROR_INVALID_VALUE`. - - - .. autoattribute:: cuda.cuda.CUlibraryOption.CU_LIBRARY_NUM_OPTIONS - -.. autoclass:: cuda.cuda.CUresult - -
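Since every driver entry point reports one of the :py:obj:`~.CUresult` codes below, Python callers typically funnel each call through a small checker; a minimal sketch (assuming the ``cuda-python`` bindings)::

    from cuda import cuda

    def check(call_result):
        """Split the (CUresult, values...) tuple and raise on failure."""
        err, *rest = call_result
        if err != cuda.CUresult.CUDA_SUCCESS:
            _, name = cuda.cuGetErrorName(err)
            _, desc = cuda.cuGetErrorString(err)
            raise RuntimeError(f"{name.decode()}: {desc.decode()}")
        return rest[0] if len(rest) == 1 else rest

    check(cuda.cuInit(0))
    dev = check(cuda.cuDeviceGet(0))

.. 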
autoattribute:: cuda.cuda.CUresult.CUDA_SUCCESS - - - The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`). - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_VALUE - - - This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_OUT_OF_MEMORY - - - The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_INITIALIZED - - - This indicates that the CUDA driver has not been initialized with :py:obj:`~.cuInit()` or that initialization has failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_DEINITIALIZED - - - This indicates that the CUDA driver is in the process of shutting down. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_DISABLED - - - This indicates the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like Visual Profiler. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_NOT_INITIALIZED - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_ALREADY_STARTED - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PROFILER_ALREADY_STOPPED - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STUB_LIBRARY - - - This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in the CUDA API returning this error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_DEVICE_UNAVAILABLE - - - This indicates that the requested CUDA device is unavailable at the current time. Devices are often unavailable due to use of :py:obj:`~.CU_COMPUTEMODE_EXCLUSIVE_PROCESS` or :py:obj:`~.CU_COMPUTEMODE_PROHIBITED`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NO_DEVICE - - - This indicates that no CUDA-capable devices were detected by the installed CUDA driver. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_DEVICE - - - This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_DEVICE_NOT_LICENSED - - - This error indicates that the Grid license is not applied. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_IMAGE - - - This indicates that the device kernel image is invalid. This can also indicate an invalid CUDA module. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_CONTEXT - - - This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. This can also be returned if the green context passed to an API call was not converted to a :py:obj:`~.CUcontext` using the :py:obj:`~.cuCtxFromGreenCtx` API. - - - .. 
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CONTEXT_ALREADY_CURRENT - - - This indicates that the context being supplied as a parameter to the API call was already the active context. [Deprecated] - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MAP_FAILED - - - This indicates that a map or register operation has failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNMAP_FAILED - - - This indicates that an unmap or unregister operation has failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ARRAY_IS_MAPPED - - - This indicates that the specified array is currently mapped and thus cannot be destroyed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ALREADY_MAPPED - - - This indicates that the resource is already mapped. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NO_BINARY_FOR_GPU - - - This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ALREADY_ACQUIRED - - - This indicates that a resource has already been acquired. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_MAPPED - - - This indicates that a resource is not mapped. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_MAPPED_AS_ARRAY - - - This indicates that a mapped resource is not available for access as an array. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_MAPPED_AS_POINTER - - - This indicates that a mapped resource is not available for access as a pointer. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ECC_UNCORRECTABLE - - - This indicates that an uncorrectable ECC error was detected during execution. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_LIMIT - - - This indicates that the :py:obj:`~.CUlimit` passed to the API call is not supported by the active device. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CONTEXT_ALREADY_IN_USE - - - This indicates that the :py:obj:`~.CUcontext` passed to the API call can only be bound to a single CPU thread at a time but is already bound to a CPU thread. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PEER_ACCESS_UNSUPPORTED - - - This indicates that peer access is not supported across the given devices. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_PTX - - - This indicates that a PTX JIT compilation failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_GRAPHICS_CONTEXT - - - This indicates an error with OpenGL or DirectX context. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NVLINK_UNCORRECTABLE - - - This indicates that an uncorrectable NVLink error was detected during the execution. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_JIT_COMPILER_NOT_FOUND - - - This indicates that the PTX JIT compiler library was not found. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_PTX_VERSION - - - This indicates that the provided PTX was compiled with an unsupported toolchain. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_JIT_COMPILATION_DISABLED - - - This indicates that the PTX JIT compilation was disabled. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY - - - This indicates that the :py:obj:`~.CUexecAffinityType` passed to the API call is not supported by the active device. - - - .. 
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC - - - This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_SOURCE - - - This indicates that the device kernel source is invalid. This includes compilation/linker errors encountered in device code or user error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_FILE_NOT_FOUND - - - This indicates that the file specified was not found. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND - - - This indicates that a link to a shared object failed to resolve. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - - - This indicates that initialization of a shared object failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_OPERATING_SYSTEM - - - This indicates that an OS call failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_HANDLE - - - This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like :py:obj:`~.CUstream` and :py:obj:`~.CUevent`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ILLEGAL_STATE - - - This indicates that a resource required by the API call is not in a valid state to perform the requested operation. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LOSSY_QUERY - - - This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or the omission of optional return arguments. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_FOUND - - - This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_READY - - - This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than :py:obj:`~.CUDA_SUCCESS` (which indicates completion). Calls that may return this value include :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ILLEGAL_ADDRESS - - - While executing a kernel, the device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES - - - This indicates that a launch did not occur because it did not have appropriate resources. This error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. Passing arguments of the wrong size (i.e. a 64-bit pointer when a 32-bit int is expected) is equivalent to passing too many arguments and can also result in this error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_TIMEOUT - - - This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT` for more information. 
This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING - - - This error indicates a kernel launch that uses an incompatible texturing mode. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED - - - This error indicates that a call to :py:obj:`~.cuCtxEnablePeerAccess()` is trying to re-enable peer access to a context which has already had peer access to it enabled. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PEER_ACCESS_NOT_ENABLED - - - This error indicates that :py:obj:`~.cuCtxDisablePeerAccess()` is trying to disable peer access which has not been enabled yet via :py:obj:`~.cuCtxEnablePeerAccess()`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE - - - This error indicates that the primary context for the specified device has already been initialized. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CONTEXT_IS_DESTROYED - - - This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ASSERT - - - A device-side assert triggered during kernel execution. The context cannot be used anymore, and must be destroyed. All existing device memory allocations from this context are invalid and must be reconstructed if the program is to continue using CUDA. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_TOO_MANY_PEERS - - - This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cuCtxEnablePeerAccess()`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED - - - This error indicates that the memory range passed to :py:obj:`~.cuMemHostRegister()` has already been registered. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED - - - This error indicates that the pointer passed to :py:obj:`~.cuMemHostUnregister()` does not correspond to any currently registered memory region. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_HARDWARE_STACK_ERROR - - - While executing a kernel, the device encountered a stack error. This can be due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_ILLEGAL_INSTRUCTION - - - While executing a kernel, the device encountered an illegal instruction. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MISALIGNED_ADDRESS - - - While executing a kernel, the device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. 
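Since :py:obj:`~.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED` above reports a benign, idempotent condition, callers often tolerate it explicitly. A minimal sketch, assuming ``peer_ctx`` is a valid :py:obj:`~.CUcontext` for the peer device:

.. code-block:: python

    from cuda import cuda

    err, = cuda.cuCtxEnablePeerAccess(peer_ctx, 0)  # Flags must currently be 0
    if err not in (cuda.CUresult.CUDA_SUCCESS,
                   cuda.CUresult.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED):
        raise RuntimeError(err)

..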
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_ADDRESS_SPACE - - - While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_PC - - - While executing a kernel, the device program counter wrapped its address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_LAUNCH_FAILED - - - An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE - - - This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cuLaunchCooperativeKernel` or :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT`. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_PERMITTED - - - This error indicates that the attempted operation is not permitted. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_NOT_SUPPORTED - - - This error indicates that the attempted operation is not supported on the current system or device. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SYSTEM_NOT_READY - - - This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH - - - This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE - - - This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_CONNECTION_FAILED - - - This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. - - - .. 
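As the :py:obj:`~.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE` description above notes, device visibility can be restricted through the CUDA_VISIBLE_DEVICES environment variable; it must be set before the driver is initialized. A minimal sketch:

.. code-block:: python

    import os
    # Restrict CUDA to the first device; must happen before cuInit().
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    from cuda import cuda
    err, = cuda.cuInit(0)

..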
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_RPC_FAILURE - - - This error indicates that the remote procedure call between the MPS server and the MPS client failed. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_SERVER_NOT_READY - - - This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_MAX_CLIENTS_REACHED - - - This error indicates that the hardware resources required to create an MPS client have been exhausted. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED - - - This error indicates that the hardware resources required to support device connections have been exhausted. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_MPS_CLIENT_TERMINATED - - - This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CDP_NOT_SUPPORTED - - - This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CDP_VERSION_MISMATCH - - - This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED - - - This error indicates that the operation is not permitted when the stream is capturing. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_INVALIDATED - - - This error indicates that the current capture sequence on the stream has been invalidated due to a previous error. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_MERGE - - - This error indicates that the operation would have resulted in a merge of two independent capture sequences. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNMATCHED - - - This error indicates that the capture was not initiated in this stream. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNJOINED - - - This error indicates that the capture sequence contains a fork that was not joined to the primary stream. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_ISOLATION - - - This error indicates that a dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_IMPLICIT - - - This error indicates a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_CAPTURED_EVENT - - - This error indicates that the operation is not permitted on an event which was last recorded in a capturing stream. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD - - - A stream capture sequence not initiated with the :py:obj:`~.CU_STREAM_CAPTURE_MODE_RELAXED` argument to :py:obj:`~.cuStreamBeginCapture` was passed to :py:obj:`~.cuStreamEndCapture` in a different thread. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_TIMEOUT - - - This error indicates that the timeout specified for the wait operation has lapsed. - - -
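The stream-capture errors above are easiest to reason about when the capture mode is explicit. A sketch using relaxed capture, which is what :py:obj:`~.CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD` requires for ending capture from another thread; ``stream`` is assumed to be an existing :py:obj:`~.CUstream`:

.. code-block:: python

    from cuda import cuda

    mode = cuda.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_RELAXED
    err, = cuda.cuStreamBeginCapture(stream, mode)
    # ... enqueue work on `stream` here ...
    err, graph = cuda.cuStreamEndCapture(stream)

..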
autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE - - - This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_EXTERNAL_DEVICE - - - This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_CLUSTER_SIZE - - - Indicates a kernel launch error due to cluster misconfiguration. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_FUNCTION_NOT_LOADED - - - Indicates that a function handle is not loaded when calling an API that requires a loaded function. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_RESOURCE_TYPE - - - This error indicates one or more resources passed in are not valid resource types for the operation. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION - - - This error indicates one or more resources are insufficient or non-applicable for the operation. - - - .. autoattribute:: cuda.cuda.CUresult.CUDA_ERROR_UNKNOWN - - - This indicates that an unknown internal error has occurred. - -.. autoclass:: cuda.cuda.CUdevice_P2PAttribute - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK - - - A relative value indicating the performance of the link between two devices - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED - - - P2P access is enabled - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED - - - Atomic operations over the link are supported - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED - - - [Deprecated] - - - .. autoattribute:: cuda.cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED - - - Accessing CUDA arrays over the link is supported - -.. autoclass:: cuda.cuda.CUresourceViewFormat - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_NONE - - - No resource view format (use underlying resource format) - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X8 - - - 1 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X8 - - - 2 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X8 - - - 4 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X8 - - - 1 channel signed 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X8 - - - 2 channel signed 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X8 - - - 4 channel signed 8-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X16 - - - 1 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X16 - - - 2 channel unsigned 16-bit integers - - -
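A sketch of reading one of the :py:obj:`~.CUdevice_P2PAttribute` values documented above, here the relative link performance between device ordinals 0 and 1:

.. code-block:: python

    from cuda import cuda

    err, rank = cuda.cuDeviceGetP2PAttribute(
        cuda.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK, 0, 1)

..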
autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X16 - - - 4 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X16 - - - 1 channel signed 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X16 - - - 2 channel signed 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X16 - - - 4 channel signed 16-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X32 - - - 1 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X32 - - - 2 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X32 - - - 4 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X32 - - - 1 channel signed 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X32 - - - 2 channel signed 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X32 - - - 4 channel signed 32-bit integers - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X16 - - - 1 channel 16-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X16 - - - 2 channel 16-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X16 - - - 4 channel 16-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X32 - - - 1 channel 32-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X32 - - - 2 channel 32-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X32 - - - 4 channel 32-bit floating point - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC1 - - - Block compressed 1 - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC2 - - - Block compressed 2 - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC3 - - - Block compressed 3 - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC4 - - - Block compressed 4 unsigned - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC4 - - - Block compressed 4 signed - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC5 - - - Block compressed 5 unsigned - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC5 - - - Block compressed 5 signed - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC6H - - - Block compressed 6 unsigned half-float - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC6H - - - Block compressed 6 signed half-float - - - .. autoattribute:: cuda.cuda.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC7 - - - Block compressed 7 - -.. autoclass:: cuda.cuda.CUtensorMapDataType - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT8 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT16 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT32 - - - .. 
autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT32 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT64 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT64 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT16 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT64 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 - - - .. autoattribute:: cuda.cuda.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ - -.. autoclass:: cuda.cuda.CUtensorMapInterleave - - .. autoattribute:: cuda.cuda.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_NONE - - - .. autoattribute:: cuda.cuda.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_16B - - - .. autoattribute:: cuda.cuda.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_32B - -.. autoclass:: cuda.cuda.CUtensorMapSwizzle - - .. autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_NONE - - - .. autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_32B - - - .. autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_64B - - - .. autoattribute:: cuda.cuda.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_128B - -.. autoclass:: cuda.cuda.CUtensorMapL2promotion - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_NONE - - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_64B - - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_128B - - - .. autoattribute:: cuda.cuda.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_256B - -.. autoclass:: cuda.cuda.CUtensorMapFloatOOBfill - - .. autoattribute:: cuda.cuda.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE - - - .. autoattribute:: cuda.cuda.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA - -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS - - .. autoattribute:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE - - - No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations - - - .. autoattribute:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ - - - Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case. - - - .. autoattribute:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE - - - Read-write access, the device has full read-write access to the memory - -.. autoclass:: cuda.cuda.CUexternalMemoryHandleType - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT - - - Handle is an opaque, globally shared handle - - - .. 
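The :py:obj:`~.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS` values above can be read back for a given pointer through :py:obj:`~.cuPointerGetAttribute`. A sketch, assuming ``dptr`` is a valid :py:obj:`~.CUdeviceptr`:

.. code-block:: python

    from cuda import cuda

    err, flags = cuda.cuPointerGetAttribute(
        cuda.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS, dptr)

..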
autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP - - - Handle is a D3D12 heap object - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE - - - Handle is a D3D12 committed resource - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE - - - Handle is a shared NT handle to a D3D11 resource - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT - - - Handle is a globally shared handle to a D3D11 resource - - - .. autoattribute:: cuda.cuda.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF - - - Handle is an NvSciBuf object - -.. autoclass:: cuda.cuda.CUexternalSemaphoreHandleType - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT - - - Handle is an opaque, globally shared handle - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE - - - Handle is a shared NT handle referencing a D3D12 fence object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE - - - Handle is a shared NT handle referencing a D3D11 fence object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC - - - Opaque handle to NvSciSync Object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX - - - Handle is a shared NT handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT - - - Handle is a globally shared handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD - - - Handle is an opaque file descriptor referencing a timeline semaphore - - - .. autoattribute:: cuda.cuda.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 - - - Handle is an opaque shared NT handle referencing a timeline semaphore - -.. autoclass:: cuda.cuda.CUmemAllocationHandleType - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_NONE - - - Does not allow any export mechanism. > - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR - - - Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32 - - - Allows a Win32 NT handle to be used for exporting. (HANDLE) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32_KMT - - - Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_FABRIC - - - Allows a fabric handle to be used for exporting. (CUmemFabricHandle) - - - .. autoattribute:: cuda.cuda.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_MAX - -.. 
autoclass:: cuda.cuda.CUmemAccess_flags - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_NONE - - - Default, make the address range not accessible - - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READ - - - Make the address range read accessible - - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE - - - Make the address range read-write accessible - - - .. autoattribute:: cuda.cuda.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_MAX - -.. autoclass:: cuda.cuda.CUmemLocationType - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_INVALID - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE - - - Location is a device location, thus id is a device ordinal - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST - - - Location is host, id is ignored - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA - - - Location is a host NUMA node, thus id is a host NUMA node id - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT - - - Location is a host NUMA node of the current thread, id is ignored - - - .. autoattribute:: cuda.cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_MAX - -.. autoclass:: cuda.cuda.CUmemAllocationType - - .. autoattribute:: cuda.cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_INVALID - - - .. autoattribute:: cuda.cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED - - - This allocation type is 'pinned', i.e. cannot migrate from its current location while the application is actively using it - - - .. autoattribute:: cuda.cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_MAX - -.. autoclass:: cuda.cuda.CUmemAllocationGranularity_flags - - .. autoattribute:: cuda.cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM - - - Minimum required granularity for allocation - - - .. autoattribute:: cuda.cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED - - - Recommended granularity for allocation for best performance - -.. autoclass:: cuda.cuda.CUmemRangeHandleType - - .. autoattribute:: cuda.cuda.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD - - - .. autoattribute:: cuda.cuda.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_MAX - -.. autoclass:: cuda.cuda.CUarraySparseSubresourceType - - .. autoattribute:: cuda.cuda.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL - - - .. autoattribute:: cuda.cuda.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL - -.. autoclass:: cuda.cuda.CUmemOperationType - - .. autoattribute:: cuda.cuda.CUmemOperationType.CU_MEM_OPERATION_TYPE_MAP - - - .. autoattribute:: cuda.cuda.CUmemOperationType.CU_MEM_OPERATION_TYPE_UNMAP - -.. autoclass:: cuda.cuda.CUmemHandleType - - .. autoattribute:: cuda.cuda.CUmemHandleType.CU_MEM_HANDLE_TYPE_GENERIC - -.. autoclass:: cuda.cuda.CUmemAllocationCompType - - .. autoattribute:: cuda.cuda.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_NONE - - - Allocating non-compressible memory - - - .. autoattribute:: cuda.cuda.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_GENERIC - - - Allocating compressible memory - -.. autoclass:: cuda.cuda.CUmulticastGranularity_flags - - .. autoattribute:: cuda.cuda.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_MINIMUM - - - Minimum required granularity - - - .. 
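The location, allocation-type, and granularity enums above combine in the virtual memory management APIs. A sketch querying the minimum granularity for a pinned device allocation on device 0:

.. code-block:: python

    from cuda import cuda

    prop = cuda.CUmemAllocationProp()
    prop.type = cuda.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = cuda.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0
    err, granularity = cuda.cuMemGetAllocationGranularity(
        prop, cuda.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)

..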
autoattribute:: cuda.cuda.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_RECOMMENDED - - - Recommended granularity for best performance - -.. autoclass:: cuda.cuda.CUgraphExecUpdateResult - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_SUCCESS - - - The update succeeded - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR - - - The update failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED - - - The update failed because the topology changed - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED - - - The update failed because a node type changed - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED - - - The update failed because the function of a kernel node changed (CUDA driver < 11.2) - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED - - - The update failed because the parameters changed in a way that is not supported - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED - - - The update failed because something about the node is not supported - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE - - - The update failed because the function of a kernel node changed in an unsupported way - - - .. autoattribute:: cuda.cuda.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED - - - The update failed because the node attributes changed in a way that is not supported - -.. autoclass:: cuda.cuda.CUmemPool_attribute - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES - - - (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC - - - (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES - - - (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync (default enabled). - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD - - - (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. - - -
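A sketch of raising :py:obj:`~.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD` on the default pool of device 0, so freed memory is cached rather than returned to the OS at the next synchronization point:

.. code-block:: python

    from cuda import cuda

    err, pool = cuda.cuDeviceGetDefaultMemPool(0)
    threshold = cuda.cuuint64_t(64 * 1024 * 1024)  # hold up to 64 MiB
    err, = cuda.cuMemPoolSetAttribute(
        pool, cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, threshold)

..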
autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. - - - .. autoattribute:: cuda.cuda.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. - -.. autoclass:: cuda.cuda.CUgraphMem_attribute - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs - - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. - - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT - - - (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - - - .. autoattribute:: cuda.cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH - - - (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - -.. autoclass:: cuda.cuda.CUflushGPUDirectRDMAWritesOptions - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST - - - :py:obj:`~.cuFlushGPUDirectRDMAWrites()` and its CUDA Runtime API counterpart are supported on the device. - - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS - - - The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. - -.. autoclass:: cuda.cuda.CUGPUDirectRDMAWritesOrdering - - .. autoattribute:: cuda.cuda.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE - - - The device does not natively support ordering of remote writes. :py:obj:`~.cuFlushGPUDirectRDMAWrites()` can be leveraged if supported. - - - .. autoattribute:: cuda.cuda.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER - - - Natively, the device can consistently consume remote writes, although other CUDA devices may not. - - - .. autoattribute:: cuda.cuda.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES - - - Any CUDA device in the system can consistently consume remote writes to this device. - -.. autoclass:: cuda.cuda.CUflushGPUDirectRDMAWritesScope - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER - - - Blocks until remote writes are visible to the CUDA device context owning the data. - - - .. autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES - - - Blocks until remote writes are visible to all CUDA device contexts. - -.. autoclass:: cuda.cuda.CUflushGPUDirectRDMAWritesTarget - - .. 
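The graph memory counters above can be queried per device; a sketch reading the bytes currently associated with graphs on device 0:

.. code-block:: python

    from cuda import cuda

    err, used = cuda.cuDeviceGetGraphMemAttribute(
        0, cuda.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT)

..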
autoattribute:: cuda.cuda.CUflushGPUDirectRDMAWritesTarget.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX - - - Sets the target for :py:obj:`~.cuFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. - -.. autoclass:: cuda.cuda.CUgraphDebugDot_flags - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE - - - Output all debug data as if every debug flag is enabled - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES - - - Use CUDA Runtime structures for output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS - - - Adds CUDA_KERNEL_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS - - - Adds CUDA_MEMCPY3D values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS - - - Adds CUDA_MEMSET_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS - - - Adds CUDA_HOST_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS - - - Adds CUevent handle from record and wait nodes to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS - - - Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS - - - Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES - - - Adds CUkernelNodeAttrValue values to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES - - - Adds node handles and every kernel function handle to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS - - - Adds memory alloc node parameters to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS - - - Adds memory free node parameters to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS - - - Adds batch mem op node parameters to output - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO - - - Adds edge numbering information - - - .. autoattribute:: cuda.cuda.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS - - - Adds conditional node parameters to output - -.. autoclass:: cuda.cuda.CUuserObject_flags - - .. autoattribute:: cuda.cuda.CUuserObject_flags.CU_USER_OBJECT_NO_DESTRUCTOR_SYNC - - - Indicates the destructor execution is not synchronized by any CUDA handle. - -.. autoclass:: cuda.cuda.CUuserObjectRetain_flags - - .. autoattribute:: cuda.cuda.CUuserObjectRetain_flags.CU_GRAPH_USER_OBJECT_MOVE - - - Transfer references from the caller rather than creating new references. - -.. autoclass:: cuda.cuda.CUgraphInstantiate_flags - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH - - - Automatically free memory allocated in a graph before relaunching. - - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD - - - Automatically upload the graph after instantiation. 
Only supported by :py:obj:`~.cuGraphInstantiateWithParams`. The upload will be performed using the stream provided in `instantiateParams`. - - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH - - - Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. - - - .. autoattribute:: cuda.cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY - - - Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. - -.. autoclass:: cuda.cuda.CUdeviceNumaConfig - - .. autoattribute:: cuda.cuda.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NONE - - - The GPU is not a NUMA node - - - .. autoattribute:: cuda.cuda.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NUMA_NODE - - - The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID - -.. autoclass:: cuda.cuda.CUeglFrameType - - .. autoattribute:: cuda.cuda.CUeglFrameType.CU_EGL_FRAME_TYPE_ARRAY - - - Frame type CUDA array - - - .. autoattribute:: cuda.cuda.CUeglFrameType.CU_EGL_FRAME_TYPE_PITCH - - - Frame type pointer - -.. autoclass:: cuda.cuda.CUeglResourceLocationFlags - - .. autoattribute:: cuda.cuda.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_SYSMEM - - - Resource location sysmem - - - .. autoattribute:: cuda.cuda.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_VIDMEM - - - Resource location vidmem - -.. autoclass:: cuda.cuda.CUeglColorFormat - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR - - - Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGB - - - R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGR - - - R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ARGB - - - R/G/B/A four channels in one surface with BGRA byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGBA - - - R/G/B/A four channels in one surface with ABGR byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_L - - - single luminance channel in one surface. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_R - - - single color channel in one surface. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR - - - Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. 
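A sketch of instantiating an existing graph with one of the :py:obj:`~.CUgraphInstantiate_flags` values described above; ``graph`` is assumed to be a built :py:obj:`~.CUgraph`:

.. code-block:: python

    from cuda import cuda

    err, graph_exec = cuda.cuGraphInstantiateWithFlags(
        graph, cuda.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH)

..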
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_422 - - - Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_422 - - - Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ABGR - - - R/G/B/A four channels in one surface with RGBA byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGRA - - - R/G/B/A four channels in one surface with ARGB byte ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_A - - - Alpha color format - one channel in one surface. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RG - - - R/G color format - two channels in one surface with GR byte ordering - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV - - - Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY_ER - - - Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_ER - - - Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_ER - - - Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU_ER - - - Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV_ER - - - Extended Range Y, U, V three channels in one surface, interleaved as VUY. 
Only pitch linear format supported. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER - - - Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER - - - Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RGGB - - - Bayer format - one channel in one surface with interleaved RGGB ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BGGR - - - Bayer format - one channel in one surface with interleaved BGGR ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GRBG - - - Bayer format - one channel in one surface with interleaved GRBG ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GBRG - - - Bayer format - one channel in one surface with interleaved GBRG ordering. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_RGGB - - - Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_BGGR - - - Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GRBG - - - Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GBRG - - - Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RGGB - - - Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BGGR - - - Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GRBG - - - Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GBRG - - - Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_RGGB - - - Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_BGGR - - - Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GRBG - - - Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GBRG - - - Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_RGGB - - - Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_BGGR - - - Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GRBG - - - Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GBRG - - - Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR - - - Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BCCR - - - Bayer format - one channel in one surface with interleaved BCCR ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RCCB - - - Bayer format - one channel in one surface with interleaved RCCB ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CRBC - - - Bayer format - one channel in one surface with interleaved CRBC ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CBRC - - - Bayer format - one channel in one surface with interleaved CBRC ordering. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_CCCC - - - Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BCCR - - - Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RCCB - - - Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CRBC - - - Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CBRC - - - Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CCCC - - - Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y - - - Color format for single Y plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 - - - Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 - - - Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 - - - Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 - - - Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 - - - Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR - - - Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 - - - Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_ER - - - Extended Range Color format for single Y plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_709_ER - - - Extended Range Color format for single Y plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_709_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_709_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA - - - Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV - - - Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. - - - .. 
autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU - - - Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY - - - Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER - - - Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER - - - Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cuda.CUeglColorFormat.CU_EGL_COLOR_FORMAT_MAX - -.. autoclass:: cuda.cuda.CUdeviceptr_v2 -.. autoclass:: cuda.cuda.CUdeviceptr -.. autoclass:: cuda.cuda.CUdevice_v1 -.. autoclass:: cuda.cuda.CUdevice -.. autoclass:: cuda.cuda.CUcontext -.. autoclass:: cuda.cuda.CUmodule -.. autoclass:: cuda.cuda.CUfunction -.. autoclass:: cuda.cuda.CUlibrary -.. autoclass:: cuda.cuda.CUkernel -.. autoclass:: cuda.cuda.CUarray -.. autoclass:: cuda.cuda.CUmipmappedArray -.. autoclass:: cuda.cuda.CUtexref -.. autoclass:: cuda.cuda.CUsurfref -.. autoclass:: cuda.cuda.CUevent -.. autoclass:: cuda.cuda.CUstream -.. autoclass:: cuda.cuda.CUgraphicsResource -.. autoclass:: cuda.cuda.CUtexObject_v1 -.. autoclass:: cuda.cuda.CUtexObject -.. autoclass:: cuda.cuda.CUsurfObject_v1 -.. autoclass:: cuda.cuda.CUsurfObject -.. autoclass:: cuda.cuda.CUexternalMemory -.. autoclass:: cuda.cuda.CUexternalSemaphore -.. autoclass:: cuda.cuda.CUgraph -.. autoclass:: cuda.cuda.CUgraphNode -.. autoclass:: cuda.cuda.CUgraphExec -.. autoclass:: cuda.cuda.CUmemoryPool -.. autoclass:: cuda.cuda.CUuserObject -.. autoclass:: cuda.cuda.CUgraphConditionalHandle -.. autoclass:: cuda.cuda.CUgraphDeviceNode -.. autoclass:: cuda.cuda.CUasyncCallbackHandle -.. autoclass:: cuda.cuda.CUgreenCtx -.. autoclass:: cuda.cuda.CUuuid -.. autoclass:: cuda.cuda.CUmemFabricHandle_v1 -.. autoclass:: cuda.cuda.CUmemFabricHandle -.. autoclass:: cuda.cuda.CUipcEventHandle_v1 -.. autoclass:: cuda.cuda.CUipcEventHandle -.. 
autoclass:: cuda.cuda.CUipcMemHandle_v1 -.. autoclass:: cuda.cuda.CUipcMemHandle -.. autoclass:: cuda.cuda.CUstreamBatchMemOpParams_v1 -.. autoclass:: cuda.cuda.CUstreamBatchMemOpParams -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUasyncNotificationInfo -.. autoclass:: cuda.cuda.CUasyncCallback -.. autoclass:: cuda.cuda.CUdevprop_v1 -.. autoclass:: cuda.cuda.CUdevprop -.. autoclass:: cuda.cuda.CUlinkState -.. autoclass:: cuda.cuda.CUhostFn -.. autoclass:: cuda.cuda.CUaccessPolicyWindow_v1 -.. autoclass:: cuda.cuda.CUaccessPolicyWindow -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_KERNEL_NODE_PARAMS_v3 -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_MEMSET_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_HOST_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_CONDITIONAL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUgraphEdgeData -.. autoclass:: cuda.cuda.CUDA_GRAPH_INSTANTIATE_PARAMS -.. autoclass:: cuda.cuda.CUlaunchMemSyncDomainMap -.. autoclass:: cuda.cuda.CUlaunchAttributeValue -.. autoclass:: cuda.cuda.CUlaunchAttribute -.. autoclass:: cuda.cuda.CUlaunchConfig -.. autoclass:: cuda.cuda.CUkernelNodeAttrID -.. autoclass:: cuda.cuda.CUkernelNodeAttrValue_v1 -.. autoclass:: cuda.cuda.CUkernelNodeAttrValue -.. autoclass:: cuda.cuda.CUstreamAttrID -.. autoclass:: cuda.cuda.CUstreamAttrValue_v1 -.. autoclass:: cuda.cuda.CUstreamAttrValue -.. autoclass:: cuda.cuda.CUexecAffinitySmCount_v1 -.. autoclass:: cuda.cuda.CUexecAffinitySmCount -.. autoclass:: cuda.cuda.CUexecAffinityParam_v1 -.. autoclass:: cuda.cuda.CUexecAffinityParam -.. autoclass:: cuda.cuda.CUctxCigParam -.. autoclass:: cuda.cuda.CUctxCreateParams -.. autoclass:: cuda.cuda.CUlibraryHostUniversalFunctionAndDataTable -.. autoclass:: cuda.cuda.CUstreamCallback -.. autoclass:: cuda.cuda.CUoccupancyB2DSize -.. autoclass:: cuda.cuda.CUDA_MEMCPY2D_v2 -.. autoclass:: cuda.cuda.CUDA_MEMCPY2D -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_v2 -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_PEER_v1 -.. autoclass:: cuda.cuda.CUDA_MEMCPY3D_PEER -.. autoclass:: cuda.cuda.CUDA_MEMCPY_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_ARRAY_DESCRIPTOR_v2 -.. autoclass:: cuda.cuda.CUDA_ARRAY_DESCRIPTOR -.. autoclass:: cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR_v2 -.. autoclass:: cuda.cuda.CUDA_ARRAY3D_DESCRIPTOR -.. autoclass:: cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES_v1 -.. autoclass:: cuda.cuda.CUDA_ARRAY_SPARSE_PROPERTIES -.. autoclass:: cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 -.. autoclass:: cuda.cuda.CUDA_ARRAY_MEMORY_REQUIREMENTS -.. autoclass:: cuda.cuda.CUDA_RESOURCE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_RESOURCE_DESC -.. autoclass:: cuda.cuda.CUDA_TEXTURE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_TEXTURE_DESC -.. autoclass:: cuda.cuda.CUDA_RESOURCE_VIEW_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_RESOURCE_VIEW_DESC -.. autoclass:: cuda.cuda.CUtensorMap -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 -.. autoclass:: cuda.cuda.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS -.. autoclass:: cuda.cuda.CUDA_LAUNCH_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_LAUNCH_PARAMS -.. 
autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_HANDLE_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_BUFFER_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUmemGenericAllocationHandle_v1 -.. autoclass:: cuda.cuda.CUmemGenericAllocationHandle -.. autoclass:: cuda.cuda.CUarrayMapInfo_v1 -.. autoclass:: cuda.cuda.CUarrayMapInfo -.. autoclass:: cuda.cuda.CUmemLocation_v1 -.. autoclass:: cuda.cuda.CUmemLocation -.. autoclass:: cuda.cuda.CUmemAllocationProp_v1 -.. autoclass:: cuda.cuda.CUmemAllocationProp -.. autoclass:: cuda.cuda.CUmulticastObjectProp_v1 -.. autoclass:: cuda.cuda.CUmulticastObjectProp -.. autoclass:: cuda.cuda.CUmemAccessDesc_v1 -.. autoclass:: cuda.cuda.CUmemAccessDesc -.. autoclass:: cuda.cuda.CUgraphExecUpdateResultInfo_v1 -.. autoclass:: cuda.cuda.CUgraphExecUpdateResultInfo -.. autoclass:: cuda.cuda.CUmemPoolProps_v1 -.. autoclass:: cuda.cuda.CUmemPoolProps -.. autoclass:: cuda.cuda.CUmemPoolPtrExportData_v1 -.. autoclass:: cuda.cuda.CUmemPoolPtrExportData -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v1 -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_MEM_ALLOC_NODE_PARAMS_v2 -.. autoclass:: cuda.cuda.CUDA_MEM_FREE_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_CHILD_GRAPH_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EVENT_RECORD_NODE_PARAMS -.. autoclass:: cuda.cuda.CUDA_EVENT_WAIT_NODE_PARAMS -.. autoclass:: cuda.cuda.CUgraphNodeParams -.. autoclass:: cuda.cuda.CUeglFrame_v1 -.. autoclass:: cuda.cuda.CUeglFrame -.. autoclass:: cuda.cuda.CUeglStreamConnection -.. autoattribute:: cuda.cuda.CUDA_VERSION - - CUDA API version number - -.. autoattribute:: cuda.cuda.CU_UUID_HAS_BEEN_DEFINED - - CUDA UUID types - -.. autoattribute:: cuda.cuda.CU_IPC_HANDLE_SIZE - - CUDA IPC handle size - -.. autoattribute:: cuda.cuda.CU_STREAM_LEGACY - - Legacy stream handle - - - - Stream handle that can be passed as a CUstream to use an implicit stream with legacy synchronization behavior. - - - - See details of the \link_sync_behavior - -.. autoattribute:: cuda.cuda.CU_STREAM_PER_THREAD - - Per-thread stream handle - - - - Stream handle that can be passed as a CUstream to use an implicit stream with per-thread synchronization behavior. - - - - See details of the \link_sync_behavior - -.. autoattribute:: cuda.cuda.CU_COMPUTE_ACCELERATED_TARGET_BASE -.. autoattribute:: cuda.cuda.CUDA_CB -.. autoattribute:: cuda.cuda.CU_GRAPH_COND_ASSIGN_DEFAULT - - Conditional node handle flags Default value is applied when graph is launched. - -.. 
autoattribute:: cuda.cuda.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT - - This port activates when the kernel has finished executing. - -.. autoattribute:: cuda.cuda.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC - - This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC`. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT`. - -.. autoattribute:: cuda.cuda.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER - - This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT`. - -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE -.. autoattribute:: cuda.cuda.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_PRIORITY -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP -.. autoattribute:: cuda.cuda.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN -.. autoattribute:: cuda.cuda.CU_MEMHOSTALLOC_PORTABLE - - If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostAlloc()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTALLOC_DEVICEMAP - - If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostAlloc()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTALLOC_WRITECOMBINED - - If set, host memory is allocated as write-combined - fast to write, faster to DMA, slow to read except via SSE4 streaming load instruction (MOVNTDQA). Flag for :py:obj:`~.cuMemHostAlloc()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_PORTABLE - - If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostRegister()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_DEVICEMAP - - If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostRegister()` - -.. autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_IOMEMORY - - If set, the passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device. On Windows the flag is a no-op. On Linux that memory is marked as non cache-coherent for the GPU and is expected to be physically contiguous. It may return :py:obj:`~.CUDA_ERROR_NOT_PERMITTED` if run as an unprivileged user, :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` on older Linux kernel versions. On all other platforms, it is not supported and :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` is returned. Flag for :py:obj:`~.cuMemHostRegister()` - -.. 
autoattribute:: cuda.cuda.CU_MEMHOSTREGISTER_READ_ONLY - - If set, the passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without :py:obj:`~.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES`, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED`. Using this flag with a current context associated with a device that does not have this attribute set will cause :py:obj:`~.cuMemHostRegister` to error with :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED`. - -.. autoattribute:: cuda.cuda.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL - - Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers - -.. autoattribute:: cuda.cuda.CU_TENSOR_MAP_NUM_QWORDS - - Size of tensor map descriptor - -.. autoattribute:: cuda.cuda.CUDA_EXTERNAL_MEMORY_DEDICATED - - Indicates that the external memory object is a dedicated resource - -.. autoattribute:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC - - When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -.. autoattribute:: cuda.cuda.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC - - When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS` contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -.. autoattribute:: cuda.cuda.CUDA_NVSCISYNC_ATTR_SIGNAL - - When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that application needs signaler specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cuda.CUDA_NVSCISYNC_ATTR_WAIT - - When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that application needs waiter specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cuda.CU_MEM_CREATE_USAGE_TILE_POOL - - This flag if set indicates that the memory will be used as a tile pool. - -.. autoattribute:: cuda.cuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC - - If set, each kernel launched as part of :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. - -.. autoattribute:: cuda.cuda.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC - - If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. - -.. 
autoattribute:: cuda.cuda.CUDA_ARRAY3D_LAYERED - - If set, the CUDA array is a collection of layers, where each layer is either a 1D or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number of layers, not the depth of a 3D array. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_2DARRAY - - Deprecated, use CUDA_ARRAY3D_LAYERED - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_SURFACE_LDST - - This flag must be set in order to bind a surface reference to the CUDA array - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_CUBEMAP - - If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The width of such a CUDA array must be equal to its height, and Depth must be six. If :py:obj:`~.CUDA_ARRAY3D_LAYERED` flag is also set, then the CUDA array is a collection of cubemaps and Depth must be a multiple of six. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_TEXTURE_GATHER - - This flag must be set in order to perform texture gather operations on a CUDA array. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_DEPTH_TEXTURE - - This flag if set indicates that the CUDA array is a DEPTH_TEXTURE. - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_COLOR_ATTACHMENT - - This flag indicates that the CUDA array may be bound as a color target in an external graphics API - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_SPARSE - - This flag if set indicates that the CUDA array or CUDA mipmapped array is a sparse CUDA array or CUDA mipmapped array respectively - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_DEFERRED_MAPPING - - This flag if set indicates that the CUDA array or CUDA mipmapped array will allow deferred memory mapping - -.. autoattribute:: cuda.cuda.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE - - This flag indicates that the CUDA array will be used for hardware accelerated video encode/decode operations. - -.. autoattribute:: cuda.cuda.CU_TRSA_OVERRIDE_FORMAT - - Override the texref format with a format inferred from the array. Flag for :py:obj:`~.cuTexRefSetArray()` - -.. autoattribute:: cuda.cuda.CU_TRSF_READ_AS_INTEGER - - Read the texture as integers rather than promoting the values to floats in the range [0,1]. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_NORMALIZED_COORDINATES - - Use normalized texture coordinates in the range [0,1) instead of [0,dim). Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_SRGB - - Perform sRGB->linear conversion during texture read. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION - - Disable any trilinear filtering optimizations. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_TRSF_SEAMLESS_CUBEMAP - - Enable seamless cube map filtering. Flag for :py:obj:`~.cuTexObjectCreate()` - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_END_AS_INT - - C++ compile time constant for CU_LAUNCH_PARAM_END - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_END - - End of array terminator for the `extra` parameter to :py:obj:`~.cuLaunchKernel` - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT - - C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER - -.. 
autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_POINTER - - Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a buffer containing all kernel parameters used for launching kernel `f`. This buffer needs to honor all alignment/padding requirements of the individual parameters. If :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not also specified in the `extra` array, then :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` will have no effect. - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT - - C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE - -.. autoattribute:: cuda.cuda.CU_LAUNCH_PARAM_BUFFER_SIZE - - Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a size_t which contains the size of the buffer specified with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER`. It is required that :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` also be specified in the `extra` array if the value associated with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not zero. - -.. autoattribute:: cuda.cuda.CU_PARAM_TR_DEFAULT - - For texture references loaded into the module, use default texunit from texture reference. - -.. autoattribute:: cuda.cuda.CU_DEVICE_CPU - - Device that represents the CPU - -.. autoattribute:: cuda.cuda.CU_DEVICE_INVALID - - Device that represents an invalid device - -.. autoattribute:: cuda.cuda.MAX_PLANES - - Maximum number of planes per frame - -.. autoattribute:: cuda.cuda.CUDA_EGL_INFINITE_TIMEOUT - - Indicates that timeout for :py:obj:`~.cuEGLStreamConsumerAcquireFrame` is infinite. - - -Error Handling --------------- - -This section describes the error handling functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGetErrorString -.. autofunction:: cuda.cuda.cuGetErrorName - -Initialization --------------- - -This section describes the initialization functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuInit - -Version Management ------------------- - -This section describes the version management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuDriverGetVersion - -Device Management ------------------ - -This section describes the device management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuDeviceGet -.. autofunction:: cuda.cuda.cuDeviceGetCount -.. autofunction:: cuda.cuda.cuDeviceGetName -.. autofunction:: cuda.cuda.cuDeviceGetUuid -.. autofunction:: cuda.cuda.cuDeviceGetUuid_v2 -.. autofunction:: cuda.cuda.cuDeviceGetLuid -.. autofunction:: cuda.cuda.cuDeviceTotalMem -.. autofunction:: cuda.cuda.cuDeviceGetTexture1DLinearMaxWidth -.. autofunction:: cuda.cuda.cuDeviceGetAttribute -.. autofunction:: cuda.cuda.cuDeviceGetNvSciSyncAttributes -.. autofunction:: cuda.cuda.cuDeviceSetMemPool -.. autofunction:: cuda.cuda.cuDeviceGetMemPool -.. autofunction:: cuda.cuda.cuDeviceGetDefaultMemPool -.. autofunction:: cuda.cuda.cuDeviceGetExecAffinitySupport -.. autofunction:: cuda.cuda.cuFlushGPUDirectRDMAWrites - -Primary Context Management --------------------------- - -This section describes the primary context management functions of the low-level CUDA driver application programming interface. - - - -The primary context is unique per device and shared with the CUDA runtime API. These functions allow integration with other libraries using CUDA. - -.. 
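Before the individual reference entries, one convention is worth spelling out: every call in these bindings returns a tuple whose first element is the :py:obj:`~.CUresult` status code. The following is a minimal, illustrative sketch only (the ``check`` helper and the use of device ordinal 0 are assumptions of the example, not part of the API); it initializes the driver and retains the primary context as described above.

.. code-block:: python

    from cuda import cuda

    def check(err):
        # Illustrative helper: every binding returns a CUresult first.
        if err != cuda.CUresult.CUDA_SUCCESS:
            _, name = cuda.cuGetErrorName(err)
            raise RuntimeError(f"CUDA error: {name}")

    err, = cuda.cuInit(0)              # must precede all other driver calls
    check(err)
    err, dev = cuda.cuDeviceGet(0)     # first visible device
    check(err)
    # Retain the device's primary context (shared with the runtime API)
    # and make it current on this host thread.
    err, ctx = cuda.cuDevicePrimaryCtxRetain(dev)
    check(err)
    err, = cuda.cuCtxSetCurrent(ctx)
    check(err)
    # ... launch work here ...
    err, = cuda.cuDevicePrimaryCtxRelease(dev)
    check(err)

-..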
autofunction:: cuda.cuda.cuDevicePrimaryCtxRetain -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxRelease -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxSetFlags -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxGetState -.. autofunction:: cuda.cuda.cuDevicePrimaryCtxReset - -Context Management ------------------- - -This section describes the context management functions of the low-level CUDA driver application programming interface. - - - -Please note that some functions are described in the Primary Context Management section. - -.. autofunction:: cuda.cuda.cuCtxCreate -.. autofunction:: cuda.cuda.cuCtxCreate_v3 -.. autofunction:: cuda.cuda.cuCtxCreate_v4 -.. autofunction:: cuda.cuda.cuCtxDestroy -.. autofunction:: cuda.cuda.cuCtxPushCurrent -.. autofunction:: cuda.cuda.cuCtxPopCurrent -.. autofunction:: cuda.cuda.cuCtxSetCurrent -.. autofunction:: cuda.cuda.cuCtxGetCurrent -.. autofunction:: cuda.cuda.cuCtxGetDevice -.. autofunction:: cuda.cuda.cuCtxGetFlags -.. autofunction:: cuda.cuda.cuCtxSetFlags -.. autofunction:: cuda.cuda.cuCtxGetId -.. autofunction:: cuda.cuda.cuCtxSynchronize -.. autofunction:: cuda.cuda.cuCtxSetLimit -.. autofunction:: cuda.cuda.cuCtxGetLimit -.. autofunction:: cuda.cuda.cuCtxGetCacheConfig -.. autofunction:: cuda.cuda.cuCtxSetCacheConfig -.. autofunction:: cuda.cuda.cuCtxGetApiVersion -.. autofunction:: cuda.cuda.cuCtxGetStreamPriorityRange -.. autofunction:: cuda.cuda.cuCtxResetPersistingL2Cache -.. autofunction:: cuda.cuda.cuCtxGetExecAffinity -.. autofunction:: cuda.cuda.cuCtxRecordEvent -.. autofunction:: cuda.cuda.cuCtxWaitEvent - -Module Management ------------------ - -This section describes the module management functions of the low-level CUDA driver application programming interface. - -.. autoclass:: cuda.cuda.CUmoduleLoadingMode - - .. autoattribute:: cuda.cuda.CUmoduleLoadingMode.CU_MODULE_EAGER_LOADING - - - Lazy Kernel Loading is not enabled - - - .. autoattribute:: cuda.cuda.CUmoduleLoadingMode.CU_MODULE_LAZY_LOADING - - - Lazy Kernel Loading is enabled - -.. autofunction:: cuda.cuda.cuModuleLoad -.. autofunction:: cuda.cuda.cuModuleLoadData -.. autofunction:: cuda.cuda.cuModuleLoadDataEx -.. autofunction:: cuda.cuda.cuModuleLoadFatBinary -.. autofunction:: cuda.cuda.cuModuleUnload -.. autofunction:: cuda.cuda.cuModuleGetLoadingMode -.. autofunction:: cuda.cuda.cuModuleGetFunction -.. autofunction:: cuda.cuda.cuModuleGetFunctionCount -.. autofunction:: cuda.cuda.cuModuleEnumerateFunctions -.. autofunction:: cuda.cuda.cuModuleGetGlobal -.. autofunction:: cuda.cuda.cuLinkCreate -.. autofunction:: cuda.cuda.cuLinkAddData -.. autofunction:: cuda.cuda.cuLinkAddFile -.. autofunction:: cuda.cuda.cuLinkComplete -.. autofunction:: cuda.cuda.cuLinkDestroy - -Library Management ------------------- - -This section describes the library management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuLibraryLoadData -.. autofunction:: cuda.cuda.cuLibraryLoadFromFile -.. autofunction:: cuda.cuda.cuLibraryUnload -.. autofunction:: cuda.cuda.cuLibraryGetKernel -.. autofunction:: cuda.cuda.cuLibraryGetKernelCount -.. autofunction:: cuda.cuda.cuLibraryEnumerateKernels -.. autofunction:: cuda.cuda.cuLibraryGetModule -.. autofunction:: cuda.cuda.cuKernelGetFunction -.. autofunction:: cuda.cuda.cuKernelGetLibrary -.. autofunction:: cuda.cuda.cuLibraryGetGlobal -.. autofunction:: cuda.cuda.cuLibraryGetManaged -.. autofunction:: cuda.cuda.cuLibraryGetUnifiedFunction -.. autofunction:: cuda.cuda.cuKernelGetAttribute -..
autofunction:: cuda.cuda.cuKernelSetAttribute -.. autofunction:: cuda.cuda.cuKernelSetCacheConfig -.. autofunction:: cuda.cuda.cuKernelGetName -.. autofunction:: cuda.cuda.cuKernelGetParamInfo - -Memory Management ------------------ - -This section describes the memory management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuMemGetInfo -.. autofunction:: cuda.cuda.cuMemAlloc -.. autofunction:: cuda.cuda.cuMemAllocPitch -.. autofunction:: cuda.cuda.cuMemFree -.. autofunction:: cuda.cuda.cuMemGetAddressRange -.. autofunction:: cuda.cuda.cuMemAllocHost -.. autofunction:: cuda.cuda.cuMemFreeHost -.. autofunction:: cuda.cuda.cuMemHostAlloc -.. autofunction:: cuda.cuda.cuMemHostGetDevicePointer -.. autofunction:: cuda.cuda.cuMemHostGetFlags -.. autofunction:: cuda.cuda.cuMemAllocManaged -.. autofunction:: cuda.cuda.cuDeviceRegisterAsyncNotification -.. autofunction:: cuda.cuda.cuDeviceUnregisterAsyncNotification -.. autofunction:: cuda.cuda.cuDeviceGetByPCIBusId -.. autofunction:: cuda.cuda.cuDeviceGetPCIBusId -.. autofunction:: cuda.cuda.cuIpcGetEventHandle -.. autofunction:: cuda.cuda.cuIpcOpenEventHandle -.. autofunction:: cuda.cuda.cuIpcGetMemHandle -.. autofunction:: cuda.cuda.cuIpcOpenMemHandle -.. autofunction:: cuda.cuda.cuIpcCloseMemHandle -.. autofunction:: cuda.cuda.cuMemHostRegister -.. autofunction:: cuda.cuda.cuMemHostUnregister -.. autofunction:: cuda.cuda.cuMemcpy -.. autofunction:: cuda.cuda.cuMemcpyPeer -.. autofunction:: cuda.cuda.cuMemcpyHtoD -.. autofunction:: cuda.cuda.cuMemcpyDtoH -.. autofunction:: cuda.cuda.cuMemcpyDtoD -.. autofunction:: cuda.cuda.cuMemcpyDtoA -.. autofunction:: cuda.cuda.cuMemcpyAtoD -.. autofunction:: cuda.cuda.cuMemcpyHtoA -.. autofunction:: cuda.cuda.cuMemcpyAtoH -.. autofunction:: cuda.cuda.cuMemcpyAtoA -.. autofunction:: cuda.cuda.cuMemcpy2D -.. autofunction:: cuda.cuda.cuMemcpy2DUnaligned -.. autofunction:: cuda.cuda.cuMemcpy3D -.. autofunction:: cuda.cuda.cuMemcpy3DPeer -.. autofunction:: cuda.cuda.cuMemcpyAsync -.. autofunction:: cuda.cuda.cuMemcpyPeerAsync -.. autofunction:: cuda.cuda.cuMemcpyHtoDAsync -.. autofunction:: cuda.cuda.cuMemcpyDtoHAsync -.. autofunction:: cuda.cuda.cuMemcpyDtoDAsync -.. autofunction:: cuda.cuda.cuMemcpyHtoAAsync -.. autofunction:: cuda.cuda.cuMemcpyAtoHAsync -.. autofunction:: cuda.cuda.cuMemcpy2DAsync -.. autofunction:: cuda.cuda.cuMemcpy3DAsync -.. autofunction:: cuda.cuda.cuMemcpy3DPeerAsync -.. autofunction:: cuda.cuda.cuMemsetD8 -.. autofunction:: cuda.cuda.cuMemsetD16 -.. autofunction:: cuda.cuda.cuMemsetD32 -.. autofunction:: cuda.cuda.cuMemsetD2D8 -.. autofunction:: cuda.cuda.cuMemsetD2D16 -.. autofunction:: cuda.cuda.cuMemsetD2D32 -.. autofunction:: cuda.cuda.cuMemsetD8Async -.. autofunction:: cuda.cuda.cuMemsetD16Async -.. autofunction:: cuda.cuda.cuMemsetD32Async -.. autofunction:: cuda.cuda.cuMemsetD2D8Async -.. autofunction:: cuda.cuda.cuMemsetD2D16Async -.. autofunction:: cuda.cuda.cuMemsetD2D32Async -.. autofunction:: cuda.cuda.cuArrayCreate -.. autofunction:: cuda.cuda.cuArrayGetDescriptor -.. autofunction:: cuda.cuda.cuArrayGetSparseProperties -.. autofunction:: cuda.cuda.cuMipmappedArrayGetSparseProperties -.. autofunction:: cuda.cuda.cuArrayGetMemoryRequirements -.. autofunction:: cuda.cuda.cuMipmappedArrayGetMemoryRequirements -.. autofunction:: cuda.cuda.cuArrayGetPlane -.. autofunction:: cuda.cuda.cuArrayDestroy -.. autofunction:: cuda.cuda.cuArray3DCreate -.. autofunction:: cuda.cuda.cuArray3DGetDescriptor -.. 
autofunction:: cuda.cuda.cuMipmappedArrayCreate -.. autofunction:: cuda.cuda.cuMipmappedArrayGetLevel -.. autofunction:: cuda.cuda.cuMipmappedArrayDestroy -.. autofunction:: cuda.cuda.cuMemGetHandleForAddressRange - -Virtual Memory Management -------------------------- - -This section describes the virtual memory management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuMemAddressReserve -.. autofunction:: cuda.cuda.cuMemAddressFree -.. autofunction:: cuda.cuda.cuMemCreate -.. autofunction:: cuda.cuda.cuMemRelease -.. autofunction:: cuda.cuda.cuMemMap -.. autofunction:: cuda.cuda.cuMemMapArrayAsync -.. autofunction:: cuda.cuda.cuMemUnmap -.. autofunction:: cuda.cuda.cuMemSetAccess -.. autofunction:: cuda.cuda.cuMemGetAccess -.. autofunction:: cuda.cuda.cuMemExportToShareableHandle -.. autofunction:: cuda.cuda.cuMemImportFromShareableHandle -.. autofunction:: cuda.cuda.cuMemGetAllocationGranularity -.. autofunction:: cuda.cuda.cuMemGetAllocationPropertiesFromHandle -.. autofunction:: cuda.cuda.cuMemRetainAllocationHandle - -Stream Ordered Memory Allocator -------------------------------- - -This section describes the stream ordered memory allocator exposed by the low-level CUDA driver application programming interface. - - - - - -**Overview** - - - -The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use before allocation / use after free error will cause undefined behavior. - -The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. - - - - - -**Supported Platforms** - - - -Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED. - -.. autofunction:: cuda.cuda.cuMemFreeAsync -.. autofunction:: cuda.cuda.cuMemAllocAsync -.. autofunction:: cuda.cuda.cuMemPoolTrimTo -.. autofunction:: cuda.cuda.cuMemPoolSetAttribute -.. autofunction:: cuda.cuda.cuMemPoolGetAttribute -.. autofunction:: cuda.cuda.cuMemPoolSetAccess -.. autofunction:: cuda.cuda.cuMemPoolGetAccess -.. autofunction:: cuda.cuda.cuMemPoolCreate -.. autofunction:: cuda.cuda.cuMemPoolDestroy -.. autofunction:: cuda.cuda.cuMemAllocFromPoolAsync -.. autofunction:: cuda.cuda.cuMemPoolExportToShareableHandle -.. autofunction:: cuda.cuda.cuMemPoolImportFromShareableHandle -.. autofunction:: cuda.cuda.cuMemPoolExportPointer -.. autofunction:: cuda.cuda.cuMemPoolImportPointer
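As a concrete illustration of the ordering contract above, the sketch below allocates, uses, and frees a buffer entirely in stream order. It is illustrative only: error handling is elided, and the NumPy host buffer is an assumption of the example, not a requirement of the allocator.

.. code-block:: python

    import numpy as np
    from cuda import cuda

    err, stream = cuda.cuStreamCreate(0)
    host = np.arange(1024, dtype=np.float32)

    # The allocation and the free are enqueued in stream order; every
    # access to dptr must be ordered between these two operations.
    err, dptr = cuda.cuMemAllocAsync(host.nbytes, stream)
    err, = cuda.cuMemcpyHtoDAsync(dptr, host.ctypes.data, host.nbytes, stream)
    # ... kernels reading or writing dptr would be launched into stream here ...
    err, = cuda.cuMemFreeAsync(dptr, stream)

    err, = cuda.cuStreamSynchronize(stream)  # drain the queued work
    err, = cuda.cuStreamDestroy(stream)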
- -Multicast Object Management ---------------------------- - -This section describes the CUDA multicast object operations exposed by the low-level CUDA driver application programming interface. - - - - - -**Overview** - - - -A multicast object created via cuMulticastCreate enables certain memory operations to be broadcast to a team of devices. Devices can be added to a multicast object via cuMulticastAddDevice. Memory can be bound on each participating device via either cuMulticastBindMem or cuMulticastBindAddr. Multicast objects can be mapped into a device's virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess). - - - - - -**Supported Platforms** - - - -Support for multicast on a specific device can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED. - -.. autofunction:: cuda.cuda.cuMulticastCreate -.. autofunction:: cuda.cuda.cuMulticastAddDevice -.. autofunction:: cuda.cuda.cuMulticastBindMem -.. autofunction:: cuda.cuda.cuMulticastBindAddr -.. autofunction:: cuda.cuda.cuMulticastUnbind -.. autofunction:: cuda.cuda.cuMulticastGetGranularity - -Unified Addressing ------------------- - -This section describes the unified addressing functions of the low-level CUDA driver application programming interface. - - - - - -**Overview** - - - -CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). - - - - - -**Supported Platforms** - - - -Whether or not a device supports unified addressing may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING. - -Unified addressing is automatically enabled in 64-bit processes. - - - - - -**Looking Up Information from Pointer Values** - - - -It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cuPointerGetAttribute(). - -Since pointers are unique, it is not necessary to specify information about the pointers specified to the various copy functions in the CUDA API. The function cuMemcpy() may be used to perform a copy between two pointers, ignoring whether they point to host or device memory (making cuMemcpyHtoD(), cuMemcpyDtoD(), and cuMemcpyDtoH() unnecessary for devices supporting unified addressing). For multidimensional copies, the memory type CU_MEMORYTYPE_UNIFIED may be used to specify that the CUDA driver should infer the location of the pointer from its value. - - - - - -**Automatic Mapping of Host Allocated Host Memory** - - - -All host memory allocated in all contexts using cuMemAllocHost() and cuMemHostAlloc() is always directly accessible from all contexts on all devices that support unified addressing. This is the case regardless of whether or not the flags CU_MEMHOSTALLOC_PORTABLE and CU_MEMHOSTALLOC_DEVICEMAP are specified. - -The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host, so it is not necessary to call cuMemHostGetDevicePointer() to get the device pointer for these allocations. - -Note that this is not the case for memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below. - - - - - -**Automatic Registration of Peer Memory** - - - -Upon enabling direct access from a context that supports unified addressing to another peer context that supports unified addressing using cuCtxEnablePeerAccess(), all memory allocated in the peer context using cuMemAlloc() and cuMemAllocPitch() will immediately be accessible by the current context.
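For example, once peer access has been enabled this way, a single location-agnostic cuMemcpy() can move data between the two devices. The sketch below is illustrative; ``dev0``/``dev1``, the peer context ``ctx1``, the allocations ``dptr0``/``dptr1``, and ``nbytes`` are assumed to exist already.

.. code-block:: python

    from cuda import cuda

    err, can_access = cuda.cuDeviceCanAccessPeer(dev0, dev1)
    if can_access:
        # With the context for dev0 current, grant it access to
        # allocations made in ctx1 (the context on dev1).
        err, = cuda.cuCtxEnablePeerAccess(ctx1, 0)
        # dptr1 was allocated in ctx1 with cuMemAlloc; under unified
        # addressing the same pointer value is directly usable here.
        err, = cuda.cuMemcpy(dptr0, dptr1, nbytes)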
The device pointer value through which any peer memory may be accessed in the current context is the same pointer value through which that memory may be accessed in the peer context. - - - - - -**Exceptions, Disjoint Addressing** - - - -Not all memory may be accessed on devices through the same pointer value through which they are accessed on the host. These exceptions are host memory registered using cuMemHostRegister() and host memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all contexts that support unified addressing. - -This device address may be queried using cuMemHostGetDevicePointer() when a context using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory through cuMemcpy() and similar functions using the CU_MEMORYTYPE_UNIFIED memory type. - -.. autofunction:: cuda.cuda.cuPointerGetAttribute -.. autofunction:: cuda.cuda.cuMemPrefetchAsync -.. autofunction:: cuda.cuda.cuMemPrefetchAsync_v2 -.. autofunction:: cuda.cuda.cuMemAdvise -.. autofunction:: cuda.cuda.cuMemAdvise_v2 -.. autofunction:: cuda.cuda.cuMemRangeGetAttribute -.. autofunction:: cuda.cuda.cuMemRangeGetAttributes -.. autofunction:: cuda.cuda.cuPointerSetAttribute -.. autofunction:: cuda.cuda.cuPointerGetAttributes - -Stream Management ------------------ - -This section describes the stream management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuStreamCreate -.. autofunction:: cuda.cuda.cuStreamCreateWithPriority -.. autofunction:: cuda.cuda.cuStreamGetPriority -.. autofunction:: cuda.cuda.cuStreamGetFlags -.. autofunction:: cuda.cuda.cuStreamGetId -.. autofunction:: cuda.cuda.cuStreamGetCtx -.. autofunction:: cuda.cuda.cuStreamGetCtx_v2 -.. autofunction:: cuda.cuda.cuStreamWaitEvent -.. autofunction:: cuda.cuda.cuStreamAddCallback -.. autofunction:: cuda.cuda.cuStreamBeginCapture -.. autofunction:: cuda.cuda.cuStreamBeginCaptureToGraph -.. autofunction:: cuda.cuda.cuThreadExchangeStreamCaptureMode -.. autofunction:: cuda.cuda.cuStreamEndCapture -.. autofunction:: cuda.cuda.cuStreamIsCapturing -.. autofunction:: cuda.cuda.cuStreamGetCaptureInfo -.. autofunction:: cuda.cuda.cuStreamGetCaptureInfo_v3 -.. autofunction:: cuda.cuda.cuStreamUpdateCaptureDependencies -.. autofunction:: cuda.cuda.cuStreamUpdateCaptureDependencies_v2 -.. autofunction:: cuda.cuda.cuStreamAttachMemAsync -.. autofunction:: cuda.cuda.cuStreamQuery -.. autofunction:: cuda.cuda.cuStreamSynchronize -.. autofunction:: cuda.cuda.cuStreamDestroy -.. autofunction:: cuda.cuda.cuStreamCopyAttributes -.. autofunction:: cuda.cuda.cuStreamGetAttribute -.. autofunction:: cuda.cuda.cuStreamSetAttribute - -Event Management ----------------- - -This section describes the event management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuEventCreate -.. autofunction:: cuda.cuda.cuEventRecord -.. autofunction:: cuda.cuda.cuEventRecordWithFlags -.. autofunction:: cuda.cuda.cuEventQuery -.. autofunction:: cuda.cuda.cuEventSynchronize -.. autofunction:: cuda.cuda.cuEventDestroy -.. 
autofunction:: cuda.cuda.cuEventElapsedTime - -External Resource Interoperability ----------------------------------- - -This section describes the external resource interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuImportExternalMemory -.. autofunction:: cuda.cuda.cuExternalMemoryGetMappedBuffer -.. autofunction:: cuda.cuda.cuExternalMemoryGetMappedMipmappedArray -.. autofunction:: cuda.cuda.cuDestroyExternalMemory -.. autofunction:: cuda.cuda.cuImportExternalSemaphore -.. autofunction:: cuda.cuda.cuSignalExternalSemaphoresAsync -.. autofunction:: cuda.cuda.cuWaitExternalSemaphoresAsync -.. autofunction:: cuda.cuda.cuDestroyExternalSemaphore - -Stream Memory Operations ------------------------- - -This section describes the stream memory operations of the low-level CUDA driver application programming interface. - - - -Support for the CU_STREAM_WAIT_VALUE_NOR flag can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. - - - -Support for the cuStreamWriteValue64() and cuStreamWaitValue64() functions, as well as for the CU_STREAM_MEM_OP_WAIT_VALUE_64 and CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. - - - -Support for both CU_STREAM_WAIT_VALUE_FLUSH and CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform hardware features and can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES. - - - -Note that all memory pointers passed as parameters to these operations are device pointers. Where necessary, a device pointer should be obtained, for example with cuMemHostGetDevicePointer(). - - - -None of the operations accepts pointers to managed memory buffers (cuMemAllocManaged). - - - -Warning: Improper use of these APIs may deadlock the application. Synchronization ordering established through these APIs is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by these APIs should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. - -.. autofunction:: cuda.cuda.cuStreamWaitValue32 -.. autofunction:: cuda.cuda.cuStreamWaitValue64 -.. autofunction:: cuda.cuda.cuStreamWriteValue32 -.. autofunction:: cuda.cuda.cuStreamWriteValue64 -.. autofunction:: cuda.cuda.cuStreamBatchMemOp
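A minimal sketch of the wait/write pattern described above follows. It assumes ``stream`` and two device pointers ``flag_dptr``/``done_dptr`` already exist (for pinned host memory they would come from cuMemHostGetDevicePointer()); the enum spellings are assumptions based on the binding's usual naming.

.. code-block:: python

    from cuda import cuda

    # Hold back work queued after this point until *flag_dptr >= 1.
    err, = cuda.cuStreamWaitValue32(
        stream, flag_dptr, 1,
        cuda.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ)
    # ... kernels that depend on the flag are launched into stream here ...
    # Publish completion by writing 1 to *done_dptr from the stream.
    err, = cuda.cuStreamWriteValue32(
        stream, done_dptr, 1,
        cuda.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT)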
- -Execution Control ------------------ - -This section describes the execution control functions of the low-level CUDA driver application programming interface. - -.. autoclass:: cuda.cuda.CUfunctionLoadingState - - .. autoattribute:: cuda.cuda.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_UNLOADED - - - .. autoattribute:: cuda.cuda.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_LOADED - - - .. autoattribute:: cuda.cuda.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_MAX - -.. autofunction:: cuda.cuda.cuFuncGetAttribute -.. autofunction:: cuda.cuda.cuFuncSetAttribute -.. autofunction:: cuda.cuda.cuFuncSetCacheConfig -.. autofunction:: cuda.cuda.cuFuncGetModule -.. autofunction:: cuda.cuda.cuFuncGetName -.. autofunction:: cuda.cuda.cuFuncGetParamInfo -.. autofunction:: cuda.cuda.cuFuncIsLoaded -.. autofunction:: cuda.cuda.cuFuncLoad -.. autofunction:: cuda.cuda.cuLaunchKernel -.. autofunction:: cuda.cuda.cuLaunchKernelEx -.. autofunction:: cuda.cuda.cuLaunchCooperativeKernel -.. autofunction:: cuda.cuda.cuLaunchCooperativeKernelMultiDevice -.. autofunction:: cuda.cuda.cuLaunchHostFunc - -Graph Management ----------------- - -This section describes the graph management functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGraphCreate -.. autofunction:: cuda.cuda.cuGraphAddKernelNode -.. autofunction:: cuda.cuda.cuGraphKernelNodeGetParams -.. autofunction:: cuda.cuda.cuGraphKernelNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddMemcpyNode -.. autofunction:: cuda.cuda.cuGraphMemcpyNodeGetParams -.. autofunction:: cuda.cuda.cuGraphMemcpyNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddMemsetNode -.. autofunction:: cuda.cuda.cuGraphMemsetNodeGetParams -.. autofunction:: cuda.cuda.cuGraphMemsetNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddHostNode -.. autofunction:: cuda.cuda.cuGraphHostNodeGetParams -.. autofunction:: cuda.cuda.cuGraphHostNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddChildGraphNode -.. autofunction:: cuda.cuda.cuGraphChildGraphNodeGetGraph -.. autofunction:: cuda.cuda.cuGraphAddEmptyNode -.. autofunction:: cuda.cuda.cuGraphAddEventRecordNode -.. autofunction:: cuda.cuda.cuGraphEventRecordNodeGetEvent -.. autofunction:: cuda.cuda.cuGraphEventRecordNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphAddEventWaitNode -.. autofunction:: cuda.cuda.cuGraphEventWaitNodeGetEvent -.. autofunction:: cuda.cuda.cuGraphEventWaitNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphAddExternalSemaphoresSignalNode -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresSignalNodeGetParams -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddExternalSemaphoresWaitNode -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresWaitNodeGetParams -.. autofunction:: cuda.cuda.cuGraphExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddBatchMemOpNode -.. autofunction:: cuda.cuda.cuGraphBatchMemOpNodeGetParams -.. autofunction:: cuda.cuda.cuGraphBatchMemOpNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecBatchMemOpNodeSetParams -.. autofunction:: cuda.cuda.cuGraphAddMemAllocNode -.. autofunction:: cuda.cuda.cuGraphMemAllocNodeGetParams -.. autofunction:: cuda.cuda.cuGraphAddMemFreeNode -.. autofunction:: cuda.cuda.cuGraphMemFreeNodeGetParams -.. autofunction:: cuda.cuda.cuDeviceGraphMemTrim -.. autofunction:: cuda.cuda.cuDeviceGetGraphMemAttribute -.. autofunction:: cuda.cuda.cuDeviceSetGraphMemAttribute -.. autofunction:: cuda.cuda.cuGraphClone -.. autofunction:: cuda.cuda.cuGraphNodeFindInClone -.. autofunction:: cuda.cuda.cuGraphNodeGetType -.. autofunction:: cuda.cuda.cuGraphGetNodes -.. autofunction:: cuda.cuda.cuGraphGetRootNodes -.. autofunction:: cuda.cuda.cuGraphGetEdges -.. autofunction:: cuda.cuda.cuGraphGetEdges_v2 -.. autofunction:: cuda.cuda.cuGraphNodeGetDependencies -.. autofunction:: cuda.cuda.cuGraphNodeGetDependencies_v2 -.. autofunction:: cuda.cuda.cuGraphNodeGetDependentNodes -.. autofunction:: cuda.cuda.cuGraphNodeGetDependentNodes_v2 -.. autofunction:: cuda.cuda.cuGraphAddDependencies -.. autofunction:: cuda.cuda.cuGraphAddDependencies_v2 -.. autofunction:: cuda.cuda.cuGraphRemoveDependencies -.. autofunction:: cuda.cuda.cuGraphRemoveDependencies_v2 -.. autofunction:: cuda.cuda.cuGraphDestroyNode -.. autofunction:: cuda.cuda.cuGraphInstantiate -.. autofunction:: cuda.cuda.cuGraphInstantiateWithParams -.. autofunction:: cuda.cuda.cuGraphExecGetFlags -.. autofunction:: cuda.cuda.cuGraphExecKernelNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecMemcpyNodeSetParams -..
autofunction:: cuda.cuda.cuGraphExecMemsetNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecHostNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecChildGraphNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecEventRecordNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphExecEventWaitNodeSetEvent -.. autofunction:: cuda.cuda.cuGraphExecExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cuda.cuGraphNodeSetEnabled -.. autofunction:: cuda.cuda.cuGraphNodeGetEnabled -.. autofunction:: cuda.cuda.cuGraphUpload -.. autofunction:: cuda.cuda.cuGraphLaunch -.. autofunction:: cuda.cuda.cuGraphExecDestroy -.. autofunction:: cuda.cuda.cuGraphDestroy -.. autofunction:: cuda.cuda.cuGraphExecUpdate -.. autofunction:: cuda.cuda.cuGraphKernelNodeCopyAttributes -.. autofunction:: cuda.cuda.cuGraphKernelNodeGetAttribute -.. autofunction:: cuda.cuda.cuGraphKernelNodeSetAttribute -.. autofunction:: cuda.cuda.cuGraphDebugDotPrint -.. autofunction:: cuda.cuda.cuUserObjectCreate -.. autofunction:: cuda.cuda.cuUserObjectRetain -.. autofunction:: cuda.cuda.cuUserObjectRelease -.. autofunction:: cuda.cuda.cuGraphRetainUserObject -.. autofunction:: cuda.cuda.cuGraphReleaseUserObject -.. autofunction:: cuda.cuda.cuGraphAddNode -.. autofunction:: cuda.cuda.cuGraphAddNode_v2 -.. autofunction:: cuda.cuda.cuGraphNodeSetParams -.. autofunction:: cuda.cuda.cuGraphExecNodeSetParams -.. autofunction:: cuda.cuda.cuGraphConditionalHandleCreate - -Occupancy ---------- - -This section describes the occupancy calculation functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuOccupancyMaxActiveBlocksPerMultiprocessor -.. autofunction:: cuda.cuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags -.. autofunction:: cuda.cuda.cuOccupancyMaxPotentialBlockSize -.. autofunction:: cuda.cuda.cuOccupancyMaxPotentialBlockSizeWithFlags -.. autofunction:: cuda.cuda.cuOccupancyAvailableDynamicSMemPerBlock -.. autofunction:: cuda.cuda.cuOccupancyMaxPotentialClusterSize -.. autofunction:: cuda.cuda.cuOccupancyMaxActiveClusters - -Texture Object Management -------------------------- - -This section describes the texture object management functions of the low-level CUDA driver application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cuda.cuTexObjectCreate -.. autofunction:: cuda.cuda.cuTexObjectDestroy -.. autofunction:: cuda.cuda.cuTexObjectGetResourceDesc -.. autofunction:: cuda.cuda.cuTexObjectGetTextureDesc -.. autofunction:: cuda.cuda.cuTexObjectGetResourceViewDesc - -Surface Object Management -------------------------- - -This section describes the surface object management functions of the low-level CUDA driver application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cuda.cuSurfObjectCreate -.. autofunction:: cuda.cuda.cuSurfObjectDestroy -.. autofunction:: cuda.cuda.cuSurfObjectGetResourceDesc - -Tensor Map Object Management ----------------------------- - -This section describes the tensor map object management functions of the low-level CUDA driver application programming interface. The tensor core API is only supported on devices of compute capability 9.0 or higher. - -.. autofunction:: cuda.cuda.cuTensorMapEncodeTiled -.. autofunction:: cuda.cuda.cuTensorMapEncodeIm2col -..
autofunction:: cuda.cuda.cuTensorMapReplaceAddress - -Peer Context Memory Access --------------------------- - -This section describes the direct peer context memory access functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuDeviceCanAccessPeer -.. autofunction:: cuda.cuda.cuCtxEnablePeerAccess -.. autofunction:: cuda.cuda.cuCtxDisablePeerAccess -.. autofunction:: cuda.cuda.cuDeviceGetP2PAttribute - -Graphics Interoperability -------------------------- - -This section describes the graphics interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGraphicsUnregisterResource -.. autofunction:: cuda.cuda.cuGraphicsSubResourceGetMappedArray -.. autofunction:: cuda.cuda.cuGraphicsResourceGetMappedMipmappedArray -.. autofunction:: cuda.cuda.cuGraphicsResourceGetMappedPointer -.. autofunction:: cuda.cuda.cuGraphicsResourceSetMapFlags -.. autofunction:: cuda.cuda.cuGraphicsMapResources -.. autofunction:: cuda.cuda.cuGraphicsUnmapResources - -Driver Entry Point Access -------------------------- - -This section describes the driver entry point access functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGetProcAddress - -Coredump Attributes Control API -------------------------------- - -This section describes the coredump attribute control functions of the low-level CUDA driver application programming interface. - -.. autoclass:: cuda.cuda.CUcoredumpSettings - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_ENABLE_ON_EXCEPTION - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_TRIGGER_HOST - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_LIGHTWEIGHT - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_ENABLE_USER_TRIGGER - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_FILE - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_PIPE - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_GENERATION_FLAGS - - - .. autoattribute:: cuda.cuda.CUcoredumpSettings.CU_COREDUMP_MAX - -.. autoclass:: cuda.cuda.CUCoredumpGenerationFlags - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_DEFAULT_FLAGS - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_GLOBAL_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_SHARED_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_LOCAL_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_ABORT - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_CONSTBANK_MEMORY - - - .. autoattribute:: cuda.cuda.CUCoredumpGenerationFlags.CU_COREDUMP_LIGHTWEIGHT_FLAGS - -.. autofunction:: cuda.cuda.cuCoredumpGetAttribute -.. autofunction:: cuda.cuda.cuCoredumpGetAttributeGlobal -.. autofunction:: cuda.cuda.cuCoredumpSetAttribute -.. autofunction:: cuda.cuda.cuCoredumpSetAttributeGlobal - -Green Contexts --------------- - -This section describes the APIs for creation and manipulation of green contexts in the CUDA driver. Green contexts are a lightweight alternative to traditional contexts, with the ability to pass in a set of resources that they should be initialized with. 
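A rough end-to-end sketch may help orient the reader; it follows the four numbered steps described just below. The Python argument order and result shapes shown here are assumptions of the sketch, so treat the reference entries later in this section as authoritative.

.. code-block:: python

    from cuda import cuda

    # (1) Query the device's SM resource (argument order assumed).
    err, sm = cuda.cuDeviceGetDevResource(
        dev, cuda.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM)
    # (2) Split off one group of at least 16 SMs (result shape assumed).
    err, groups, nb_groups, remaining = cuda.cuDevSmResourceSplitByCount(
        1, sm, 0, 16)
    # (3) Generate a descriptor over the chosen partition.
    err, desc = cuda.cuDevResourceGenerateDesc([groups[0]], 1)
    # (4) Provision the resources and create the green context.
    err, green_ctx = cuda.cuGreenCtxCreate(
        desc, dev, cuda.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM)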
This allows the developer to represent distinct spatial partitions of the GPU, provision resources for them, and target them via the same programming model that CUDA exposes (streams, kernel launches, etc.). - - - -There are four main steps to using this new set of APIs (sketched above). - -- (1) Start with an initial set of resources, for example via cuDeviceGetDevResource. Only SM type is supported today. - - - - - - - -- (2) Partition this set of resources by providing them as input to a partition API, for example: cuDevSmResourceSplitByCount. - - - - - - - -- (3) Finalize the specification of resources by creating a descriptor via cuDevResourceGenerateDesc. - - - - - - - -- (4) Provision the resources and create a green context via cuGreenCtxCreate. - - - - - - - - - - - -For ``CU_DEV_RESOURCE_TYPE_SM``\ , the partitions created have minimum SM count requirements, often rounding up and aligning the minCount provided to cuDevSmResourceSplitByCount. The following is a guideline for each architecture and may be subject to change: - -- On Compute Architecture 6.X: The minimum count is 1 SM. - - - - - - - -- On Compute Architecture 7.X: The minimum count is 2 SMs and must be a multiple of 2. - - - - - - - -- On Compute Architecture 8.X: The minimum count is 4 SMs and must be a multiple of 2. - - - - - - - -- On Compute Architecture 9.0+: The minimum count is 8 SMs and must be a multiple of 8. - - - - - - - - - - - -In the future, flags can be provided to trade off functional and performance characteristics versus finer-grained SM partitions. - - - -Even if the green contexts have disjoint SM partitions, it is not guaranteed that the kernels launched in them will run concurrently or have forward progress guarantees. This is due to other resources (like HW connections, see CUDA_DEVICE_MAX_CONNECTIONS) that could cause a dependency. Additionally, in certain scenarios, it is possible for the workload to run on more SMs than were provisioned (but never less). The following are two scenarios which can exhibit this behavior: - -- On Volta+ MPS: When ``CUDA_MPS_ACTIVE_THREAD_PERCENTAGE`` is used, the set of SMs that are used for running kernels can be scaled up to the value of SMs used for the MPS client. - - - - - - - -- On Compute Architecture 9.x: When a module with dynamic parallelism (CDP) is loaded, all future kernels running under green contexts may use and share an additional set of 2 SMs. - -.. autoclass:: cuda.cuda.CUdevSmResource_st -.. autoclass:: cuda.cuda.CUdevResource_st -.. autoclass:: cuda.cuda.CUdevSmResource -.. autoclass:: cuda.cuda.CUdevResource -.. autoclass:: cuda.cuda.CUgreenCtxCreate_flags - - .. autoattribute:: cuda.cuda.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM - - - Required. Creates a default stream to use inside the green context - -.. autoclass:: cuda.cuda.CUdevSmResourceSplit_flags - - .. autoattribute:: cuda.cuda.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING - - - .. autoattribute:: cuda.cuda.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE - -.. autoclass:: cuda.cuda.CUdevResourceType - - .. autoattribute:: cuda.cuda.CUdevResourceType.CU_DEV_RESOURCE_TYPE_INVALID - - - .. autoattribute:: cuda.cuda.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM - - - Streaming multiprocessors related information - -.. autoclass:: cuda.cuda.CUdevResourceDesc -.. autoclass:: cuda.cuda.CUdevSmResource -.. autofunction:: cuda.cuda._CONCAT_OUTER -.. autofunction:: cuda.cuda.cuGreenCtxCreate -.. autofunction:: cuda.cuda.cuGreenCtxDestroy -..
autofunction:: cuda.cuda.cuCtxFromGreenCtx -.. autofunction:: cuda.cuda.cuDeviceGetDevResource -.. autofunction:: cuda.cuda.cuCtxGetDevResource -.. autofunction:: cuda.cuda.cuGreenCtxGetDevResource -.. autofunction:: cuda.cuda.cuDevSmResourceSplitByCount -.. autofunction:: cuda.cuda.cuDevResourceGenerateDesc -.. autofunction:: cuda.cuda.cuGreenCtxRecordEvent -.. autofunction:: cuda.cuda.cuGreenCtxWaitEvent -.. autofunction:: cuda.cuda.cuStreamGetGreenCtx -.. autofunction:: cuda.cuda.cuGreenCtxStreamCreate -.. autoattribute:: cuda.cuda.RESOURCE_ABI_VERSION -.. autoattribute:: cuda.cuda.RESOURCE_ABI_EXTERNAL_BYTES -.. autoattribute:: cuda.cuda._CONCAT_INNER -.. autoattribute:: cuda.cuda._CONCAT_OUTER - -EGL Interoperability --------------------- - -This section describes the EGL interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuGraphicsEGLRegisterImage -.. autofunction:: cuda.cuda.cuEGLStreamConsumerConnect -.. autofunction:: cuda.cuda.cuEGLStreamConsumerConnectWithFlags -.. autofunction:: cuda.cuda.cuEGLStreamConsumerDisconnect -.. autofunction:: cuda.cuda.cuEGLStreamConsumerAcquireFrame -.. autofunction:: cuda.cuda.cuEGLStreamConsumerReleaseFrame -.. autofunction:: cuda.cuda.cuEGLStreamProducerConnect -.. autofunction:: cuda.cuda.cuEGLStreamProducerDisconnect -.. autofunction:: cuda.cuda.cuEGLStreamProducerPresentFrame -.. autofunction:: cuda.cuda.cuEGLStreamProducerReturnFrame -.. autofunction:: cuda.cuda.cuGraphicsResourceGetMappedEglFrame -.. autofunction:: cuda.cuda.cuEventCreateFromEGLSync - -OpenGL Interoperability ----------------------- - -This section describes the OpenGL interoperability functions of the low-level CUDA driver application programming interface. Note that mapping of OpenGL resources is performed with the graphics-API-agnostic resource mapping interface described in Graphics Interoperability. - -.. autoclass:: cuda.cuda.CUGLDeviceList - - .. autoattribute:: cuda.cuda.CUGLDeviceList.CU_GL_DEVICE_LIST_ALL - - - The CUDA devices for all GPUs used by the current OpenGL context - - - .. autoattribute:: cuda.cuda.CUGLDeviceList.CU_GL_DEVICE_LIST_CURRENT_FRAME - - - The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame - - - .. autoattribute:: cuda.cuda.CUGLDeviceList.CU_GL_DEVICE_LIST_NEXT_FRAME - - - The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame - -.. autofunction:: cuda.cuda.cuGraphicsGLRegisterBuffer -.. autofunction:: cuda.cuda.cuGraphicsGLRegisterImage -.. autofunction:: cuda.cuda.cuGLGetDevices - -Profiler Control ---------------- - -This section describes the profiler control functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuProfilerStart -.. autofunction:: cuda.cuda.cuProfilerStop - -VDPAU Interoperability ---------------------- - -This section describes the VDPAU interoperability functions of the low-level CUDA driver application programming interface. - -.. autofunction:: cuda.cuda.cuVDPAUGetDevice -.. autofunction:: cuda.cuda.cuVDPAUCtxCreate -.. autofunction:: cuda.cuda.cuGraphicsVDPAURegisterVideoSurface -.. 
autofunction:: cuda.cuda.cuGraphicsVDPAURegisterOutputSurface diff --git a/docs_src/source/module/cudart.rst b/docs_src/source/module/cudart.rst deleted file mode 100644 index fc7e6676..00000000 --- a/docs_src/source/module/cudart.rst +++ /dev/null @@ -1,5274 +0,0 @@ ------- -cudart ------- - -Profiler Control ---------------- - -This section describes the profiler control functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaProfilerStart -.. autofunction:: cuda.cudart.cudaProfilerStop - -Device Management ----------------- - -This section describes the device management functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaDeviceReset -.. autofunction:: cuda.cudart.cudaDeviceSynchronize -.. autofunction:: cuda.cudart.cudaDeviceSetLimit -.. autofunction:: cuda.cudart.cudaDeviceGetLimit -.. autofunction:: cuda.cudart.cudaDeviceGetTexture1DLinearMaxWidth -.. autofunction:: cuda.cudart.cudaDeviceGetCacheConfig -.. autofunction:: cuda.cudart.cudaDeviceGetStreamPriorityRange -.. autofunction:: cuda.cudart.cudaDeviceSetCacheConfig -.. autofunction:: cuda.cudart.cudaDeviceGetByPCIBusId -.. autofunction:: cuda.cudart.cudaDeviceGetPCIBusId -.. autofunction:: cuda.cudart.cudaIpcGetEventHandle -.. autofunction:: cuda.cudart.cudaIpcOpenEventHandle -.. autofunction:: cuda.cudart.cudaIpcGetMemHandle -.. autofunction:: cuda.cudart.cudaIpcOpenMemHandle -.. autofunction:: cuda.cudart.cudaIpcCloseMemHandle -.. autofunction:: cuda.cudart.cudaDeviceFlushGPUDirectRDMAWrites -.. autofunction:: cuda.cudart.cudaDeviceRegisterAsyncNotification -.. autofunction:: cuda.cudart.cudaDeviceUnregisterAsyncNotification -.. autofunction:: cuda.cudart.cudaGetDeviceCount -.. autofunction:: cuda.cudart.cudaGetDeviceProperties -.. autofunction:: cuda.cudart.cudaDeviceGetAttribute -.. autofunction:: cuda.cudart.cudaDeviceGetDefaultMemPool -.. autofunction:: cuda.cudart.cudaDeviceSetMemPool -.. autofunction:: cuda.cudart.cudaDeviceGetMemPool -.. autofunction:: cuda.cudart.cudaDeviceGetNvSciSyncAttributes -.. autofunction:: cuda.cudart.cudaDeviceGetP2PAttribute -.. autofunction:: cuda.cudart.cudaChooseDevice -.. autofunction:: cuda.cudart.cudaInitDevice -.. autofunction:: cuda.cudart.cudaSetDevice -.. autofunction:: cuda.cudart.cudaGetDevice -.. autofunction:: cuda.cudart.cudaSetDeviceFlags -.. autofunction:: cuda.cudart.cudaGetDeviceFlags - -Error Handling -------------- - -This section describes the error handling functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGetLastError -.. autofunction:: cuda.cudart.cudaPeekAtLastError -.. autofunction:: cuda.cudart.cudaGetErrorName -.. autofunction:: cuda.cudart.cudaGetErrorString - -Stream Management ----------------- - -This section describes the stream management functions of the CUDA runtime application programming interface. - -.. autoclass:: cuda.cudart.cudaStreamCallback_t -.. autofunction:: cuda.cudart.cudaStreamCreate -.. autofunction:: cuda.cudart.cudaStreamCreateWithFlags -.. autofunction:: cuda.cudart.cudaStreamCreateWithPriority -.. autofunction:: cuda.cudart.cudaStreamGetPriority -.. autofunction:: cuda.cudart.cudaStreamGetFlags -.. autofunction:: cuda.cudart.cudaStreamGetId -.. autofunction:: cuda.cudart.cudaCtxResetPersistingL2Cache -.. autofunction:: cuda.cudart.cudaStreamCopyAttributes -.. autofunction:: cuda.cudart.cudaStreamGetAttribute -.. 
autofunction:: cuda.cudart.cudaStreamSetAttribute -.. autofunction:: cuda.cudart.cudaStreamDestroy -.. autofunction:: cuda.cudart.cudaStreamWaitEvent -.. autofunction:: cuda.cudart.cudaStreamAddCallback -.. autofunction:: cuda.cudart.cudaStreamSynchronize -.. autofunction:: cuda.cudart.cudaStreamQuery -.. autofunction:: cuda.cudart.cudaStreamAttachMemAsync -.. autofunction:: cuda.cudart.cudaStreamBeginCapture -.. autofunction:: cuda.cudart.cudaStreamBeginCaptureToGraph -.. autofunction:: cuda.cudart.cudaThreadExchangeStreamCaptureMode -.. autofunction:: cuda.cudart.cudaStreamEndCapture -.. autofunction:: cuda.cudart.cudaStreamIsCapturing -.. autofunction:: cuda.cudart.cudaStreamGetCaptureInfo -.. autofunction:: cuda.cudart.cudaStreamGetCaptureInfo_v3 -.. autofunction:: cuda.cudart.cudaStreamUpdateCaptureDependencies -.. autofunction:: cuda.cudart.cudaStreamUpdateCaptureDependencies_v2 - -Event Management ---------------- - -This section describes the event management functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaEventCreate -.. autofunction:: cuda.cudart.cudaEventCreateWithFlags -.. autofunction:: cuda.cudart.cudaEventRecord -.. autofunction:: cuda.cudart.cudaEventRecordWithFlags -.. autofunction:: cuda.cudart.cudaEventQuery -.. autofunction:: cuda.cudart.cudaEventSynchronize -.. autofunction:: cuda.cudart.cudaEventDestroy -.. autofunction:: cuda.cudart.cudaEventElapsedTime - -External Resource Interoperability ---------------------------------- - -This section describes the external resource interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaImportExternalMemory -.. autofunction:: cuda.cudart.cudaExternalMemoryGetMappedBuffer -.. autofunction:: cuda.cudart.cudaExternalMemoryGetMappedMipmappedArray -.. autofunction:: cuda.cudart.cudaDestroyExternalMemory -.. autofunction:: cuda.cudart.cudaImportExternalSemaphore -.. autofunction:: cuda.cudart.cudaSignalExternalSemaphoresAsync -.. autofunction:: cuda.cudart.cudaWaitExternalSemaphoresAsync -.. autofunction:: cuda.cudart.cudaDestroyExternalSemaphore - -Execution Control ----------------- - -This section describes the execution control functions of the CUDA runtime application programming interface. - - -Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. - -.. autofunction:: cuda.cudart.cudaFuncSetCacheConfig -.. autofunction:: cuda.cudart.cudaFuncGetAttributes -.. autofunction:: cuda.cudart.cudaFuncSetAttribute -.. autofunction:: cuda.cudart.cudaLaunchHostFunc - -Occupancy --------- - -This section describes the occupancy calculation functions of the CUDA runtime application programming interface. - - -Besides the occupancy calculator functions (cudaOccupancyMaxActiveBlocksPerMultiprocessor and cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags), there are also C++-only occupancy-based launch configuration functions documented in the C++ API Routines module. - - -See cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), and cudaOccupancyAvailableDynamicSMemPerBlock (C++ API). - -.. autofunction:: cuda.cudart.cudaOccupancyMaxActiveBlocksPerMultiprocessor -.. autofunction:: cuda.cudart.cudaOccupancyAvailableDynamicSMemPerBlock -.. 
autofunction:: cuda.cudart.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags - -Memory Management ----------------- - -This section describes the memory management functions of the CUDA runtime application programming interface. - - -Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. - -.. autofunction:: cuda.cudart.cudaMallocManaged -.. autofunction:: cuda.cudart.cudaMalloc -.. autofunction:: cuda.cudart.cudaMallocHost -.. autofunction:: cuda.cudart.cudaMallocPitch -.. autofunction:: cuda.cudart.cudaMallocArray -.. autofunction:: cuda.cudart.cudaFree -.. autofunction:: cuda.cudart.cudaFreeHost -.. autofunction:: cuda.cudart.cudaFreeArray -.. autofunction:: cuda.cudart.cudaFreeMipmappedArray -.. autofunction:: cuda.cudart.cudaHostAlloc -.. autofunction:: cuda.cudart.cudaHostRegister -.. autofunction:: cuda.cudart.cudaHostUnregister -.. autofunction:: cuda.cudart.cudaHostGetDevicePointer -.. autofunction:: cuda.cudart.cudaHostGetFlags -.. autofunction:: cuda.cudart.cudaMalloc3D -.. autofunction:: cuda.cudart.cudaMalloc3DArray -.. autofunction:: cuda.cudart.cudaMallocMipmappedArray -.. autofunction:: cuda.cudart.cudaGetMipmappedArrayLevel -.. autofunction:: cuda.cudart.cudaMemcpy3D -.. autofunction:: cuda.cudart.cudaMemcpy3DPeer -.. autofunction:: cuda.cudart.cudaMemcpy3DAsync -.. autofunction:: cuda.cudart.cudaMemcpy3DPeerAsync -.. autofunction:: cuda.cudart.cudaMemGetInfo -.. autofunction:: cuda.cudart.cudaArrayGetInfo -.. autofunction:: cuda.cudart.cudaArrayGetPlane -.. autofunction:: cuda.cudart.cudaArrayGetMemoryRequirements -.. autofunction:: cuda.cudart.cudaMipmappedArrayGetMemoryRequirements -.. autofunction:: cuda.cudart.cudaArrayGetSparseProperties -.. autofunction:: cuda.cudart.cudaMipmappedArrayGetSparseProperties -.. autofunction:: cuda.cudart.cudaMemcpy -.. autofunction:: cuda.cudart.cudaMemcpyPeer -.. autofunction:: cuda.cudart.cudaMemcpy2D -.. autofunction:: cuda.cudart.cudaMemcpy2DToArray -.. autofunction:: cuda.cudart.cudaMemcpy2DFromArray -.. autofunction:: cuda.cudart.cudaMemcpy2DArrayToArray -.. autofunction:: cuda.cudart.cudaMemcpyAsync -.. autofunction:: cuda.cudart.cudaMemcpyPeerAsync -.. autofunction:: cuda.cudart.cudaMemcpy2DAsync -.. autofunction:: cuda.cudart.cudaMemcpy2DToArrayAsync -.. autofunction:: cuda.cudart.cudaMemcpy2DFromArrayAsync -.. autofunction:: cuda.cudart.cudaMemset -.. autofunction:: cuda.cudart.cudaMemset2D -.. autofunction:: cuda.cudart.cudaMemset3D -.. autofunction:: cuda.cudart.cudaMemsetAsync -.. autofunction:: cuda.cudart.cudaMemset2DAsync -.. autofunction:: cuda.cudart.cudaMemset3DAsync -.. autofunction:: cuda.cudart.cudaMemPrefetchAsync -.. autofunction:: cuda.cudart.cudaMemPrefetchAsync_v2 -.. autofunction:: cuda.cudart.cudaMemAdvise -.. autofunction:: cuda.cudart.cudaMemAdvise_v2 -.. autofunction:: cuda.cudart.cudaMemRangeGetAttribute -.. autofunction:: cuda.cudart.cudaMemRangeGetAttributes -.. autofunction:: cuda.cudart.make_cudaPitchedPtr -.. autofunction:: cuda.cudart.make_cudaPos -.. autofunction:: cuda.cudart.make_cudaExtent - -Stream Ordered Memory Allocator ------------------------------- - -**Overview** - - -The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. Accessing the memory outside of this promised stream order is a use-before-allocation / use-after-free error, and the behavior is undefined.
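To make the ordering contract concrete, here is a minimal sketch using these bindings (illustrative only; it assumes a device where cudaDevAttrMemoryPoolsSupported is nonzero, and it collapses error handling to assertions):

.. code-block:: python

    # Stream-ordered allocation sketch: the allocation, the memset that uses
    # it, and the free are all enqueued on one stream, so every access falls
    # between the allocation and the free in stream order.
    from cuda import cudart

    err, stream = cudart.cudaStreamCreate()
    assert err == cudart.cudaError_t.cudaSuccess

    nbytes = 1 << 20
    err, dptr = cudart.cudaMallocAsync(nbytes, stream)
    assert err == cudart.cudaError_t.cudaSuccess

    err, = cudart.cudaMemsetAsync(dptr, 0, nbytes, stream)  # ordered access
    assert err == cudart.cudaError_t.cudaSuccess

    err, = cudart.cudaFreeAsync(dptr, stream)               # ordered free
    assert err == cudart.cudaError_t.cudaSuccess
    err, = cudart.cudaStreamSynchronize(stream)
    assert err == cudart.cudaError_t.cudaSuccess
    err, = cudart.cudaStreamDestroy(stream)
    assert err == cudart.cudaError_t.cudaSuccess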
- -The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. - - - - -**Supported Platforms** - - -Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cudaDeviceGetAttribute() with the device attribute cudaDevAttrMemoryPoolsSupported. - -.. autofunction:: cuda.cudart.cudaMallocAsync -.. autofunction:: cuda.cudart.cudaFreeAsync -.. autofunction:: cuda.cudart.cudaMemPoolTrimTo -.. autofunction:: cuda.cudart.cudaMemPoolSetAttribute -.. autofunction:: cuda.cudart.cudaMemPoolGetAttribute -.. autofunction:: cuda.cudart.cudaMemPoolSetAccess -.. autofunction:: cuda.cudart.cudaMemPoolGetAccess -.. autofunction:: cuda.cudart.cudaMemPoolCreate -.. autofunction:: cuda.cudart.cudaMemPoolDestroy -.. autofunction:: cuda.cudart.cudaMallocFromPoolAsync -.. autofunction:: cuda.cudart.cudaMemPoolExportToShareableHandle -.. autofunction:: cuda.cudart.cudaMemPoolImportFromShareableHandle -.. autofunction:: cuda.cudart.cudaMemPoolExportPointer -.. autofunction:: cuda.cudart.cudaMemPoolImportPointer - -Unified Addressing ------------------ - -This section describes the unified addressing functions of the CUDA runtime application programming interface. - - - - -**Overview** - - -CUDA devices can share a unified address space with the host. - - For these devices, there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). - - - - -**Supported Platforms** - - -Whether or not a device supports unified addressing may be queried by calling cudaGetDeviceProperties() with the device property cudaDeviceProp::unifiedAddressing. - -Unified addressing is automatically enabled in 64-bit processes. - - - - -**Looking Up Information from Pointer Values** - - -It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cudaPointerGetAttributes(). - -Since pointers are unique, it is not necessary to specify information about the pointers specified to cudaMemcpy() and other copy functions. - - The copy direction cudaMemcpyDefault may be used to specify that the CUDA runtime should infer the location of the pointer from its value. - - - - -**Automatic Mapping of Host Allocated Host Memory** - - -All host memory allocated through all devices using cudaMallocHost() and cudaHostAlloc() is always directly accessible from all devices that support unified addressing. This is the case regardless of whether or not the flags cudaHostAllocPortable and cudaHostAllocMapped are specified. - -The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host. It is not necessary to call cudaHostGetDevicePointer() to get the device pointer for these allocations.
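The sketch below illustrates the lookup and automatic-mapping behavior just described (illustrative only; it assumes a device reporting unifiedAddressing = 1, and the field names follow the cudaPointerAttributes class documented in this module):

.. code-block:: python

    # Unified-addressing sketch: host memory from cudaMallocHost() is
    # directly accessible on the device, and cudaPointerGetAttributes()
    # reports the same value for the host and device pointers.
    from cuda import cudart

    err, hptr = cudart.cudaMallocHost(4096)
    assert err == cudart.cudaError_t.cudaSuccess

    err, attrs = cudart.cudaPointerGetAttributes(hptr)
    assert err == cudart.cudaError_t.cudaSuccess
    # No cudaHostGetDevicePointer() call is needed for this allocation:
    assert int(attrs.devicePointer) == int(attrs.hostPointer) == int(hptr)

    err, = cudart.cudaFreeHost(hptr)
    assert err == cudart.cudaError_t.cudaSuccess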
- - - -Note that this is not the case for memory allocated using the flag cudaHostAllocWriteCombined, as discussed below. - - - - -**Direct Access of Peer Memory** - - -Upon enabling direct access from a device that supports unified addressing to another peer device that supports unified addressing using cudaDeviceEnablePeerAccess(), all memory allocated in the peer device using cudaMalloc() and cudaMallocPitch() will immediately be accessible by the current device. The device pointer value through which any peer's memory may be accessed in the current device is the same pointer value through which that memory may be accessed from the peer device. - - - - -**Exceptions, Disjoint Addressing** - - -Not all memory may be accessed on devices through the same pointer value through which it is accessed on the host. These exceptions are host memory registered using cudaHostRegister() and host memory allocated using the flag cudaHostAllocWriteCombined. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all devices that support unified addressing. - - -This device address may be queried using cudaHostGetDevicePointer() when a device using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory in cudaMemcpy() and similar functions using the cudaMemcpyDefault memory direction. - -.. autofunction:: cuda.cudart.cudaPointerGetAttributes - -Peer Device Memory Access ------------------------- - -This section describes the peer device memory access functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaDeviceCanAccessPeer -.. autofunction:: cuda.cudart.cudaDeviceEnablePeerAccess -.. autofunction:: cuda.cudart.cudaDeviceDisablePeerAccess - -OpenGL Interoperability ----------------------- - -This section describes the OpenGL interoperability functions of the CUDA runtime application programming interface. Note that mapping of OpenGL resources is performed with the graphics-API-agnostic resource mapping interface described in Graphics Interoperability. - -.. autoclass:: cuda.cudart.cudaGLDeviceList - - .. autoattribute:: cuda.cudart.cudaGLDeviceList.cudaGLDeviceListAll - - - The CUDA devices for all GPUs used by the current OpenGL context - - - .. autoattribute:: cuda.cudart.cudaGLDeviceList.cudaGLDeviceListCurrentFrame - - - The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame - - - .. autoattribute:: cuda.cudart.cudaGLDeviceList.cudaGLDeviceListNextFrame - - - The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame - -.. autofunction:: cuda.cudart.cudaGLGetDevices -.. autofunction:: cuda.cudart.cudaGraphicsGLRegisterImage -.. autofunction:: cuda.cudart.cudaGraphicsGLRegisterBuffer - -Direct3D 9 Interoperability --------------------------- - - - - -Direct3D 10 Interoperability ---------------------------- - - - - -Direct3D 11 Interoperability ---------------------------- - - - - -VDPAU Interoperability ---------------------- - -This section describes the VDPAU interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaVDPAUGetDevice -.. autofunction:: cuda.cudart.cudaVDPAUSetVDPAUDevice -.. autofunction:: cuda.cudart.cudaGraphicsVDPAURegisterVideoSurface -.. 
autofunction:: cuda.cudart.cudaGraphicsVDPAURegisterOutputSurface - -EGL Interoperability -------------------- - -This section describes the EGL interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGraphicsEGLRegisterImage -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerConnect -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerConnectWithFlags -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerDisconnect -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerAcquireFrame -.. autofunction:: cuda.cudart.cudaEGLStreamConsumerReleaseFrame -.. autofunction:: cuda.cudart.cudaEGLStreamProducerConnect -.. autofunction:: cuda.cudart.cudaEGLStreamProducerDisconnect -.. autofunction:: cuda.cudart.cudaEGLStreamProducerPresentFrame -.. autofunction:: cuda.cudart.cudaEGLStreamProducerReturnFrame -.. autofunction:: cuda.cudart.cudaGraphicsResourceGetMappedEglFrame -.. autofunction:: cuda.cudart.cudaEventCreateFromEGLSync - -Graphics Interoperability ------------------------- - -This section describes the graphics interoperability functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGraphicsUnregisterResource -.. autofunction:: cuda.cudart.cudaGraphicsResourceSetMapFlags -.. autofunction:: cuda.cudart.cudaGraphicsMapResources -.. autofunction:: cuda.cudart.cudaGraphicsUnmapResources -.. autofunction:: cuda.cudart.cudaGraphicsResourceGetMappedPointer -.. autofunction:: cuda.cudart.cudaGraphicsSubResourceGetMappedArray -.. autofunction:: cuda.cudart.cudaGraphicsResourceGetMappedMipmappedArray - -Texture Object Management ------------------------- - -This section describes the low level texture object management functions of the CUDA runtime application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cudart.cudaGetChannelDesc -.. autofunction:: cuda.cudart.cudaCreateChannelDesc -.. autofunction:: cuda.cudart.cudaCreateTextureObject -.. autofunction:: cuda.cudart.cudaDestroyTextureObject -.. autofunction:: cuda.cudart.cudaGetTextureObjectResourceDesc -.. autofunction:: cuda.cudart.cudaGetTextureObjectTextureDesc -.. autofunction:: cuda.cudart.cudaGetTextureObjectResourceViewDesc - -Surface Object Management ------------------------- - -This section describes the low level surface object management functions of the CUDA runtime application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. - -.. autofunction:: cuda.cudart.cudaCreateSurfaceObject -.. autofunction:: cuda.cudart.cudaDestroySurfaceObject -.. autofunction:: cuda.cudart.cudaGetSurfaceObjectResourceDesc - -Version Management ------------------ - - -.. autofunction:: cuda.cudart.cudaDriverGetVersion -.. autofunction:: cuda.cudart.cudaRuntimeGetVersion -.. autofunction:: cuda.cudart.getLocalRuntimeVersion - -Graph Management ---------------- - -This section describes the graph management functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGraphCreate -.. autofunction:: cuda.cudart.cudaGraphAddKernelNode -.. autofunction:: cuda.cudart.cudaGraphKernelNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphKernelNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphKernelNodeCopyAttributes -.. autofunction:: cuda.cudart.cudaGraphKernelNodeGetAttribute -.. autofunction:: cuda.cudart.cudaGraphKernelNodeSetAttribute -.. 
autofunction:: cuda.cudart.cudaGraphAddMemcpyNode -.. autofunction:: cuda.cudart.cudaGraphAddMemcpyNode1D -.. autofunction:: cuda.cudart.cudaGraphMemcpyNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphMemcpyNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphMemcpyNodeSetParams1D -.. autofunction:: cuda.cudart.cudaGraphAddMemsetNode -.. autofunction:: cuda.cudart.cudaGraphMemsetNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphMemsetNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddHostNode -.. autofunction:: cuda.cudart.cudaGraphHostNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphHostNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddChildGraphNode -.. autofunction:: cuda.cudart.cudaGraphChildGraphNodeGetGraph -.. autofunction:: cuda.cudart.cudaGraphAddEmptyNode -.. autofunction:: cuda.cudart.cudaGraphAddEventRecordNode -.. autofunction:: cuda.cudart.cudaGraphEventRecordNodeGetEvent -.. autofunction:: cuda.cudart.cudaGraphEventRecordNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphAddEventWaitNode -.. autofunction:: cuda.cudart.cudaGraphEventWaitNodeGetEvent -.. autofunction:: cuda.cudart.cudaGraphEventWaitNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphAddExternalSemaphoresSignalNode -.. autofunction:: cuda.cudart.cudaGraphExternalSemaphoresSignalNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddExternalSemaphoresWaitNode -.. autofunction:: cuda.cudart.cudaGraphExternalSemaphoresWaitNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphAddMemAllocNode -.. autofunction:: cuda.cudart.cudaGraphMemAllocNodeGetParams -.. autofunction:: cuda.cudart.cudaGraphAddMemFreeNode -.. autofunction:: cuda.cudart.cudaGraphMemFreeNodeGetParams -.. autofunction:: cuda.cudart.cudaDeviceGraphMemTrim -.. autofunction:: cuda.cudart.cudaDeviceGetGraphMemAttribute -.. autofunction:: cuda.cudart.cudaDeviceSetGraphMemAttribute -.. autofunction:: cuda.cudart.cudaGraphClone -.. autofunction:: cuda.cudart.cudaGraphNodeFindInClone -.. autofunction:: cuda.cudart.cudaGraphNodeGetType -.. autofunction:: cuda.cudart.cudaGraphGetNodes -.. autofunction:: cuda.cudart.cudaGraphGetRootNodes -.. autofunction:: cuda.cudart.cudaGraphGetEdges -.. autofunction:: cuda.cudart.cudaGraphGetEdges_v2 -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependencies -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependencies_v2 -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependentNodes -.. autofunction:: cuda.cudart.cudaGraphNodeGetDependentNodes_v2 -.. autofunction:: cuda.cudart.cudaGraphAddDependencies -.. autofunction:: cuda.cudart.cudaGraphAddDependencies_v2 -.. autofunction:: cuda.cudart.cudaGraphRemoveDependencies -.. autofunction:: cuda.cudart.cudaGraphRemoveDependencies_v2 -.. autofunction:: cuda.cudart.cudaGraphDestroyNode -.. autofunction:: cuda.cudart.cudaGraphInstantiate -.. autofunction:: cuda.cudart.cudaGraphInstantiateWithFlags -.. autofunction:: cuda.cudart.cudaGraphInstantiateWithParams -.. autofunction:: cuda.cudart.cudaGraphExecGetFlags -.. autofunction:: cuda.cudart.cudaGraphExecKernelNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecMemcpyNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecMemcpyNodeSetParams1D -.. autofunction:: cuda.cudart.cudaGraphExecMemsetNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecHostNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecChildGraphNodeSetParams -.. 
autofunction:: cuda.cudart.cudaGraphExecEventRecordNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphExecEventWaitNodeSetEvent -.. autofunction:: cuda.cudart.cudaGraphExecExternalSemaphoresSignalNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecExternalSemaphoresWaitNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphNodeSetEnabled -.. autofunction:: cuda.cudart.cudaGraphNodeGetEnabled -.. autofunction:: cuda.cudart.cudaGraphExecUpdate -.. autofunction:: cuda.cudart.cudaGraphUpload -.. autofunction:: cuda.cudart.cudaGraphLaunch -.. autofunction:: cuda.cudart.cudaGraphExecDestroy -.. autofunction:: cuda.cudart.cudaGraphDestroy -.. autofunction:: cuda.cudart.cudaGraphDebugDotPrint -.. autofunction:: cuda.cudart.cudaUserObjectCreate -.. autofunction:: cuda.cudart.cudaUserObjectRetain -.. autofunction:: cuda.cudart.cudaUserObjectRelease -.. autofunction:: cuda.cudart.cudaGraphRetainUserObject -.. autofunction:: cuda.cudart.cudaGraphReleaseUserObject -.. autofunction:: cuda.cudart.cudaGraphAddNode -.. autofunction:: cuda.cudart.cudaGraphAddNode_v2 -.. autofunction:: cuda.cudart.cudaGraphNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphExecNodeSetParams -.. autofunction:: cuda.cudart.cudaGraphConditionalHandleCreate - -Driver Entry Point Access ------------------------- - -This section describes the driver entry point access functions of the CUDA runtime application programming interface. - -.. autofunction:: cuda.cudart.cudaGetDriverEntryPoint -.. autofunction:: cuda.cudart.cudaGetDriverEntryPointByVersion - -C++ API Routines ---------------- -C++-style interface built on top of the CUDA runtime API. - - -This section describes the C++ high level API functions of the CUDA runtime application programming interface. To use these functions, your application needs to be compiled with the ``nvcc`` compiler. - - -Interactions with the CUDA Driver API ------------------------------------- - -This section describes the interactions between the CUDA Driver API and the CUDA Runtime API. - - - - -**Primary Contexts** - - -There exists a one-to-one relationship between CUDA devices in the CUDA Runtime API and ::CUcontext s in the CUDA Driver API within a process. The specific context which the CUDA Runtime API uses for a device is called the device's primary context. From the perspective of the CUDA Runtime API, a device and its primary context are synonymous. - - - - -**Initialization and Tear-Down** - - -CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current to the calling host thread. - -The function cudaInitDevice() ensures that the primary context is initialized for the requested device but does not make it current to the calling thread. - -The function cudaSetDevice() initializes the primary context for the specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent(). - -The CUDA Runtime API will automatically initialize the primary context for a device at the first CUDA Runtime API call which requires an active context. If no ::CUcontext is current to the calling thread when a CUDA Runtime API call which requires an active context is made, then the primary context for a device will be selected, made current to the calling thread, and initialized.
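A minimal sketch of this implicit initialization follows (illustrative only, using the modules documented here; it assumes the binding's pointer-like wrapper classes expose their handle via int(), and it collapses error handling to assertions):

.. code-block:: python

    # Primary-context sketch: after cudaSetDevice(0), the driver API sees a
    # non-NULL current context on this thread, namely device 0's primary
    # context.
    from cuda import cuda, cudart

    err, = cudart.cudaSetDevice(0)     # initializes and makes current
    assert err == cudart.cudaError_t.cudaSuccess

    err, ctx = cuda.cuCtxGetCurrent()  # driver-API view of the same thread
    assert err == cuda.CUresult.CUDA_SUCCESS
    assert int(ctx) != 0               # the primary context is now current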
- -The context which the CUDA Runtime API initializes will be initialized using the parameters specified by the CUDA Runtime API functions cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), ::cudaD3D10SetDirect3DDevice(), ::cudaD3D11SetDirect3DDevice(), cudaGLSetGLDevice(), and cudaVDPAUSetVDPAUDevice(). Note that these functions will fail with cudaErrorSetOnActiveProcess if they are called when the primary context for the specified device has already been initialized (or, in the case of cudaSetDeviceFlags(), if the current device has already been initialized). - -Primary contexts will remain active until they are explicitly deinitialized using cudaDeviceReset(). The function cudaDeviceReset() will deinitialize the primary context for the calling thread's current device immediately. The context will remain current to all of the threads that it was current to. The next CUDA Runtime API call on any thread which requires an active context will trigger the reinitialization of that device's primary context. - -Note that primary contexts are shared resources. It is recommended that the primary context not be reset except just before exit or to recover from an unspecified launch failure. - - - - -**Context Interoperability** - - -Note that the use of multiple ::CUcontext s per device within a single process will substantially degrade performance and is strongly discouraged. Instead, it is highly recommended that the implicit one-to-one device-to-context mapping for the process provided by the CUDA Runtime API be used. - -If a non-primary ::CUcontext created by the CUDA Driver API is current to a thread, then the CUDA Runtime API calls to that thread will operate on that ::CUcontext, with some exceptions listed below. Interoperability between data types is discussed in the following sections. - -The function cudaPointerGetAttributes() will return the error cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a non-primary context. The function cudaDeviceEnablePeerAccess() and the rest of the peer access API may not be called when a non-primary ::CUcontext is current. - - To use the pointer query and peer access APIs with a context created using the CUDA Driver API, it is necessary that the CUDA Driver API be used to access these features. - -All CUDA Runtime API state (e.g., global variables' addresses and values) travels with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one thread to another then all CUDA Runtime API state will move to that thread as well. - -Please note that attaching to legacy contexts (those with a version of 3010 as returned by ::cuCtxGetApiVersion()) is not possible. The CUDA Runtime will return cudaErrorIncompatibleDriverContext in such cases. - - - - -**Interactions between CUstream and cudaStream_t** - - -The types ::CUstream and cudaStream_t are identical and may be used interchangeably. - - - - -**Interactions between CUevent and cudaEvent_t** - - -The types ::CUevent and cudaEvent_t are identical and may be used interchangeably. - - - - -**Interactions between CUarray and cudaArray_t** - - -The types ::CUarray and struct ::cudaArray * represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *.
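Both directions of this cast can be written as follows with these Python bindings (illustrative only; it assumes the wrapper classes accept the underlying handle value in their constructors and support int(), in the style exercised by the test_interoperability tests shipped with these bindings):

.. code-block:: python

    # Cast sketch: reinterpret a runtime cudaArray_t as a driver-API CUarray
    # (and back) by round-tripping the underlying handle value through int().
    from cuda import cuda, cudart

    desc = cudart.cudaChannelFormatDesc()
    desc.x = 32
    desc.f = cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat
    err, rt_array = cudart.cudaMallocArray(desc, 64, 64, 0)
    assert err == cudart.cudaError_t.cudaSuccess

    drv_array = cuda.CUarray(int(rt_array))        # runtime handle -> CUarray
    rt_again = cudart.cudaArray_t(int(drv_array))  # CUarray -> runtime handle

    err, = cudart.cudaFreeArray(rt_array)
    assert err == cudart.cudaError_t.cudaSuccess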
- -In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray. - - - - -**Interactions between CUgraphicsResource and cudaGraphicsResource_t** - - -The types ::CUgraphicsResource and cudaGraphicsResource_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource to a cudaGraphicsResource_t. - -In order to use a cudaGraphicsResource_t in a CUDA Driver API function which takes a ::CUgraphicsResource, it is necessary to explicitly cast the cudaGraphicsResource_t to a ::CUgraphicsResource. - - - - -**Interactions between CUtexObject and cudaTextureObject_t** - - -The types ::CUtexObject and cudaTextureObject_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUtexObject in a CUDA Runtime API function which takes a cudaTextureObject_t, it is necessary to explicitly cast the ::CUtexObject to a cudaTextureObject_t. - -In order to use a cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, it is necessary to explicitly cast the cudaTextureObject_t to a ::CUtexObject. - - - - -**Interactions between CUsurfObject and cudaSurfaceObject_t** - - -The types ::CUsurfObject and cudaSurfaceObject_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a cudaSurfaceObject_t, it is necessary to explicitly cast the ::CUsurfObject to a cudaSurfaceObject_t. - -In order to use a cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, it is necessary to explicitly cast the cudaSurfaceObject_t to a ::CUsurfObject. - - - - -**Interactions between CUfunction and cudaFunction_t** - - -The types ::CUfunction and cudaFunction_t represent the same data type and may be used interchangeably by casting the two types between each other. - -In order to use a cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, it is necessary to explicitly cast the cudaFunction_t to a ::CUfunction. - -.. autofunction:: cuda.cudart.cudaGetKernel - -Data types used by CUDA Runtime ------------------------------- - - -.. autoclass:: cuda.cudart.cudaEglPlaneDesc_st -.. autoclass:: cuda.cudart.cudaEglFrame_st -.. autoclass:: cuda.cudart.cudaChannelFormatDesc -.. autoclass:: cuda.cudart.cudaArraySparseProperties -.. autoclass:: cuda.cudart.cudaArrayMemoryRequirements -.. autoclass:: cuda.cudart.cudaPitchedPtr -.. autoclass:: cuda.cudart.cudaExtent -.. autoclass:: cuda.cudart.cudaPos -.. autoclass:: cuda.cudart.cudaMemcpy3DParms -.. autoclass:: cuda.cudart.cudaMemcpyNodeParams -.. autoclass:: cuda.cudart.cudaMemcpy3DPeerParms -.. autoclass:: cuda.cudart.cudaMemsetParams -.. autoclass:: cuda.cudart.cudaMemsetParamsV2 -.. autoclass:: cuda.cudart.cudaAccessPolicyWindow -.. autoclass:: cuda.cudart.cudaHostNodeParams -.. autoclass:: cuda.cudart.cudaHostNodeParamsV2 -.. autoclass:: cuda.cudart.cudaResourceDesc -.. autoclass:: cuda.cudart.cudaResourceViewDesc -.. autoclass:: cuda.cudart.cudaPointerAttributes -.. autoclass:: cuda.cudart.cudaFuncAttributes -.. autoclass:: cuda.cudart.cudaMemLocation -.. 
autoclass:: cuda.cudart.cudaMemAccessDesc -.. autoclass:: cuda.cudart.cudaMemPoolProps -.. autoclass:: cuda.cudart.cudaMemPoolPtrExportData -.. autoclass:: cuda.cudart.cudaMemAllocNodeParams -.. autoclass:: cuda.cudart.cudaMemAllocNodeParamsV2 -.. autoclass:: cuda.cudart.cudaMemFreeNodeParams -.. autoclass:: cuda.cudart.CUuuid_st -.. autoclass:: cuda.cudart.cudaDeviceProp -.. autoclass:: cuda.cudart.cudaIpcEventHandle_st -.. autoclass:: cuda.cudart.cudaIpcMemHandle_st -.. autoclass:: cuda.cudart.cudaMemFabricHandle_st -.. autoclass:: cuda.cudart.cudaExternalMemoryHandleDesc -.. autoclass:: cuda.cudart.cudaExternalMemoryBufferDesc -.. autoclass:: cuda.cudart.cudaExternalMemoryMipmappedArrayDesc -.. autoclass:: cuda.cudart.cudaExternalSemaphoreHandleDesc -.. autoclass:: cuda.cudart.cudaExternalSemaphoreSignalParams -.. autoclass:: cuda.cudart.cudaExternalSemaphoreWaitParams -.. autoclass:: cuda.cudart.cudaKernelNodeParams -.. autoclass:: cuda.cudart.cudaKernelNodeParamsV2 -.. autoclass:: cuda.cudart.cudaExternalSemaphoreSignalNodeParams -.. autoclass:: cuda.cudart.cudaExternalSemaphoreSignalNodeParamsV2 -.. autoclass:: cuda.cudart.cudaExternalSemaphoreWaitNodeParams -.. autoclass:: cuda.cudart.cudaExternalSemaphoreWaitNodeParamsV2 -.. autoclass:: cuda.cudart.cudaConditionalNodeParams -.. autoclass:: cuda.cudart.cudaChildGraphNodeParams -.. autoclass:: cuda.cudart.cudaEventRecordNodeParams -.. autoclass:: cuda.cudart.cudaEventWaitNodeParams -.. autoclass:: cuda.cudart.cudaGraphNodeParams -.. autoclass:: cuda.cudart.cudaGraphEdgeData_st -.. autoclass:: cuda.cudart.cudaGraphInstantiateParams_st -.. autoclass:: cuda.cudart.cudaGraphExecUpdateResultInfo_st -.. autoclass:: cuda.cudart.cudaGraphKernelNodeUpdate -.. autoclass:: cuda.cudart.cudaLaunchMemSyncDomainMap_st -.. autoclass:: cuda.cudart.cudaLaunchAttributeValue -.. autoclass:: cuda.cudart.cudaLaunchAttribute_st -.. autoclass:: cuda.cudart.cudaAsyncNotificationInfo -.. autoclass:: cuda.cudart.cudaTextureDesc -.. autoclass:: cuda.cudart.cudaEglFrameType - - .. autoattribute:: cuda.cudart.cudaEglFrameType.cudaEglFrameTypeArray - - - Frame type CUDA array - - - .. autoattribute:: cuda.cudart.cudaEglFrameType.cudaEglFrameTypePitch - - - Frame type CUDA pointer - -.. autoclass:: cuda.cudart.cudaEglResourceLocationFlags - - .. autoattribute:: cuda.cudart.cudaEglResourceLocationFlags.cudaEglResourceLocationSysmem - - - Resource location sysmem - - - .. autoattribute:: cuda.cudart.cudaEglResourceLocationFlags.cudaEglResourceLocationVidmem - - - Resource location vidmem - -.. autoclass:: cuda.cudart.cudaEglColorFormat - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422Planar - - - Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar - - - Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatARGB - - - R/G/B/A four channels in one surface with BGRA byte ordering. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatRGBA - - - R/G/B/A four channels in one surface with ABGR byte ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatL - - - single luminance channel in one surface. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatR - - - single color channel in one surface. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444Planar - - - Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar - - - Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUYV422 - - - Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatUYVY422 - - - Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatABGR - - - R/G/B/A four channels in one surface with RGBA byte ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBGRA - - - R/G/B/A four channels in one surface with ARGB byte ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatA - - - Alpha color format - one channel in one surface. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatRG - - - R/G color format - two channels in one surface with GR byte ordering - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatAYUV - - - Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar - - - Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar - - - Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar - - - Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatVYUY_ER - - - Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatUYVY_ER - - - Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUYV_ER - - - Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVYU_ER - - - Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUVA_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatAYUV_ER - - - Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444Planar_ER - - - Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422Planar_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_ER - - - Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_ER - - - Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444Planar_ER - - - Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422Planar_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_ER - - - Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_ER - - - Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerRGGB - - - Bayer format - one channel in one surface with interleaved RGGB ordering. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerBGGR - - - Bayer format - one channel in one surface with interleaved BGGR ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerGRBG - - - Bayer format - one channel in one surface with interleaved GRBG ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerGBRG - - - Bayer format - one channel in one surface with interleaved GBRG ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10RGGB - - - Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10BGGR - - - Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10GRBG - - - Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10GBRG - - - Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12RGGB - - - Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12BGGR - - - Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12GRBG - - - Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12GBRG - - - Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14RGGB - - - Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14BGGR - - - Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14GRBG - - - Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer14GBRG - - - Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20RGGB - - - Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20BGGR - - - Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20GRBG - - - Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer20GBRG - - - Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU444Planar - - - Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU422Planar - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspRGGB - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspBGGR - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspGRBG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerIspGBRG - - - Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerBCCR - - - Bayer format - one channel in one surface with interleaved BCCR ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerRCCB - - - Bayer format - one channel in one surface with interleaved RCCB ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerCRBC - - - Bayer format - one channel in one surface with interleaved CRBC ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayerCBRC - - - Bayer format - one channel in one surface with interleaved CBRC ordering. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer10CCCC - - - Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12BCCR - - - Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12RCCB - - - Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12CRBC - - - Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12CBRC - - - Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatBayer12CCCC - - - Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY - - - Color format for single Y plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_2020 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_2020 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_2020 - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_2020 - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_709 - - - Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_709 - - - Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_709 - - - Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_709 - - - Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_2020 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_2020 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_709 - - - Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY_ER - - - Extended Range Color format for single Y plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY_709_ER - - - Extended Range Color format for single Y plane. - - - .. 
autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10_709_ER - - - Extended Range Color format for single Y10 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12_709_ER - - - Extended Range Color format for single Y12 plane. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYUVA - - - Y, U, V, A four channels in one surface, interleaved as AVUY. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatYVYU - - - Y, U, V in one surface, interleaved as YVYU in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatVYUY - - - Y, U, V in one surface, interleaved as VYUY in one channel. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER - - - Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - - - .. autoattribute:: cuda.cudart.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER - - - Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. - -.. autoclass:: cuda.cudart.cudaError_t - - .. autoattribute:: cuda.cudart.cudaError_t.cudaSuccess - - - The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`). - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidValue - - - This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMemoryAllocation - - - The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. - - - .. 
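Every call in ``cuda.cudart`` returns its :py:obj:`~.cudaError_t` status as the first element of a result tuple, so a small checking helper is the usual way to consume the codes documented below. A minimal sketch of that pattern; the helper name ``check_cudart`` is illustrative, not part of the bindings:

.. code-block:: python

    from cuda import cudart

    def check_cudart(result):
        # result[0] is the cudaError_t status; any remaining elements are outputs.
        if result[0] != cudart.cudaError_t.cudaSuccess:
            raise RuntimeError(f"CUDA runtime error: {result[0]}")
        return result[1] if len(result) == 2 else result[1:]

    # Example: cudaGetDeviceCount() returns (err, count).
    count = check_cudart(cudart.cudaGetDeviceCount())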
autoattribute:: cuda.cudart.cudaError_t.cudaErrorInitializationError - - - The API call failed because the CUDA driver and runtime could not be initialized. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCudartUnloading - - - This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shutdown, at a point in time after the CUDA driver has been unloaded. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerDisabled - - - This indicates that the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like visual profiler. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerNotInitialized - - - [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerAlreadyStarted - - - [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorProfilerAlreadyStopped - - - [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidConfiguration - - - This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See :py:obj:`~.cudaDeviceProp` for more device limitations. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidPitchValue - - - This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidSymbol - - - This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidHostPointer - - - This indicates that at least one host pointer passed to the API call is not a valid host pointer. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidDevicePointer - - - This indicates that at least one device pointer passed to the API call is not a valid device pointer. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidTexture - - - This indicates that the texture passed to the API call is not a valid texture. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidTextureBinding - - - This indicates that the texture binding is not valid. This occurs if you call :py:obj:`~.cudaGetTextureAlignmentOffset()` with an unbound texture. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidChannelDescriptor - - - This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by :py:obj:`~.cudaChannelFormatKind`, or if one of the dimensions is invalid. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidMemcpyDirection - - - This indicates that the direction of the memcpy passed to the API call is not one of the types specified by :py:obj:`~.cudaMemcpyKind`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAddressOfConstant - - - This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorTextureFetchFailed - - - This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. [Deprecated] - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorTextureNotBound - - - This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSynchronizationError - - - This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidFilterSetting - - - This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidNormSetting - - - This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMixedDeviceExecution - - - Mixing of device and device emulation code was not allowed. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotYetImplemented - - - This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMemoryValueTooLarge - - - This indicated that an emulated device pointer exceeded the 32-bit address range. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStubLibrary - - - This indicates that the CUDA driver that the application has loaded is a stub library. Applications running against the stub rather than a real driver will see CUDA API calls return this error. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInsufficientDriver - - - This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCallRequiresNewerDriver - - - This indicates that the API call requires a newer CUDA driver than the one currently installed. Users should install an updated NVIDIA CUDA driver to allow the API call to succeed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidSurface - - - This indicates that the surface passed to the API call is not a valid surface. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDuplicateVariableName - - - This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDuplicateTextureName - - - This indicates that multiple textures (across separate CUDA source files in the application) share the same string name. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDuplicateSurfaceName - - - This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDevicesUnavailable - - - This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of :py:obj:`~.cudaComputeModeProhibited`, :py:obj:`~.cudaComputeModeExclusiveProcess`, or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorIncompatibleDriverContext - - - This indicates that the current context is not compatible with this CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see :py:obj:`~.Interactions` with the CUDA Driver API for more information. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMissingConfiguration - - - The device function being invoked (usually via :py:obj:`~.cudaLaunchKernel()`) was not previously configured via the :py:obj:`~.cudaConfigureCall()` function. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPriorLaunchFailure - - - This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. [Deprecated] - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchMaxDepthExceeded - - - This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchFileScopedTex - - - This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchFileScopedSurf - - - This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSyncDepthExceeded - - - This error indicates that a call to :py:obj:`~.cudaDeviceSynchronize` made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or user-specified device limit :py:obj:`~.cudaLimitDevRuntimeSyncDepth`. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which :py:obj:`~.cudaDeviceSynchronize` will be called must be specified with the :py:obj:`~.cudaLimitDevRuntimeSyncDepth` limit to the :py:obj:`~.cudaDeviceSetLimit` API before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. Note that :py:obj:`~.cudaDeviceSynchronize` made from device runtime is only supported on devices of compute capability < 9.0. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchPendingCountExceeded - - - This error indicates that a device runtime grid launch failed because the launch would exceed the limit :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount`. For this launch to proceed successfully, :py:obj:`~.cudaDeviceSetLimit` must be called to set the :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount` to be higher than the upper bound of outstanding launches that can be issued to the device runtime.
Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidDeviceFunction - - - The requested device function does not exist or is not compiled for the proper device architecture. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNoDevice - - - This indicates that no CUDA-capable devices were detected by the installed CUDA driver. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidDevice - - - This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDeviceNotLicensed - - - This indicates that the device doesn't have a valid Grid License. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSoftwareValidityNotEstablished - - - By default, the CUDA runtime may perform a minimal set of self-tests, as well as CUDA driver tests, to establish the validity of both. Introduced in CUDA 11.2, this error return indicates that at least one of these tests has failed and the validity of either the runtime or the driver could not be established. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStartupFailure - - - This indicates an internal startup failure in the CUDA runtime. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidKernelImage - - - This indicates that the device kernel image is invalid. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDeviceUninitialized - - - This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMapBufferObjectFailed - - - This indicates that the buffer object could not be mapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnmapBufferObjectFailed - - - This indicates that the buffer object could not be unmapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorArrayIsMapped - - - This indicates that the specified array is currently mapped and thus cannot be destroyed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAlreadyMapped - - - This indicates that the resource is already mapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNoKernelImageForDevice - - - This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAlreadyAcquired - - - This indicates that a resource has already been acquired. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotMapped - - - This indicates that a resource is not mapped. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotMappedAsArray - - - This indicates that a mapped resource is not available for access as an array. - - - .. 
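The device-runtime limits referenced above are raised through :py:obj:`~.cudaDeviceSetLimit` before the host-side launch. A sketch using the Python bindings (the count of 4096 is an arbitrary illustrative value):

.. code-block:: python

    from cuda import cudart

    # Allow more outstanding device-side launches before
    # cudaErrorLaunchPendingCountExceeded is returned.
    err, = cudart.cudaDeviceSetLimit(
        cudart.cudaLimit.cudaLimitDevRuntimePendingLaunchCount, 4096)

    # Read the limit back to confirm what the runtime actually reserved.
    err, count = cudart.cudaDeviceGetLimit(
        cudart.cudaLimit.cudaLimitDevRuntimePendingLaunchCount)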
autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotMappedAsPointer - - - This indicates that a mapped resource is not available for access as a pointer. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorECCUncorrectable - - - This indicates that an uncorrectable ECC error was detected during execution. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedLimit - - - This indicates that the :py:obj:`~.cudaLimit` passed to the API call is not supported by the active device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorDeviceAlreadyInUse - - - This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPeerAccessUnsupported - - - This error indicates that P2P access is not supported across the given devices. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidPtx - - - A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidGraphicsContext - - - This indicates an error with the OpenGL or DirectX context. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNvlinkUncorrectable - - - This indicates that an uncorrectable NVLink error was detected during the execution. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorJitCompilerNotFound - - - This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedPtxVersion - - - This indicates that the provided PTX was compiled with an unsupported toolchain. The most common reason for this is that the PTX was generated by a compiler newer than what is supported by the CUDA driver and PTX JIT compiler. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorJitCompilationDisabled - - - This indicates that the JIT compilation was disabled. The JIT compilation compiles PTX. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedExecAffinity - - - This indicates that the provided execution affinity is not supported by the device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnsupportedDevSideSync - - - This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidSource - - - This indicates that the device kernel source is invalid. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorFileNotFound - - - This indicates that the file specified was not found. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSharedObjectSymbolNotFound - - - This indicates that a link to a shared object failed to resolve. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSharedObjectInitFailed - - - This indicates that initialization of a shared object failed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorOperatingSystem - - - This error indicates that an OS call failed. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidResourceHandle - - - This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like :py:obj:`~.cudaStream_t` and :py:obj:`~.cudaEvent_t`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorIllegalState - - - This indicates that a resource required by the API call is not in a valid state to perform the requested operation. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLossyQuery - - - This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or the omission of optional return arguments. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSymbolNotFound - - - This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotReady - - - This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than :py:obj:`~.cudaSuccess` (which indicates completion). Calls that may return this value include :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorIllegalAddress - - - The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchOutOfResources - - - This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to :py:obj:`~.cudaErrorInvalidConfiguration`, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchTimeout - - - This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property :py:obj:`~.kernelExecTimeoutEnabled` for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchIncompatibleTexturing - - - This error indicates a kernel launch that uses an incompatible texturing mode. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPeerAccessAlreadyEnabled - - - This error indicates that a call to :py:obj:`~.cudaDeviceEnablePeerAccess()` is trying to re-enable peer addressing from a context which has already had peer addressing enabled. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorPeerAccessNotEnabled - - - This error indicates that :py:obj:`~.cudaDeviceDisablePeerAccess()` is trying to disable peer addressing which has not been enabled yet via :py:obj:`~.cudaDeviceEnablePeerAccess()`. - - - ..
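Because :py:obj:`~.cudaErrorNotReady` is informational rather than fatal, polling code treats it as "keep waiting". A small sketch of that pattern with the Python bindings:

.. code-block:: python

    from cuda import cudart

    err, stream = cudart.cudaStreamCreate()
    err, event = cudart.cudaEventCreate()
    # ... enqueue asynchronous work on `stream` here ...
    err, = cudart.cudaEventRecord(event, stream)

    # cudaErrorNotReady simply means the recorded work has not finished yet.
    err, = cudart.cudaEventQuery(event)
    while err == cudart.cudaError_t.cudaErrorNotReady:
        err, = cudart.cudaEventQuery(event)
    assert err == cudart.cudaError_t.cudaSuccess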
autoattribute:: cuda.cudart.cudaError_t.cudaErrorSetOnActiveProcess - - - This indicates that the user has called :py:obj:`~.cudaSetValidDevices()`, :py:obj:`~.cudaSetDeviceFlags()`, :py:obj:`~.cudaD3D9SetDirect3DDevice()`, :py:obj:`~.cudaD3D10SetDirect3DDevice`, :py:obj:`~.cudaD3D11SetDirect3DDevice()`, or :py:obj:`~.cudaVDPAUSetVDPAUDevice()` after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing :py:obj:`~.CUcontext` active on the host thread. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorContextIsDestroyed - - - This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorAssert - - - An assert triggered in device code during kernel execution. The device cannot be used again. All existing allocations are invalid. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorTooManyPeers - - - This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cudaEnablePeerAccess()`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorHostMemoryAlreadyRegistered - - - This error indicates that the memory range passed to :py:obj:`~.cudaHostRegister()` has already been registered. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorHostMemoryNotRegistered - - - This error indicates that the pointer passed to :py:obj:`~.cudaHostUnregister()` does not correspond to any currently registered memory region. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorHardwareStackError - - - Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorIllegalInstruction - - - The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMisalignedAddress - - - The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidAddressSpace - - - While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - ..
autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidPc - - - The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorLaunchFailure - - - An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCooperativeLaunchTooLarge - - - This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cudaLaunchCooperativeKernel` or :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.cudaDevAttrMultiProcessorCount`. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotPermitted - - - This error indicates the attempted operation is not permitted. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorNotSupported - - - This error indicates the attempted operation is not supported on the current system or device. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSystemNotReady - - - This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorSystemDriverMismatch - - - This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCompatNotSupportedOnDevice - - - This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsConnectionFailed - - - This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsRpcFailure - - - This error indicates that the remote procedural call between the MPS server and the MPS client failed. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsServerNotReady - - - This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. - - - .. 
autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsMaxClientsReached - - - This error indicates that the hardware resources required to create an MPS client have been exhausted. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsMaxConnectionsReached - - - This error indicates that the hardware resources required to support device connections have been exhausted. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorMpsClientTerminated - - - This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCdpNotSupported - - - This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCdpVersionMismatch - - - This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureUnsupported - - - The operation is not permitted when the stream is capturing. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureInvalidated - - - The current capture sequence on the stream has been invalidated due to a previous error. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureMerge - - - The operation would have resulted in a merge of two independent capture sequences. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureUnmatched - - - The capture was not initiated in this stream. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureUnjoined - - - The capture sequence contains a fork that was not joined to the primary stream. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureIsolation - - - A dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureImplicit - - - The operation would have resulted in a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorCapturedEvent - - - The operation is not permitted on an event which was last recorded in a capturing stream. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorStreamCaptureWrongThread - - - A stream capture sequence not initiated with the :py:obj:`~.cudaStreamCaptureModeRelaxed` argument to :py:obj:`~.cudaStreamBeginCapture` was passed to :py:obj:`~.cudaStreamEndCapture` in a different thread. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorTimeout - - - This indicates that the wait operation has timed out. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorGraphExecUpdateFailure - - - This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorExternalDevice - - - This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error.
To continue using CUDA, the process must be terminated and relaunched. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidClusterSize - - - This indicates that a kernel launch error has occurred due to cluster misconfiguration. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorFunctionNotLoaded - - - Indicates that a function handle is not loaded when calling an API that requires a loaded function. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidResourceType - - - This error indicates one or more resources passed in are not valid resource types for the operation. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorInvalidResourceConfiguration - - - This error indicates one or more resources are insufficient or non-applicable for the operation. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorUnknown - - - This indicates that an unknown internal error has occurred. - - - .. autoattribute:: cuda.cudart.cudaError_t.cudaErrorApiFailureBase - -.. autoclass:: cuda.cudart.cudaChannelFormatKind - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSigned - - - Signed channel format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsigned - - - Unsigned channel format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat - - - Float channel format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindNone - - - No channel format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindNV12 - - - Unsigned 8-bit integers, planar 4:2:0 YUV format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X1 - - - 1 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X2 - - - 2 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X4 - - - 4 channel unsigned 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X1 - - - 1 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X2 - - - 2 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X4 - - - 4 channel unsigned 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X1 - - - 1 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X2 - - - 2 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X4 - - - 4 channel signed 8-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X1 - - - 1 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X2 - - - 2 channel signed 16-bit normalized integer - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X4 - - - 4 channel signed 16-bit normalized integer - - - ..
autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1 - - - 4 channel unsigned normalized block-compressed (BC1 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB - - - 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2 - - - 4 channel unsigned normalized block-compressed (BC2 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB - - - 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3 - - - 4 channel unsigned normalized block-compressed (BC3 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB - - - 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4 - - - 1 channel unsigned normalized block-compressed (BC4 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4 - - - 1 channel signed normalized block-compressed (BC4 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5 - - - 2 channel unsigned normalized block-compressed (BC5 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5 - - - 2 channel signed normalized block-compressed (BC5 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H - - - 3 channel unsigned half-float block-compressed (BC6H compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H - - - 3 channel signed half-float block-compressed (BC6H compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7 - - - 4 channel unsigned normalized block-compressed (BC7 compression) format - - - .. autoattribute:: cuda.cudart.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB - - - 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding - -.. autoclass:: cuda.cudart.cudaMemoryType - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeUnregistered - - - Unregistered memory - - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeHost - - - Host memory - - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeDevice - - - Device memory - - - .. autoattribute:: cuda.cudart.cudaMemoryType.cudaMemoryTypeManaged - - - Managed memory - -.. autoclass:: cuda.cudart.cudaMemcpyKind - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyHostToHost - - - Host -> Host - - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyHostToDevice - - - Host -> Device - - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost - - - Device -> Host - - - .. autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice - - - Device -> Device - - - .. 
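The :py:obj:`~.cudaMemcpyKind` values listed here select the copy direction. A sketch of a host-to-device round trip with the Python bindings (error checking elided for brevity):

.. code-block:: python

    import numpy as np
    from cuda import cudart

    host = np.arange(1024, dtype=np.float32)

    err, dptr = cudart.cudaMalloc(host.nbytes)
    # Host -> Device, then Device -> Host, each direction chosen via cudaMemcpyKind.
    err, = cudart.cudaMemcpy(dptr, host.ctypes.data, host.nbytes,
                             cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    out = np.empty_like(host)
    err, = cudart.cudaMemcpy(out.ctypes.data, dptr, host.nbytes,
                             cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    err, = cudart.cudaFree(dptr)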
autoattribute:: cuda.cudart.cudaMemcpyKind.cudaMemcpyDefault - - - Direction of the transfer is inferred from the pointer values. Requires unified virtual addressing - -.. autoclass:: cuda.cudart.cudaAccessProperty - - .. autoattribute:: cuda.cudart.cudaAccessProperty.cudaAccessPropertyNormal - - - Normal cache persistence. - - - .. autoattribute:: cuda.cudart.cudaAccessProperty.cudaAccessPropertyStreaming - - - Streaming access is less likely to persist in cache. - - - .. autoattribute:: cuda.cudart.cudaAccessProperty.cudaAccessPropertyPersisting - - - Persisting access is more likely to persist in cache. - -.. autoclass:: cuda.cudart.cudaStreamCaptureStatus - - .. autoattribute:: cuda.cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone - - - Stream is not capturing - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive - - - Stream is actively capturing - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusInvalidated - - - Stream is part of a capture sequence that has been invalidated, but not terminated - -.. autoclass:: cuda.cudart.cudaStreamCaptureMode - - .. autoattribute:: cuda.cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal - - - .. autoattribute:: cuda.cudart.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed - -.. autoclass:: cuda.cudart.cudaSynchronizationPolicy - - .. autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicyAuto - - - .. autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicySpin - - - .. autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicyYield - - - .. autoattribute:: cuda.cudart.cudaSynchronizationPolicy.cudaSyncPolicyBlockingSync - -.. autoclass:: cuda.cudart.cudaClusterSchedulingPolicy - - .. autoattribute:: cuda.cudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyDefault - - - the default policy - - - .. autoattribute:: cuda.cudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicySpread - - - spread the blocks within a cluster to the SMs - - - .. autoattribute:: cuda.cudart.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyLoadBalancing - - - allow the hardware to load-balance the blocks in a cluster to the SMs - -.. autoclass:: cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags - - .. autoattribute:: cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamAddCaptureDependencies - - - Add new nodes to the dependency set - - - .. autoattribute:: cuda.cudart.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamSetCaptureDependencies - - - Replace the dependency set with the new nodes - -.. autoclass:: cuda.cudart.cudaUserObjectFlags - - .. autoattribute:: cuda.cudart.cudaUserObjectFlags.cudaUserObjectNoDestructorSync - - - Indicates the destructor execution is not synchronized by any CUDA handle. - -.. autoclass:: cuda.cudart.cudaUserObjectRetainFlags - - .. autoattribute:: cuda.cudart.cudaUserObjectRetainFlags.cudaGraphUserObjectMove - - - Transfer references from the caller rather than creating new references. - -.. autoclass:: cuda.cudart.cudaGraphicsRegisterFlags - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone - - - Default - - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly - - - CUDA will not write to this resource - - - ..
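The capture mode and status enums above drive the stream-capture workflow: begin capture, enqueue work, then end capture to obtain a graph. A minimal sketch:

.. code-block:: python

    from cuda import cudart

    err, stream = cudart.cudaStreamCreate()
    err, = cudart.cudaStreamBeginCapture(
        stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)

    # While capture is active, the status query reflects it.
    err, status = cudart.cudaStreamIsCapturing(stream)
    assert status == cudart.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive

    # ... enqueue the work to be recorded into the graph ...
    err, graph = cudart.cudaStreamEndCapture(stream)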
autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard - - - CUDA will only write to and will not read from this resource - - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsSurfaceLoadStore - - - CUDA will bind this resource to a surface reference - - - .. autoattribute:: cuda.cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsTextureGather - - - CUDA will perform texture gather operations on this resource - -.. autoclass:: cuda.cudart.cudaGraphicsMapFlags - - .. autoattribute:: cuda.cudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsNone - - - Default; Assume resource can be read/written - - - .. autoattribute:: cuda.cudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsReadOnly - - - CUDA will not write to this resource - - - .. autoattribute:: cuda.cudart.cudaGraphicsMapFlags.cudaGraphicsMapFlagsWriteDiscard - - - CUDA will only write to and will not read from this resource - -.. autoclass:: cuda.cudart.cudaGraphicsCubeFace - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveX - - - Positive X face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeX - - - Negative X face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveY - - - Positive Y face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeY - - - Negative Y face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveZ - - - Positive Z face of cubemap - - - .. autoattribute:: cuda.cudart.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeZ - - - Negative Z face of cubemap - -.. autoclass:: cuda.cudart.cudaResourceType - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypeArray - - - Array resource - - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypeMipmappedArray - - - Mipmapped array resource - - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypeLinear - - - Linear resource - - - .. autoattribute:: cuda.cudart.cudaResourceType.cudaResourceTypePitch2D - - - Pitch 2D resource - -.. autoclass:: cuda.cudart.cudaResourceViewFormat - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatNone - - - No resource view format (use underlying resource format) - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar1 - - - 1 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar2 - - - 2 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedChar4 - - - 4 channel unsigned 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedChar1 - - - 1 channel signed 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedChar2 - - - 2 channel signed 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedChar4 - - - 4 channel signed 8-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort1 - - - 1 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort2 - - - 2 channel unsigned 16-bit integers - - - .. 
autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedShort4 - - - 4 channel unsigned 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedShort1 - - - 1 channel signed 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedShort2 - - - 2 channel signed 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedShort4 - - - 4 channel signed 16-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt1 - - - 1 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt2 - - - 2 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedInt4 - - - 4 channel unsigned 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedInt1 - - - 1 channel signed 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedInt2 - - - 2 channel signed 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedInt4 - - - 4 channel signed 32-bit integers - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatHalf1 - - - 1 channel 16-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatHalf2 - - - 2 channel 16-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatHalf4 - - - 4 channel 16-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatFloat1 - - - 1 channel 32-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatFloat2 - - - 2 channel 32-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatFloat4 - - - 4 channel 32-bit floating point - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed1 - - - Block compressed 1 - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed2 - - - Block compressed 2 - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed3 - - - Block compressed 3 - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed4 - - - Block compressed 4 unsigned - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed4 - - - Block compressed 4 signed - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed5 - - - Block compressed 5 unsigned - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed5 - - - Block compressed 5 signed - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed6H - - - Block compressed 6 unsigned half-float - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed6H - - - Block compressed 6 signed half-float - - - .. autoattribute:: cuda.cudart.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed7 - - - Block compressed 7 - -.. autoclass:: cuda.cudart.cudaFuncAttribute - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize - - - Maximum dynamic shared memory size - - - .. 
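These resource and view formats come together when building a texture object: a :py:obj:`~.cudaResourceDesc` names the backing resource and a :py:obj:`~.cudaTextureDesc` the sampling behavior. A hedged sketch for single-channel 32-bit float linear memory; ``dptr`` and ``nbytes`` are assumed to come from an earlier ``cudaMalloc``:

.. code-block:: python

    from cuda import cudart

    res_desc = cudart.cudaResourceDesc()
    res_desc.resType = cudart.cudaResourceType.cudaResourceTypeLinear
    res_desc.res.linear.devPtr = dptr          # assumed device allocation
    res_desc.res.linear.desc.f = cudart.cudaChannelFormatKind.cudaChannelFormatKindFloat
    res_desc.res.linear.desc.x = 32            # bits in the single channel
    res_desc.res.linear.sizeInBytes = nbytes   # assumed allocation size

    tex_desc = cudart.cudaTextureDesc()
    tex_desc.readMode = cudart.cudaTextureReadMode.cudaTextureReadModeElementType

    err, tex = cudart.cudaCreateTextureObject(res_desc, tex_desc, None)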
autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributePreferredSharedMemoryCarveout - - - Preferred shared memory-L1 cache split - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeClusterDimMustBeSet - - - Indicator to enforce valid cluster dimension specification on kernel launch - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterWidth - - - Required cluster width - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterHeight - - - Required cluster height - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeRequiredClusterDepth - - - Required cluster depth - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeNonPortableClusterSizeAllowed - - - Whether non-portable cluster scheduling policy is supported - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeClusterSchedulingPolicyPreference - - - Required cluster scheduling policy preference - - - .. autoattribute:: cuda.cudart.cudaFuncAttribute.cudaFuncAttributeMax - -.. autoclass:: cuda.cudart.cudaFuncCache - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferNone - - - Default function cache configuration, no preference - - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferShared - - - Prefer larger shared memory and smaller L1 cache - - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferL1 - - - Prefer larger L1 cache and smaller shared memory - - - .. autoattribute:: cuda.cudart.cudaFuncCache.cudaFuncCachePreferEqual - - - Prefer equal size L1 cache and shared memory - -.. autoclass:: cuda.cudart.cudaSharedMemConfig - - .. autoattribute:: cuda.cudart.cudaSharedMemConfig.cudaSharedMemBankSizeDefault - - - .. autoattribute:: cuda.cudart.cudaSharedMemConfig.cudaSharedMemBankSizeFourByte - - - .. autoattribute:: cuda.cudart.cudaSharedMemConfig.cudaSharedMemBankSizeEightByte - -.. autoclass:: cuda.cudart.cudaSharedCarveout - - .. autoattribute:: cuda.cudart.cudaSharedCarveout.cudaSharedmemCarveoutDefault - - - No preference for shared memory or L1 (default) - - - .. autoattribute:: cuda.cudart.cudaSharedCarveout.cudaSharedmemCarveoutMaxShared - - - Prefer maximum available shared memory, minimum L1 cache - - - .. autoattribute:: cuda.cudart.cudaSharedCarveout.cudaSharedmemCarveoutMaxL1 - - - Prefer maximum available L1 cache, minimum shared memory - -.. autoclass:: cuda.cudart.cudaComputeMode - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeDefault - - - Default compute mode (Multiple threads can use :py:obj:`~.cudaSetDevice()` with this device) - - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeExclusive - - - Compute-exclusive-thread mode (Only one thread in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) - - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeProhibited - - - Compute-prohibited mode (No threads can use :py:obj:`~.cudaSetDevice()` with this device) - - - .. autoattribute:: cuda.cudart.cudaComputeMode.cudaComputeModeExclusiveProcess - - - Compute-exclusive-process mode (Many threads in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) - -.. autoclass:: cuda.cudart.cudaLimit - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitStackSize - - - GPU thread stack size - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitPrintfFifoSize - - - GPU printf FIFO size - - - .. 
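The cache-preference enums above are applied with :py:obj:`~.cudaDeviceSetCacheConfig`, a hint the runtime is free to ignore on some architectures. A short sketch:

.. code-block:: python

    from cuda import cudart

    # Prefer a larger shared-memory carveout for subsequent launches.
    err, = cudart.cudaDeviceSetCacheConfig(
        cudart.cudaFuncCache.cudaFuncCachePreferShared)

    # The current preference can be read back.
    err, config = cudart.cudaDeviceGetCacheConfig()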
autoattribute:: cuda.cudart.cudaLimit.cudaLimitMallocHeapSize - - - GPU malloc heap size - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitDevRuntimeSyncDepth - - - GPU device runtime synchronize depth - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitDevRuntimePendingLaunchCount - - - GPU device runtime pending launch count - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitMaxL2FetchGranularity - - - A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint - - - .. autoattribute:: cuda.cudart.cudaLimit.cudaLimitPersistingL2CacheSize - - - A size in bytes for L2 persisting lines cache size - -.. autoclass:: cuda.cudart.cudaMemoryAdvise - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseSetReadMostly - - - Data will mostly be read and only occasionally be written to - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseUnsetReadMostly - - - Undo the effect of :py:obj:`~.cudaMemAdviseSetReadMostly` - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseSetPreferredLocation - - - Set the preferred location for the data as the specified device - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseUnsetPreferredLocation - - - Clear the preferred location for the data - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseSetAccessedBy - - - Data will be accessed by the specified device, so prevent page faults as much as possible - - - .. autoattribute:: cuda.cudart.cudaMemoryAdvise.cudaMemAdviseUnsetAccessedBy - - - Let the Unified Memory subsystem decide on the page faulting policy for the specified device - -.. autoclass:: cuda.cudart.cudaMemRangeAttribute - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly - - - Whether the range will mostly be read and only occasionally be written to - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocation - - - The preferred location of the range - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeAccessedBy - - - Memory range has :py:obj:`~.cudaMemAdviseSetAccessedBy` set for specified device - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocation - - - The last location to which the range was prefetched - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationType - - - The preferred location type of the range - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationId - - - The preferred location id of the range - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationType - - - The last location type to which the range was prefetched - - - .. autoattribute:: cuda.cudart.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationId - - - The last location id to which the range was prefetched - -.. autoclass:: cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost - - - :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` and its CUDA Driver API counterpart are supported on the device. - - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionMemOps - - - The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the CUDA device. - -..
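The advice values above are attached to a managed allocation with :py:obj:`~.cudaMemAdvise`. A hedged sketch (the size and the choice of device 0 are arbitrary illustrative values):

.. code-block:: python

    from cuda import cudart

    nbytes = 1 << 20
    err, ptr = cudart.cudaMallocManaged(nbytes, cudart.cudaMemAttachGlobal)

    # Mark the range read-mostly so the driver may create read-only replicas.
    err, = cudart.cudaMemAdvise(
        ptr, nbytes, cudart.cudaMemoryAdvise.cudaMemAdviseSetReadMostly, 0)

    err, = cudart.cudaFree(ptr)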
autoclass:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering - - .. autoattribute:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingNone - - - The device does not natively support ordering of GPUDirect RDMA writes. :py:obj:`~.cudaFlushGPUDirectRDMAWrites()` can be leveraged if supported. - - - .. autoattribute:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingOwner - - - Natively, the device can consistently consume GPUDirect RDMA writes, although other CUDA devices may not. - - - .. autoattribute:: cuda.cudart.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingAllDevices - - - Any CUDA device in the system can consistently consume GPUDirect RDMA writes to this device. - -.. autoclass:: cuda.cudart.cudaFlushGPUDirectRDMAWritesScope - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner - - - Blocks until remote writes are visible to the CUDA device context owning the data. - - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToAllDevices - - - Blocks until remote writes are visible to all CUDA device contexts. - -.. autoclass:: cuda.cudart.cudaFlushGPUDirectRDMAWritesTarget - - .. autoattribute:: cuda.cudart.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice - - - Sets the target for :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. - -.. autoclass:: cuda.cudart.cudaDeviceAttr - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock - - - Maximum number of threads per block - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimX - - - Maximum block dimension X - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimY - - - Maximum block dimension Y - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlockDimZ - - - Maximum block dimension Z - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxGridDimX - - - Maximum grid dimension X - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxGridDimY - - - Maximum grid dimension Y - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxGridDimZ - - - Maximum grid dimension Z - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock - - - Maximum shared memory available per block in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTotalConstantMemory - - - Memory available on device for constant variables in a CUDA C kernel in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrWarpSize - - - Warp size in threads - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxPitch - - - Maximum pitch in bytes allowed by memory copies - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock - - - Maximum number of 32-bit registers available per block - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrClockRate - - - Peak clock frequency in kilohertz - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTextureAlignment - - - Alignment requirement for textures - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGpuOverlap - - - Device can possibly copy memory and execute a kernel concurrently - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMultiProcessorCount - - - Number of multiprocessors on device - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrKernelExecTimeout - - - Specifies whether there is a run time limit on kernels - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrIntegrated - - - Device is integrated with host memory - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCanMapHostMemory - - - Device can map host memory into CUDA address space - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputeMode - - - Compute mode (See :py:obj:`~.cudaComputeMode` for details) - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth - - - Maximum 1D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth - - - Maximum 2D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight - - - Maximum 2D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth - - - Maximum 3D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight - - - Maximum 3D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth - - - Maximum 3D texture depth - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth - - - Maximum 2D layered texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight - - - Maximum 2D layered texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers - - - Maximum layers in a 2D layered texture - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrSurfaceAlignment - - - Alignment requirement for surfaces - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrConcurrentKernels - - - Device can possibly execute multiple kernels concurrently - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrEccEnabled - - - Device has ECC support enabled - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPciBusId - - - PCI bus ID of the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPciDeviceId - - - PCI device ID of the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTccDriver - - - Device is using TCC driver model - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemoryClockRate - - - Peak memory clock frequency in kilohertz - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth - - - Global memory bus width in bits - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrL2CacheSize - - - Size of L2 cache in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor - - - Maximum resident threads per multiprocessor - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrAsyncEngineCount - - - Number of asynchronous engines - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrUnifiedAddressing - - - Device shares a unified address space with the host - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth - - - Maximum 1D layered texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers - - - Maximum layers in a 1D layered texture - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth - - - Maximum 2D texture width if cudaArrayTextureGather is set - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight - - - Maximum 2D texture height if cudaArrayTextureGather is set - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt - - - Alternate maximum 3D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt - - - Alternate maximum 3D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt - - - Alternate maximum 3D texture depth - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPciDomainId - - - PCI domain ID of the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTexturePitchAlignment - - - Pitch alignment requirement for textures - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth - - - Maximum cubemap texture width/height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth - - - Maximum cubemap layered texture width/height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers - - - Maximum layers in a cubemap layered texture - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth - - - Maximum 1D surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth - - - Maximum 2D surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight - - - Maximum 2D surface height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth - - - Maximum 3D surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight - - - Maximum 3D surface height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth - - - Maximum 3D surface depth - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth - - - Maximum 1D layered surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers - - - Maximum layers in a 1D layered surface - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth - - - Maximum 2D layered surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight - - - Maximum 2D layered surface height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers - - - Maximum layers in a 2D layered surface - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth - - - Maximum cubemap surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth - - - Maximum cubemap layered surface width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers - - - Maximum layers in a cubemap layered surface - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth - - - Maximum 1D linear texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth - - - Maximum 2D linear texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight - - - Maximum 2D linear texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch - - - Maximum 2D linear texture pitch in bytes - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth - - - Maximum mipmapped 2D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight - - - Maximum mipmapped 2D texture height - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor - - - Major compute capability version number - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor - - - Minor compute capability version number - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth - - - Maximum mipmapped 1D texture width - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported - - - Device supports stream priorities - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported - - - Device supports caching globals in L1 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported - - - Device supports caching locals in L1 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor - - - Maximum shared memory available per multiprocessor in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor - - - Maximum number of 32-bit registers available per multiprocessor - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrManagedMemory - - - Device can allocate managed memory on this system - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard - - - Device is on a multi-GPU board - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID - - - Unique identifier for a group of devices on the same multi-GPU board - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported - - - Link between the device and the host supports native atomic operations - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio - - - Ratio of single precision performance (in floating-point operations per second) to double precision performance - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPageableMemoryAccess - - - Device supports coherently accessing pageable memory without calling cudaHostRegister on it - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess - - - Device can coherently access managed memory concurrently with the CPU - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrComputePreemptionSupported - - - Device supports Compute Preemption - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem - - - Device can access host registered memory at the same virtual address as the CPU - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved92 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved93 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved94 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCooperativeLaunch - - - Device supports launching cooperative kernels via :py:obj:`~.cudaLaunchCooperativeKernel` - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch - - - Deprecated, cudaLaunchCooperativeKernelMultiDevice is deprecated. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin - - - The maximum optin shared memory per block. This value may vary by chip. 
See :py:obj:`~.cudaFuncSetAttribute` - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites - - - Device supports flushing of outstanding remote writes. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostRegisterSupported - - - Device supports host memory registration via :py:obj:`~.cudaHostRegister`. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables - - - Device accesses pageable memory via the host's page tables. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost - - - Host can directly access managed memory on the device without migration. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxBlocksPerMultiprocessor - - - Maximum number of blocks per multiprocessor - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxPersistingL2CacheSize - - - Maximum L2 persisting lines capacity setting in bytes. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxAccessPolicyWindowSize - - - Maximum value of :py:obj:`~.cudaAccessPolicyWindow.num_bytes`. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReservedSharedMemoryPerBlock - - - Shared memory reserved by CUDA driver per block in bytes - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrSparseCudaArraySupported - - - Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostRegisterReadOnlySupported - - - Device supports using the :py:obj:`~.cudaHostRegister` flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrTimelineSemaphoreInteropSupported - - - External timeline semaphore interop is supported on the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMaxTimelineSemaphoreInteropSupported - - - Deprecated, External timeline semaphore interop is supported on the device - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported - - - Device supports using the :py:obj:`~.cudaMallocAsync` and :py:obj:`~.cudaMemPool` family of APIs - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMASupported - - - Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions - - - The returned attribute shall be interpreted as a bitmask, where the individual bits are listed in the :py:obj:`~.cudaFlushGPUDirectRDMAWritesOptions` enum - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAWritesOrdering - - - GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.cudaGPUDirectRDMAWritesOrdering` for the numerical values returned here. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemoryPoolSupportedHandleTypes - - - Handle types supported with mempool based IPC - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrClusterLaunch - - - Indicates device supports cluster launch - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrDeferredMappingCudaArraySupported - - - Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays - - - .. 
autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved122 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved123 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved124 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrIpcEventSupport - - - Device supports IPC Events. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMemSyncDomainCount - - - Number of memory synchronization domains the device supports. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved127 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved128 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved129 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrNumaConfig - - - NUMA configuration of a device: value is of type :py:obj:`~.cudaDeviceNumaConfig` enum - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrNumaId - - - NUMA node ID of the GPU memory - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrReserved132 - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMpsEnabled - - - Contexts created on this device will be shared via MPS - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrHostNumaId - - - NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrD3D12CigSupported - - - Device supports CIG with D3D12. - - - .. autoattribute:: cuda.cudart.cudaDeviceAttr.cudaDevAttrMax - -.. autoclass:: cuda.cudart.cudaMemPoolAttr - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies - - - (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in other streams as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic - - - (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies - - - (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync (default enabled). - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold - - - (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent - - - (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrReservedMemHigh - - - (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. - - - ..
autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent - - - (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. - - - .. autoattribute:: cuda.cudart.cudaMemPoolAttr.cudaMemPoolAttrUsedMemHigh - - - (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. - -.. autoclass:: cuda.cudart.cudaMemLocationType - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeInvalid - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeDevice - - - Location is a device location, thus id is a device ordinal - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeHost - - - Location is host, id is ignored - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeHostNuma - - - Location is a host NUMA node, thus id is a host NUMA node id - - - .. autoattribute:: cuda.cudart.cudaMemLocationType.cudaMemLocationTypeHostNumaCurrent - - - Location is the host NUMA node closest to the current thread's CPU, id is ignored - -.. autoclass:: cuda.cudart.cudaMemAccessFlags - - .. autoattribute:: cuda.cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtNone - - - Default, make the address range not accessible - - - .. autoattribute:: cuda.cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtRead - - - Make the address range read accessible - - - .. autoattribute:: cuda.cudart.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite - - - Make the address range read-write accessible - -.. autoclass:: cuda.cudart.cudaMemAllocationType - - .. autoattribute:: cuda.cudart.cudaMemAllocationType.cudaMemAllocationTypeInvalid - - - .. autoattribute:: cuda.cudart.cudaMemAllocationType.cudaMemAllocationTypePinned - - - This allocation type is 'pinned', i.e. cannot migrate from its current location while the application is actively using it - - - .. autoattribute:: cuda.cudart.cudaMemAllocationType.cudaMemAllocationTypeMax - -.. autoclass:: cuda.cudart.cudaMemAllocationHandleType - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeNone - - - Does not allow any export mechanism. - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor - - - Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeWin32 - - - Allows a Win32 NT handle to be used for exporting. (HANDLE) - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeWin32Kmt - - - Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) - - - .. autoattribute:: cuda.cudart.cudaMemAllocationHandleType.cudaMemHandleTypeFabric - - - Allows a fabric handle to be used for exporting. (cudaMemFabricHandle_t) - -
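A hedged sketch of how the pool attributes above are set through these bindings, against the default pool of device 0; it assumes the cuuint64_t wrapper from the driver module is the expected value type for the release threshold, as its "(value type = cuuint64_t)" note suggests:

.. code-block:: python

    from cuda import cuda, cudart

    err, pool = cudart.cudaDeviceGetDefaultMemPool(0)
    assert err == cudart.cudaError_t.cudaSuccess
    # Keep up to 1 MiB of freed memory cached in the pool instead of
    # returning it to the OS at the next synchronization point.
    err, = cudart.cudaMemPoolSetAttribute(
        pool, cudart.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold,
        cuda.cuuint64_t(1 << 20))
    assert err == cudart.cudaError_t.cudaSuccess

.. autoclass:: cuda.cudart.cudaGraphMemAttributeType - - .. autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemCurrent - - - (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs. - - - .. autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh - - - (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. - - - ..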
autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemCurrent - - - (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - - - .. autoattribute:: cuda.cudart.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemHigh - - - (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. - -.. autoclass:: cuda.cudart.cudaDeviceP2PAttr - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrPerformanceRank - - - A relative value indicating the performance of the link between two devices - - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported - - - Peer access is enabled - - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrNativeAtomicSupported - - - Native atomic operation over the link supported - - - .. autoattribute:: cuda.cudart.cudaDeviceP2PAttr.cudaDevP2PAttrCudaArrayAccessSupported - - - Accessing CUDA arrays over the link supported - -.. autoclass:: cuda.cudart.cudaExternalMemoryHandleType - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32Kmt - - - Handle is an opaque, globally shared handle - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Heap - - - Handle is a D3D12 heap object - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Resource - - - Handle is a D3D12 committed resource - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11Resource - - - Handle is a shared NT handle to a D3D11 resource - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11ResourceKmt - - - Handle is a globally shared handle to a D3D11 resource - - - .. autoattribute:: cuda.cudart.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeNvSciBuf - - - Handle is an NvSciBuf object - -.. autoclass:: cuda.cudart.cudaExternalSemaphoreHandleType - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd - - - Handle is an opaque file descriptor - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32 - - - Handle is an opaque shared NT handle - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt - - - Handle is an opaque, globally shared handle - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D12Fence - - - Handle is a shared NT handle referencing a D3D12 fence object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D11Fence - - - Handle is a shared NT handle referencing a D3D11 fence object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeNvSciSync - - - Opaque handle to NvSciSync Object - - - .. 
autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutex - - - Handle is a shared NT handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutexKmt - - - Handle is a shared KMT handle referencing a D3D11 keyed mutex object - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd - - - Handle is an opaque handle file descriptor referencing a timeline semaphore - - - .. autoattribute:: cuda.cudart.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 - - - Handle is an opaque handle file descriptor referencing a timeline semaphore - -.. autoclass:: cuda.cudart.cudaCGScope - - .. autoattribute:: cuda.cudart.cudaCGScope.cudaCGScopeInvalid - - - Invalid cooperative group scope - - - .. autoattribute:: cuda.cudart.cudaCGScope.cudaCGScopeGrid - - - Scope represented by a grid_group - - - .. autoattribute:: cuda.cudart.cudaCGScope.cudaCGScopeMultiGrid - - - Scope represented by a multi_grid_group - -.. autoclass:: cuda.cudart.cudaGraphConditionalHandleFlags - - .. autoattribute:: cuda.cudart.cudaGraphConditionalHandleFlags.cudaGraphCondAssignDefault - - - Apply default handle value when graph is launched. - -.. autoclass:: cuda.cudart.cudaGraphConditionalNodeType - - .. autoattribute:: cuda.cudart.cudaGraphConditionalNodeType.cudaGraphCondTypeIf - - - Conditional 'if' Node. Body executed once if condition value is non-zero. - - - .. autoattribute:: cuda.cudart.cudaGraphConditionalNodeType.cudaGraphCondTypeWhile - - - Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. - -.. autoclass:: cuda.cudart.cudaGraphNodeType - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeKernel - - - GPU kernel node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemcpy - - - Memcpy node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemset - - - Memset node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeHost - - - Host (executable) node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeGraph - - - Node which executes an embedded graph - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeEmpty - - - Empty (no-op) node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeWaitEvent - - - External event wait node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeEventRecord - - - External event record node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreSignal - - - External semaphore signal node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreWait - - - External semaphore wait node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemAlloc - - - Memory allocation node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeMemFree - - - Memory free node - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeConditional - - - Conditional node May be used to implement a conditional execution path or loop - - inside of a graph. The graph(s) contained within the body of the conditional node - - can be selectively executed or iterated upon based on the value of a conditional - - variable. 
- - - - Handles must be created in advance of creating the node - - using :py:obj:`~.cudaGraphConditionalHandleCreate`. - - - - The following restrictions apply to graphs which contain conditional nodes: - - The graph cannot be used in a child node. - - Only one instantiation of the graph may exist at any point in time. - - The graph cannot be cloned. - - - - To set the control value, supply a default value when creating the handle and/or - - call :py:obj:`~.cudaGraphSetConditional` from device code. - - - .. autoattribute:: cuda.cudart.cudaGraphNodeType.cudaGraphNodeTypeCount - -.. autoclass:: cuda.cudart.cudaGraphDependencyType - - .. autoattribute:: cuda.cudart.cudaGraphDependencyType.cudaGraphDependencyTypeDefault - - - This is an ordinary dependency. - - - .. autoattribute:: cuda.cudart.cudaGraphDependencyType.cudaGraphDependencyTypeProgrammatic - - - This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.cudaGraphKernelNodePortProgrammatic` or :py:obj:`~.cudaGraphKernelNodePortLaunchCompletion` outgoing port. - -.. autoclass:: cuda.cudart.cudaGraphExecUpdateResult - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess - - - The update succeeded - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateError - - - The update failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorTopologyChanged - - - The update failed because the topology changed - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNodeTypeChanged - - - The update failed because a node type changed - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorFunctionChanged - - - The update failed because the function of a kernel node changed (CUDA driver < 11.2) - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorParametersChanged - - - The update failed because the parameters changed in a way that is not supported - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNotSupported - - - The update failed because something about the node is not supported - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorUnsupportedFunctionChange - - - The update failed because the function of a kernel node changed in an unsupported way - - - .. autoattribute:: cuda.cudart.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorAttributesChanged - - - The update failed because the node attributes changed in a way that is not supported - -.. autoclass:: cuda.cudart.cudaGraphInstantiateResult - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess - - - Instantiation succeeded - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateError - - - Instantiation failed for an unexpected reason which is described in the return value of the function - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateInvalidStructure - - - Instantiation failed due to invalid structure, such as cycles - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateNodeOperationNotSupported - - - Instantiation for device launch failed because the graph contained an unsupported operation - - - .. 
autoattribute:: cuda.cudart.cudaGraphInstantiateResult.cudaGraphInstantiateMultipleDevicesNotSupported - - - Instantiation for device launch failed due to the nodes belonging to different contexts - -.. autoclass:: cuda.cudart.cudaGraphKernelNodeField - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldInvalid - - - Invalid field - - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldGridDim - - - Grid dimension update - - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldParam - - - Kernel parameter update - - - .. autoattribute:: cuda.cudart.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldEnabled - - - Node enable/disable - -.. autoclass:: cuda.cudart.cudaGetDriverEntryPointFlags - - .. autoattribute:: cuda.cudart.cudaGetDriverEntryPointFlags.cudaEnableDefault - - - Default search mode for driver symbols. - - - .. autoattribute:: cuda.cudart.cudaGetDriverEntryPointFlags.cudaEnableLegacyStream - - - Search for legacy versions of driver symbols. - - - .. autoattribute:: cuda.cudart.cudaGetDriverEntryPointFlags.cudaEnablePerThreadDefaultStream - - - Search for per-thread versions of driver symbols. - -.. autoclass:: cuda.cudart.cudaDriverEntryPointQueryResult - - .. autoattribute:: cuda.cudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess - - - Search for symbol found a match - - - .. autoattribute:: cuda.cudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSymbolNotFound - - - Search for symbol was not found - - - .. autoattribute:: cuda.cudart.cudaDriverEntryPointQueryResult.cudaDriverEntryPointVersionNotSufficent - - - Search for symbol was found but its version was not sufficient - -.. autoclass:: cuda.cudart.cudaGraphDebugDotFlags - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose - - - Output all debug data as if every debug flag is enabled - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams - - - Adds :py:obj:`~.cudaKernelNodeParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemcpyNodeParams - - - Adds :py:obj:`~.cudaMemcpy3DParms` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemsetNodeParams - - - Adds :py:obj:`~.cudaMemsetParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHostNodeParams - - - Adds :py:obj:`~.cudaHostNodeParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsEventNodeParams - - - Adds cudaEvent_t handle from record and wait nodes to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasSignalNodeParams - - - Adds :py:obj:`~.cudaExternalSemaphoreSignalNodeParams` values to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasWaitNodeParams - - - Adds :py:obj:`~.cudaExternalSemaphoreWaitNodeParams` to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeAttributes - - - Adds cudaKernelNodeAttrID values to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHandles - - - Adds node handles and every kernel function handle to output - - - .. autoattribute:: cuda.cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsConditionalNodeParams - - - Adds :py:obj:`~.cudaConditionalNodeParams` to output - -
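The debug-dot flags above feed cudaGraphDebugDotPrint; a small sketch that dumps a graph (empty here, for brevity) to a DOT file, assuming the int-like flag enum coerces to the unsigned flags word and that "graph.dot" is an arbitrary output path:

.. code-block:: python

    from cuda import cudart

    err, graph = cudart.cudaGraphCreate(0)
    assert err == cudart.cudaError_t.cudaSuccess
    # Write a Graphviz description of the graph, including kernel node
    # parameters in the output.
    err, = cudart.cudaGraphDebugDotPrint(
        graph, b"graph.dot",
        cudart.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams)
    assert err == cudart.cudaError_t.cudaSuccess
    err, = cudart.cudaGraphDestroy(graph)

..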
autoclass:: cuda.cudart.cudaGraphInstantiateFlags - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch - - - Automatically free memory allocated in a graph before relaunching. - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload - - - Automatically upload the graph after instantiation. Only supported by - - :py:obj:`~.cudaGraphInstantiateWithParams`. The upload will be performed using the - - stream provided in `instantiateParams`. - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagDeviceLaunch - - - Instantiate the graph to be launchable from the device. This flag can only - - be used on platforms which support unified addressing. This flag cannot be - - used in conjunction with cudaGraphInstantiateFlagAutoFreeOnLaunch. - - - .. autoattribute:: cuda.cudart.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority - - - Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. - -.. autoclass:: cuda.cudart.cudaLaunchMemSyncDomain - - .. autoattribute:: cuda.cudart.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainDefault - - - Launch kernels in the default domain - - - .. autoattribute:: cuda.cudart.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainRemote - - - Launch kernels in the remote domain - -.. autoclass:: cuda.cudart.cudaLaunchAttributeID - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeIgnore - - - Ignored entry, for convenient composition - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.accessPolicyWindow`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeCooperative - - - Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.cooperative`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy - - - Valid for streams. See :py:obj:`~.cudaLaunchAttributeValue.syncPolicy`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension - - - Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterDim`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference - - - Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization - - - Valid for launches. Setting :py:obj:`~.cudaLaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent - - - Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.programmaticEvent` to record the event. Event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event.
A block can trigger the event programmatically in a future CUDA release. A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cudaEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cudaEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set). - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributePriority - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.priority`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomainMap`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain - - - Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomain`. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent - - - Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.launchCompletionEvent` to record the event. - - Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. - - A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. - - The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set). - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode - - - Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. - - :cudaLaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via :py:obj:`~.cudaLaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. 
For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`. - - Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cudaGraphDestroyNode`. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cudaGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cudaGraphExecUpdate`. - - If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cuGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. - - - .. autoattribute:: cuda.cudart.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout - - - Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.cudaLaunchAttributeValue.sharedMemCarveout` to a percentage between 0 and 100 sets the shared memory carveout preference in percent of the total shared memory for that kernel launch. This attribute takes precedence over :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout`. This is only a hint, and the driver can choose a different configuration if required for the launch. - -.. autoclass:: cuda.cudart.cudaDeviceNumaConfig - - .. autoattribute:: cuda.cudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNone - - - The GPU is not a NUMA node - - - .. autoattribute:: cuda.cudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode - - - The GPU is a NUMA node, cudaDevAttrNumaId contains its NUMA ID - -.. autoclass:: cuda.cudart.cudaAsyncNotificationType - - .. autoattribute:: cuda.cudart.cudaAsyncNotificationType.cudaAsyncNotificationTypeOverBudget - -.. autoclass:: cuda.cudart.cudaSurfaceBoundaryMode - - .. autoattribute:: cuda.cudart.cudaSurfaceBoundaryMode.cudaBoundaryModeZero - - - Zero boundary mode - - - .. autoattribute:: cuda.cudart.cudaSurfaceBoundaryMode.cudaBoundaryModeClamp - - - Clamp boundary mode - - - .. autoattribute:: cuda.cudart.cudaSurfaceBoundaryMode.cudaBoundaryModeTrap - - - Trap boundary mode - -.. autoclass:: cuda.cudart.cudaSurfaceFormatMode - - .. autoattribute:: cuda.cudart.cudaSurfaceFormatMode.cudaFormatModeForced - - - Forced format mode - - - .. autoattribute:: cuda.cudart.cudaSurfaceFormatMode.cudaFormatModeAuto - - - Auto format mode - -.. autoclass:: cuda.cudart.cudaTextureAddressMode - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeWrap - - - Wrapping address mode - - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeClamp - - - Clamp to edge address mode - - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeMirror - - - Mirror address mode - - - .. autoattribute:: cuda.cudart.cudaTextureAddressMode.cudaAddressModeBorder - - - Border address mode - -
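The cudaDeviceNumaConfig values above come back from an ordinary attribute query; a minimal sketch (device 0 assumed present):

.. code-block:: python

    from cuda import cudart

    err, cfg = cudart.cudaDeviceGetAttribute(
        cudart.cudaDeviceAttr.cudaDevAttrNumaConfig, 0)
    assert err == cudart.cudaError_t.cudaSuccess
    if cfg == cudart.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode:
        # Only meaningful when the GPU itself is a NUMA node.
        err, numa_id = cudart.cudaDeviceGetAttribute(
            cudart.cudaDeviceAttr.cudaDevAttrNumaId, 0)

.. autoclass:: cuda.cudart.cudaTextureFilterMode - - ..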
autoattribute:: cuda.cudart.cudaTextureFilterMode.cudaFilterModePoint - - - Point filter mode - - - .. autoattribute:: cuda.cudart.cudaTextureFilterMode.cudaFilterModeLinear - - - Linear filter mode - -.. autoclass:: cuda.cudart.cudaTextureReadMode - - .. autoattribute:: cuda.cudart.cudaTextureReadMode.cudaReadModeElementType - - - Read texture as specified element type - - - .. autoattribute:: cuda.cudart.cudaTextureReadMode.cudaReadModeNormalizedFloat - - - Read texture as normalized float - -.. autoclass:: cuda.cudart.cudaEglPlaneDesc -.. autoclass:: cuda.cudart.cudaEglFrame -.. autoclass:: cuda.cudart.cudaEglStreamConnection -.. autoclass:: cuda.cudart.cudaArray_t -.. autoclass:: cuda.cudart.cudaArray_const_t -.. autoclass:: cuda.cudart.cudaMipmappedArray_t -.. autoclass:: cuda.cudart.cudaMipmappedArray_const_t -.. autoclass:: cuda.cudart.cudaHostFn_t -.. autoclass:: cuda.cudart.CUuuid -.. autoclass:: cuda.cudart.cudaUUID_t -.. autoclass:: cuda.cudart.cudaIpcEventHandle_t -.. autoclass:: cuda.cudart.cudaIpcMemHandle_t -.. autoclass:: cuda.cudart.cudaMemFabricHandle_t -.. autoclass:: cuda.cudart.cudaStream_t -.. autoclass:: cuda.cudart.cudaEvent_t -.. autoclass:: cuda.cudart.cudaGraphicsResource_t -.. autoclass:: cuda.cudart.cudaExternalMemory_t -.. autoclass:: cuda.cudart.cudaExternalSemaphore_t -.. autoclass:: cuda.cudart.cudaGraph_t -.. autoclass:: cuda.cudart.cudaGraphNode_t -.. autoclass:: cuda.cudart.cudaUserObject_t -.. autoclass:: cuda.cudart.cudaGraphConditionalHandle -.. autoclass:: cuda.cudart.cudaFunction_t -.. autoclass:: cuda.cudart.cudaKernel_t -.. autoclass:: cuda.cudart.cudaMemPool_t -.. autoclass:: cuda.cudart.cudaGraphEdgeData -.. autoclass:: cuda.cudart.cudaGraphExec_t -.. autoclass:: cuda.cudart.cudaGraphInstantiateParams -.. autoclass:: cuda.cudart.cudaGraphExecUpdateResultInfo -.. autoclass:: cuda.cudart.cudaGraphDeviceNode_t -.. autoclass:: cuda.cudart.cudaLaunchMemSyncDomainMap -.. autoclass:: cuda.cudart.cudaLaunchAttributeValue -.. autoclass:: cuda.cudart.cudaLaunchAttribute -.. autoclass:: cuda.cudart.cudaAsyncCallbackHandle_t -.. autoclass:: cuda.cudart.cudaAsyncNotificationInfo_t -.. autoclass:: cuda.cudart.cudaAsyncCallback -.. autoclass:: cuda.cudart.cudaSurfaceObject_t -.. autoclass:: cuda.cudart.cudaTextureObject_t -.. autoattribute:: cuda.cudart.CUDA_EGL_MAX_PLANES - - Maximum number of planes per frame - -.. autoattribute:: cuda.cudart.cudaHostAllocDefault - - Default page-locked allocation flag - -.. autoattribute:: cuda.cudart.cudaHostAllocPortable - - Pinned memory accessible by all CUDA contexts - -.. autoattribute:: cuda.cudart.cudaHostAllocMapped - - Map allocation into device space - -.. autoattribute:: cuda.cudart.cudaHostAllocWriteCombined - - Write-combined memory - -.. autoattribute:: cuda.cudart.cudaHostRegisterDefault - - Default host memory registration flag - -.. autoattribute:: cuda.cudart.cudaHostRegisterPortable - - Pinned memory accessible by all CUDA contexts - -.. autoattribute:: cuda.cudart.cudaHostRegisterMapped - - Map registered memory into device space - -.. autoattribute:: cuda.cudart.cudaHostRegisterIoMemory - - Memory-mapped I/O space - -.. autoattribute:: cuda.cudart.cudaHostRegisterReadOnly - - Memory-mapped read-only - -.. autoattribute:: cuda.cudart.cudaPeerAccessDefault - - Default peer addressing enable flag - -.. autoattribute:: cuda.cudart.cudaStreamDefault - - Default stream flag - -.. autoattribute:: cuda.cudart.cudaStreamNonBlocking - - Stream does not synchronize with stream 0 (the NULL stream) - -.. 
autoattribute:: cuda.cudart.cudaStreamLegacy - - Legacy stream handle - - - - Stream handle that can be passed as a cudaStream_t to use an implicit stream with legacy synchronization behavior. - - - - See details of the \link_sync_behavior - -.. autoattribute:: cuda.cudart.cudaStreamPerThread - - Per-thread stream handle - - - - Stream handle that can be passed as a cudaStream_t to use an implicit stream with per-thread synchronization behavior. - - - - See details of the \link_sync_behavior - -.. autoattribute:: cuda.cudart.cudaEventDefault - - Default event flag - -.. autoattribute:: cuda.cudart.cudaEventBlockingSync - - Event uses blocking synchronization - -.. autoattribute:: cuda.cudart.cudaEventDisableTiming - - Event will not record timing data - -.. autoattribute:: cuda.cudart.cudaEventInterprocess - - Event is suitable for interprocess use. cudaEventDisableTiming must be set - -.. autoattribute:: cuda.cudart.cudaEventRecordDefault - - Default event record flag - -.. autoattribute:: cuda.cudart.cudaEventRecordExternal - - Event is captured in the graph as an external event node when performing stream capture - -.. autoattribute:: cuda.cudart.cudaEventWaitDefault - - Default event wait flag - -.. autoattribute:: cuda.cudart.cudaEventWaitExternal - - Event is captured in the graph as an external event node when performing stream capture - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleAuto - - Device flag - Automatic scheduling - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleSpin - - Device flag - Spin default scheduling - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleYield - - Device flag - Yield default scheduling - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleBlockingSync - - Device flag - Use blocking synchronization - -.. autoattribute:: cuda.cudart.cudaDeviceBlockingSync - - Device flag - Use blocking synchronization [Deprecated] - -.. autoattribute:: cuda.cudart.cudaDeviceScheduleMask - - Device schedule flags mask - -.. autoattribute:: cuda.cudart.cudaDeviceMapHost - - Device flag - Support mapped pinned allocations - -.. autoattribute:: cuda.cudart.cudaDeviceLmemResizeToMax - - Device flag - Keep local memory allocation after launch - -.. autoattribute:: cuda.cudart.cudaDeviceSyncMemops - - Device flag - Ensure synchronous memory operations on this context will synchronize - -.. autoattribute:: cuda.cudart.cudaDeviceMask - - Device flags mask - -.. autoattribute:: cuda.cudart.cudaArrayDefault - - Default CUDA array allocation flag - -.. autoattribute:: cuda.cudart.cudaArrayLayered - - Must be set in cudaMalloc3DArray to create a layered CUDA array - -.. autoattribute:: cuda.cudart.cudaArraySurfaceLoadStore - - Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array - -.. autoattribute:: cuda.cudart.cudaArrayCubemap - - Must be set in cudaMalloc3DArray to create a cubemap CUDA array - -.. autoattribute:: cuda.cudart.cudaArrayTextureGather - - Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array - -.. autoattribute:: cuda.cudart.cudaArrayColorAttachment - - Must be set in cudaExternalMemoryGetMappedMipmappedArray if the mipmapped array is used as a color target in a graphics API - -.. autoattribute:: cuda.cudart.cudaArraySparse - - Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA mipmapped array - -.. 
autoattribute:: cuda.cudart.cudaArrayDeferredMapping - - Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a deferred mapping CUDA array or CUDA mipmapped array - -.. autoattribute:: cuda.cudart.cudaIpcMemLazyEnablePeerAccess - - Automatically enable peer access between remote devices as needed - -.. autoattribute:: cuda.cudart.cudaMemAttachGlobal - - Memory can be accessed by any stream on any device - -.. autoattribute:: cuda.cudart.cudaMemAttachHost - - Memory cannot be accessed by any stream on any device - -.. autoattribute:: cuda.cudart.cudaMemAttachSingle - - Memory can only be accessed by a single stream on the associated device - -.. autoattribute:: cuda.cudart.cudaOccupancyDefault - - Default behavior - -.. autoattribute:: cuda.cudart.cudaOccupancyDisableCachingOverride - - Assume global caching is enabled and cannot be automatically turned off - -.. autoattribute:: cuda.cudart.cudaCpuDeviceId - - Device id that represents the CPU - -.. autoattribute:: cuda.cudart.cudaInvalidDeviceId - - Device id that represents an invalid device - -.. autoattribute:: cuda.cudart.cudaInitDeviceFlagsAreValid - - Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice call - -.. autoattribute:: cuda.cudart.cudaCooperativeLaunchMultiDeviceNoPreSync - - If set, each kernel launched as part of :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. - -.. autoattribute:: cuda.cudart.cudaCooperativeLaunchMultiDeviceNoPostSync - - If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. - -.. autoattribute:: cuda.cudart.cudaArraySparsePropertiesSingleMipTail - - Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers - -.. autoattribute:: cuda.cudart.CUDART_CB -.. autoattribute:: cuda.cudart.CU_UUID_HAS_BEEN_DEFINED - - CUDA UUID types - -.. autoattribute:: cuda.cudart.CUDA_IPC_HANDLE_SIZE - - CUDA IPC Handle Size - -.. autoattribute:: cuda.cudart.cudaExternalMemoryDedicated - - Indicates that the external memory object is a dedicated resource - -.. autoattribute:: cuda.cudart.cudaExternalSemaphoreSignalSkipNvSciBufMemSync - - When the flags parameter of :py:obj:`~.cudaExternalSemaphoreSignalParams` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -.. autoattribute:: cuda.cudart.cudaExternalSemaphoreWaitSkipNvSciBufMemSync - - When the flags parameter of :py:obj:`~.cudaExternalSemaphoreWaitParams` contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. - -
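cudaCpuDeviceId above is accepted anywhere a device ordinal selects a prefetch or preferred-location target; a minimal managed-memory sketch under the assumption that device 0 supports managed memory:

.. code-block:: python

    from cuda import cudart

    nbytes = 1 << 20
    err, ptr = cudart.cudaMallocManaged(nbytes, cudart.cudaMemAttachGlobal)
    assert err == cudart.cudaError_t.cudaSuccess
    err, stream = cudart.cudaStreamCreateWithFlags(cudart.cudaStreamNonBlocking)
    assert err == cudart.cudaError_t.cudaSuccess
    # Migrate the range to host memory ahead of CPU access by naming
    # the CPU pseudo-device as the prefetch destination.
    err, = cudart.cudaMemPrefetchAsync(ptr, nbytes, cudart.cudaCpuDeviceId, stream)
    assert err == cudart.cudaError_t.cudaSuccess
    err, = cudart.cudaStreamSynchronize(stream)
    err, = cudart.cudaFree(ptr)
    err, = cudart.cudaStreamDestroy(stream)

..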
-.. autoattribute:: cuda.cudart.cudaNvSciSyncAttrSignal - - When the flags parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs signaler-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cudart.cudaNvSciSyncAttrWait - - When the flags parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs waiter-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. - -.. autoattribute:: cuda.cudart.cudaGraphKernelNodePortDefault - - This port activates when the kernel has finished executing. - -.. autoattribute:: cuda.cudart.cudaGraphKernelNodePortProgrammatic - - This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.cudaGraphDependencyTypeProgrammatic`. See also :py:obj:`~.cudaLaunchAttributeProgrammaticEvent`. - -.. autoattribute:: cuda.cudart.cudaGraphKernelNodePortLaunchCompletion - - This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.cudaLaunchAttributeLaunchCompletionEvent`. - -.. autoattribute:: cuda.cudart.cudaStreamAttrID -.. autoattribute:: cuda.cudart.cudaStreamAttributeAccessPolicyWindow -.. autoattribute:: cuda.cudart.cudaStreamAttributeSynchronizationPolicy -.. autoattribute:: cuda.cudart.cudaStreamAttributeMemSyncDomainMap -.. autoattribute:: cuda.cudart.cudaStreamAttributeMemSyncDomain -.. autoattribute:: cuda.cudart.cudaStreamAttributePriority -.. autoattribute:: cuda.cudart.cudaStreamAttrValue -.. autoattribute:: cuda.cudart.cudaKernelNodeAttrID -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeAccessPolicyWindow -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeCooperative -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributePriority -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeClusterDimension -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeClusterSchedulingPolicyPreference -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeMemSyncDomainMap -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeMemSyncDomain -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributePreferredSharedMemoryCarveout -.. autoattribute:: cuda.cudart.cudaKernelNodeAttributeDeviceUpdatableKernelNode -.. autoattribute:: cuda.cudart.cudaKernelNodeAttrValue -.. autoattribute:: cuda.cudart.cudaSurfaceType1D -.. autoattribute:: cuda.cudart.cudaSurfaceType2D -.. autoattribute:: cuda.cudart.cudaSurfaceType3D -.. autoattribute:: cuda.cudart.cudaSurfaceTypeCubemap -.. autoattribute:: cuda.cudart.cudaSurfaceType1DLayered -.. autoattribute:: cuda.cudart.cudaSurfaceType2DLayered -.. autoattribute:: cuda.cudart.cudaSurfaceTypeCubemapLayered -.. autoattribute:: cuda.cudart.cudaTextureType1D -.. autoattribute:: cuda.cudart.cudaTextureType2D -.. autoattribute:: cuda.cudart.cudaTextureType3D -.. autoattribute:: cuda.cudart.cudaTextureTypeCubemap -.. autoattribute:: cuda.cudart.cudaTextureType1DLayered -.. autoattribute:: cuda.cudart.cudaTextureType2DLayered -.. autoattribute:: cuda.cudart.cudaTextureTypeCubemapLayered diff --git a/docs_src/source/module/driver.rst b/docs_src/source/module/driver.rst new file mode 100644 index 00000000..694c81c7 --- /dev/null +++ b/docs_src/source/module/driver.rst @@ -0,0 +1,6792 @@ +------ +driver +------ + +Data types used by CUDA driver +------------------------------ + + + +.. autoclass:: cuda.bindings.driver.CUuuid_st +..
autoclass:: cuda.bindings.driver.CUmemFabricHandle_st +.. autoclass:: cuda.bindings.driver.CUipcEventHandle_st +.. autoclass:: cuda.bindings.driver.CUipcMemHandle_st +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpParams_union +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUasyncNotificationInfo_st +.. autoclass:: cuda.bindings.driver.CUdevprop_st +.. autoclass:: cuda.bindings.driver.CUaccessPolicyWindow_st +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_CONDITIONAL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUgraphEdgeData_st +.. autoclass:: cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUlaunchMemSyncDomainMap_st +.. autoclass:: cuda.bindings.driver.CUlaunchAttributeValue_union +.. autoclass:: cuda.bindings.driver.CUlaunchAttribute_st +.. autoclass:: cuda.bindings.driver.CUlaunchConfig_st +.. autoclass:: cuda.bindings.driver.CUexecAffinitySmCount_st +.. autoclass:: cuda.bindings.driver.CUexecAffinityParam_st +.. autoclass:: cuda.bindings.driver.CUctxCigParam_st +.. autoclass:: cuda.bindings.driver.CUctxCreateParams_st +.. autoclass:: cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY2D_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_PEER_st +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_st +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_st +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_TEXTURE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_st +.. autoclass:: cuda.bindings.driver.CUtensorMap_st +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st +.. autoclass:: cuda.bindings.driver.CUDA_LAUNCH_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUarrayMapInfo_st +.. autoclass:: cuda.bindings.driver.CUmemLocation_st +.. autoclass:: cuda.bindings.driver.CUmemAllocationProp_st +.. 
autoclass:: cuda.bindings.driver.CUmulticastObjectProp_st +.. autoclass:: cuda.bindings.driver.CUmemAccessDesc_st +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResultInfo_st +.. autoclass:: cuda.bindings.driver.CUmemPoolProps_st +.. autoclass:: cuda.bindings.driver.CUmemPoolPtrExportData_st +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1_st +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2_st +.. autoclass:: cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS_st +.. autoclass:: cuda.bindings.driver.CUgraphNodeParams_st +.. autoclass:: cuda.bindings.driver.CUeglFrame_st +.. autoclass:: cuda.bindings.driver.CUipcMem_flags + + .. autoattribute:: cuda.bindings.driver.CUipcMem_flags.CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS + + + Automatically enable peer access between remote devices as needed + +.. autoclass:: cuda.bindings.driver.CUmemAttach_flags + + .. autoattribute:: cuda.bindings.driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL + + + Memory can be accessed by any stream on any device + + + .. autoattribute:: cuda.bindings.driver.CUmemAttach_flags.CU_MEM_ATTACH_HOST + + + Memory cannot be accessed by any stream on any device + + + .. autoattribute:: cuda.bindings.driver.CUmemAttach_flags.CU_MEM_ATTACH_SINGLE + + + Memory can only be accessed by a single stream on the associated device + +.. autoclass:: cuda.bindings.driver.CUctx_flags + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_SPIN + + + Set spin as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_YIELD + + + Set yield as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SCHED_MASK + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_MAP_HOST + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_LMEM_RESIZE_TO_MAX + + + Keep local memory allocation after launch + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_COREDUMP_ENABLE + + + Trigger coredumps from exceptions in this context + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_USER_COREDUMP_ENABLE + + + Enable user pipe to trigger coredumps in this context + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_SYNC_MEMOPS + + + Ensure synchronous memory operations on this context will synchronize + + + .. autoattribute:: cuda.bindings.driver.CUctx_flags.CU_CTX_FLAGS_MASK + +.. autoclass:: cuda.bindings.driver.CUevent_sched_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_SPIN + + + Set spin as default scheduling + + + .. autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_YIELD + + + Set yield as default scheduling + + + .. 
autoattribute:: cuda.bindings.driver.CUevent_sched_flags.CU_EVENT_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + +.. autoclass:: cuda.bindings.driver.cl_event_flags + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_SPIN + + + Set spin as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_YIELD + + + Set yield as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_event_flags.NVCL_EVENT_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + +.. autoclass:: cuda.bindings.driver.cl_context_flags + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_AUTO + + + Automatic scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_SPIN + + + Set spin as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_YIELD + + + Set yield as default scheduling + + + .. autoattribute:: cuda.bindings.driver.cl_context_flags.NVCL_CTX_SCHED_BLOCKING_SYNC + + + Set blocking synchronization as default scheduling + +.. autoclass:: cuda.bindings.driver.CUstream_flags + + .. autoattribute:: cuda.bindings.driver.CUstream_flags.CU_STREAM_DEFAULT + + + Default stream flag + + + .. autoattribute:: cuda.bindings.driver.CUstream_flags.CU_STREAM_NON_BLOCKING + + + Stream does not synchronize with stream 0 (the NULL stream) + +.. autoclass:: cuda.bindings.driver.CUevent_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_DEFAULT + + + Default event flag + + + .. autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_BLOCKING_SYNC + + + Event uses blocking synchronization + + + .. autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_DISABLE_TIMING + + + Event will not record timing data + + + .. autoattribute:: cuda.bindings.driver.CUevent_flags.CU_EVENT_INTERPROCESS + + + Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set + +.. autoclass:: cuda.bindings.driver.CUevent_record_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_record_flags.CU_EVENT_RECORD_DEFAULT + + + Default event record flag + + + .. autoattribute:: cuda.bindings.driver.CUevent_record_flags.CU_EVENT_RECORD_EXTERNAL + + + When using stream capture, create an event record node instead of the default behavior. This flag is invalid when used outside of capture. + +.. autoclass:: cuda.bindings.driver.CUevent_wait_flags + + .. autoattribute:: cuda.bindings.driver.CUevent_wait_flags.CU_EVENT_WAIT_DEFAULT + + + Default event wait flag + + + .. autoattribute:: cuda.bindings.driver.CUevent_wait_flags.CU_EVENT_WAIT_EXTERNAL + + + When using stream capture, create an event wait node instead of the default behavior. This flag is invalid when used outside of capture. + +.. autoclass:: cuda.bindings.driver.CUstreamWaitValue_flags + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ + + + Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit values). Note this is a cyclic comparison which ignores wraparound. (Default behavior.) + + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_EQ + + + Wait until *addr == value. + + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_AND + + + Wait until (*addr & value) != 0. + +
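Before the remaining wait flags, a brief hedged sketch of how the comparison flags above are used through the driver bindings (it assumes the device supports stream memory operations; error handling is omitted for brevity):

.. code-block:: python

   # Sketch: enqueue a 32-bit write, then make the stream wait until the
   # value satisfies a >= comparison (CU_STREAM_WAIT_VALUE_GEQ).
   from cuda.bindings import driver

   err, = driver.cuInit(0)
   err, dev = driver.cuDeviceGet(0)
   err, ctx = driver.cuCtxCreate(0, dev)
   err, stream = driver.cuStreamCreate(0)
   err, dptr = driver.cuMemAlloc(4)  # one 32-bit word

   err, = driver.cuStreamWriteValue32(
       stream, dptr, 1,
       driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT)
   err, = driver.cuStreamWaitValue32(
       stream, dptr, 1,
       driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_GEQ)
   err, = driver.cuStreamSynchronize(stream)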
+ .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_NOR + + + Wait until ~(*addr | value) != 0. Support for this operation can be queried with :py:obj:`~.cuDeviceGetAttribute()` and :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR`. + + + .. autoattribute:: cuda.bindings.driver.CUstreamWaitValue_flags.CU_STREAM_WAIT_VALUE_FLUSH + + + Follow the wait operation with a flush of outstanding remote writes. This means that, if a remote write operation is guaranteed to have reached the device before the wait can be satisfied, that write is guaranteed to be visible to downstream device work. The device is permitted to reorder remote writes internally. For example, this flag would be required if two remote writes arrive in a defined order, the wait is satisfied by the second write, and downstream work needs to observe the first write. Support for this operation is restricted to selected platforms and can be queried with :py:obj:`~.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES`. + +.. autoclass:: cuda.bindings.driver.CUstreamWriteValue_flags + + .. autoattribute:: cuda.bindings.driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_DEFAULT + + + Default behavior + + + .. autoattribute:: cuda.bindings.driver.CUstreamWriteValue_flags.CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER + + + Permits the write to be reordered with writes which were issued before it, as a performance optimization. Normally, :py:obj:`~.cuStreamWriteValue32` will provide a memory fence before the write, which has similar semantics to __threadfence_system() but is scoped to the stream rather than a CUDA thread. This flag is not supported in the v2 API. + +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpType + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_32 + + + Represents a :py:obj:`~.cuStreamWaitValue32` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_32 + + + Represents a :py:obj:`~.cuStreamWriteValue32` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WAIT_VALUE_64 + + + Represents a :py:obj:`~.cuStreamWaitValue64` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_WRITE_VALUE_64 + + + Represents a :py:obj:`~.cuStreamWriteValue64` operation + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_BARRIER + + + Insert a memory barrier of the specified type + + + .. autoattribute:: cuda.bindings.driver.CUstreamBatchMemOpType.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES + + + This has the same effect as :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH`, but as a standalone operation. + +.. autoclass:: cuda.bindings.driver.CUstreamMemoryBarrier_flags + + .. autoattribute:: cuda.bindings.driver.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_SYS + + + System-wide memory barrier. + + + .. autoattribute:: cuda.bindings.driver.CUstreamMemoryBarrier_flags.CU_STREAM_MEMORY_BARRIER_TYPE_GPU + + + Limit memory barrier scope to the GPU. + +.. autoclass:: cuda.bindings.driver.CUoccupancy_flags + + .. autoattribute:: cuda.bindings.driver.CUoccupancy_flags.CU_OCCUPANCY_DEFAULT + + + Default behavior + + + .. autoattribute:: cuda.bindings.driver.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE + + + Assume global caching is enabled and cannot be automatically turned off + +.. autoclass:: cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags + + ..
autoattribute:: cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_ADD_CAPTURE_DEPENDENCIES + + + Add new nodes to the dependency set + + + .. autoattribute:: cuda.bindings.driver.CUstreamUpdateCaptureDependencies_flags.CU_STREAM_SET_CAPTURE_DEPENDENCIES + + + Replace the dependency set with the new nodes + +.. autoclass:: cuda.bindings.driver.CUasyncNotificationType + + .. autoattribute:: cuda.bindings.driver.CUasyncNotificationType.CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET + +.. autoclass:: cuda.bindings.driver.CUarray_format + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT8 + + + Unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT16 + + + Unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNSIGNED_INT32 + + + Unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SIGNED_INT8 + + + Signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SIGNED_INT16 + + + Signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SIGNED_INT32 + + + Signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_HALF + + + 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_FLOAT + + + 32-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_NV12 + + + 8-bit YUV planar format, with 4:2:0 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT8X1 + + + 1 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT8X2 + + + 2 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT8X4 + + + 4 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT16X1 + + + 1 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT16X2 + + + 2 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_UNORM_INT16X4 + + + 4 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT8X1 + + + 1 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT8X2 + + + 2 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT8X4 + + + 4 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT16X1 + + + 1 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT16X2 + + + 2 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_SNORM_INT16X4 + + + 4 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC1_UNORM + + + 4 channel unsigned normalized block-compressed (BC1 compression) format + + + .. 
autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC1_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC2_UNORM + + + 4 channel unsigned normalized block-compressed (BC2 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC2_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC3_UNORM + + + 4 channel unsigned normalized block-compressed (BC3 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC3_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC4_UNORM + + + 1 channel unsigned normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC4_SNORM + + + 1 channel signed normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC5_UNORM + + + 2 channel unsigned normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC5_SNORM + + + 2 channel signed normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC6H_UF16 + + + 3 channel unsigned half-float block-compressed (BC6H compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC6H_SF16 + + + 3 channel signed half-float block-compressed (BC6H compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC7_UNORM + + + 4 channel unsigned normalized block-compressed (BC7 compression) format + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_BC7_UNORM_SRGB + + + 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P010 + + + 10-bit YUV planar format, with 4:2:0 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P016 + + + 16-bit YUV planar format, with 4:2:0 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_NV16 + + + 8-bit YUV planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P210 + + + 10-bit YUV planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_P216 + + + 16-bit YUV planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_YUY2 + + + 2 channel, 8-bit YUV packed planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y210 + + + 2 channel, 10-bit YUV packed planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y216 + + + 2 channel, 16-bit YUV packed planar format, with 4:2:2 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_AYUV + + + 4 channel, 8-bit YUV packed planar format, with 4:4:4 sampling + + + .. 
autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y410 + + + 10-bit YUV packed planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y416 + + + 4 channel, 12-bit YUV packed planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y444_PLANAR8 + + + 3 channel 8-bit YUV planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_Y444_PLANAR10 + + + 3 channel 10-bit YUV planar format, with 4:4:4 sampling + + + .. autoattribute:: cuda.bindings.driver.CUarray_format.CU_AD_FORMAT_MAX + +.. autoclass:: cuda.bindings.driver.CUaddress_mode + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_WRAP + + + Wrapping address mode + + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_CLAMP + + + Clamp to edge address mode + + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_MIRROR + + + Mirror address mode + + + .. autoattribute:: cuda.bindings.driver.CUaddress_mode.CU_TR_ADDRESS_MODE_BORDER + + + Border address mode + +.. autoclass:: cuda.bindings.driver.CUfilter_mode + + .. autoattribute:: cuda.bindings.driver.CUfilter_mode.CU_TR_FILTER_MODE_POINT + + + Point filter mode + + + .. autoattribute:: cuda.bindings.driver.CUfilter_mode.CU_TR_FILTER_MODE_LINEAR + + + Linear filter mode + +.. autoclass:: cuda.bindings.driver.CUdevice_attribute + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK + + + Maximum number of threads per block + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X + + + Maximum block dimension X + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y + + + Maximum block dimension Y + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z + + + Maximum block dimension Z + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X + + + Maximum grid dimension X + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y + + + Maximum grid dimension Y + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z + + + Maximum grid dimension Z + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK + + + Maximum shared memory available per block in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY + + + Memory available on device for constant variables in a CUDA C kernel in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_WARP_SIZE + + + Warp size in threads + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PITCH + + + Maximum pitch in bytes allowed by memory copies + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK + + + Maximum number of 32-bit registers available per block + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLOCK_RATE + + + Typical clock frequency in kilohertz + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT + + + Alignment requirement for textures + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_OVERLAP + + + Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT + + + Number of multiprocessors on device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT + + + Specifies whether there is a run time limit on kernels + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_INTEGRATED + + + Device is integrated with host memory + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY + + + Device can map host memory into CUDA address space + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_MODE + + + Compute mode (See :py:obj:`~.CUcomputemode` for details) + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH + + + Maximum 1D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH + + + Maximum 2D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT + + + Maximum 2D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH + + + Maximum 3D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT + + + Maximum 3D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH + + + Maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH + + + Maximum 2D layered texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT + + + Maximum 2D layered texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS + + + Maximum layers in a 2D layered texture + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES + + + Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT + + + Alignment requirement for surfaces + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS + + + Device can possibly execute multiple kernels concurrently + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ECC_ENABLED + + + Device has ECC support enabled + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_BUS_ID + + + PCI bus ID of the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID + + + PCI device ID of the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TCC_DRIVER + + + Device is using TCC driver model + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE + + + Peak memory clock frequency in kilohertz + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH + + + Global memory bus width in bits + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE + + + Size of L2 cache in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR + + + Maximum resident threads per multiprocessor + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT + + + Number of asynchronous engines + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING + + + Device shares a unified address space with the host + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH + + + Maximum 1D layered texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS + + + Maximum layers in a 1D layered texture + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER + + + Deprecated, do not use. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH + + + Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT + + + Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE + + + Alternate maximum 3D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE + + + Alternate maximum 3D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE + + + Alternate maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID + + + PCI domain ID of the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT + + + Pitch alignment requirement for textures + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH + + + Maximum cubemap texture width/height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH + + + Maximum cubemap layered texture width/height + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS + + + Maximum layers in a cubemap layered texture + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH + + + Maximum 1D surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH + + + Maximum 2D surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT + + + Maximum 2D surface height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH + + + Maximum 3D surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT + + + Maximum 3D surface height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH + + + Maximum 3D surface depth + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH + + + Maximum 1D layered surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS + + + Maximum layers in a 1D layered surface + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH + + + Maximum 2D layered surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT + + + Maximum 2D layered surface height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS + + + Maximum layers in a 2D layered surface + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH + + + Maximum cubemap surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH + + + Maximum cubemap layered surface width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS + + + Maximum layers in a cubemap layered surface + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH + + + Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or :py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth()` instead. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH + + + Maximum 2D linear texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT + + + Maximum 2D linear texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH + + + Maximum 2D linear texture pitch in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH + + + Maximum mipmapped 2D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT + + + Maximum mipmapped 2D texture height + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR + + + Major compute capability version number + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR + + + Minor compute capability version number + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH + + + Maximum mipmapped 1D texture width + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED + + + Device supports stream priorities + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED + + + Device supports caching globals in L1 + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED + + + Device supports caching locals in L1 + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + + + Maximum shared memory available per multiprocessor in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR + + + Maximum number of 32-bit registers available per multiprocessor + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY + + + Device can allocate managed memory on this system + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD + + + Device is on a multi-GPU board + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID + + + Unique id for a group of devices on the same multi-GPU board + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED + + + Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware) + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO + + + Ratio of single precision performance (in floating-point operations per second) to double precision performance + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS + + + Device supports coherently accessing pageable memory without calling cudaHostRegister on it + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + + + Device can coherently access managed memory concurrently with the CPU + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED + + + Device supports compute preemption. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM + + + Device can access host registered memory at the same virtual address as the CPU + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 + + + Deprecated, along with v1 MemOps API, :py:obj:`~.cuStreamBatchMemOp` and related APIs are supported. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 + + + Deprecated, along with v1 MemOps API, 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related APIs. + + + .. 
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 + + + Deprecated, along with v1 MemOps API, :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH + + + Device supports launching cooperative kernels via :py:obj:`~.cuLaunchCooperativeKernel` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH + + + Deprecated, :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` is deprecated. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN + + + Maximum optin shared memory per block + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES + + + The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. See :py:obj:`~.Stream Memory Operations` for additional details. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED + + + Device supports host memory registration via :py:obj:`~.cudaHostRegister`. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES + + + Device accesses pageable memory via the host's page tables. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST + + + The host can directly access managed memory on the device without migration. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED + + + Deprecated, use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED + + + Device supports virtual memory management APIs like :py:obj:`~.cuMemAddressReserve`, :py:obj:`~.cuMemCreate`, :py:obj:`~.cuMemMap` and related APIs + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED + + + Device supports exporting memory to a posix file descriptor with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED + + + Device supports exporting memory to a Win32 NT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED + + + Device supports exporting memory to a Win32 KMT handle with :py:obj:`~.cuMemExportToShareableHandle`, if requested via :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR + + + Maximum number of blocks per multiprocessor + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED + + + Device supports compression of memory + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE + + + Maximum L2 persisting lines capacity setting in bytes. + +
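Each attribute in this enum is read with a single call. A hedged sketch (device 0, error checks elided) of querying a few of the attributes listed here:

.. code-block:: python

   # Sketch: query selected CUdevice_attribute values for device 0.
   from cuda.bindings import driver

   err, = driver.cuInit(0)
   err, dev = driver.cuDeviceGet(0)

   attr = driver.CUdevice_attribute
   for a in (attr.CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
             attr.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,
             attr.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED):
       err, value = driver.cuDeviceGetAttribute(a, dev)
       print(a.name, value)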
+ .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE + + + Maximum value of :py:obj:`~.CUaccessPolicyWindow.num_bytes`. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED + + + Device supports specifying the GPUDirect RDMA flag with :py:obj:`~.cuMemCreate` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK + + + Shared memory reserved by CUDA driver per block in bytes + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED + + + Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED + + + Device supports using the :py:obj:`~.cuMemHostRegister` flag :py:obj:`~.CU_MEMHOSTREGISTER_READ_ONLY` to register memory that must be mapped as read-only to the GPU + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED + + + External timeline semaphore interop is supported on the device + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED + + + Device supports using the :py:obj:`~.cuMemAllocAsync` and :py:obj:`~.cuMemPool` family of APIs + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED + + + Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS + + + The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the :py:obj:`~.CUflushGPUDirectRDMAWritesOptions` enum + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING + + + GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.CUGPUDirectRDMAWritesOrdering` for the numerical values returned here. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES + + + Handle types supported with mempool based IPC + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH + + + Indicates device supports cluster launch + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED + + + Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS + + + 64-bit operations are supported in :py:obj:`~.cuStreamBatchMemOp` and related MemOp APIs. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR + + + :py:obj:`~.CU_STREAM_WAIT_VALUE_NOR` is supported by MemOp APIs. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED + + + Device supports buffer sharing with dma_buf mechanism. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + + + Device supports IPC Events. + + + ..
autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT + + + Number of memory domains the device supports. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED + + + Device supports accessing memory using Tensor Map. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED + + + Device supports exporting memory to a fabric handle with :py:obj:`~.cuMemExportToShareableHandle()` or requested with :py:obj:`~.cuMemCreate()` + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS + + + Device supports unified function pointers. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_CONFIG + + + NUMA configuration of a device: value is of type :py:obj:`~.CUdeviceNumaConfig` enum + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_NUMA_ID + + + NUMA node ID of the GPU memory + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED + + + Device supports switch multicast and reduction operations. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MPS_ENABLED + + + Indicates if contexts created on this device will be shared via MPS + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID + + + NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED + + + Device supports CIG with D3D12. + + + .. autoattribute:: cuda.bindings.driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MAX + +.. autoclass:: cuda.bindings.driver.CUpointer_attribute + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_CONTEXT + + + The :py:obj:`~.CUcontext` on which a pointer was allocated or registered + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE + + + The :py:obj:`~.CUmemorytype` describing the physical location of a pointer + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_POINTER + + + The address at which a pointer's memory may be accessed on the device + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_HOST_POINTER + + + The address at which a pointer's memory may be accessed on the host + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_P2P_TOKENS + + + A pair of tokens for use with the nv-p2p.h Linux kernel interface + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_SYNC_MEMOPS + + + Synchronize every synchronous memory operation initiated on this region + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_BUFFER_ID + + + A process-wide unique ID for an allocated memory region + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_MANAGED + + + Indicates if the pointer points to managed memory + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL + + + A device ordinal of a device on which a pointer was allocated or registered + + + .. 
autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE + + + 1 if this pointer maps to an allocation that is suitable for :py:obj:`~.cudaIpcGetMemHandle`, 0 otherwise + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_START_ADDR + + + Starting address for this requested pointer + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_RANGE_SIZE + + + Size of the address range for this requested pointer + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPED + + + 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES + + + Bitmask of allowed :py:obj:`~.CUmemAllocationHandleType` for this allocation + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE + + + 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_ACCESS_FLAGS + + + Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE + + + Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL. + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_SIZE + + + Size of the actual underlying mapping that the pointer belongs to + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR + + + The start address of the mapping that the pointer belongs to + + + .. autoattribute:: cuda.bindings.driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID + + + A process-wide unique id corresponding to the physical allocation the pointer belongs to + +.. autoclass:: cuda.bindings.driver.CUfunction_attribute + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK + + + The maximum number of threads per block, beyond which a launch of the function would fail. This number depends on both the function and the device on which the function is currently loaded. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES + + + The size in bytes of statically-allocated shared memory required by this function. This does not include dynamically-allocated shared memory requested by the user at runtime. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES + + + The size in bytes of user-allocated constant memory required by this function. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES + + + The size in bytes of local memory used by each thread of this function. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS + + + The number of registers used by each thread of this function. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PTX_VERSION + + + The PTX virtual architecture version for which the function was compiled. 
This value is the major PTX version * 10 + the minor PTX version, so a PTX version 1.3 function would return the value 13. Note that this may return the undefined value of 0 for cubins compiled prior to CUDA 3.0. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_BINARY_VERSION + + + The binary architecture version for which the function was compiled. This value is the major binary version * 10 + the minor binary version, so a binary version 1.3 function would return the value 13. Note that this will return a value of 10 for legacy cubins that do not have a properly-encoded binary architecture version. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CACHE_MODE_CA + + + The attribute to indicate whether the function has been compiled with the user-specified option "-Xptxas --dlcm=ca" set. + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES + + + The maximum size in bytes of dynamically-allocated shared memory that can be used by this function. If the user-specified dynamic shared memory size is larger than this value, the launch will fail. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT + + + On devices where the L1 cache and shared memory use the same hardware resources, this sets the shared memory carveout preference, in percent of the total shared memory. Refer to :py:obj:`~.CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR`. This is only a hint, and the driver can choose a different ratio if required to execute the function. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET + + + If this attribute is set, the kernel must launch with a valid cluster size specified. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH + + + The required cluster width in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. + + + + If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT + + + The required cluster height in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. + + + + If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + + + .. autoattribute:: cuda.bindings.driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH + + + The required cluster depth in blocks. The values must either all be 0 or all be positive. The validity of the cluster dimensions is otherwise checked at launch time. + + + + If the value is set during compile time, it cannot be set at runtime. Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. See :py:obj:`~.cuFuncSetAttribute`, :py:obj:`~.cuKernelSetAttribute` + +
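Several of the attributes above are settable as well as queryable. A hedged one-call sketch (the `kernel` handle is a placeholder for a CUfunction obtained elsewhere, for example from cuModuleGetFunction):

.. code-block:: python

   # Sketch: opt a kernel into 64 KiB of dynamic shared memory before
   # launching it with a large sharedMemBytes value.
   from cuda.bindings import driver

   err, = driver.cuFuncSetAttribute(
       kernel,  # placeholder CUfunction handle
       driver.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
       64 * 1024)

A later cuLaunchKernel with sharedMemBytes up to the opted-in size is then validated against the raised limit rather than the default.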
.. autoclass:: cuda.bindings.driver.CUfunc_cache + + .. autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_NONE + + + no preference for shared memory or L1 (default) + + + .. autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_SHARED + + + prefer larger shared memory and smaller L1 cache + + + .. autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_L1 + + + prefer larger L1 cache and smaller shared memory + + + .. autoattribute:: cuda.bindings.driver.CUfunc_cache.CU_FUNC_CACHE_PREFER_EQUAL + + + prefer equal sized L1 cache and shared memory + +.. autoclass:: cuda.bindings.driver.CUsharedconfig + + .. autoattribute:: cuda.bindings.driver.CUsharedconfig.CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE + + + set default shared memory bank size + + + .. autoattribute:: cuda.bindings.driver.CUsharedconfig.CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE + + + set shared memory bank width to four bytes + + + .. autoattribute:: cuda.bindings.driver.CUsharedconfig.CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE + + + set shared memory bank width to eight bytes + +.. autoclass:: cuda.bindings.driver.CUshared_carveout + + .. autoattribute:: cuda.bindings.driver.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_DEFAULT + + + No preference for shared memory or L1 (default) + + + .. autoattribute:: cuda.bindings.driver.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_SHARED + + + Prefer maximum available shared memory, minimum L1 cache + + + .. autoattribute:: cuda.bindings.driver.CUshared_carveout.CU_SHAREDMEM_CARVEOUT_MAX_L1 + + + Prefer maximum available L1 cache, minimum shared memory + +.. autoclass:: cuda.bindings.driver.CUmemorytype + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_HOST + + + Host memory + + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_DEVICE + + + Device memory + + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_ARRAY + + + Array memory + + + .. autoattribute:: cuda.bindings.driver.CUmemorytype.CU_MEMORYTYPE_UNIFIED + + + Unified device or host memory + +
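+As a sketch of how :py:obj:`~.CUmemorytype` surfaces through the pointer-query API (a minimal, hedged example assuming a working driver installation; error checking elided):
+
+.. code-block:: python
+
+    from cuda.bindings import driver
+
+    err, = driver.cuInit(0)
+    err, dev = driver.cuDeviceGet(0)
+    err, ctx = driver.cuCtxCreate(0, dev)
+
+    err, dptr = driver.cuMemAlloc(1 << 20)
+
+    # For cuMemAlloc allocations this reports CU_MEMORYTYPE_DEVICE.
+    err, memtype = driver.cuPointerGetAttribute(
+        driver.CUpointer_attribute.CU_POINTER_ATTRIBUTE_MEMORY_TYPE, dptr)
+
+    err, = driver.cuMemFree(dptr)
+    err, = driver.cuCtxDestroy(ctx)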
.. autoclass:: cuda.bindings.driver.CUcomputemode + + .. autoattribute:: cuda.bindings.driver.CUcomputemode.CU_COMPUTEMODE_DEFAULT + + + Default compute mode (Multiple contexts allowed per device) + + + .. autoattribute:: cuda.bindings.driver.CUcomputemode.CU_COMPUTEMODE_PROHIBITED + + + Compute-prohibited mode (No contexts can be created on this device at this time) + + + .. autoattribute:: cuda.bindings.driver.CUcomputemode.CU_COMPUTEMODE_EXCLUSIVE_PROCESS + + + Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) + +.. autoclass:: cuda.bindings.driver.CUmem_advise + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY + + + Data will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_UNSET_READ_MOSTLY + + + Undo the effect of :py:obj:`~.CU_MEM_ADVISE_SET_READ_MOSTLY` + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_SET_PREFERRED_LOCATION + + + Set the preferred location for the data as the specified device + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION + + + Clear the preferred location for the data + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_SET_ACCESSED_BY + + + Data will be accessed by the specified device, so prevent page faults as much as possible + + + .. autoattribute:: cuda.bindings.driver.CUmem_advise.CU_MEM_ADVISE_UNSET_ACCESSED_BY + + + Let the Unified Memory subsystem decide on the page faulting policy for the specified device + +.. autoclass:: cuda.bindings.driver.CUmem_range_attribute + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY + + + Whether the range will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION + + + The preferred location of the range + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY + + + Memory range has :py:obj:`~.CU_MEM_ADVISE_SET_ACCESSED_BY` set for specified device + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION + + + The last location to which the range was prefetched + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE + + + The preferred location type of the range + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID + + + The preferred location id of the range + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE + + + The last location type to which the range was prefetched + + + .. autoattribute:: cuda.bindings.driver.CUmem_range_attribute.CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID + + + The last location id to which the range was prefetched + +
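+A minimal sketch of applying memory advice (assuming an existing context whose device is ``dev`` and a platform that supports managed memory; error checking elided):
+
+.. code-block:: python
+
+    from cuda.bindings import driver
+
+    nbytes = 1 << 20
+    err, dptr = driver.cuMemAllocManaged(
+        nbytes, driver.CUmemAttach_flags.CU_MEM_ATTACH_GLOBAL)
+
+    # Mark the range read-mostly so the driver may keep read-only copies
+    # resident instead of migrating the pages back and forth.
+    err, = driver.cuMemAdvise(
+        dptr, nbytes, driver.CUmem_advise.CU_MEM_ADVISE_SET_READ_MOSTLY, dev)
+
+    err, = driver.cuMemFree(dptr)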
.. autoclass:: cuda.bindings.driver.CUjit_option + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_MAX_REGISTERS + + + Max number of registers that a thread may use. + + Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_THREADS_PER_BLOCK + + + IN: Specifies minimum number of threads per block to target compilation for + + OUT: Returns the number of threads the compiler actually targeted. This restricts the resource utilization of the compiler (e.g. max registers) such that a block with the given number of threads should be able to launch based on register limitations. Note, this option does not currently take into account any other resource limitations, such as shared memory utilization. + + Cannot be combined with :py:obj:`~.CU_JIT_TARGET`. + + Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_WALL_TIME + + + Overwrites the option value with the total wall clock time, in milliseconds, spent in the compiler and linker + + Option type: float + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_INFO_LOG_BUFFER + + + Pointer to a buffer in which to print any log messages that are informational in nature (the buffer size is specified via option :py:obj:`~.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES`) + + Option type: char * + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES + + + IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator) + + OUT: Amount of log buffer filled with messages + + Option type: unsigned int + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_ERROR_LOG_BUFFER + + + Pointer to a buffer in which to print any log messages that reflect errors (the buffer size is specified via option :py:obj:`~.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES`) + + Option type: char * + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES + + + IN: Log buffer size in bytes. Log messages will be capped at this size (including null terminator) + + OUT: Amount of log buffer filled with messages + + Option type: unsigned int + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_OPTIMIZATION_LEVEL + + + Level of optimizations to apply to generated code (0 - 4), with 4 being the default and highest level of optimizations. + + Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_TARGET_FROM_CUCONTEXT + + + No option value required. Determines the target based on the current attached context (default) + + Option type: No option value needed + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_TARGET + + + Target is chosen based on supplied :py:obj:`~.CUjit_target`. Cannot be combined with :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`. + + Option type: unsigned int for enumerated type :py:obj:`~.CUjit_target` + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FALLBACK_STRATEGY + + + Specifies choice of fallback strategy if matching cubin is not found. Choice is based on supplied :py:obj:`~.CUjit_fallback`. This option cannot be used with cuLink* APIs as the linker requires exact matches. + + Option type: unsigned int for enumerated type :py:obj:`~.CUjit_fallback` + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GENERATE_DEBUG_INFO + + + Specifies whether to create debug information in output (-g) (0: false, default) + + Option type: int + + Applies to: compiler and linker + + + .. 
autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_LOG_VERBOSE + + + Generate verbose log messages (0: false, default) + + Option type: int + + Applies to: compiler and linker + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GENERATE_LINE_INFO + + + Generate line number information (-lineinfo) (0: false, default) + + Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_CACHE_MODE + + + Specifies whether to enable caching explicitly (-dlcm) + + Choice is based on supplied :py:obj:`~.CUjit_cacheMode_enum`. + + Option type: unsigned int for enumerated type :py:obj:`~.CUjit_cacheMode_enum` + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_NEW_SM3X_OPT + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FAST_COMPILE + + + This jit option is used for internal purposes only. + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GLOBAL_SYMBOL_NAMES + + + Array of device symbol names that will be relocated to the corresponding host addresses stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES`. + + Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries. + + When loading a device module, the driver will relocate all encountered unresolved symbols to the host addresses. + + It is only allowed to register symbols that correspond to unresolved global variables. + + It is illegal to register the same device symbol at multiple addresses. + + Option type: const char ** + + Applies to: dynamic linker only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GLOBAL_SYMBOL_ADDRESSES + + + Array of host addresses that will be used to relocate corresponding device symbols stored in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES`. + + Must contain :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_COUNT` entries. + + Option type: void ** + + Applies to: dynamic linker only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_GLOBAL_SYMBOL_COUNT + + + Number of entries in :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_NAMES` and :py:obj:`~.CU_JIT_GLOBAL_SYMBOL_ADDRESSES` arrays. + + Option type: unsigned int + + Applies to: dynamic linker only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_LTO + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FTZ + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_PREC_DIV + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_PREC_SQRT + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_FMA + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_KERNEL_NAMES + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_KERNEL_COUNT + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_VARIABLE_NAMES + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_REFERENCED_VARIABLE_COUNT + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_POSITION_INDEPENDENT_CODE + + + Generate position independent code (0: false) + + Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_MIN_CTA_PER_SM + + + This option hints to the JIT compiler the minimum number of CTAs from the kernel’s grid to be mapped to an SM. This option is ignored when used together with :py:obj:`~.CU_JIT_MAX_REGISTERS` or :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`. Optimizations based on this option need :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` to be specified as well. For kernels already using PTX directive .minnctapersm, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive. Option type: unsigned int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_MAX_THREADS_PER_BLOCK + + + Maximum number of threads in a thread block, computed as the product of the maximum extent specified for each dimension of the block. This limit is guaranteed not to be exceeded in any invocation of the kernel. Exceeding the maximum number of threads results in a runtime error or kernel launch failure. For kernels already using PTX directive .maxntid, this option will be ignored by default. Use :py:obj:`~.CU_JIT_OVERRIDE_DIRECTIVE_VALUES` to let this option take precedence over the PTX directive. Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_OVERRIDE_DIRECTIVE_VALUES + + + This option lets the values specified using :py:obj:`~.CU_JIT_MAX_REGISTERS`, :py:obj:`~.CU_JIT_THREADS_PER_BLOCK`, :py:obj:`~.CU_JIT_MAX_THREADS_PER_BLOCK` and :py:obj:`~.CU_JIT_MIN_CTA_PER_SM` take precedence over any PTX directives. (0: Disable, default; 1: Enable) Option type: int + + Applies to: compiler only + + + .. autoattribute:: cuda.bindings.driver.CUjit_option.CU_JIT_NUM_OPTIONS + +
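+A hedged sketch of passing JIT options while loading a module (it assumes ``ptx`` holds NUL-terminated PTX bytes, e.g. from NVRTC, that a context is current, and that integer-valued options can be passed directly as Python ints; ``b"my_kernel"`` is a placeholder entry-point name):
+
+.. code-block:: python
+
+    from cuda.bindings import driver
+
+    opts = [
+        driver.CUjit_option.CU_JIT_MAX_REGISTERS,
+        driver.CUjit_option.CU_JIT_GENERATE_LINE_INFO,
+    ]
+    vals = [32, 1]  # one value per option, in the same order
+
+    err, module = driver.cuModuleLoadDataEx(ptx, len(opts), opts, vals)
+    err, kernel = driver.cuModuleGetFunction(module, b"my_kernel")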
.. autoclass:: cuda.bindings.driver.CUjit_target + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_30 + + + Compute device class 3.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_32 + + + Compute device class 3.2 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_35 + + + Compute device class 3.5 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_37 + + + Compute device class 3.7 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_50 + + + Compute device class 5.0 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_52 + + + Compute device class 5.2 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_53 + + + Compute device class 5.3 + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_60 + + + Compute device class 6.0. + + + .. 
autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_61 + + + Compute device class 6.1. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_62 + + + Compute device class 6.2. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_70 + + + Compute device class 7.0. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_72 + + + Compute device class 7.2. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_75 + + + Compute device class 7.5. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_80 + + + Compute device class 8.0. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_86 + + + Compute device class 8.6. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_87 + + + Compute device class 8.7. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_89 + + + Compute device class 8.9. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_90 + + + Compute device class 9.0. + + + .. autoattribute:: cuda.bindings.driver.CUjit_target.CU_TARGET_COMPUTE_90A + + + Compute device class 9.0 with accelerated features. + +.. autoclass:: cuda.bindings.driver.CUjit_fallback + + .. autoattribute:: cuda.bindings.driver.CUjit_fallback.CU_PREFER_PTX + + + Prefer to compile PTX if exact binary match not found + + + .. autoattribute:: cuda.bindings.driver.CUjit_fallback.CU_PREFER_BINARY + + + Prefer to fall back to compatible binary code if exact match not found + +.. autoclass:: cuda.bindings.driver.CUjit_cacheMode + + .. autoattribute:: cuda.bindings.driver.CUjit_cacheMode.CU_JIT_CACHE_OPTION_NONE + + + Compile with no -dlcm flag specified + + + .. autoattribute:: cuda.bindings.driver.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CG + + + Compile with L1 cache disabled + + + .. autoattribute:: cuda.bindings.driver.CUjit_cacheMode.CU_JIT_CACHE_OPTION_CA + + + Compile with L1 cache enabled + +.. autoclass:: cuda.bindings.driver.CUjitInputType + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_CUBIN + + + Compiled device-class-specific device code + + Applicable options: none + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_PTX + + + PTX source code + + Applicable options: PTX compiler options + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_FATBINARY + + + Bundle of multiple cubins and/or PTX of some device code + + Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_OBJECT + + + Host object with embedded device code + + Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_LIBRARY + + + Archive of host objects with embedded device code + + Applicable options: PTX compiler options, :py:obj:`~.CU_JIT_FALLBACK_STRATEGY` + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_INPUT_NVVM + + + [Deprecated] + + + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + + + .. autoattribute:: cuda.bindings.driver.CUjitInputType.CU_JIT_NUM_INPUT_TYPES + +.. autoclass:: cuda.bindings.driver.CUgraphicsRegisterFlags + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_NONE + + + .. 
autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER + +.. autoclass:: cuda.bindings.driver.CUgraphicsMapResourceFlags + + .. autoattribute:: cuda.bindings.driver.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY + + + .. autoattribute:: cuda.bindings.driver.CUgraphicsMapResourceFlags.CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD + +.. autoclass:: cuda.bindings.driver.CUarray_cubemap_face + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_X + + + Positive X face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_X + + + Negative X face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Y + + + Positive Y face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Y + + + Negative Y face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_POSITIVE_Z + + + Positive Z face of cubemap + + + .. autoattribute:: cuda.bindings.driver.CUarray_cubemap_face.CU_CUBEMAP_FACE_NEGATIVE_Z + + + Negative Z face of cubemap + +.. autoclass:: cuda.bindings.driver.CUlimit + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_STACK_SIZE + + + GPU thread stack size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE + + + GPU printf FIFO size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_MALLOC_HEAP_SIZE + + + GPU malloc heap size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH + + + GPU device runtime launch synchronize depth + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT + + + GPU device runtime pending launch count + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_MAX_L2_FETCH_GRANULARITY + + + A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in bytes). This is a hint + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_PERSISTING_L2_CACHE_SIZE + + + A size in bytes for L2 persisting lines cache size + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_SHMEM_SIZE + + + A maximum size in bytes of shared memory available to CUDA kernels on a CIG context. Can only be queried, cannot be set + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_CIG_ENABLED + + + A non-zero value indicates this CUDA context is a CIG-enabled context. Can only be queried, cannot be set + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED + + + When set to a non-zero value, CUDA will fail to launch a kernel on a CIG context, instead of using the fallback path, if the kernel uses more shared memory than available + + + .. autoattribute:: cuda.bindings.driver.CUlimit.CU_LIMIT_MAX + +
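+A minimal sketch of reading and adjusting a context limit (assumes a current context; the driver may clamp the requested value, which is why the readback matters; error checking elided):
+
+.. code-block:: python
+
+    from cuda.bindings import driver
+
+    # Request an 8 MiB device-side printf FIFO ...
+    err, = driver.cuCtxSetLimit(
+        driver.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE, 8 * 1024 * 1024)
+
+    # ... then read back what the driver actually granted.
+    err, fifo_size = driver.cuCtxGetLimit(
+        driver.CUlimit.CU_LIMIT_PRINTF_FIFO_SIZE)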
.. autoclass:: cuda.bindings.driver.CUresourcetype + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_ARRAY + + + Array resource + + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_MIPMAPPED_ARRAY + + + Mipmapped array resource + + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_LINEAR + + + Linear resource + + + .. autoattribute:: cuda.bindings.driver.CUresourcetype.CU_RESOURCE_TYPE_PITCH2D + + + Pitch 2D resource + +.. autoclass:: cuda.bindings.driver.CUaccessProperty + + .. autoattribute:: cuda.bindings.driver.CUaccessProperty.CU_ACCESS_PROPERTY_NORMAL + + + Normal cache persistence. + + + .. autoattribute:: cuda.bindings.driver.CUaccessProperty.CU_ACCESS_PROPERTY_STREAMING + + + Streaming access is less likely to persist in cache. + + + .. autoattribute:: cuda.bindings.driver.CUaccessProperty.CU_ACCESS_PROPERTY_PERSISTING + + + Persisting access is more likely to persist in cache. + +.. autoclass:: cuda.bindings.driver.CUgraphConditionalNodeType + + .. autoattribute:: cuda.bindings.driver.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_IF + + + Conditional 'if' Node. Body executed once if condition value is non-zero. + + + .. autoattribute:: cuda.bindings.driver.CUgraphConditionalNodeType.CU_GRAPH_COND_TYPE_WHILE + + + Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. + +.. autoclass:: cuda.bindings.driver.CUgraphNodeType + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_KERNEL + + + GPU kernel node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMCPY + + + Memcpy node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEMSET + + + Memset node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_HOST + + + Host (executable) node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_GRAPH + + + Node which executes an embedded graph + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EMPTY + + + Empty (no-op) node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_WAIT_EVENT + + + External event wait node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EVENT_RECORD + + + External event record node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL + + + External semaphore signal node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT + + + External semaphore wait node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_ALLOC + + + Memory Allocation Node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_MEM_FREE + + + Memory Free Node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_BATCH_MEM_OP + + + Batch MemOp Node + + + .. autoattribute:: cuda.bindings.driver.CUgraphNodeType.CU_GRAPH_NODE_TYPE_CONDITIONAL + + + Conditional Node. May be used to implement a conditional execution path or loop + + inside of a graph. The graph(s) contained within the body of the conditional node + + can be selectively executed or iterated upon based on the value of a conditional + + variable. + + + + Handles must be created in advance of creating the node + + using :py:obj:`~.cuGraphConditionalHandleCreate`. + + + + The following restrictions apply to graphs which contain conditional nodes: + + The graph cannot be used in a child node. + + Only one instantiation of the graph may exist at any point in time.
+ + The graph cannot be cloned. + + + + To set the control value, supply a default value when creating the handle and/or + + call :py:obj:`~.cudaGraphSetConditional` from device code. + +.. autoclass:: cuda.bindings.driver.CUgraphDependencyType + + .. autoattribute:: cuda.bindings.driver.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_DEFAULT + + + This is an ordinary dependency. + + + .. autoattribute:: cuda.bindings.driver.CUgraphDependencyType.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC + + + This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC` or :py:obj:`~.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER` outgoing port. + +.. autoclass:: cuda.bindings.driver.CUgraphInstantiateResult + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_SUCCESS + + + Instantiation succeeded + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_ERROR + + + Instantiation failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE + + + Instantiation failed due to invalid structure, such as cycles + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED + + + Instantiation for device launch failed because the graph contained an unsupported operation + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiateResult.CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED + + + Instantiation for device launch failed due to the nodes belonging to different contexts + +.. autoclass:: cuda.bindings.driver.CUsynchronizationPolicy + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_AUTO + + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_SPIN + + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_YIELD + + + .. autoattribute:: cuda.bindings.driver.CUsynchronizationPolicy.CU_SYNC_POLICY_BLOCKING_SYNC + +.. autoclass:: cuda.bindings.driver.CUclusterSchedulingPolicy + + .. autoattribute:: cuda.bindings.driver.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_DEFAULT + + + the default policy + + + .. autoattribute:: cuda.bindings.driver.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_SPREAD + + + spread the blocks within a cluster to the SMs + + + .. autoattribute:: cuda.bindings.driver.CUclusterSchedulingPolicy.CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING + + + allow the hardware to load-balance the blocks in a cluster to the SMs + +.. autoclass:: cuda.bindings.driver.CUlaunchMemSyncDomain + + .. autoattribute:: cuda.bindings.driver.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT + + + Launch kernels in the default domain + + + .. autoattribute:: cuda.bindings.driver.CUlaunchMemSyncDomain.CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE + + + Launch kernels in the remote domain + +.. autoclass:: cuda.bindings.driver.CUlaunchAttributeID + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_IGNORE + + + Ignored entry, for convenient composition + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.accessPolicyWindow`. + + + .. 
autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_COOPERATIVE + + + Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.cooperative`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY + + + Valid for streams. See :py:obj:`~.CUlaunchAttributeValue.syncPolicy`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION + + + Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterDim`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + + + Valid for graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.clusterSchedulingPolicyPreference`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION + + + Valid for launches. Setting :py:obj:`~.CUlaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT + + + Valid for launches. Set :py:obj:`~.CUlaunchAttributeValue.programmaticEvent` to record the event. An event recorded through this launch attribute is guaranteed to only trigger after all blocks in the associated kernel trigger the event. A block can trigger the event through PTX launchdep.release or CUDA builtin function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cuEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cuEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PRIORITY + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.priority`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomainMap`. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN + + + Valid for streams, graph nodes, launches. See :py:obj:`~.CUlaunchAttributeValue.memSyncDomain`. + + + .. 
autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT + + + Valid for launches. Set :py:obj:`~.CUlaunchAttributeValue.launchCompletionEvent` to record the event. + + Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B is a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. + + A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.CU_EVENT_DISABLE_TIMING` flag set). + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE + + + Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. + + :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via :py:obj:`~.CUlaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`. + + Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cuGraphDestroyNode`. Additionally, once opted-in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cuGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cuGraphExecUpdate`. + + If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cuGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. + + + .. autoattribute:: cuda.bindings.driver.CUlaunchAttributeID.CU_LAUNCH_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT + + + Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.CUlaunchAttributeValue.sharedMemCarveout` to a percentage between 0-100 signals the CUDA driver to set the shared memory carveout preference, in percent of the total shared memory for that kernel launch. 
This attribute takes precedence over :py:obj:`~.CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT`. This is only a hint, and the CUDA driver can choose a different configuration if required for the launch. + +.. autoclass:: cuda.bindings.driver.CUstreamCaptureStatus + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_NONE + + + Stream is not capturing + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE + + + Stream is actively capturing + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_INVALIDATED + + + Stream is part of a capture sequence that has been invalidated, but not terminated + +.. autoclass:: cuda.bindings.driver.CUstreamCaptureMode + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_THREAD_LOCAL + + + .. autoattribute:: cuda.bindings.driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_RELAXED + +
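+A minimal capture sketch (assuming a current context; the asynchronous work that would be recorded between begin and end is elided, as is error checking):
+
+.. code-block:: python
+
+    from cuda.bindings import driver
+
+    err, stream = driver.cuStreamCreate(0)
+    err, = driver.cuStreamBeginCapture(
+        stream, driver.CUstreamCaptureMode.CU_STREAM_CAPTURE_MODE_GLOBAL)
+
+    # While capture is active, work issued to `stream` is recorded into a
+    # graph instead of being executed.
+    err, status = driver.cuStreamIsCapturing(stream)
+    assert status == driver.CUstreamCaptureStatus.CU_STREAM_CAPTURE_STATUS_ACTIVE
+
+    err, graph = driver.cuStreamEndCapture(stream)
+    err, = driver.cuGraphDestroy(graph)
+    err, = driver.cuStreamDestroy(stream)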
.. autoclass:: cuda.bindings.driver.CUdriverProcAddress_flags + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_DEFAULT + + + Default search mode for driver symbols. + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_LEGACY_STREAM + + + Search for legacy versions of driver symbols. + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddress_flags.CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM + + + Search for per-thread versions of driver symbols. + +.. autoclass:: cuda.bindings.driver.CUdriverProcAddressQueryResult + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SUCCESS + + + Symbol was successfully found + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND + + + Symbol was not found in search + + + .. autoattribute:: cuda.bindings.driver.CUdriverProcAddressQueryResult.CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT + + + Symbol was found but version supplied was not sufficient + +.. autoclass:: cuda.bindings.driver.CUexecAffinityType + + .. autoattribute:: cuda.bindings.driver.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_SM_COUNT + + + Create a context with limited SMs. + + + .. autoattribute:: cuda.bindings.driver.CUexecAffinityType.CU_EXEC_AFFINITY_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUcigDataType + + .. autoattribute:: cuda.bindings.driver.CUcigDataType.CIG_DATA_TYPE_D3D12_COMMAND_QUEUE + +.. autoclass:: cuda.bindings.driver.CUlibraryOption + + .. autoattribute:: cuda.bindings.driver.CUlibraryOption.CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE + + + .. autoattribute:: cuda.bindings.driver.CUlibraryOption.CU_LIBRARY_BINARY_IS_PRESERVED + + + Specifies that the argument `code` passed to :py:obj:`~.cuLibraryLoadData()` will be preserved. Specifying this option will let the driver know that `code` can be accessed at any point until :py:obj:`~.cuLibraryUnload()`. The default behavior is for the driver to allocate and maintain its own copy of `code`. Note that this is only a memory usage optimization hint and the driver can choose to ignore it if required. Specifying this option with :py:obj:`~.cuLibraryLoadFromFile()` is invalid and will return :py:obj:`~.CUDA_ERROR_INVALID_VALUE`. + + + .. autoattribute:: cuda.bindings.driver.CUlibraryOption.CU_LIBRARY_NUM_OPTIONS + +.. autoclass:: cuda.bindings.driver.CUresult + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_SUCCESS + + + The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`). + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_VALUE + + + This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_OUT_OF_MEMORY + + + The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_INITIALIZED + + + This indicates that the CUDA driver has not been initialized with :py:obj:`~.cuInit()` or that initialization has failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_DEINITIALIZED + + + This indicates that the CUDA driver is in the process of shutting down. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_DISABLED + + + This indicates the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like Visual Profiler. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_NOT_INITIALIZED + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_ALREADY_STARTED + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PROFILER_ALREADY_STOPPED + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STUB_LIBRARY + + + This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver will see CUDA API calls return this error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_DEVICE_UNAVAILABLE + + + This indicates that the requested CUDA device is unavailable at the current time. Devices are often unavailable due to use of :py:obj:`~.CU_COMPUTEMODE_EXCLUSIVE_PROCESS` or :py:obj:`~.CU_COMPUTEMODE_PROHIBITED`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NO_DEVICE + + + This indicates that no CUDA-capable devices were detected by the installed CUDA driver. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_DEVICE + + + This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_DEVICE_NOT_LICENSED + + + This error indicates that the Grid license is not applied. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_IMAGE + + + This indicates that the device kernel image is invalid. This can also indicate an invalid CUDA module. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_CONTEXT + + + This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details.
This can also be returned if the green context passed to an API call was not converted to a :py:obj:`~.CUcontext` using the :py:obj:`~.cuCtxFromGreenCtx` API. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CONTEXT_ALREADY_CURRENT + + + This indicates that the context supplied as a parameter to the API call was already the active context. [Deprecated] + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MAP_FAILED + + + This indicates that a map or register operation has failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNMAP_FAILED + + + This indicates that an unmap or unregister operation has failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ARRAY_IS_MAPPED + + + This indicates that the specified array is currently mapped and thus cannot be destroyed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ALREADY_MAPPED + + + This indicates that the resource is already mapped. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NO_BINARY_FOR_GPU + + + This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ALREADY_ACQUIRED + + + This indicates that a resource has already been acquired. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_MAPPED + + + This indicates that a resource is not mapped. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_MAPPED_AS_ARRAY + + + This indicates that a mapped resource is not available for access as an array. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_MAPPED_AS_POINTER + + + This indicates that a mapped resource is not available for access as a pointer. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ECC_UNCORRECTABLE + + + This indicates that an uncorrectable ECC error was detected during execution. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_LIMIT + + + This indicates that the :py:obj:`~.CUlimit` passed to the API call is not supported by the active device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CONTEXT_ALREADY_IN_USE + + + This indicates that the :py:obj:`~.CUcontext` passed to the API call can only be bound to a single CPU thread at a time but is already bound to a CPU thread. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PEER_ACCESS_UNSUPPORTED + + + This indicates that peer access is not supported across the given devices. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_PTX + + + This indicates that a PTX JIT compilation failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_GRAPHICS_CONTEXT + + + This indicates an error with the OpenGL or DirectX context. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NVLINK_UNCORRECTABLE + + + This indicates that an uncorrectable NVLink error was detected during the execution. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_JIT_COMPILER_NOT_FOUND + + + This indicates that the PTX JIT compiler library was not found. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_PTX_VERSION + + + This indicates that the provided PTX was compiled with an unsupported toolchain. + + + .. 
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_JIT_COMPILATION_DISABLED + + + This indicates that the PTX JIT compilation was disabled. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY + + + This indicates that the :py:obj:`~.CUexecAffinityType` passed to the API call is not supported by the active device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC + + + This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_SOURCE + + + This indicates that the device kernel source is invalid. This includes compilation/linker errors encountered in device code or user error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_FILE_NOT_FOUND + + + This indicates that the file specified was not found. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND + + + This indicates that a link to a shared object failed to resolve. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + + + This indicates that initialization of a shared object failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_OPERATING_SYSTEM + + + This indicates that an OS call failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_HANDLE + + + This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like :py:obj:`~.CUstream` and :py:obj:`~.CUevent`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ILLEGAL_STATE + + + This indicates that a resource required by the API call is not in a valid state to perform the requested operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LOSSY_QUERY + + + This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_FOUND + + + This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_READY + + + This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than :py:obj:`~.CUDA_SUCCESS` (which indicates completion). Calls that may return this value include :py:obj:`~.cuEventQuery()` and :py:obj:`~.cuStreamQuery()`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ILLEGAL_ADDRESS + + + While executing a kernel, the device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES + + + This indicates that a launch did not occur because it did not have appropriate resources.
This error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. Passing arguments of the wrong size (i.e. a 64-bit pointer when a 32-bit int is expected) is equivalent to passing too many arguments and can also result in this error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_TIMEOUT + + + This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT` for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING + + + This error indicates a kernel launch that uses an incompatible texturing mode. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED + + + This error indicates that a call to :py:obj:`~.cuCtxEnablePeerAccess()` is trying to re-enable peer access to a context which has already had peer access to it enabled. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PEER_ACCESS_NOT_ENABLED + + + This error indicates that :py:obj:`~.cuCtxDisablePeerAccess()` is trying to disable peer access which has not been enabled yet via :py:obj:`~.cuCtxEnablePeerAccess()`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE + + + This error indicates that the primary context for the specified device has already been initialized. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CONTEXT_IS_DESTROYED + + + This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ASSERT + + + A device-side assert triggered during kernel execution. The context cannot be used anymore, and must be destroyed. All existing device memory allocations from this context are invalid and must be reconstructed if the program is to continue using CUDA. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_TOO_MANY_PEERS + + + This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cuCtxEnablePeerAccess()`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED + + + This error indicates that the memory range passed to :py:obj:`~.cuMemHostRegister()` has already been registered. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED + + + This error indicates that the pointer passed to :py:obj:`~.cuMemHostUnregister()` does not correspond to any currently registered memory region. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_HARDWARE_STACK_ERROR + + + While executing a kernel, the device encountered a stack error. This can be due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. 
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_ILLEGAL_INSTRUCTION + + + While executing a kernel, the device encountered an illegal instruction. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MISALIGNED_ADDRESS + + + While executing a kernel, the device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_ADDRESS_SPACE + + + While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_PC + + + While executing a kernel, the device program counter wrapped its address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_LAUNCH_FAILED + + + An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE + + + This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cuLaunchCooperativeKernel` or :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT`. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_PERMITTED + + + This error indicates that the attempted operation is not permitted. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_NOT_SUPPORTED + + + This error indicates that the attempted operation is not supported on the current system or device. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SYSTEM_NOT_READY + + + This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. + + + .. 
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH + + + This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE + + + This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_CONNECTION_FAILED + + + This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_RPC_FAILURE + + + This error indicates that the remote procedure call between the MPS server and the MPS client failed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_SERVER_NOT_READY + + + This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_MAX_CLIENTS_REACHED + + + This error indicates that the hardware resources required to create an MPS client have been exhausted. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED + + + This error indicates that the hardware resources required to support device connections have been exhausted. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_MPS_CLIENT_TERMINATED + + + This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CDP_NOT_SUPPORTED + + + This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CDP_VERSION_MISMATCH + + + This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED + + + This error indicates that the operation is not permitted when the stream is capturing. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_INVALIDATED + + + This error indicates that the current capture sequence on the stream has been invalidated due to a previous error. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_MERGE + + + This error indicates that the operation would have resulted in a merge of two independent capture sequences. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNMATCHED + + + This error indicates that the capture was not initiated in this stream. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_UNJOINED + + + This error indicates that the capture sequence contains a fork that was not joined to the primary stream. + + +
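These codes are returned, not raised: every call in ``cuda.bindings.driver`` returns a tuple whose first element is a :py:obj:`~.CUresult`. Below is a minimal sketch of one way to surface them as Python exceptions; the ``check`` helper is illustrative, not part of the bindings.

.. code-block:: python

    from cuda.bindings import driver

    def check(result):
        # Bindings return (CUresult, out_args...); verify the status first.
        err, *out = result
        if err != driver.CUresult.CUDA_SUCCESS:
            _, name = driver.cuGetErrorName(err)
            _, desc = driver.cuGetErrorString(err)
            raise RuntimeError(f"{name.decode()}: {desc.decode()}")
        return out

    check(driver.cuInit(0))
    device, = check(driver.cuDeviceGet(0))
    context, = check(driver.cuCtxCreate(0, device))
    check(driver.cuCtxDestroy(context))

.. 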
autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_ISOLATION + + + This error indicates that a dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + + + This error indicates a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_CAPTURED_EVENT + + + This error indicates that the operation is not permitted on an event which was last recorded in a capturing stream. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD + + + A stream capture sequence not initiated with the :py:obj:`~.CU_STREAM_CAPTURE_MODE_RELAXED` argument to :py:obj:`~.cuStreamBeginCapture` was passed to :py:obj:`~.cuStreamEndCapture` in a different thread. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_TIMEOUT + + + This error indicates that the timeout specified for the wait operation has lapsed. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE + + + This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_EXTERNAL_DEVICE + + + This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_CLUSTER_SIZE + + + Indicates a kernel launch error due to cluster misconfiguration. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_FUNCTION_NOT_LOADED + + + Indicates a function handle is not loaded when calling an API that requires a loaded function. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_RESOURCE_TYPE + + + This error indicates one or more resources passed in are not valid resource types for the operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION + + + This error indicates one or more resources are insufficient or non-applicable for the operation. + + + .. autoattribute:: cuda.bindings.driver.CUresult.CUDA_ERROR_UNKNOWN + + + This indicates that an unknown internal error has occurred. + +.. autoclass:: cuda.bindings.driver.CUdevice_P2PAttribute + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK + + + A relative value indicating the performance of the link between two devices + + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED + + + P2P Access is enabled + + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED + + + Atomic operations over the link are supported + + + .. autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED + + + [Deprecated] + + +
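The attributes above are queried with :py:obj:`~.cuDeviceGetP2PAttribute`. A short sketch, assuming an initialized driver and at least two visible devices:

.. code-block:: python

    from cuda.bindings import driver

    driver.cuInit(0)
    _, src = driver.cuDeviceGet(0)
    _, dst = driver.cuDeviceGet(1)

    # 1 if dst is reachable from src over P2P, 0 otherwise.
    _, supported = driver.cuDeviceGetP2PAttribute(
        driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED,
        src, dst)
    # Relative performance of the src->dst link.
    _, rank = driver.cuDeviceGetP2PAttribute(
        driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK,
        src, dst)
    print(f"P2P access: {bool(supported)}, link performance rank: {rank}")

.. 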
autoattribute:: cuda.bindings.driver.CUdevice_P2PAttribute.CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED + + + Accessing CUDA arrays over the link supported + +.. autoclass:: cuda.bindings.driver.CUresourceViewFormat + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_NONE + + + No resource view format (use underlying resource format) + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X8 + + + 1 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X8 + + + 2 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X8 + + + 4 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X8 + + + 1 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X8 + + + 2 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X8 + + + 4 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X16 + + + 1 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X16 + + + 2 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X16 + + + 4 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X16 + + + 1 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X16 + + + 2 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X16 + + + 4 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_1X32 + + + 1 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_2X32 + + + 2 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UINT_4X32 + + + 4 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_1X32 + + + 1 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_2X32 + + + 2 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SINT_4X32 + + + 4 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X16 + + + 1 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X16 + + + 2 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X16 + + + 4 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_1X32 + + + 1 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_2X32 + + + 2 channel 32-bit floating point + + + .. 
autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_FLOAT_4X32 + + + 4 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC1 + + + Block compressed 1 + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC2 + + + Block compressed 2 + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC3 + + + Block compressed 3 + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC4 + + + Block compressed 4 unsigned + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC4 + + + Block compressed 4 signed + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC5 + + + Block compressed 5 unsigned + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC5 + + + Block compressed 5 signed + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC6H + + + Block compressed 6 unsigned half-float + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_SIGNED_BC6H + + + Block compressed 6 signed half-float + + + .. autoattribute:: cuda.bindings.driver.CUresourceViewFormat.CU_RES_VIEW_FORMAT_UNSIGNED_BC7 + + + Block compressed 7 + +.. autoclass:: cuda.bindings.driver.CUtensorMapDataType + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT8 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT16 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_UINT64 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_INT64 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT16 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT64 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapDataType.CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ + +.. autoclass:: cuda.bindings.driver.CUtensorMapInterleave + + .. autoattribute:: cuda.bindings.driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_16B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapInterleave.CU_TENSOR_MAP_INTERLEAVE_32B + +.. autoclass:: cuda.bindings.driver.CUtensorMapSwizzle + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_32B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_64B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapSwizzle.CU_TENSOR_MAP_SWIZZLE_128B + +.. 
autoclass:: cuda.bindings.driver.CUtensorMapL2promotion + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_64B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_128B + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapL2promotion.CU_TENSOR_MAP_L2_PROMOTION_L2_256B + +.. autoclass:: cuda.bindings.driver.CUtensorMapFloatOOBfill + + .. autoattribute:: cuda.bindings.driver.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE + + + .. autoattribute:: cuda.bindings.driver.CUtensorMapFloatOOBfill.CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA + +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS + + .. autoattribute:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE + + + No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations + + + .. autoattribute:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ + + + Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case. + + + .. autoattribute:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS.CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE + + + Read-write access, the device has full read-write access to the memory + +.. autoclass:: cuda.bindings.driver.CUexternalMemoryHandleType + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + + + Handle is an opaque, globally shared handle + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP + + + Handle is a D3D12 heap object + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + + + Handle is a D3D12 committed resource + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + + + Handle is a shared NT handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + + + Handle is a globally shared handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.driver.CUexternalMemoryHandleType.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF + + + Handle is an NvSciBuf object + +.. autoclass:: cuda.bindings.driver.CUexternalSemaphoreHandleType + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + + + Handle is an opaque, globally shared handle + + + .. 
autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE + + + Handle is a shared NT handle referencing a D3D12 fence object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE + + + Handle is a shared NT handle referencing a D3D11 fence object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + + + Opaque handle to NvSciSync Object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX + + + Handle is a shared NT handle referencing a D3D11 keyed mutex object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + + + Handle is a globally shared handle referencing a D3D11 keyed mutex object + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD + + + Handle is an opaque file descriptor referencing a timeline semaphore + + + .. autoattribute:: cuda.bindings.driver.CUexternalSemaphoreHandleType.CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + + + Handle is an opaque shared NT handle referencing a timeline semaphore + +.. autoclass:: cuda.bindings.driver.CUmemAllocationHandleType + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_NONE + + + Does not allow any export mechanism. + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR + + + Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32 + + + Allows a Win32 NT handle to be used for exporting. (HANDLE) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_WIN32_KMT + + + Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_FABRIC + + + Allows a fabric handle to be used for exporting. (CUmemFabricHandle) + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationHandleType.CU_MEM_HANDLE_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUmemAccess_flags + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_NONE + + + Default, make the address range not accessible + + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READ + + + Make the address range read accessible + + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE + + + Make the address range read-write accessible + + + .. autoattribute:: cuda.bindings.driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_MAX + +.. autoclass:: cuda.bindings.driver.CUmemLocationType + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_INVALID + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE + + + Location is a device location, thus id is a device ordinal + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST + + + Location is host, id is ignored + + +
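The enums above come together in the virtual memory management API: :py:obj:`~.cuMemCreate` takes a :py:obj:`~.CUmemAllocationProp` describing the allocation type, location, and requested export handle type, and :py:obj:`~.cuMemSetAccess` takes :py:obj:`~.CUmemAccessDesc` entries built from a location plus access flags. A condensed sketch, assuming an initialized driver with a current context on device 0:

.. code-block:: python

    from cuda.bindings import driver

    prop = driver.CUmemAllocationProp()
    prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
    prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    prop.location.id = 0  # device ordinal

    # Sizes passed to cuMemCreate must be a multiple of the granularity.
    _, gran = driver.cuMemGetAllocationGranularity(
        prop,
        driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
    size = gran  # one granule, for illustration

    _, handle = driver.cuMemCreate(size, prop, 0)
    _, ptr = driver.cuMemAddressReserve(size, 0, 0, 0)
    err, = driver.cuMemMap(ptr, size, 0, handle, 0)

    # The mapping is inaccessible until access is granted explicitly.
    desc = driver.CUmemAccessDesc()
    desc.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
    desc.location.id = 0
    desc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
    err, = driver.cuMemSetAccess(ptr, size, [desc], 1)

    # Teardown mirrors the setup, in reverse order.
    driver.cuMemUnmap(ptr, size)
    driver.cuMemRelease(handle)
    driver.cuMemAddressFree(ptr, size)

.. 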
autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA + + + Location is a host NUMA node, thus id is a host NUMA node id + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT + + + Location is a host NUMA node of the current thread, id is ignored + + + .. autoattribute:: cuda.bindings.driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUmemAllocationType + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_INVALID + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED + + + This allocation type is 'pinned', i.e. cannot migrate from its current location while the application is actively using it + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUmemAllocationGranularity_flags + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM + + + Minimum required granularity for allocation + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_RECOMMENDED + + + Recommended granularity for allocation for best performance + +.. autoclass:: cuda.bindings.driver.CUmemRangeHandleType + + .. autoattribute:: cuda.bindings.driver.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD + + + .. autoattribute:: cuda.bindings.driver.CUmemRangeHandleType.CU_MEM_RANGE_HANDLE_TYPE_MAX + +.. autoclass:: cuda.bindings.driver.CUarraySparseSubresourceType + + .. autoattribute:: cuda.bindings.driver.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL + + + .. autoattribute:: cuda.bindings.driver.CUarraySparseSubresourceType.CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL + +.. autoclass:: cuda.bindings.driver.CUmemOperationType + + .. autoattribute:: cuda.bindings.driver.CUmemOperationType.CU_MEM_OPERATION_TYPE_MAP + + + .. autoattribute:: cuda.bindings.driver.CUmemOperationType.CU_MEM_OPERATION_TYPE_UNMAP + +.. autoclass:: cuda.bindings.driver.CUmemHandleType + + .. autoattribute:: cuda.bindings.driver.CUmemHandleType.CU_MEM_HANDLE_TYPE_GENERIC + +.. autoclass:: cuda.bindings.driver.CUmemAllocationCompType + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_NONE + + + Allocating non-compressible memory + + + .. autoattribute:: cuda.bindings.driver.CUmemAllocationCompType.CU_MEM_ALLOCATION_COMP_GENERIC + + + Allocating compressible memory + +.. autoclass:: cuda.bindings.driver.CUmulticastGranularity_flags + + .. autoattribute:: cuda.bindings.driver.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_MINIMUM + + + Minimum required granularity + + + .. autoattribute:: cuda.bindings.driver.CUmulticastGranularity_flags.CU_MULTICAST_GRANULARITY_RECOMMENDED + + + Recommended granularity for best performance + +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResult + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_SUCCESS + + + The update succeeded + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR + + + The update failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED + + + The update failed because the topology changed + + + .. 
autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED + + + The update failed because a node type changed + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED + + + The update failed because the function of a kernel node changed (CUDA driver < 11.2) + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED + + + The update failed because the parameters changed in a way that is not supported + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED + + + The update failed because something about the node is not supported + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE + + + The update failed because the function of a kernel node changed in an unsupported way + + + .. autoattribute:: cuda.bindings.driver.CUgraphExecUpdateResult.CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED + + + The update failed because the node attributes changed in a way that is not supported + +.. autoclass:: cuda.bindings.driver.CUmemPool_attribute + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES + + + (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC + + + (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES + + + (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuMemFreeAsync (default enabled). + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD + + + (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. + + +
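As an example of the pool attributes above, raising :py:obj:`~.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD` on a device's default pool makes the allocator hold memory across synchronization points instead of returning it to the OS. A sketch, assuming an initialized driver and a device that supports stream-ordered allocation:

.. code-block:: python

    from cuda.bindings import driver

    driver.cuInit(0)
    _, dev = driver.cuDeviceGet(0)
    _, pool = driver.cuDeviceGetDefaultMemPool(dev)

    # Hold up to 1 GiB before releasing memory back to the OS (default is 0).
    err, = driver.cuMemPoolSetAttribute(
        pool,
        driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
        driver.cuuint64_t(1 << 30))

    # Read back one of the cuuint64_t counters.
    _, used_high = driver.cuMemPoolGetAttribute(
        pool, driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_HIGH)

.. 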
autoattribute:: cuda.bindings.driver.CUmemPool_attribute.CU_MEMPOOL_ATTR_USED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. + +.. autoclass:: cuda.bindings.driver.CUgraphMem_attribute + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs + + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_USED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT + + + (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. + + + .. autoattribute:: cuda.bindings.driver.CUgraphMem_attribute.CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH + + + (value type = cuuint64_t) High watermark of memory, in bytes, allocated for use by the CUDA graphs asynchronous allocator since the last time it was reset. + +.. autoclass:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST + + + :py:obj:`~.cuFlushGPUDirectRDMAWrites()` and its CUDA Runtime API counterpart are supported on the device. + + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesOptions.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS + + + The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the device. + +.. autoclass:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering + + .. autoattribute:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE + + + The device does not natively support ordering of remote writes. :py:obj:`~.cuFlushGPUDirectRDMAWrites()` can be leveraged if supported. + + + .. autoattribute:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER + + + Natively, the device can consistently consume remote writes, although other CUDA devices may not. + + + .. autoattribute:: cuda.bindings.driver.CUGPUDirectRDMAWritesOrdering.CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES + + + Any CUDA device in the system can consistently consume remote writes to this device. + +.. autoclass:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER + + + Blocks until remote writes are visible to the CUDA device context owning the data. + + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesScope.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES + + + Blocks until remote writes are visible to all CUDA device contexts. + +.. autoclass:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesTarget + + .. autoattribute:: cuda.bindings.driver.CUflushGPUDirectRDMAWritesTarget.CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX + + + Sets the target for :py:obj:`~.cuFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. + +.. autoclass:: cuda.bindings.driver.CUgraphDebugDot_flags + +
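The flags below select what :py:obj:`~.cuGraphDebugDotPrint` writes into its DOT file. A short sketch, assuming an initialized driver with a current context (the output path and the empty graph are purely illustrative):

.. code-block:: python

    from cuda.bindings import driver

    driver.cuInit(0)
    _, dev = driver.cuDeviceGet(0)
    _, ctx = driver.cuCtxCreate(0, dev)

    _, graph = driver.cuGraphCreate(0)  # empty graph, for illustration
    # Flags are IntEnum values and can be OR-ed together.
    flags = (driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS
             | driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES)
    err, = driver.cuGraphDebugDotPrint(graph, b"graph.dot", flags)

    driver.cuGraphDestroy(graph)
    driver.cuCtxDestroy(ctx)

.. 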
autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE + + + Output all debug data as if every debug flag is enabled + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES + + + Use CUDA Runtime structures for output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS + + + Adds CUDA_KERNEL_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS + + + Adds CUDA_MEMCPY3D values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS + + + Adds CUDA_MEMSET_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS + + + Adds CUDA_HOST_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS + + + Adds CUevent handle from record and wait nodes to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS + + + Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS + + + Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES + + + Adds CUkernelNodeAttrValue values to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES + + + Adds node handles and every kernel function handle to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS + + + Adds memory alloc node parameters to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS + + + Adds memory free node parameters to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS + + + Adds batch mem op node parameters to output + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO + + + Adds edge numbering information + + + .. autoattribute:: cuda.bindings.driver.CUgraphDebugDot_flags.CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS + + + Adds conditional node parameters to output + +.. autoclass:: cuda.bindings.driver.CUuserObject_flags + + .. autoattribute:: cuda.bindings.driver.CUuserObject_flags.CU_USER_OBJECT_NO_DESTRUCTOR_SYNC + + + Indicates the destructor execution is not synchronized by any CUDA handle. + +.. autoclass:: cuda.bindings.driver.CUuserObjectRetain_flags + + .. autoattribute:: cuda.bindings.driver.CUuserObjectRetain_flags.CU_GRAPH_USER_OBJECT_MOVE + + + Transfer references from the caller rather than creating new references. + +.. autoclass:: cuda.bindings.driver.CUgraphInstantiate_flags + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH + + + Automatically free memory allocated in a graph before relaunching. + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD + + + Automatically upload the graph after instantiation. 
Only supported by :py:obj:`~.cuGraphInstantiateWithParams`. The upload will be performed using the stream provided in `instantiateParams`. + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH + + + Instantiate the graph to be launchable from the device. This flag can only be used on platforms which support unified addressing. This flag cannot be used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + + + .. autoattribute:: cuda.bindings.driver.CUgraphInstantiate_flags.CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY + + + Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. + +.. autoclass:: cuda.bindings.driver.CUdeviceNumaConfig + + .. autoattribute:: cuda.bindings.driver.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NONE + + + The GPU is not a NUMA node + + + .. autoattribute:: cuda.bindings.driver.CUdeviceNumaConfig.CU_DEVICE_NUMA_CONFIG_NUMA_NODE + + + The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID + +.. autoclass:: cuda.bindings.driver.CUeglFrameType + + .. autoattribute:: cuda.bindings.driver.CUeglFrameType.CU_EGL_FRAME_TYPE_ARRAY + + + Frame type CUDA array + + + .. autoattribute:: cuda.bindings.driver.CUeglFrameType.CU_EGL_FRAME_TYPE_PITCH + + + Frame type pointer + +.. autoclass:: cuda.bindings.driver.CUeglResourceLocationFlags + + .. autoattribute:: cuda.bindings.driver.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_SYSMEM + + + Resource location sysmem + + + .. autoattribute:: cuda.bindings.driver.CUeglResourceLocationFlags.CU_EGL_RESOURCE_LOCATION_VIDMEM + + + Resource location vidmem + +.. autoclass:: cuda.bindings.driver.CUeglColorFormat + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR + + + Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGB + + + R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGR + + + R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ARGB + + + R/G/B/A four channels in one surface with BGRA byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RGBA + + + R/G/B/A four channels in one surface with ABGR byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_L + + + single luminance channel in one surface. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_R + + + single color channel in one surface. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR + + + Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_422 + + + Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_422 + + + Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_ABGR + + + R/G/B/A four channels in one surface with RGBA byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BGRA + + + R/G/B/A four channels in one surface with ARGB byte ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_A + + + Alpha color format - one channel in one surface. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_RG + + + R/G color format - two channels in one surface with GR byte ordering + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV + + + Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY_ER + + + Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_UYVY_ER + + + Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUYV_ER + + + Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU_ER + + + Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV_ER + + + Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_AYUV_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER + + + Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER + + + Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RGGB + + + Bayer format - one channel in one surface with interleaved RGGB ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BGGR + + + Bayer format - one channel in one surface with interleaved BGGR ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GRBG + + + Bayer format - one channel in one surface with interleaved GRBG ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_GBRG + + + Bayer format - one channel in one surface with interleaved GBRG ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_RGGB + + + Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_BGGR + + + Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GRBG + + + Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_GBRG + + + Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RGGB + + + Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BGGR + + + Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GRBG + + + Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_GBRG + + + Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_RGGB + + + Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_BGGR + + + Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GRBG + + + Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER14_GBRG + + + Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_RGGB + + + Bayer20 format - one channel in one surface with interleaved RGGB ordering. 
Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_BGGR + + + Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GRBG + + + Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER20_GBRG + + + Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU444_PLANAR + + + Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU422_PLANAR + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_BCCR + + + Bayer format - one channel in one surface with interleaved BCCR ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_RCCB + + + Bayer format - one channel in one surface with interleaved RCCB ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CRBC + + + Bayer format - one channel in one surface with interleaved CRBC ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER_CBRC + + + Bayer format - one channel in one surface with interleaved CBRC ordering. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER10_CCCC + + + Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_BCCR + + + Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_RCCB + + + Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CRBC + + + Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CBRC + + + Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_BAYER12_CCCC + + + Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y + + + Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 + + + Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 + + + Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 + + + Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 + + + Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 + + + Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR + + + Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 + + + Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_ER + + + Extended Range Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y_709_ER + + + Extended Range Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10_709_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12_709_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUVA + + + Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YUV + + + Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_YVYU + + + Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_VYUY + + + Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER + + + Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER + + + Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.driver.CUeglColorFormat.CU_EGL_COLOR_FORMAT_MAX + +.. autoclass:: cuda.bindings.driver.CUdeviceptr_v2 +.. autoclass:: cuda.bindings.driver.CUdeviceptr +.. autoclass:: cuda.bindings.driver.CUdevice_v1 +.. autoclass:: cuda.bindings.driver.CUdevice +.. autoclass:: cuda.bindings.driver.CUcontext +.. autoclass:: cuda.bindings.driver.CUmodule +.. autoclass:: cuda.bindings.driver.CUfunction +.. autoclass:: cuda.bindings.driver.CUlibrary +.. autoclass:: cuda.bindings.driver.CUkernel +.. autoclass:: cuda.bindings.driver.CUarray +.. autoclass:: cuda.bindings.driver.CUmipmappedArray +.. autoclass:: cuda.bindings.driver.CUtexref +.. autoclass:: cuda.bindings.driver.CUsurfref +.. autoclass:: cuda.bindings.driver.CUevent +.. autoclass:: cuda.bindings.driver.CUstream +.. autoclass:: cuda.bindings.driver.CUgraphicsResource +.. autoclass:: cuda.bindings.driver.CUtexObject_v1 +.. autoclass:: cuda.bindings.driver.CUtexObject +.. autoclass:: cuda.bindings.driver.CUsurfObject_v1 +.. autoclass:: cuda.bindings.driver.CUsurfObject +.. autoclass:: cuda.bindings.driver.CUexternalMemory +.. autoclass:: cuda.bindings.driver.CUexternalSemaphore +.. autoclass:: cuda.bindings.driver.CUgraph +.. autoclass:: cuda.bindings.driver.CUgraphNode +.. autoclass:: cuda.bindings.driver.CUgraphExec +.. autoclass:: cuda.bindings.driver.CUmemoryPool +.. autoclass:: cuda.bindings.driver.CUuserObject +.. autoclass:: cuda.bindings.driver.CUgraphConditionalHandle +.. autoclass:: cuda.bindings.driver.CUgraphDeviceNode +.. autoclass:: cuda.bindings.driver.CUasyncCallbackHandle +.. autoclass:: cuda.bindings.driver.CUgreenCtx +.. autoclass:: cuda.bindings.driver.CUuuid +.. autoclass:: cuda.bindings.driver.CUmemFabricHandle_v1 +.. autoclass:: cuda.bindings.driver.CUmemFabricHandle +.. autoclass:: cuda.bindings.driver.CUipcEventHandle_v1 +.. autoclass:: cuda.bindings.driver.CUipcEventHandle +.. autoclass:: cuda.bindings.driver.CUipcMemHandle_v1 +.. autoclass:: cuda.bindings.driver.CUipcMemHandle +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpParams_v1 +.. autoclass:: cuda.bindings.driver.CUstreamBatchMemOpParams +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_BATCH_MEM_OP_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUasyncNotificationInfo +.. autoclass:: cuda.bindings.driver.CUasyncCallback +.. autoclass:: cuda.bindings.driver.CUdevprop_v1 +.. autoclass:: cuda.bindings.driver.CUdevprop +.. autoclass:: cuda.bindings.driver.CUlinkState +.. autoclass:: cuda.bindings.driver.CUhostFn +.. autoclass:: cuda.bindings.driver.CUaccessPolicyWindow_v1 +.. autoclass:: cuda.bindings.driver.CUaccessPolicyWindow +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_KERNEL_NODE_PARAMS_v3 +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_MEMSET_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v1 +.. 
autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_HOST_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_CONDITIONAL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUgraphEdgeData +.. autoclass:: cuda.bindings.driver.CUDA_GRAPH_INSTANTIATE_PARAMS +.. autoclass:: cuda.bindings.driver.CUlaunchMemSyncDomainMap +.. autoclass:: cuda.bindings.driver.CUlaunchAttributeValue +.. autoclass:: cuda.bindings.driver.CUlaunchAttribute +.. autoclass:: cuda.bindings.driver.CUlaunchConfig +.. autoclass:: cuda.bindings.driver.CUkernelNodeAttrID +.. autoclass:: cuda.bindings.driver.CUkernelNodeAttrValue_v1 +.. autoclass:: cuda.bindings.driver.CUkernelNodeAttrValue +.. autoclass:: cuda.bindings.driver.CUstreamAttrID +.. autoclass:: cuda.bindings.driver.CUstreamAttrValue_v1 +.. autoclass:: cuda.bindings.driver.CUstreamAttrValue +.. autoclass:: cuda.bindings.driver.CUexecAffinitySmCount_v1 +.. autoclass:: cuda.bindings.driver.CUexecAffinitySmCount +.. autoclass:: cuda.bindings.driver.CUexecAffinityParam_v1 +.. autoclass:: cuda.bindings.driver.CUexecAffinityParam +.. autoclass:: cuda.bindings.driver.CUctxCigParam +.. autoclass:: cuda.bindings.driver.CUctxCreateParams +.. autoclass:: cuda.bindings.driver.CUlibraryHostUniversalFunctionAndDataTable +.. autoclass:: cuda.bindings.driver.CUstreamCallback +.. autoclass:: cuda.bindings.driver.CUoccupancyB2DSize +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY2D_v2 +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY2D +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_v2 +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_PEER_v1 +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY3D_PEER +.. autoclass:: cuda.bindings.driver.CUDA_MEMCPY_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR_v2 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_DESCRIPTOR +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR_v2 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY3D_DESCRIPTOR +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES_v1 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_SPARSE_PROPERTIES +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_ARRAY_MEMORY_REQUIREMENTS +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_TEXTURE_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_TEXTURE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_RESOURCE_VIEW_DESC +.. autoclass:: cuda.bindings.driver.CUtensorMap +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_POINTER_ATTRIBUTE_P2P_TOKENS +.. autoclass:: cuda.bindings.driver.CUDA_LAUNCH_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_LAUNCH_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_HANDLE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_BUFFER_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 +.. 
autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUmemGenericAllocationHandle_v1 +.. autoclass:: cuda.bindings.driver.CUmemGenericAllocationHandle +.. autoclass:: cuda.bindings.driver.CUarrayMapInfo_v1 +.. autoclass:: cuda.bindings.driver.CUarrayMapInfo +.. autoclass:: cuda.bindings.driver.CUmemLocation_v1 +.. autoclass:: cuda.bindings.driver.CUmemLocation +.. autoclass:: cuda.bindings.driver.CUmemAllocationProp_v1 +.. autoclass:: cuda.bindings.driver.CUmemAllocationProp +.. autoclass:: cuda.bindings.driver.CUmulticastObjectProp_v1 +.. autoclass:: cuda.bindings.driver.CUmulticastObjectProp +.. autoclass:: cuda.bindings.driver.CUmemAccessDesc_v1 +.. autoclass:: cuda.bindings.driver.CUmemAccessDesc +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResultInfo_v1 +.. autoclass:: cuda.bindings.driver.CUgraphExecUpdateResultInfo +.. autoclass:: cuda.bindings.driver.CUmemPoolProps_v1 +.. autoclass:: cuda.bindings.driver.CUmemPoolProps +.. autoclass:: cuda.bindings.driver.CUmemPoolPtrExportData_v1 +.. autoclass:: cuda.bindings.driver.CUmemPoolPtrExportData +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v1 +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_MEM_ALLOC_NODE_PARAMS_v2 +.. autoclass:: cuda.bindings.driver.CUDA_MEM_FREE_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_CHILD_GRAPH_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_RECORD_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUDA_EVENT_WAIT_NODE_PARAMS +.. autoclass:: cuda.bindings.driver.CUgraphNodeParams +.. autoclass:: cuda.bindings.driver.CUeglFrame_v1 +.. autoclass:: cuda.bindings.driver.CUeglFrame +.. autoclass:: cuda.bindings.driver.CUeglStreamConnection +.. autoattribute:: cuda.bindings.driver.CUDA_VERSION + + CUDA API version number + +.. autoattribute:: cuda.bindings.driver.CU_UUID_HAS_BEEN_DEFINED + + CUDA UUID types + +.. autoattribute:: cuda.bindings.driver.CU_IPC_HANDLE_SIZE + + CUDA IPC handle size + +.. autoattribute:: cuda.bindings.driver.CU_STREAM_LEGACY + + Legacy stream handle + + + + Stream handle that can be passed as a CUstream to use an implicit stream with legacy synchronization behavior. + + + + See details of the stream synchronization behavior. + +.. autoattribute:: cuda.bindings.driver.CU_STREAM_PER_THREAD + + Per-thread stream handle + + + + Stream handle that can be passed as a CUstream to use an implicit stream with per-thread synchronization behavior. + + + + See details of the stream synchronization behavior. + +.. autoattribute:: cuda.bindings.driver.CU_COMPUTE_ACCELERATED_TARGET_BASE +.. autoattribute:: cuda.bindings.driver.CUDA_CB +.. 
autoattribute:: cuda.bindings.driver.CU_GRAPH_COND_ASSIGN_DEFAULT + + Conditional node handle flags. The default value is applied when the graph is launched. + +.. autoattribute:: cuda.bindings.driver.CU_GRAPH_KERNEL_NODE_PORT_DEFAULT + + This port activates when the kernel has finished executing. + +.. autoattribute:: cuda.bindings.driver.CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC + + This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC`. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT`. + +.. autoattribute:: cuda.bindings.driver.CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER + + This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT`. + +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_PRIORITY +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE +.. autoattribute:: cuda.bindings.driver.CU_KERNEL_NODE_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_PRIORITY +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +.. autoattribute:: cuda.bindings.driver.CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTALLOC_PORTABLE + + If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostAlloc()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTALLOC_DEVICEMAP + + If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostAlloc()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTALLOC_WRITECOMBINED + + If set, host memory is allocated as write-combined - fast to write, faster to DMA, slow to read except via SSE4 streaming load instruction (MOVNTDQA). Flag for :py:obj:`~.cuMemHostAlloc()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_PORTABLE + + If set, host memory is portable between CUDA contexts. Flag for :py:obj:`~.cuMemHostRegister()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_DEVICEMAP + + If set, host memory is mapped into CUDA address space and :py:obj:`~.cuMemHostGetDevicePointer()` may be called on the host pointer. Flag for :py:obj:`~.cuMemHostRegister()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_IOMEMORY + + If set, the passed memory pointer is treated as pointing to some memory-mapped I/O space, e.g. belonging to a third-party PCIe device. On Windows the flag is a no-op. On Linux that memory is marked as non cache-coherent for the GPU and is expected to be physically contiguous. 
It may return :py:obj:`~.CUDA_ERROR_NOT_PERMITTED` if run as an unprivileged user, :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` on older Linux kernel versions. On all other platforms, it is not supported and :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED` is returned. Flag for :py:obj:`~.cuMemHostRegister()` + +.. autoattribute:: cuda.bindings.driver.CU_MEMHOSTREGISTER_READ_ONLY + + If set, the passed memory pointer is treated as pointing to memory that is considered read-only by the device. On platforms without :py:obj:`~.CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES`, this flag is required in order to register memory mapped to the CPU as read-only. Support for the use of this flag can be queried from the device attribute :py:obj:`~.CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED`. Using this flag with a current context associated with a device that does not have this attribute set will cause :py:obj:`~.cuMemHostRegister` to error with :py:obj:`~.CUDA_ERROR_NOT_SUPPORTED`. + +.. autoattribute:: cuda.bindings.driver.CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL + + Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers + +.. autoattribute:: cuda.bindings.driver.CU_TENSOR_MAP_NUM_QWORDS + + Size of tensor map descriptor + +.. autoattribute:: cuda.bindings.driver.CUDA_EXTERNAL_MEMORY_DEDICATED + + Indicates that the external memory object is a dedicated resource + +.. autoattribute:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC + + When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.driver.CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + + When the `flags` parameter of :py:obj:`~.CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS` contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.driver.CUDA_NVSCISYNC_ATTR_SIGNAL + + When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that application needs signaler specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.driver.CUDA_NVSCISYNC_ATTR_WAIT + + When `flags` of :py:obj:`~.cuDeviceGetNvSciSyncAttributes` is set to this, it indicates that application needs waiter specific NvSciSyncAttr to be filled by :py:obj:`~.cuDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.driver.CU_MEM_CREATE_USAGE_TILE_POOL + + This flag if set indicates that the memory will be used as a tile pool. + +.. 
autoattribute:: cuda.bindings.driver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC + + If set, each kernel launched as part of :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. + +.. autoattribute:: cuda.bindings.driver.CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC + + If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cuLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_LAYERED + + If set, the CUDA array is a collection of layers, where each layer is either a 1D or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number of layers, not the depth of a 3D array. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_2DARRAY + + Deprecated, use CUDA_ARRAY3D_LAYERED + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_SURFACE_LDST + + This flag must be set in order to bind a surface reference to the CUDA array + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_CUBEMAP + + If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The width of such a CUDA array must be equal to its height, and Depth must be six. If :py:obj:`~.CUDA_ARRAY3D_LAYERED` flag is also set, then the CUDA array is a collection of cubemaps and Depth must be a multiple of six. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_TEXTURE_GATHER + + This flag must be set in order to perform texture gather operations on a CUDA array. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_DEPTH_TEXTURE + + This flag if set indicates that the CUDA array is a DEPTH_TEXTURE. + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_COLOR_ATTACHMENT + + This flag indicates that the CUDA array may be bound as a color target in an external graphics API + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_SPARSE + + This flag if set indicates that the CUDA array or CUDA mipmapped array is a sparse CUDA array or CUDA mipmapped array respectively + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_DEFERRED_MAPPING + + This flag if set indicates that the CUDA array or CUDA mipmapped array will allow deferred memory mapping + +.. autoattribute:: cuda.bindings.driver.CUDA_ARRAY3D_VIDEO_ENCODE_DECODE + + This flag indicates that the CUDA array will be used for hardware accelerated video encode/decode operations. + +.. autoattribute:: cuda.bindings.driver.CU_TRSA_OVERRIDE_FORMAT + + Override the texref format with a format inferred from the array. Flag for :py:obj:`~.cuTexRefSetArray()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_READ_AS_INTEGER + + Read the texture as integers rather than promoting the values to floats in the range [0,1]. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_NORMALIZED_COORDINATES + + Use normalized texture coordinates in the range [0,1) instead of [0,dim). Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_SRGB + + Perform sRGB->linear conversion during texture read. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. 
autoattribute:: cuda.bindings.driver.CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION + + Disable any trilinear filtering optimizations. Flag for :py:obj:`~.cuTexRefSetFlags()` and :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_TRSF_SEAMLESS_CUBEMAP + + Enable seamless cube map filtering. Flag for :py:obj:`~.cuTexObjectCreate()` + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_END_AS_INT + + C++ compile time constant for CU_LAUNCH_PARAM_END + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_END + + End of array terminator for the `extra` parameter to :py:obj:`~.cuLaunchKernel` + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT + + C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_POINTER + + Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a buffer containing all kernel parameters used for launching kernel `f`. This buffer needs to honor all alignment/padding requirements of the individual parameters. If :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not also specified in the `extra` array, then :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` will have no effect. + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT + + C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE + +.. autoattribute:: cuda.bindings.driver.CU_LAUNCH_PARAM_BUFFER_SIZE + + Indicator that the next value in the `extra` parameter to :py:obj:`~.cuLaunchKernel` will be a pointer to a size_t which contains the size of the buffer specified with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER`. It is required that :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER` also be specified in the `extra` array if the value associated with :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE` is not zero. + +.. autoattribute:: cuda.bindings.driver.CU_PARAM_TR_DEFAULT + + For texture references loaded into the module, use default texunit from texture reference. + +.. autoattribute:: cuda.bindings.driver.CU_DEVICE_CPU + + Device that represents the CPU + +.. autoattribute:: cuda.bindings.driver.CU_DEVICE_INVALID + + Device that represents an invalid device + +.. autoattribute:: cuda.bindings.driver.MAX_PLANES + + Maximum number of planes per frame + +.. autoattribute:: cuda.bindings.driver.CUDA_EGL_INFINITE_TIMEOUT + + Indicates that timeout for :py:obj:`~.cuEGLStreamConsumerAcquireFrame` is infinite. + + +Error Handling +-------------- + +This section describes the error handling functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGetErrorString +.. autofunction:: cuda.bindings.driver.cuGetErrorName + +Initialization +-------------- + +This section describes the initialization functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuInit + +Version Management +------------------ + +This section describes the version management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuDriverGetVersion + +Device Management +----------------- + +This section describes the device management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuDeviceGet +.. autofunction:: cuda.bindings.driver.cuDeviceGetCount +.. autofunction:: cuda.bindings.driver.cuDeviceGetName +.. 
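note::
+
+    Every call in these bindings returns a tuple whose first element is the
+    :py:obj:`~.CUresult` status code. A minimal sketch combining the error
+    handling, initialization, version and device queries above (the ``check``
+    helper is illustrative, not part of the bindings):
+
+    .. code-block:: python
+
+        from cuda.bindings import driver
+
+        def check(result):
+            # Unpack (CUresult, value...) and raise on failure, rendering the
+            # message with cuGetErrorString.
+            err, *out = result
+            if err != driver.CUresult.CUDA_SUCCESS:
+                _, msg = driver.cuGetErrorString(err)
+                raise RuntimeError(f"CUDA error: {msg}")
+            return out[0] if len(out) == 1 else tuple(out)
+
+        check(driver.cuInit(0))                  # must precede other driver calls
+        version = check(driver.cuDriverGetVersion())
+        count = check(driver.cuDeviceGetCount())
+        dev = check(driver.cuDeviceGet(0))
+        name = check(driver.cuDeviceGetName(128, dev))
+
+.. 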
autofunction:: cuda.bindings.driver.cuDeviceGetUuid +.. autofunction:: cuda.bindings.driver.cuDeviceGetUuid_v2 +.. autofunction:: cuda.bindings.driver.cuDeviceGetLuid +.. autofunction:: cuda.bindings.driver.cuDeviceTotalMem +.. autofunction:: cuda.bindings.driver.cuDeviceGetTexture1DLinearMaxWidth +.. autofunction:: cuda.bindings.driver.cuDeviceGetAttribute +.. autofunction:: cuda.bindings.driver.cuDeviceGetNvSciSyncAttributes +.. autofunction:: cuda.bindings.driver.cuDeviceSetMemPool +.. autofunction:: cuda.bindings.driver.cuDeviceGetMemPool +.. autofunction:: cuda.bindings.driver.cuDeviceGetDefaultMemPool +.. autofunction:: cuda.bindings.driver.cuDeviceGetExecAffinitySupport +.. autofunction:: cuda.bindings.driver.cuFlushGPUDirectRDMAWrites + +Primary Context Management +-------------------------- + +This section describes the primary context management functions of the low-level CUDA driver application programming interface. + + + +The primary context is unique per device and shared with the CUDA runtime API. These functions allow integration with other libraries using CUDA. + +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxRetain +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxRelease +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxSetFlags +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxGetState +.. autofunction:: cuda.bindings.driver.cuDevicePrimaryCtxReset + +Context Management +------------------ + +This section describes the context management functions of the low-level CUDA driver application programming interface. + + + +Please note that some functions are described in the Primary Context Management section. + +.. autofunction:: cuda.bindings.driver.cuCtxCreate +.. autofunction:: cuda.bindings.driver.cuCtxCreate_v3 +.. autofunction:: cuda.bindings.driver.cuCtxCreate_v4 +.. autofunction:: cuda.bindings.driver.cuCtxDestroy +.. autofunction:: cuda.bindings.driver.cuCtxPushCurrent +.. autofunction:: cuda.bindings.driver.cuCtxPopCurrent +.. autofunction:: cuda.bindings.driver.cuCtxSetCurrent +.. autofunction:: cuda.bindings.driver.cuCtxGetCurrent +.. autofunction:: cuda.bindings.driver.cuCtxGetDevice +.. autofunction:: cuda.bindings.driver.cuCtxGetFlags +.. autofunction:: cuda.bindings.driver.cuCtxSetFlags +.. autofunction:: cuda.bindings.driver.cuCtxGetId +.. autofunction:: cuda.bindings.driver.cuCtxSynchronize +.. autofunction:: cuda.bindings.driver.cuCtxSetLimit +.. autofunction:: cuda.bindings.driver.cuCtxGetLimit +.. autofunction:: cuda.bindings.driver.cuCtxGetCacheConfig +.. autofunction:: cuda.bindings.driver.cuCtxSetCacheConfig +.. autofunction:: cuda.bindings.driver.cuCtxGetApiVersion +.. autofunction:: cuda.bindings.driver.cuCtxGetStreamPriorityRange +.. autofunction:: cuda.bindings.driver.cuCtxResetPersistingL2Cache +.. autofunction:: cuda.bindings.driver.cuCtxGetExecAffinity +.. autofunction:: cuda.bindings.driver.cuCtxRecordEvent +.. autofunction:: cuda.bindings.driver.cuCtxWaitEvent + +Module Management +----------------- + +This section describes the module management functions of the low-level CUDA driver application programming interface. + +.. autoclass:: cuda.bindings.driver.CUmoduleLoadingMode + + .. autoattribute:: cuda.bindings.driver.CUmoduleLoadingMode.CU_MODULE_EAGER_LOADING + + + Lazy Kernel Loading is not enabled + + + .. autoattribute:: cuda.bindings.driver.CUmoduleLoadingMode.CU_MODULE_LAZY_LOADING + + + Lazy Kernel Loading is enabled + +.. autofunction:: cuda.bindings.driver.cuModuleLoad +.. 
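note::
+
+    A minimal loading sketch; the module file name ``saxpy.cubin`` and kernel
+    name ``saxpy`` are placeholders, and :py:obj:`~.cuModuleGetFunction` /
+    :py:obj:`~.cuModuleUnload` are documented just below:
+
+    .. code-block:: python
+
+        from cuda.bindings import driver
+
+        # Load a compiled module (cubin, PTX or fatbin) from disk.
+        err, module = driver.cuModuleLoad(b"saxpy.cubin")
+        assert err == driver.CUresult.CUDA_SUCCESS
+
+        # Look up a kernel by its (mangled) name.
+        err, kernel = driver.cuModuleGetFunction(module, b"saxpy")
+        assert err == driver.CUresult.CUDA_SUCCESS
+
+        # ... launch `kernel` (see Execution Control), then clean up ...
+        err, = driver.cuModuleUnload(module)
+
+.. 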
autofunction:: cuda.bindings.driver.cuModuleLoadData +.. autofunction:: cuda.bindings.driver.cuModuleLoadDataEx +.. autofunction:: cuda.bindings.driver.cuModuleLoadFatBinary +.. autofunction:: cuda.bindings.driver.cuModuleUnload +.. autofunction:: cuda.bindings.driver.cuModuleGetLoadingMode +.. autofunction:: cuda.bindings.driver.cuModuleGetFunction +.. autofunction:: cuda.bindings.driver.cuModuleGetFunctionCount +.. autofunction:: cuda.bindings.driver.cuModuleEnumerateFunctions +.. autofunction:: cuda.bindings.driver.cuModuleGetGlobal +.. autofunction:: cuda.bindings.driver.cuLinkCreate +.. autofunction:: cuda.bindings.driver.cuLinkAddData +.. autofunction:: cuda.bindings.driver.cuLinkAddFile +.. autofunction:: cuda.bindings.driver.cuLinkComplete +.. autofunction:: cuda.bindings.driver.cuLinkDestroy + +Library Management +------------------ + +This section describes the library management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuLibraryLoadData +.. autofunction:: cuda.bindings.driver.cuLibraryLoadFromFile +.. autofunction:: cuda.bindings.driver.cuLibraryUnload +.. autofunction:: cuda.bindings.driver.cuLibraryGetKernel +.. autofunction:: cuda.bindings.driver.cuLibraryGetKernelCount +.. autofunction:: cuda.bindings.driver.cuLibraryEnumerateKernels +.. autofunction:: cuda.bindings.driver.cuLibraryGetModule +.. autofunction:: cuda.bindings.driver.cuKernelGetFunction +.. autofunction:: cuda.bindings.driver.cuKernelGetLibrary +.. autofunction:: cuda.bindings.driver.cuLibraryGetGlobal +.. autofunction:: cuda.bindings.driver.cuLibraryGetManaged +.. autofunction:: cuda.bindings.driver.cuLibraryGetUnifiedFunction +.. autofunction:: cuda.bindings.driver.cuKernelGetAttribute +.. autofunction:: cuda.bindings.driver.cuKernelSetAttribute +.. autofunction:: cuda.bindings.driver.cuKernelSetCacheConfig +.. autofunction:: cuda.bindings.driver.cuKernelGetName +.. autofunction:: cuda.bindings.driver.cuKernelGetParamInfo + +Memory Management +----------------- + +This section describes the memory management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuMemGetInfo +.. autofunction:: cuda.bindings.driver.cuMemAlloc +.. autofunction:: cuda.bindings.driver.cuMemAllocPitch +.. autofunction:: cuda.bindings.driver.cuMemFree +.. autofunction:: cuda.bindings.driver.cuMemGetAddressRange +.. autofunction:: cuda.bindings.driver.cuMemAllocHost +.. autofunction:: cuda.bindings.driver.cuMemFreeHost +.. autofunction:: cuda.bindings.driver.cuMemHostAlloc +.. autofunction:: cuda.bindings.driver.cuMemHostGetDevicePointer +.. autofunction:: cuda.bindings.driver.cuMemHostGetFlags +.. autofunction:: cuda.bindings.driver.cuMemAllocManaged +.. autofunction:: cuda.bindings.driver.cuDeviceRegisterAsyncNotification +.. autofunction:: cuda.bindings.driver.cuDeviceUnregisterAsyncNotification +.. autofunction:: cuda.bindings.driver.cuDeviceGetByPCIBusId +.. autofunction:: cuda.bindings.driver.cuDeviceGetPCIBusId +.. autofunction:: cuda.bindings.driver.cuIpcGetEventHandle +.. autofunction:: cuda.bindings.driver.cuIpcOpenEventHandle +.. autofunction:: cuda.bindings.driver.cuIpcGetMemHandle +.. autofunction:: cuda.bindings.driver.cuIpcOpenMemHandle +.. autofunction:: cuda.bindings.driver.cuIpcCloseMemHandle +.. autofunction:: cuda.bindings.driver.cuMemHostRegister +.. autofunction:: cuda.bindings.driver.cuMemHostUnregister +.. autofunction:: cuda.bindings.driver.cuMemcpy +.. 
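note::
+
+    A minimal allocate/copy/free round trip, assuming a current context; NumPy
+    is used here only as a convenient host buffer and is an assumption of this
+    sketch, not a requirement of the API (``cuMemcpyHtoD`` and ``cuMemcpyDtoH``
+    are documented just below):
+
+    .. code-block:: python
+
+        import numpy as np
+        from cuda.bindings import driver
+
+        host = np.arange(1024, dtype=np.float32)
+
+        err, dptr = driver.cuMemAlloc(host.nbytes)           # device buffer
+        err, = driver.cuMemcpyHtoD(dptr, host, host.nbytes)  # host -> device
+
+        out = np.empty_like(host)
+        err, = driver.cuMemcpyDtoH(out, dptr, out.nbytes)    # device -> host
+        err, = driver.cuMemFree(dptr)
+
+.. 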
autofunction:: cuda.bindings.driver.cuMemcpyPeer +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoD +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoH +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoD +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoA +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoD +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoA +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoH +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoA +.. autofunction:: cuda.bindings.driver.cuMemcpy2D +.. autofunction:: cuda.bindings.driver.cuMemcpy2DUnaligned +.. autofunction:: cuda.bindings.driver.cuMemcpy3D +.. autofunction:: cuda.bindings.driver.cuMemcpy3DPeer +.. autofunction:: cuda.bindings.driver.cuMemcpyAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyPeerAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoDAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoHAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyDtoDAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyHtoAAsync +.. autofunction:: cuda.bindings.driver.cuMemcpyAtoHAsync +.. autofunction:: cuda.bindings.driver.cuMemcpy2DAsync +.. autofunction:: cuda.bindings.driver.cuMemcpy3DAsync +.. autofunction:: cuda.bindings.driver.cuMemcpy3DPeerAsync +.. autofunction:: cuda.bindings.driver.cuMemsetD8 +.. autofunction:: cuda.bindings.driver.cuMemsetD16 +.. autofunction:: cuda.bindings.driver.cuMemsetD32 +.. autofunction:: cuda.bindings.driver.cuMemsetD2D8 +.. autofunction:: cuda.bindings.driver.cuMemsetD2D16 +.. autofunction:: cuda.bindings.driver.cuMemsetD2D32 +.. autofunction:: cuda.bindings.driver.cuMemsetD8Async +.. autofunction:: cuda.bindings.driver.cuMemsetD16Async +.. autofunction:: cuda.bindings.driver.cuMemsetD32Async +.. autofunction:: cuda.bindings.driver.cuMemsetD2D8Async +.. autofunction:: cuda.bindings.driver.cuMemsetD2D16Async +.. autofunction:: cuda.bindings.driver.cuMemsetD2D32Async +.. autofunction:: cuda.bindings.driver.cuArrayCreate +.. autofunction:: cuda.bindings.driver.cuArrayGetDescriptor +.. autofunction:: cuda.bindings.driver.cuArrayGetSparseProperties +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayGetSparseProperties +.. autofunction:: cuda.bindings.driver.cuArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.driver.cuArrayGetPlane +.. autofunction:: cuda.bindings.driver.cuArrayDestroy +.. autofunction:: cuda.bindings.driver.cuArray3DCreate +.. autofunction:: cuda.bindings.driver.cuArray3DGetDescriptor +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayCreate +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayGetLevel +.. autofunction:: cuda.bindings.driver.cuMipmappedArrayDestroy +.. autofunction:: cuda.bindings.driver.cuMemGetHandleForAddressRange + +Virtual Memory Management +------------------------- + +This section describes the virtual memory management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuMemAddressReserve +.. autofunction:: cuda.bindings.driver.cuMemAddressFree +.. autofunction:: cuda.bindings.driver.cuMemCreate +.. autofunction:: cuda.bindings.driver.cuMemRelease +.. autofunction:: cuda.bindings.driver.cuMemMap +.. autofunction:: cuda.bindings.driver.cuMemMapArrayAsync +.. autofunction:: cuda.bindings.driver.cuMemUnmap +.. autofunction:: cuda.bindings.driver.cuMemSetAccess +.. autofunction:: cuda.bindings.driver.cuMemGetAccess +.. 
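note::
+
+    A sketch of the reserve/create/map/set-access sequence using the virtual
+    memory management functions above; the argument orders here mirror the C
+    API and should be treated as assumptions (check each entry for the exact
+    signature). Assumes a current context on device 0:
+
+    .. code-block:: python
+
+        from cuda.bindings import driver
+
+        # Describe a pinned allocation located on device 0.
+        prop = driver.CUmemAllocationProp()
+        prop.type = driver.CUmemAllocationType.CU_MEM_ALLOCATION_TYPE_PINNED
+        prop.location.type = driver.CUmemLocationType.CU_MEM_LOCATION_TYPE_DEVICE
+        prop.location.id = 0
+
+        # One minimum-granularity granule is the smallest legal size.
+        err, size = driver.cuMemGetAllocationGranularity(
+            prop,
+            driver.CUmemAllocationGranularity_flags.CU_MEM_ALLOC_GRANULARITY_MINIMUM)
+
+        err, handle = driver.cuMemCreate(size, prop, 0)       # physical memory
+        err, ptr = driver.cuMemAddressReserve(size, 0, 0, 0)  # virtual range
+        err, = driver.cuMemMap(ptr, size, 0, handle, 0)       # map them together
+
+        # Grant read/write access before any use of `ptr`.
+        desc = driver.CUmemAccessDesc()
+        desc.location = prop.location
+        desc.flags = driver.CUmemAccess_flags.CU_MEM_ACCESS_FLAGS_PROT_READWRITE
+        err, = driver.cuMemSetAccess(ptr, size, [desc], 1)
+
+        # ... use `ptr`, then unwind in reverse order ...
+        err, = driver.cuMemUnmap(ptr, size)
+        err, = driver.cuMemRelease(handle)
+        err, = driver.cuMemAddressFree(ptr, size)
+
+.. 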
autofunction:: cuda.bindings.driver.cuMemExportToShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemImportFromShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemGetAllocationGranularity +.. autofunction:: cuda.bindings.driver.cuMemGetAllocationPropertiesFromHandle +.. autofunction:: cuda.bindings.driver.cuMemRetainAllocationHandle + +Stream Ordered Memory Allocator +------------------------------- + +This section describes the stream ordered memory allocator exposed by the low-level CUDA driver application programming interface. + + + + + +**Overview** + + + +The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use before allocation / use after free error will cause undefined behavior. + +The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + + + + + +**Supported Platforms** + + + +Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED. + +.. autofunction:: cuda.bindings.driver.cuMemFreeAsync +.. autofunction:: cuda.bindings.driver.cuMemAllocAsync +.. autofunction:: cuda.bindings.driver.cuMemPoolTrimTo +.. autofunction:: cuda.bindings.driver.cuMemPoolSetAttribute +.. autofunction:: cuda.bindings.driver.cuMemPoolGetAttribute +.. autofunction:: cuda.bindings.driver.cuMemPoolSetAccess +.. autofunction:: cuda.bindings.driver.cuMemPoolGetAccess +.. autofunction:: cuda.bindings.driver.cuMemPoolCreate +.. autofunction:: cuda.bindings.driver.cuMemPoolDestroy +.. autofunction:: cuda.bindings.driver.cuMemAllocFromPoolAsync +.. autofunction:: cuda.bindings.driver.cuMemPoolExportToShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemPoolImportFromShareableHandle +.. autofunction:: cuda.bindings.driver.cuMemPoolExportPointer +.. autofunction:: cuda.bindings.driver.cuMemPoolImportPointer + +Multicast Object Management +--------------------------- + +This section describes the CUDA multicast object operations exposed by the low-level CUDA driver application programming interface. + + + + + +**Overview** + + + +A multicast object created via cuMulticastCreate enables certain memory operations to be broadcast to a team of devices. Devices can be added to a multicast object via cuMulticastAddDevice. Memory can be bound on each participating device via either cuMulticastBindMem or cuMulticastBindAddr. Multicast objects can be mapped into a device's virtual address space using the virtual memory management APIs (see cuMemMap and cuMemSetAccess). + + + + + +**Supported Platforms** + + + +Support for multicast on a specific device can be queried using the device attribute CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED. + +.. autofunction:: cuda.bindings.driver.cuMulticastCreate +.. autofunction:: cuda.bindings.driver.cuMulticastAddDevice +.. autofunction:: cuda.bindings.driver.cuMulticastBindMem +.. autofunction:: cuda.bindings.driver.cuMulticastBindAddr +.. 
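note::
+
+    Returning to the stream ordered allocator described above: a minimal
+    sketch, assuming a current context whose device reports
+    CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED (error checks elided):
+
+    .. code-block:: python
+
+        from cuda.bindings import driver
+
+        err, stream = driver.cuStreamCreate(0)
+
+        # The allocation becomes valid, and must only be used, in stream order.
+        err, dptr = driver.cuMemAllocAsync(1 << 20, stream)
+        # ... enqueue work in `stream` that uses dptr ...
+        err, = driver.cuMemFreeAsync(dptr, stream)    # free in the same order
+
+        err, = driver.cuStreamSynchronize(stream)
+
+.. 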
autofunction:: cuda.bindings.driver.cuMulticastUnbind +.. autofunction:: cuda.bindings.driver.cuMulticastGetGranularity + +Unified Addressing +------------------ + +This section describes the unified addressing functions of the low-level CUDA driver application programming interface. + + + + + +**Overview** + + + +CUDA devices can share a unified address space with the host. For these devices there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). + + + + + +**Supported Platforms** + + + +Whether or not a device supports unified addressing may be queried by calling cuDeviceGetAttribute() with the device attribute CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING. + +Unified addressing is automatically enabled in 64-bit processes. + + + + + +**Looking Up Information from Pointer Values** + + + +It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cuPointerGetAttribute(). + +Since pointers are unique, it is not necessary to specify information about the pointers specified to the various copy functions in the CUDA API. The function cuMemcpy() may be used to perform a copy between two pointers, ignoring whether they point to host or device memory (making cuMemcpyHtoD(), cuMemcpyDtoD(), and cuMemcpyDtoH() unnecessary for devices supporting unified addressing). For multidimensional copies, the memory type CU_MEMORYTYPE_UNIFIED may be used to specify that the CUDA driver should infer the location of the pointer from its value. + + + + + +**Automatic Mapping of Host Allocated Host Memory** + + + +All host memory allocated in all contexts using cuMemAllocHost() and cuMemHostAlloc() is always directly accessible from all contexts on all devices that support unified addressing. This is the case regardless of whether or not the flags CU_MEMHOSTALLOC_PORTABLE and CU_MEMHOSTALLOC_DEVICEMAP are specified. + +The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host, so it is not necessary to call cuMemHostGetDevicePointer() to get the device pointer for these allocations. + +Note that this is not the case for memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below. + + + + + +**Automatic Registration of Peer Memory** + + + +Upon enabling direct access from a context that supports unified addressing to another peer context that supports unified addressing using cuCtxEnablePeerAccess(), all memory allocated in the peer context using cuMemAlloc() and cuMemAllocPitch() will immediately be accessible by the current context. The device pointer value through which any peer memory may be accessed in the current context is the same pointer value through which that memory may be accessed in the peer context. + + + + + +**Exceptions, Disjoint Addressing** + + + +Not all memory may be accessed on devices through the same pointer value through which it is accessed on the host. These exceptions are host memory registered using cuMemHostRegister() and host memory allocated using the flag CU_MEMHOSTALLOC_WRITECOMBINED. 
For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all contexts that support unified addressing. + +This device address may be queried using cuMemHostGetDevicePointer() when a context using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory through cuMemcpy() and similar functions using the CU_MEMORYTYPE_UNIFIED memory type. + +.. autofunction:: cuda.bindings.driver.cuPointerGetAttribute +.. autofunction:: cuda.bindings.driver.cuMemPrefetchAsync +.. autofunction:: cuda.bindings.driver.cuMemPrefetchAsync_v2 +.. autofunction:: cuda.bindings.driver.cuMemAdvise +.. autofunction:: cuda.bindings.driver.cuMemAdvise_v2 +.. autofunction:: cuda.bindings.driver.cuMemRangeGetAttribute +.. autofunction:: cuda.bindings.driver.cuMemRangeGetAttributes +.. autofunction:: cuda.bindings.driver.cuPointerSetAttribute +.. autofunction:: cuda.bindings.driver.cuPointerGetAttributes + +Stream Management +----------------- + +This section describes the stream management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuStreamCreate +.. autofunction:: cuda.bindings.driver.cuStreamCreateWithPriority +.. autofunction:: cuda.bindings.driver.cuStreamGetPriority +.. autofunction:: cuda.bindings.driver.cuStreamGetFlags +.. autofunction:: cuda.bindings.driver.cuStreamGetId +.. autofunction:: cuda.bindings.driver.cuStreamGetCtx +.. autofunction:: cuda.bindings.driver.cuStreamGetCtx_v2 +.. autofunction:: cuda.bindings.driver.cuStreamWaitEvent +.. autofunction:: cuda.bindings.driver.cuStreamAddCallback +.. autofunction:: cuda.bindings.driver.cuStreamBeginCapture +.. autofunction:: cuda.bindings.driver.cuStreamBeginCaptureToGraph +.. autofunction:: cuda.bindings.driver.cuThreadExchangeStreamCaptureMode +.. autofunction:: cuda.bindings.driver.cuStreamEndCapture +.. autofunction:: cuda.bindings.driver.cuStreamIsCapturing +.. autofunction:: cuda.bindings.driver.cuStreamGetCaptureInfo +.. autofunction:: cuda.bindings.driver.cuStreamGetCaptureInfo_v3 +.. autofunction:: cuda.bindings.driver.cuStreamUpdateCaptureDependencies +.. autofunction:: cuda.bindings.driver.cuStreamUpdateCaptureDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuStreamAttachMemAsync +.. autofunction:: cuda.bindings.driver.cuStreamQuery +.. autofunction:: cuda.bindings.driver.cuStreamSynchronize +.. autofunction:: cuda.bindings.driver.cuStreamDestroy +.. autofunction:: cuda.bindings.driver.cuStreamCopyAttributes +.. autofunction:: cuda.bindings.driver.cuStreamGetAttribute +.. autofunction:: cuda.bindings.driver.cuStreamSetAttribute + +Event Management +---------------- + +This section describes the event management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuEventCreate +.. autofunction:: cuda.bindings.driver.cuEventRecord +.. autofunction:: cuda.bindings.driver.cuEventRecordWithFlags +.. autofunction:: cuda.bindings.driver.cuEventQuery +.. autofunction:: cuda.bindings.driver.cuEventSynchronize +.. autofunction:: cuda.bindings.driver.cuEventDestroy +.. 
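note::
+
+    A minimal GPU timing sketch using the event functions above together with
+    :py:obj:`~.cuEventElapsedTime` (documented next); assumes a current
+    context:
+
+    .. code-block:: python
+
+        from cuda.bindings import driver
+
+        err, stream = driver.cuStreamCreate(0)
+        err, start = driver.cuEventCreate(0)
+        err, stop = driver.cuEventCreate(0)
+
+        err, = driver.cuEventRecord(start, stream)
+        # ... enqueue the work to be timed in `stream` ...
+        err, = driver.cuEventRecord(stop, stream)
+
+        err, = driver.cuEventSynchronize(stop)             # wait for `stop`
+        err, ms = driver.cuEventElapsedTime(start, stop)   # elapsed milliseconds
+
+        err, = driver.cuEventDestroy(start)
+        err, = driver.cuEventDestroy(stop)
+
+.. 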
autofunction:: cuda.bindings.driver.cuEventElapsedTime + +External Resource Interoperability +---------------------------------- + +This section describes the external resource interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuImportExternalMemory +.. autofunction:: cuda.bindings.driver.cuExternalMemoryGetMappedBuffer +.. autofunction:: cuda.bindings.driver.cuExternalMemoryGetMappedMipmappedArray +.. autofunction:: cuda.bindings.driver.cuDestroyExternalMemory +.. autofunction:: cuda.bindings.driver.cuImportExternalSemaphore +.. autofunction:: cuda.bindings.driver.cuSignalExternalSemaphoresAsync +.. autofunction:: cuda.bindings.driver.cuWaitExternalSemaphoresAsync +.. autofunction:: cuda.bindings.driver.cuDestroyExternalSemaphore + +Stream Memory Operations +------------------------ + +This section describes the stream memory operations of the low-level CUDA driver application programming interface. + + + +Support for the CU_STREAM_WAIT_VALUE_NOR flag can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. + + + +Support for the cuStreamWriteValue64() and cuStreamWaitValue64() functions, as well as for the CU_STREAM_MEM_OP_WAIT_VALUE_64 and CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + + + +Support for both CU_STREAM_WAIT_VALUE_FLUSH and CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform hardware features and can be queried with cuDeviceGetAttribute() and CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES. + + + +Note that all memory pointers passed as parameters to these operations are device pointers. Where necessary a device pointer should be obtained, for example with cuMemHostGetDevicePointer(). + + + +None of the operations accepts pointers to managed memory buffers (cuMemAllocManaged). + + + +Warning: Improper use of these APIs may deadlock the application. Synchronization ordering established through these APIs is not visible to CUDA. CUDA tasks that are (even indirectly) ordered by these APIs should also have that order expressed with CUDA-visible dependencies such as events. This ensures that the scheduler does not serialize them in an improper order. + +.. autofunction:: cuda.bindings.driver.cuStreamWaitValue32 +.. autofunction:: cuda.bindings.driver.cuStreamWaitValue64 +.. autofunction:: cuda.bindings.driver.cuStreamWriteValue32 +.. autofunction:: cuda.bindings.driver.cuStreamWriteValue64 +.. autofunction:: cuda.bindings.driver.cuStreamBatchMemOp + +Execution Control +----------------- + +This section describes the execution control functions of the low-level CUDA driver application programming interface. + +.. autoclass:: cuda.bindings.driver.CUfunctionLoadingState + + .. autoattribute:: cuda.bindings.driver.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_UNLOADED + + + .. autoattribute:: cuda.bindings.driver.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_LOADED + + + .. autoattribute:: cuda.bindings.driver.CUfunctionLoadingState.CU_FUNCTION_LOADING_STATE_MAX + +.. autofunction:: cuda.bindings.driver.cuFuncGetAttribute +.. autofunction:: cuda.bindings.driver.cuFuncSetAttribute +.. autofunction:: cuda.bindings.driver.cuFuncSetCacheConfig +.. autofunction:: cuda.bindings.driver.cuFuncGetModule +.. autofunction:: cuda.bindings.driver.cuFuncGetName +.. autofunction:: cuda.bindings.driver.cuFuncGetParamInfo +.. autofunction:: cuda.bindings.driver.cuFuncIsLoaded +.. 
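note::
+
+    A minimal launch sketch for a parameterless kernel; ``kernel`` is assumed
+    to come from :py:obj:`~.cuModuleGetFunction` and ``stream`` from
+    :py:obj:`~.cuStreamCreate` (:py:obj:`~.cuLaunchKernel` is documented just
+    below):
+
+    .. code-block:: python
+
+        from cuda.bindings import driver
+
+        err, = driver.cuLaunchKernel(
+            kernel,        # CUfunction
+            32, 1, 1,      # grid dimensions, in blocks
+            128, 1, 1,     # block dimensions, in threads
+            0,             # dynamic shared memory, in bytes
+            stream,        # CUstream
+            0,             # kernelParams: none for a parameterless kernel
+            0)             # extra: unused here
+        err, = driver.cuStreamSynchronize(stream)
+
+.. 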
autofunction:: cuda.bindings.driver.cuFuncLoad +.. autofunction:: cuda.bindings.driver.cuLaunchKernel +.. autofunction:: cuda.bindings.driver.cuLaunchKernelEx +.. autofunction:: cuda.bindings.driver.cuLaunchCooperativeKernel +.. autofunction:: cuda.bindings.driver.cuLaunchCooperativeKernelMultiDevice +.. autofunction:: cuda.bindings.driver.cuLaunchHostFunc + +Graph Management +---------------- + +This section describes the graph management functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGraphCreate +.. autofunction:: cuda.bindings.driver.cuGraphAddKernelNode +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemcpyNode +.. autofunction:: cuda.bindings.driver.cuGraphMemcpyNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphMemcpyNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemsetNode +.. autofunction:: cuda.bindings.driver.cuGraphMemsetNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphMemsetNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddHostNode +.. autofunction:: cuda.bindings.driver.cuGraphHostNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphHostNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddChildGraphNode +.. autofunction:: cuda.bindings.driver.cuGraphChildGraphNodeGetGraph +.. autofunction:: cuda.bindings.driver.cuGraphAddEmptyNode +.. autofunction:: cuda.bindings.driver.cuGraphAddEventRecordNode +.. autofunction:: cuda.bindings.driver.cuGraphEventRecordNodeGetEvent +.. autofunction:: cuda.bindings.driver.cuGraphEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphAddEventWaitNode +.. autofunction:: cuda.bindings.driver.cuGraphEventWaitNodeGetEvent +.. autofunction:: cuda.bindings.driver.cuGraphEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphAddExternalSemaphoresSignalNode +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresSignalNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddExternalSemaphoresWaitNode +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresWaitNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphExternalSemaphoresWaitNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddBatchMemOpNode +.. autofunction:: cuda.bindings.driver.cuGraphBatchMemOpNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphBatchMemOpNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecBatchMemOpNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemAllocNode +.. autofunction:: cuda.bindings.driver.cuGraphMemAllocNodeGetParams +.. autofunction:: cuda.bindings.driver.cuGraphAddMemFreeNode +.. autofunction:: cuda.bindings.driver.cuGraphMemFreeNodeGetParams +.. autofunction:: cuda.bindings.driver.cuDeviceGraphMemTrim +.. autofunction:: cuda.bindings.driver.cuDeviceGetGraphMemAttribute +.. autofunction:: cuda.bindings.driver.cuDeviceSetGraphMemAttribute +.. autofunction:: cuda.bindings.driver.cuGraphClone +.. autofunction:: cuda.bindings.driver.cuGraphNodeFindInClone +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetType +.. autofunction:: cuda.bindings.driver.cuGraphGetNodes +.. autofunction:: cuda.bindings.driver.cuGraphGetRootNodes +.. autofunction:: cuda.bindings.driver.cuGraphGetEdges +.. 
autofunction:: cuda.bindings.driver.cuGraphGetEdges_v2 +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependencies +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependentNodes +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetDependentNodes_v2 +.. autofunction:: cuda.bindings.driver.cuGraphAddDependencies +.. autofunction:: cuda.bindings.driver.cuGraphAddDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuGraphRemoveDependencies +.. autofunction:: cuda.bindings.driver.cuGraphRemoveDependencies_v2 +.. autofunction:: cuda.bindings.driver.cuGraphDestroyNode +.. autofunction:: cuda.bindings.driver.cuGraphInstantiate +.. autofunction:: cuda.bindings.driver.cuGraphInstantiateWithParams +.. autofunction:: cuda.bindings.driver.cuGraphExecGetFlags +.. autofunction:: cuda.bindings.driver.cuGraphExecKernelNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecMemcpyNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecMemsetNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecHostNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecChildGraphNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphExecEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.driver.cuGraphExecExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecExternalSemaphoresWaitNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphNodeSetEnabled +.. autofunction:: cuda.bindings.driver.cuGraphNodeGetEnabled +.. autofunction:: cuda.bindings.driver.cuGraphUpload +.. autofunction:: cuda.bindings.driver.cuGraphLaunch +.. autofunction:: cuda.bindings.driver.cuGraphExecDestroy +.. autofunction:: cuda.bindings.driver.cuGraphDestroy +.. autofunction:: cuda.bindings.driver.cuGraphExecUpdate +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeCopyAttributes +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeGetAttribute +.. autofunction:: cuda.bindings.driver.cuGraphKernelNodeSetAttribute +.. autofunction:: cuda.bindings.driver.cuGraphDebugDotPrint +.. autofunction:: cuda.bindings.driver.cuUserObjectCreate +.. autofunction:: cuda.bindings.driver.cuUserObjectRetain +.. autofunction:: cuda.bindings.driver.cuUserObjectRelease +.. autofunction:: cuda.bindings.driver.cuGraphRetainUserObject +.. autofunction:: cuda.bindings.driver.cuGraphReleaseUserObject +.. autofunction:: cuda.bindings.driver.cuGraphAddNode +.. autofunction:: cuda.bindings.driver.cuGraphAddNode_v2 +.. autofunction:: cuda.bindings.driver.cuGraphNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphExecNodeSetParams +.. autofunction:: cuda.bindings.driver.cuGraphConditionalHandleCreate + +Occupancy +--------- + +This section describes the occupancy calculation functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxActiveBlocksPerMultiprocessor +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxPotentialBlockSize +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxPotentialBlockSizeWithFlags +.. autofunction:: cuda.bindings.driver.cuOccupancyAvailableDynamicSMemPerBlock +.. autofunction:: cuda.bindings.driver.cuOccupancyMaxPotentialClusterSize +.. 
autofunction:: cuda.bindings.driver.cuOccupancyMaxActiveClusters + +Texture Object Management +------------------------- + +This section describes the texture object management functions of the low-level CUDA driver application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. + +.. autofunction:: cuda.bindings.driver.cuTexObjectCreate +.. autofunction:: cuda.bindings.driver.cuTexObjectDestroy +.. autofunction:: cuda.bindings.driver.cuTexObjectGetResourceDesc +.. autofunction:: cuda.bindings.driver.cuTexObjectGetTextureDesc +.. autofunction:: cuda.bindings.driver.cuTexObjectGetResourceViewDesc + +Surface Object Management +------------------------- + +This section describes the surface object management functions of the low-level CUDA driver application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. + +.. autofunction:: cuda.bindings.driver.cuSurfObjectCreate +.. autofunction:: cuda.bindings.driver.cuSurfObjectDestroy +.. autofunction:: cuda.bindings.driver.cuSurfObjectGetResourceDesc + +Tensor Map Object Management +---------------------------- + +This section describes the tensor map object management functions of the low-level CUDA driver application programming interface. The tensor core API is only supported on devices of compute capability 9.0 or higher. + +.. autofunction:: cuda.bindings.driver.cuTensorMapEncodeTiled +.. autofunction:: cuda.bindings.driver.cuTensorMapEncodeIm2col +.. autofunction:: cuda.bindings.driver.cuTensorMapReplaceAddress + +Peer Context Memory Access +-------------------------- + +This section describes the direct peer context memory access functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuDeviceCanAccessPeer +.. autofunction:: cuda.bindings.driver.cuCtxEnablePeerAccess +.. autofunction:: cuda.bindings.driver.cuCtxDisablePeerAccess +.. autofunction:: cuda.bindings.driver.cuDeviceGetP2PAttribute + +Graphics Interoperability +------------------------- + +This section describes the graphics interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGraphicsUnregisterResource +.. autofunction:: cuda.bindings.driver.cuGraphicsSubResourceGetMappedArray +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceGetMappedMipmappedArray +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceGetMappedPointer +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceSetMapFlags +.. autofunction:: cuda.bindings.driver.cuGraphicsMapResources +.. autofunction:: cuda.bindings.driver.cuGraphicsUnmapResources + +Driver Entry Point Access +------------------------- + +This section describes the driver entry point access functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGetProcAddress + +Coredump Attributes Control API +------------------------------- + +This section describes the coredump attribute control functions of the low-level CUDA driver application programming interface. + +.. autoclass:: cuda.bindings.driver.CUcoredumpSettings + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_ENABLE_ON_EXCEPTION + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_TRIGGER_HOST + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_LIGHTWEIGHT + + + .. 
autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_ENABLE_USER_TRIGGER + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_FILE + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_PIPE + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_GENERATION_FLAGS + + + .. autoattribute:: cuda.bindings.driver.CUcoredumpSettings.CU_COREDUMP_MAX + +.. autoclass:: cuda.bindings.driver.CUCoredumpGenerationFlags + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_DEFAULT_FLAGS + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_NONRELOCATED_ELF_IMAGES + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_GLOBAL_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_SHARED_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_LOCAL_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_ABORT + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_SKIP_CONSTBANK_MEMORY + + + .. autoattribute:: cuda.bindings.driver.CUCoredumpGenerationFlags.CU_COREDUMP_LIGHTWEIGHT_FLAGS + +.. autofunction:: cuda.bindings.driver.cuCoredumpGetAttribute +.. autofunction:: cuda.bindings.driver.cuCoredumpGetAttributeGlobal +.. autofunction:: cuda.bindings.driver.cuCoredumpSetAttribute +.. autofunction:: cuda.bindings.driver.cuCoredumpSetAttributeGlobal + +Green Contexts +-------------- + +This section describes the APIs for creation and manipulation of green contexts in the CUDA driver. Green contexts are a lightweight alternative to traditional contexts, with the ability to pass in a set of resources that they should be initialized with. This allows the developer to represent distinct spatial partitions of the GPU, provision resources for them, and target them via the same programming model that CUDA exposes (streams, kernel launches, etc.). + + + +There are four main steps to using this new set of APIs (a minimal sketch follows the guidelines below). + +- (1) Start with an initial set of resources, for example via cuDeviceGetDevResource. Only SM type is supported today. + + + + + + + +- (2) Partition this set of resources by providing them as input to a partition API, for example: cuDevSmResourceSplitByCount. + + + + + + + +- (3) Finalize the specification of resources by creating a descriptor via cuDevResourceGenerateDesc. + + + + + + + +- (4) Provision the resources and create a green context via cuGreenCtxCreate. + + + + + + + + + + + +For ``CU_DEV_RESOURCE_TYPE_SM``\ , the partitions created have minimum SM count requirements, often rounding up and aligning the minCount provided to cuDevSmResourceSplitByCount. The following is a guideline for each architecture and may be subject to change: + +- On Compute Architecture 6.X: The minimum count is 1 SM. + + + + + + + +- On Compute Architecture 7.X: The minimum count is 2 SMs and must be a multiple of 2. + + + + + + + +- On Compute Architecture 8.X: The minimum count is 4 SMs and must be a multiple of 2. + + + + + + + +- On Compute Architecture 9.0+: The minimum count is 8 SMs and must be a multiple of 8. + + + + + + + + + + + +In the future, flags can be provided to trade off functional and performance characteristics versus finer-grained SM partitions.
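+A minimal end-to-end sketch of the four steps above, written for these Python bindings. It assumes the bindings' tuple-return convention (each call returns its ``CUresult`` first) and that the argument/result ordering mirrors the C API; treat it as an illustration rather than a verbatim recipe:
+
+.. code-block:: python
+
+    from cuda.bindings import driver
+
+    err, = driver.cuInit(0)
+    err, dev = driver.cuDeviceGet(0)
+
+    # (1) Query the device's SM resource (the only resource type supported today).
+    err, sm_resource = driver.cuDeviceGetDevResource(
+        dev, driver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM)
+
+    # (2) Split off one partition with at least 8 SMs (the 9.0+ minimum);
+    #     the call also returns the updated group count and the remainder.
+    err, groups, nb_groups, remaining = driver.cuDevSmResourceSplitByCount(
+        1, sm_resource, 0, 8)
+
+    # (3) Finalize the partition into a resource descriptor.
+    err, desc = driver.cuDevResourceGenerateDesc([groups[0]], 1)
+
+    # (4) Provision the partition as a green context with a default stream.
+    err, green_ctx = driver.cuGreenCtxCreate(
+        desc, dev, driver.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM)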
+ + + +Even if the green contexts have disjoint SM partitions, it is not guaranteed that the kernels launched in them will run concurrently or have forward progress guarantees. This is due to other resources (like HW connections, see ::CUDA_DEVICE_MAX_CONNECTIONS) that could cause a dependency. Additionally, in certain scenarios, it is possible for the workload to run on more SMs than were provisioned (but never fewer). The following are two scenarios which can exhibit this behavior: + +- On Volta+ MPS: When ``CUDA_MPS_ACTIVE_THREAD_PERCENTAGE`` is used, the set of SMs that are used for running kernels can be scaled up to the value of SMs used for the MPS client. + + + + + + + +- On Compute Architecture 9.x: When a module with dynamic parallelism (CDP) is loaded, all future kernels running under green contexts may use and share an additional set of 2 SMs. + +.. autoclass:: cuda.bindings.driver.CUdevSmResource_st +.. autoclass:: cuda.bindings.driver.CUdevResource_st +.. autoclass:: cuda.bindings.driver.CUdevSmResource +.. autoclass:: cuda.bindings.driver.CUdevResource +.. autoclass:: cuda.bindings.driver.CUgreenCtxCreate_flags + + .. autoattribute:: cuda.bindings.driver.CUgreenCtxCreate_flags.CU_GREEN_CTX_DEFAULT_STREAM + + + Required. Creates a default stream to use inside the green context + +.. autoclass:: cuda.bindings.driver.CUdevSmResourceSplit_flags + + .. autoattribute:: cuda.bindings.driver.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_IGNORE_SM_COSCHEDULING + + + .. autoattribute:: cuda.bindings.driver.CUdevSmResourceSplit_flags.CU_DEV_SM_RESOURCE_SPLIT_MAX_POTENTIAL_CLUSTER_SIZE + +.. autoclass:: cuda.bindings.driver.CUdevResourceType + + .. autoattribute:: cuda.bindings.driver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_INVALID + + + .. autoattribute:: cuda.bindings.driver.CUdevResourceType.CU_DEV_RESOURCE_TYPE_SM + + + Streaming multiprocessors related information + +.. autoclass:: cuda.bindings.driver.CUdevResourceDesc +.. autoclass:: cuda.bindings.driver.CUdevSmResource +.. autofunction:: cuda.bindings.driver._CONCAT_OUTER +.. autofunction:: cuda.bindings.driver.cuGreenCtxCreate +.. autofunction:: cuda.bindings.driver.cuGreenCtxDestroy +.. autofunction:: cuda.bindings.driver.cuCtxFromGreenCtx +.. autofunction:: cuda.bindings.driver.cuDeviceGetDevResource +.. autofunction:: cuda.bindings.driver.cuCtxGetDevResource +.. autofunction:: cuda.bindings.driver.cuGreenCtxGetDevResource +.. autofunction:: cuda.bindings.driver.cuDevSmResourceSplitByCount +.. autofunction:: cuda.bindings.driver.cuDevResourceGenerateDesc +.. autofunction:: cuda.bindings.driver.cuGreenCtxRecordEvent +.. autofunction:: cuda.bindings.driver.cuGreenCtxWaitEvent +.. autofunction:: cuda.bindings.driver.cuStreamGetGreenCtx +.. autofunction:: cuda.bindings.driver.cuGreenCtxStreamCreate +.. autoattribute:: cuda.bindings.driver.RESOURCE_ABI_VERSION +.. autoattribute:: cuda.bindings.driver.RESOURCE_ABI_EXTERNAL_BYTES +.. autoattribute:: cuda.bindings.driver._CONCAT_INNER +.. autoattribute:: cuda.bindings.driver._CONCAT_OUTER + +EGL Interoperability +-------------------- + +This section describes the EGL interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuGraphicsEGLRegisterImage +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerConnect +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerConnectWithFlags +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerDisconnect +..
autofunction:: cuda.bindings.driver.cuEGLStreamConsumerAcquireFrame +.. autofunction:: cuda.bindings.driver.cuEGLStreamConsumerReleaseFrame +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerConnect +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerDisconnect +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerPresentFrame +.. autofunction:: cuda.bindings.driver.cuEGLStreamProducerReturnFrame +.. autofunction:: cuda.bindings.driver.cuGraphicsResourceGetMappedEglFrame +.. autofunction:: cuda.bindings.driver.cuEventCreateFromEGLSync + +OpenGL Interoperability +----------------------- + +This section describes the OpenGL interoperability functions of the low-level CUDA driver application programming interface. Note that mapping of OpenGL resources is performed with the graphics API agnostic, resource mapping interface described in Graphics Interoperability. + +.. autoclass:: cuda.bindings.driver.CUGLDeviceList + + .. autoattribute:: cuda.bindings.driver.CUGLDeviceList.CU_GL_DEVICE_LIST_ALL + + + The CUDA devices for all GPUs used by the current OpenGL context + + + .. autoattribute:: cuda.bindings.driver.CUGLDeviceList.CU_GL_DEVICE_LIST_CURRENT_FRAME + + + The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame + + + .. autoattribute:: cuda.bindings.driver.CUGLDeviceList.CU_GL_DEVICE_LIST_NEXT_FRAME + + + The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame + +.. autofunction:: cuda.bindings.driver.cuGraphicsGLRegisterBuffer +.. autofunction:: cuda.bindings.driver.cuGraphicsGLRegisterImage +.. autofunction:: cuda.bindings.driver.cuGLGetDevices + +Profiler Control +---------------- + +This section describes the profiler control functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuProfilerStart +.. autofunction:: cuda.bindings.driver.cuProfilerStop + +VDPAU Interoperability +---------------------- + +This section describes the VDPAU interoperability functions of the low-level CUDA driver application programming interface. + +.. autofunction:: cuda.bindings.driver.cuVDPAUGetDevice +.. autofunction:: cuda.bindings.driver.cuVDPAUCtxCreate +.. autofunction:: cuda.bindings.driver.cuGraphicsVDPAURegisterVideoSurface +.. autofunction:: cuda.bindings.driver.cuGraphicsVDPAURegisterOutputSurface diff --git a/docs_src/source/module/nvrtc.rst b/docs_src/source/module/nvrtc.rst index cc7d31be..2a1297c1 100644 --- a/docs_src/source/module/nvrtc.rst +++ b/docs_src/source/module/nvrtc.rst @@ -7,80 +7,80 @@ Error Handling NVRTC defines the following enumeration type and function for API call error handling. -.. autoclass:: cuda.nvrtc.nvrtcResult +.. autoclass:: cuda.bindings.nvrtc.nvrtcResult - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_SUCCESS + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_SUCCESS - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_OUT_OF_MEMORY + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_OUT_OF_MEMORY - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_PROGRAM_CREATION_FAILURE + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_PROGRAM_CREATION_FAILURE - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_INPUT + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_INPUT - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_PROGRAM + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_PROGRAM - .. 
autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_OPTION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INVALID_OPTION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_COMPILATION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_COMPILATION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_BUILTIN_OPERATION_FAILURE + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_BUILTIN_OPERATION_FAILURE - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_INTERNAL_ERROR + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_INTERNAL_ERROR - .. autoattribute:: cuda.nvrtc.nvrtcResult.NVRTC_ERROR_TIME_FILE_WRITE_FAILED + .. autoattribute:: cuda.bindings.nvrtc.nvrtcResult.NVRTC_ERROR_TIME_FILE_WRITE_FAILED -.. autofunction:: cuda.nvrtc.nvrtcGetErrorString +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetErrorString General Information Query ------------------------- NVRTC defines the following function for general information query. -.. autofunction:: cuda.nvrtc.nvrtcVersion -.. autofunction:: cuda.nvrtc.nvrtcGetNumSupportedArchs -.. autofunction:: cuda.nvrtc.nvrtcGetSupportedArchs +.. autofunction:: cuda.bindings.nvrtc.nvrtcVersion +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetNumSupportedArchs +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetSupportedArchs Compilation ----------- NVRTC defines the following type and functions for actual compilation. -.. autoclass:: cuda.nvrtc.nvrtcProgram -.. autofunction:: cuda.nvrtc.nvrtcCreateProgram -.. autofunction:: cuda.nvrtc.nvrtcDestroyProgram -.. autofunction:: cuda.nvrtc.nvrtcCompileProgram -.. autofunction:: cuda.nvrtc.nvrtcGetPTXSize -.. autofunction:: cuda.nvrtc.nvrtcGetPTX -.. autofunction:: cuda.nvrtc.nvrtcGetCUBINSize -.. autofunction:: cuda.nvrtc.nvrtcGetCUBIN -.. autofunction:: cuda.nvrtc.nvrtcGetNVVMSize -.. autofunction:: cuda.nvrtc.nvrtcGetNVVM -.. autofunction:: cuda.nvrtc.nvrtcGetLTOIRSize -.. autofunction:: cuda.nvrtc.nvrtcGetLTOIR -.. autofunction:: cuda.nvrtc.nvrtcGetOptiXIRSize -.. autofunction:: cuda.nvrtc.nvrtcGetOptiXIR -.. autofunction:: cuda.nvrtc.nvrtcGetProgramLogSize -.. autofunction:: cuda.nvrtc.nvrtcGetProgramLog -.. autofunction:: cuda.nvrtc.nvrtcAddNameExpression -.. autofunction:: cuda.nvrtc.nvrtcGetLoweredName +.. autoclass:: cuda.bindings.nvrtc.nvrtcProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcCreateProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcDestroyProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcCompileProgram +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetPTXSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetPTX +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetCUBINSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetCUBIN +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetNVVMSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetNVVM +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetLTOIRSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetLTOIR +.. 
autofunction:: cuda.bindings.nvrtc.nvrtcGetOptiXIRSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetOptiXIR +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetProgramLogSize +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetProgramLog +.. autofunction:: cuda.bindings.nvrtc.nvrtcAddNameExpression +.. autofunction:: cuda.bindings.nvrtc.nvrtcGetLoweredName Supported Compile Options ------------------------- diff --git a/docs_src/source/module/runtime.rst b/docs_src/source/module/runtime.rst new file mode 100644 index 00000000..55687b68 --- /dev/null +++ b/docs_src/source/module/runtime.rst @@ -0,0 +1,5274 @@ +------- +runtime +------- + +Profiler Control +---------------- + +This section describes the profiler control functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaProfilerStart +.. autofunction:: cuda.bindings.runtime.cudaProfilerStop + +Device Management +----------------- + +This section describes the device management functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaDeviceReset +.. autofunction:: cuda.bindings.runtime.cudaDeviceSynchronize +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetLimit +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetLimit +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetTexture1DLinearMaxWidth +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetCacheConfig +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetStreamPriorityRange +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetCacheConfig +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetByPCIBusId +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetPCIBusId +.. autofunction:: cuda.bindings.runtime.cudaIpcGetEventHandle +.. autofunction:: cuda.bindings.runtime.cudaIpcOpenEventHandle +.. autofunction:: cuda.bindings.runtime.cudaIpcGetMemHandle +.. autofunction:: cuda.bindings.runtime.cudaIpcOpenMemHandle +.. autofunction:: cuda.bindings.runtime.cudaIpcCloseMemHandle +.. autofunction:: cuda.bindings.runtime.cudaDeviceFlushGPUDirectRDMAWrites +.. autofunction:: cuda.bindings.runtime.cudaDeviceRegisterAsyncNotification +.. autofunction:: cuda.bindings.runtime.cudaDeviceUnregisterAsyncNotification +.. autofunction:: cuda.bindings.runtime.cudaGetDeviceCount +.. autofunction:: cuda.bindings.runtime.cudaGetDeviceProperties +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetDefaultMemPool +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetMemPool +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetMemPool +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetNvSciSyncAttributes +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetP2PAttribute +.. autofunction:: cuda.bindings.runtime.cudaChooseDevice +.. autofunction:: cuda.bindings.runtime.cudaInitDevice +.. autofunction:: cuda.bindings.runtime.cudaSetDevice +.. autofunction:: cuda.bindings.runtime.cudaGetDevice +.. autofunction:: cuda.bindings.runtime.cudaSetDeviceFlags +.. autofunction:: cuda.bindings.runtime.cudaGetDeviceFlags + +Error Handling +-------------- + +This section describes the error handling functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGetLastError +.. autofunction:: cuda.bindings.runtime.cudaPeekAtLastError +.. autofunction:: cuda.bindings.runtime.cudaGetErrorName +..
autofunction:: cuda.bindings.runtime.cudaGetErrorString + +Stream Management +----------------- + +This section describes the stream management functions of the CUDA runtime application programming interface. + +.. autoclass:: cuda.bindings.runtime.cudaStreamCallback_t +.. autofunction:: cuda.bindings.runtime.cudaStreamCreate +.. autofunction:: cuda.bindings.runtime.cudaStreamCreateWithFlags +.. autofunction:: cuda.bindings.runtime.cudaStreamCreateWithPriority +.. autofunction:: cuda.bindings.runtime.cudaStreamGetPriority +.. autofunction:: cuda.bindings.runtime.cudaStreamGetFlags +.. autofunction:: cuda.bindings.runtime.cudaStreamGetId +.. autofunction:: cuda.bindings.runtime.cudaCtxResetPersistingL2Cache +.. autofunction:: cuda.bindings.runtime.cudaStreamCopyAttributes +.. autofunction:: cuda.bindings.runtime.cudaStreamGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaStreamSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaStreamDestroy +.. autofunction:: cuda.bindings.runtime.cudaStreamWaitEvent +.. autofunction:: cuda.bindings.runtime.cudaStreamAddCallback +.. autofunction:: cuda.bindings.runtime.cudaStreamSynchronize +.. autofunction:: cuda.bindings.runtime.cudaStreamQuery +.. autofunction:: cuda.bindings.runtime.cudaStreamAttachMemAsync +.. autofunction:: cuda.bindings.runtime.cudaStreamBeginCapture +.. autofunction:: cuda.bindings.runtime.cudaStreamBeginCaptureToGraph +.. autofunction:: cuda.bindings.runtime.cudaThreadExchangeStreamCaptureMode +.. autofunction:: cuda.bindings.runtime.cudaStreamEndCapture +.. autofunction:: cuda.bindings.runtime.cudaStreamIsCapturing +.. autofunction:: cuda.bindings.runtime.cudaStreamGetCaptureInfo +.. autofunction:: cuda.bindings.runtime.cudaStreamGetCaptureInfo_v3 +.. autofunction:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies +.. autofunction:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies_v2 + +Event Management +---------------- + +This section describes the event management functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaEventCreate +.. autofunction:: cuda.bindings.runtime.cudaEventCreateWithFlags +.. autofunction:: cuda.bindings.runtime.cudaEventRecord +.. autofunction:: cuda.bindings.runtime.cudaEventRecordWithFlags +.. autofunction:: cuda.bindings.runtime.cudaEventQuery +.. autofunction:: cuda.bindings.runtime.cudaEventSynchronize +.. autofunction:: cuda.bindings.runtime.cudaEventDestroy +.. autofunction:: cuda.bindings.runtime.cudaEventElapsedTime + +External Resource Interoperability +---------------------------------- + +This section describes the external resource interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaImportExternalMemory +.. autofunction:: cuda.bindings.runtime.cudaExternalMemoryGetMappedBuffer +.. autofunction:: cuda.bindings.runtime.cudaExternalMemoryGetMappedMipmappedArray +.. autofunction:: cuda.bindings.runtime.cudaDestroyExternalMemory +.. autofunction:: cuda.bindings.runtime.cudaImportExternalSemaphore +.. autofunction:: cuda.bindings.runtime.cudaSignalExternalSemaphoresAsync +.. autofunction:: cuda.bindings.runtime.cudaWaitExternalSemaphoresAsync +.. autofunction:: cuda.bindings.runtime.cudaDestroyExternalSemaphore + +Execution Control +----------------- + +This section describes the execution control functions of the CUDA runtime application programming interface. 
+ + + +Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. + +.. autofunction:: cuda.bindings.runtime.cudaFuncSetCacheConfig +.. autofunction:: cuda.bindings.runtime.cudaFuncGetAttributes +.. autofunction:: cuda.bindings.runtime.cudaFuncSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaLaunchHostFunc + +Occupancy +--------- + +This section describes the occupancy calculation functions of the CUDA runtime application programming interface. + + + +Besides the occupancy calculator functions (cudaOccupancyMaxActiveBlocksPerMultiprocessor and cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags), there are also C++ only occupancy-based launch configuration functions documented in the C++ API Routines module. + + + +See cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), and cudaOccupancyAvailableDynamicSMemPerBlock (C++ API). + +.. autofunction:: cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessor +.. autofunction:: cuda.bindings.runtime.cudaOccupancyAvailableDynamicSMemPerBlock +.. autofunction:: cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + +Memory Management +----------------- + +This section describes the memory management functions of the CUDA runtime application programming interface. + + + +Some functions have overloaded C++ API template versions documented separately in the C++ API Routines module. + +.. autofunction:: cuda.bindings.runtime.cudaMallocManaged +.. autofunction:: cuda.bindings.runtime.cudaMalloc +.. autofunction:: cuda.bindings.runtime.cudaMallocHost +.. autofunction:: cuda.bindings.runtime.cudaMallocPitch +.. autofunction:: cuda.bindings.runtime.cudaMallocArray +.. autofunction:: cuda.bindings.runtime.cudaFree +.. autofunction:: cuda.bindings.runtime.cudaFreeHost +.. autofunction:: cuda.bindings.runtime.cudaFreeArray +.. autofunction:: cuda.bindings.runtime.cudaFreeMipmappedArray +.. autofunction:: cuda.bindings.runtime.cudaHostAlloc +.. autofunction:: cuda.bindings.runtime.cudaHostRegister +.. autofunction:: cuda.bindings.runtime.cudaHostUnregister +.. autofunction:: cuda.bindings.runtime.cudaHostGetDevicePointer +.. autofunction:: cuda.bindings.runtime.cudaHostGetFlags +.. autofunction:: cuda.bindings.runtime.cudaMalloc3D +.. autofunction:: cuda.bindings.runtime.cudaMalloc3DArray +.. autofunction:: cuda.bindings.runtime.cudaMallocMipmappedArray +.. autofunction:: cuda.bindings.runtime.cudaGetMipmappedArrayLevel +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3D +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3DPeer +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy3DPeerAsync +.. autofunction:: cuda.bindings.runtime.cudaMemGetInfo +.. autofunction:: cuda.bindings.runtime.cudaArrayGetInfo +.. autofunction:: cuda.bindings.runtime.cudaArrayGetPlane +.. autofunction:: cuda.bindings.runtime.cudaArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.runtime.cudaMipmappedArrayGetMemoryRequirements +.. autofunction:: cuda.bindings.runtime.cudaArrayGetSparseProperties +.. autofunction:: cuda.bindings.runtime.cudaMipmappedArrayGetSparseProperties +.. autofunction:: cuda.bindings.runtime.cudaMemcpy +.. autofunction:: cuda.bindings.runtime.cudaMemcpyPeer +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2D +..
autofunction:: cuda.bindings.runtime.cudaMemcpy2DToArray +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DFromArray +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DArrayToArray +.. autofunction:: cuda.bindings.runtime.cudaMemcpyAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpyPeerAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DToArrayAsync +.. autofunction:: cuda.bindings.runtime.cudaMemcpy2DFromArrayAsync +.. autofunction:: cuda.bindings.runtime.cudaMemset +.. autofunction:: cuda.bindings.runtime.cudaMemset2D +.. autofunction:: cuda.bindings.runtime.cudaMemset3D +.. autofunction:: cuda.bindings.runtime.cudaMemsetAsync +.. autofunction:: cuda.bindings.runtime.cudaMemset2DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemset3DAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPrefetchAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPrefetchAsync_v2 +.. autofunction:: cuda.bindings.runtime.cudaMemAdvise +.. autofunction:: cuda.bindings.runtime.cudaMemAdvise_v2 +.. autofunction:: cuda.bindings.runtime.cudaMemRangeGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaMemRangeGetAttributes +.. autofunction:: cuda.bindings.runtime.make_cudaPitchedPtr +.. autofunction:: cuda.bindings.runtime.make_cudaPos +.. autofunction:: cuda.bindings.runtime.make_cudaExtent + +Stream Ordered Memory Allocator +------------------------------- + +**Overview** + + + +The asynchronous allocator allows the user to allocate and free in stream order. All asynchronous accesses of the allocation must happen between the stream executions of the allocation and the free. If the memory is accessed outside of the promised stream order, a use before allocation / use after free error will cause undefined behavior. + +The allocator is free to reallocate the memory as long as it can guarantee that compliant memory accesses will not overlap temporally. The allocator may refer to internal stream ordering as well as inter-stream dependencies (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + + + + + +**Supported Platforms** + + + +Whether or not a device supports the integrated stream ordered memory allocator may be queried by calling cudaDeviceGetAttribute() with the device attribute cudaDevAttrMemoryPoolsSupported. + +.. autofunction:: cuda.bindings.runtime.cudaMallocAsync +.. autofunction:: cuda.bindings.runtime.cudaFreeAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPoolTrimTo +.. autofunction:: cuda.bindings.runtime.cudaMemPoolSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaMemPoolGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaMemPoolSetAccess +.. autofunction:: cuda.bindings.runtime.cudaMemPoolGetAccess +.. autofunction:: cuda.bindings.runtime.cudaMemPoolCreate +.. autofunction:: cuda.bindings.runtime.cudaMemPoolDestroy +.. autofunction:: cuda.bindings.runtime.cudaMallocFromPoolAsync +.. autofunction:: cuda.bindings.runtime.cudaMemPoolExportToShareableHandle +.. autofunction:: cuda.bindings.runtime.cudaMemPoolImportFromShareableHandle +.. autofunction:: cuda.bindings.runtime.cudaMemPoolExportPointer +.. autofunction:: cuda.bindings.runtime.cudaMemPoolImportPointer + +Unified Addressing +------------------ + +This section describes the unified addressing functions of the CUDA runtime application programming interface.
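+Before the detailed discussion, a small sketch of the pointer-introspection pattern this section revolves around. It assumes the bindings' tuple-return convention; ``where_is`` is a hypothetical helper name and the error handling is condensed:
+
+.. code-block:: python
+
+    from cuda.bindings import runtime
+
+    def where_is(ptr: int):
+        # cudaPointerGetAttributes reports whether an address is backed by
+        # host, device, or managed memory, and on which device it resides.
+        err, attrs = runtime.cudaPointerGetAttributes(ptr)
+        assert err == runtime.cudaError_t.cudaSuccess
+        return attrs.type, attrs.device
+
+    err, dptr = runtime.cudaMalloc(1 << 20)   # 1 MiB of device memory
+    print(where_is(int(dptr)))                # expected: device memory, device 0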
+ + + + + +**Overview** + + + +CUDA devices can share a unified address space with the host. + + For these devices there is no distinction between a device pointer and a host pointer -- the same pointer value may be used to access memory from the host program and from a kernel running on the device (with exceptions enumerated below). + + + + + +**Supported Platforms** + + + +Whether or not a device supports unified addressing may be queried by calling cudaGetDeviceProperties() with the device property cudaDeviceProp::unifiedAddressing. + +Unified addressing is automatically enabled in 64-bit processes. + + + + + +**Looking Up Information from Pointer Values** + + + +It is possible to look up information about the memory which backs a pointer value. For instance, one may want to know if a pointer points to host or device memory. As another example, in the case of device memory, one may want to know on which CUDA device the memory resides. These properties may be queried using the function cudaPointerGetAttributes(). + +Since pointers are unique, it is not necessary to specify information about the pointers specified to cudaMemcpy() and other copy functions. + + The copy direction cudaMemcpyDefault may be used to specify that the CUDA runtime should infer the location of the pointer from its value. + + + + + +**Automatic Mapping of Host Allocated Host Memory** + + + +All host memory allocated through all devices using cudaMallocHost() and cudaHostAlloc() is always directly accessible from all devices that support unified addressing. This is the case regardless of whether or not the flags cudaHostAllocPortable and cudaHostAllocMapped are specified. + +The pointer value through which allocated host memory may be accessed in kernels on all devices that support unified addressing is the same as the pointer value through which that memory is accessed on the host. It is not necessary to call cudaHostGetDevicePointer() to get the device pointer for these allocations. + + + +Note that this is not the case for memory allocated using the flag cudaHostAllocWriteCombined, as discussed below. + + + + + +**Direct Access of Peer Memory** + + + +Upon enabling direct access from a device that supports unified addressing to another peer device that supports unified addressing using cudaDeviceEnablePeerAccess(), all memory allocated in the peer device using cudaMalloc() and cudaMallocPitch() will immediately be accessible by the current device. The device pointer value through which any peer's memory may be accessed in the current device is the same pointer value through which that memory may be accessed from the peer device. + + + + + +**Exceptions, Disjoint Addressing** + + + +Not all memory may be accessed on devices through the same pointer value through which they are accessed on the host. These exceptions are host memory registered using cudaHostRegister() and host memory allocated using the flag cudaHostAllocWriteCombined. For these exceptions, there exists a distinct host and device address for the memory. The device address is guaranteed to not overlap any valid host pointer range and is guaranteed to have the same value across all devices that support unified addressing. + + + +This device address may be queried using cudaHostGetDevicePointer() when a device using unified addressing is current. Either the host or the unified device pointer value may be used to refer to this memory in cudaMemcpy() and similar functions using the cudaMemcpyDefault memory direction. + +..
autofunction:: cuda.bindings.runtime.cudaPointerGetAttributes + +Peer Device Memory Access +------------------------- + +This section describes the peer device memory access functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaDeviceCanAccessPeer +.. autofunction:: cuda.bindings.runtime.cudaDeviceEnablePeerAccess +.. autofunction:: cuda.bindings.runtime.cudaDeviceDisablePeerAccess + +OpenGL Interoperability +----------------------- + +This section describes the OpenGL interoperability functions of the CUDA runtime application programming interface. Note that mapping of OpenGL resources is performed with the graphics-API-agnostic resource mapping interface described in Graphics Interoperability. + +.. autoclass:: cuda.bindings.runtime.cudaGLDeviceList + + .. autoattribute:: cuda.bindings.runtime.cudaGLDeviceList.cudaGLDeviceListAll + + + The CUDA devices for all GPUs used by the current OpenGL context + + + .. autoattribute:: cuda.bindings.runtime.cudaGLDeviceList.cudaGLDeviceListCurrentFrame + + + The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame + + + .. autoattribute:: cuda.bindings.runtime.cudaGLDeviceList.cudaGLDeviceListNextFrame + + + The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame + +.. autofunction:: cuda.bindings.runtime.cudaGLGetDevices +.. autofunction:: cuda.bindings.runtime.cudaGraphicsGLRegisterImage +.. autofunction:: cuda.bindings.runtime.cudaGraphicsGLRegisterBuffer + +Direct3D 9 Interoperability +--------------------------- + + + + +Direct3D 10 Interoperability +---------------------------- + + + + +Direct3D 11 Interoperability +---------------------------- + + + + +VDPAU Interoperability +---------------------- + +This section describes the VDPAU interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaVDPAUGetDevice +.. autofunction:: cuda.bindings.runtime.cudaVDPAUSetVDPAUDevice +.. autofunction:: cuda.bindings.runtime.cudaGraphicsVDPAURegisterVideoSurface +.. autofunction:: cuda.bindings.runtime.cudaGraphicsVDPAURegisterOutputSurface + +EGL Interoperability +-------------------- + +This section describes the EGL interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGraphicsEGLRegisterImage +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerConnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerConnectWithFlags +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerDisconnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerAcquireFrame +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamConsumerReleaseFrame +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerConnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerDisconnect +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerPresentFrame +.. autofunction:: cuda.bindings.runtime.cudaEGLStreamProducerReturnFrame +.. autofunction:: cuda.bindings.runtime.cudaGraphicsResourceGetMappedEglFrame +.. autofunction:: cuda.bindings.runtime.cudaEventCreateFromEGLSync + +Graphics Interoperability +------------------------- + +This section describes the graphics interoperability functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGraphicsUnregisterResource +..
autofunction:: cuda.bindings.runtime.cudaGraphicsResourceSetMapFlags +.. autofunction:: cuda.bindings.runtime.cudaGraphicsMapResources +.. autofunction:: cuda.bindings.runtime.cudaGraphicsUnmapResources +.. autofunction:: cuda.bindings.runtime.cudaGraphicsResourceGetMappedPointer +.. autofunction:: cuda.bindings.runtime.cudaGraphicsSubResourceGetMappedArray +.. autofunction:: cuda.bindings.runtime.cudaGraphicsResourceGetMappedMipmappedArray + +Texture Object Management +------------------------- + +This section describes the low-level texture object management functions of the CUDA runtime application programming interface. The texture object API is only supported on devices of compute capability 3.0 or higher. + +.. autofunction:: cuda.bindings.runtime.cudaGetChannelDesc +.. autofunction:: cuda.bindings.runtime.cudaCreateChannelDesc +.. autofunction:: cuda.bindings.runtime.cudaCreateTextureObject +.. autofunction:: cuda.bindings.runtime.cudaDestroyTextureObject +.. autofunction:: cuda.bindings.runtime.cudaGetTextureObjectResourceDesc +.. autofunction:: cuda.bindings.runtime.cudaGetTextureObjectTextureDesc +.. autofunction:: cuda.bindings.runtime.cudaGetTextureObjectResourceViewDesc + +Surface Object Management +------------------------- + +This section describes the low-level surface object management functions of the CUDA runtime application programming interface. The surface object API is only supported on devices of compute capability 3.0 or higher. + +.. autofunction:: cuda.bindings.runtime.cudaCreateSurfaceObject +.. autofunction:: cuda.bindings.runtime.cudaDestroySurfaceObject +.. autofunction:: cuda.bindings.runtime.cudaGetSurfaceObjectResourceDesc + +Version Management +------------------ + + + +.. autofunction:: cuda.bindings.runtime.cudaDriverGetVersion +.. autofunction:: cuda.bindings.runtime.cudaRuntimeGetVersion +.. autofunction:: cuda.bindings.runtime.getLocalRuntimeVersion + +Graph Management +---------------- + +This section describes the graph management functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGraphCreate +.. autofunction:: cuda.bindings.runtime.cudaGraphAddKernelNode +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeCopyAttributes +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeGetAttribute +.. autofunction:: cuda.bindings.runtime.cudaGraphKernelNodeSetAttribute +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemcpyNode +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemcpyNode1D +.. autofunction:: cuda.bindings.runtime.cudaGraphMemcpyNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams1D +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemsetNode +.. autofunction:: cuda.bindings.runtime.cudaGraphMemsetNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphMemsetNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddHostNode +.. autofunction:: cuda.bindings.runtime.cudaGraphHostNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphHostNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddChildGraphNode +.. autofunction:: cuda.bindings.runtime.cudaGraphChildGraphNodeGetGraph +.. autofunction:: cuda.bindings.runtime.cudaGraphAddEmptyNode +..
autofunction:: cuda.bindings.runtime.cudaGraphAddEventRecordNode +.. autofunction:: cuda.bindings.runtime.cudaGraphEventRecordNodeGetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphAddEventWaitNode +.. autofunction:: cuda.bindings.runtime.cudaGraphEventWaitNodeGetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphAddExternalSemaphoresSignalNode +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresSignalNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddExternalSemaphoresWaitNode +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresWaitNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExternalSemaphoresWaitNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemAllocNode +.. autofunction:: cuda.bindings.runtime.cudaGraphMemAllocNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphAddMemFreeNode +.. autofunction:: cuda.bindings.runtime.cudaGraphMemFreeNodeGetParams +.. autofunction:: cuda.bindings.runtime.cudaDeviceGraphMemTrim +.. autofunction:: cuda.bindings.runtime.cudaDeviceGetGraphMemAttribute +.. autofunction:: cuda.bindings.runtime.cudaDeviceSetGraphMemAttribute +.. autofunction:: cuda.bindings.runtime.cudaGraphClone +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeFindInClone +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetType +.. autofunction:: cuda.bindings.runtime.cudaGraphGetNodes +.. autofunction:: cuda.bindings.runtime.cudaGraphGetRootNodes +.. autofunction:: cuda.bindings.runtime.cudaGraphGetEdges +.. autofunction:: cuda.bindings.runtime.cudaGraphGetEdges_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependencies +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependencies_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependentNodes +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetDependentNodes_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphAddDependencies +.. autofunction:: cuda.bindings.runtime.cudaGraphAddDependencies_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphRemoveDependencies +.. autofunction:: cuda.bindings.runtime.cudaGraphRemoveDependencies_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphDestroyNode +.. autofunction:: cuda.bindings.runtime.cudaGraphInstantiate +.. autofunction:: cuda.bindings.runtime.cudaGraphInstantiateWithFlags +.. autofunction:: cuda.bindings.runtime.cudaGraphInstantiateWithParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecGetFlags +.. autofunction:: cuda.bindings.runtime.cudaGraphExecKernelNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecMemcpyNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecMemcpyNodeSetParams1D +.. autofunction:: cuda.bindings.runtime.cudaGraphExecMemsetNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecHostNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecChildGraphNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecEventRecordNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphExecEventWaitNodeSetEvent +.. autofunction:: cuda.bindings.runtime.cudaGraphExecExternalSemaphoresSignalNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecExternalSemaphoresWaitNodeSetParams +.. 
autofunction:: cuda.bindings.runtime.cudaGraphNodeSetEnabled +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeGetEnabled +.. autofunction:: cuda.bindings.runtime.cudaGraphExecUpdate +.. autofunction:: cuda.bindings.runtime.cudaGraphUpload +.. autofunction:: cuda.bindings.runtime.cudaGraphLaunch +.. autofunction:: cuda.bindings.runtime.cudaGraphExecDestroy +.. autofunction:: cuda.bindings.runtime.cudaGraphDestroy +.. autofunction:: cuda.bindings.runtime.cudaGraphDebugDotPrint +.. autofunction:: cuda.bindings.runtime.cudaUserObjectCreate +.. autofunction:: cuda.bindings.runtime.cudaUserObjectRetain +.. autofunction:: cuda.bindings.runtime.cudaUserObjectRelease +.. autofunction:: cuda.bindings.runtime.cudaGraphRetainUserObject +.. autofunction:: cuda.bindings.runtime.cudaGraphReleaseUserObject +.. autofunction:: cuda.bindings.runtime.cudaGraphAddNode +.. autofunction:: cuda.bindings.runtime.cudaGraphAddNode_v2 +.. autofunction:: cuda.bindings.runtime.cudaGraphNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphExecNodeSetParams +.. autofunction:: cuda.bindings.runtime.cudaGraphConditionalHandleCreate + +Driver Entry Point Access +------------------------- + +This section describes the driver entry point access functions of the CUDA runtime application programming interface. + +.. autofunction:: cuda.bindings.runtime.cudaGetDriverEntryPoint +.. autofunction:: cuda.bindings.runtime.cudaGetDriverEntryPointByVersion + +C++ API Routines +---------------- +C++-style interface built on top of the CUDA runtime API. + +This section describes the C++ high level API functions of the CUDA runtime application programming interface. To use these functions, your application needs to be compiled with the ``nvcc`` compiler. + + +Interactions with the CUDA Driver API +------------------------------------- + +This section describes the interactions between the CUDA Driver API and the CUDA Runtime API. + + + + + +**Primary Contexts** + + + +There exists a one-to-one relationship between CUDA devices in the CUDA Runtime API and ::CUcontext s in the CUDA Driver API within a process. The specific context which the CUDA Runtime API uses for a device is called the device's primary context. From the perspective of the CUDA Runtime API, a device and its primary context are synonymous. + + + + + +**Initialization and Tear-Down** + + + +CUDA Runtime API calls operate on the CUDA Driver API ::CUcontext which is current to the calling host thread. + +The function cudaInitDevice() ensures that the primary context is initialized for the requested device but does not make it current to the calling thread. + +The function cudaSetDevice() initializes the primary context for the specified device and makes it current to the calling thread by calling ::cuCtxSetCurrent(). + +The CUDA Runtime API will automatically initialize the primary context for a device at the first CUDA Runtime API call which requires an active context. If no ::CUcontext is current to the calling thread when a CUDA Runtime API call which requires an active context is made, then the primary context for a device will be selected, made current to the calling thread, and initialized. + +The context which the CUDA Runtime API initializes will be initialized using the parameters specified by the CUDA Runtime API functions cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), ::cudaD3D10SetDirect3DDevice(), ::cudaD3D11SetDirect3DDevice(), cudaGLSetGLDevice(), and cudaVDPAUSetVDPAUDevice().
Note that these functions will fail with cudaErrorSetOnActiveProcess if they are called when the primary context for the specified device has already been initialized (or, in the case of cudaSetDeviceFlags(), if the current device has already been initialized). + +Primary contexts will remain active until they are explicitly deinitialized using cudaDeviceReset(). The function cudaDeviceReset() will deinitialize the primary context for the calling thread's current device immediately. The context will remain current to all of the threads that it was current to. The next CUDA Runtime API call on any thread which requires an active context will trigger the reinitialization of that device's primary context. + +Note that primary contexts are shared resources. It is recommended that the primary context not be reset except just before exit or to recover from an unspecified launch failure. + + + + + +**Context Interoperability** + + + +Note that the use of multiple ::CUcontext s per device within a single process will substantially degrade performance and is strongly discouraged. Instead, it is highly recommended that the implicit one-to-one device-to-context mapping for the process provided by the CUDA Runtime API be used. + +If a non-primary ::CUcontext created by the CUDA Driver API is current to a thread then the CUDA Runtime API calls to that thread will operate on that ::CUcontext, with some exceptions listed below. Interoperability between data types is discussed in the following sections. + +The function cudaPointerGetAttributes() will return the error cudaErrorIncompatibleDriverContext if the pointer being queried was allocated by a non-primary context. The function cudaDeviceEnablePeerAccess() and the rest of the peer access API may not be called when a non-primary ::CUcontext is current. + + To use the pointer query and peer access APIs with a context created using the CUDA Driver API, it is necessary that the CUDA Driver API be used to access these features. + +All CUDA Runtime API state (e.g., global variables' addresses and values) travels with its underlying ::CUcontext. In particular, if a ::CUcontext is moved from one thread to another then all CUDA Runtime API state will move to that thread as well. + +Please note that attaching to legacy contexts (those with a version of 3010 as returned by ::cuCtxGetApiVersion()) is not possible. The CUDA Runtime will return cudaErrorIncompatibleDriverContext in such cases. + + + + + +**Interactions between CUstream and cudaStream_t** + + + +The types ::CUstream and cudaStream_t are identical and may be used interchangeably. + + + + + +**Interactions between CUevent and cudaEvent_t** + + + +The types ::CUevent and cudaEvent_t are identical and may be used interchangeably. + + + + + +**Interactions between CUarray and cudaArray_t** + + + +The types ::CUarray and struct ::cudaArray * represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUarray in a CUDA Runtime API function which takes a struct ::cudaArray *, it is necessary to explicitly cast the ::CUarray to a struct ::cudaArray *. + +In order to use a struct ::cudaArray * in a CUDA Driver API function which takes a ::CUarray, it is necessary to explicitly cast the struct ::cudaArray * to a ::CUarray.
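+In these Python bindings, the analogue of the explicit C cast above is reconstructing the sibling wrapper type from the shared handle value. A sketch, assuming the wrapper classes accept an address in their constructor and expose it via ``int()``:
+
+.. code-block:: python
+
+    from cuda.bindings import driver, runtime
+
+    # Assumes cuInit() has been called and a context is current.
+    desc = driver.CUDA_ARRAY_DESCRIPTOR()
+    desc.Width, desc.Height = 64, 64
+    desc.Format = driver.CUarray_format.CU_AD_FORMAT_FLOAT
+    desc.NumChannels = 1
+    err, cu_arr = driver.cuArrayCreate(desc)       # driver-side CUarray
+
+    # Reuse the same underlying handle as a runtime cudaArray_t (the "cast").
+    cuda_arr = runtime.cudaArray_t(int(cu_arr))
+    err, chan_desc, extent, flags = runtime.cudaArrayGetInfo(cuda_arr)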
+ + + + + +**Interactions between CUgraphicsResource and cudaGraphicsResource_t** + + + +The types ::CUgraphicsResource and cudaGraphicsResource_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUgraphicsResource in a CUDA Runtime API function which takes a cudaGraphicsResource_t, it is necessary to explicitly cast the ::CUgraphicsResource to a cudaGraphicsResource_t. + +In order to use a cudaGraphicsResource_t in a CUDA Driver API function which takes a ::CUgraphicsResource, it is necessary to explicitly cast the cudaGraphicsResource_t to a ::CUgraphicsResource. + + + + + +**Interactions between CUtexObject and cudaTextureObject_t** + + + +The types ::CUtexObject and cudaTextureObject_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUtexObject in a CUDA Runtime API function which takes a cudaTextureObject_t, it is necessary to explicitly cast the ::CUtexObject to a cudaTextureObject_t. + +In order to use a cudaTextureObject_t in a CUDA Driver API function which takes a ::CUtexObject, it is necessary to explicitly cast the cudaTextureObject_t to a ::CUtexObject. + + + + + +**Interactions between CUsurfObject and cudaSurfaceObject_t** + + + +The types ::CUsurfObject and cudaSurfaceObject_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a ::CUsurfObject in a CUDA Runtime API function which takes a cudaSurfaceObject_t, it is necessary to explicitly cast the ::CUsurfObject to a cudaSurfaceObject_t. + +In order to use a cudaSurfaceObject_t in a CUDA Driver API function which takes a ::CUsurfObject, it is necessary to explicitly cast the cudaSurfaceObject_t to a ::CUsurfObject. + + + + + +**Interactions between CUfunction and cudaFunction_t** + + + +The types ::CUfunction and cudaFunction_t represent the same data type and may be used interchangeably by casting the two types between each other. + +In order to use a cudaFunction_t in a CUDA Driver API function which takes a ::CUfunction, it is necessary to explicitly cast the cudaFunction_t to a ::CUfunction. + +.. autofunction:: cuda.bindings.runtime.cudaGetKernel + +Data types used by CUDA Runtime +------------------------------- + + + +.. autoclass:: cuda.bindings.runtime.cudaEglPlaneDesc_st +.. autoclass:: cuda.bindings.runtime.cudaEglFrame_st +.. autoclass:: cuda.bindings.runtime.cudaChannelFormatDesc +.. autoclass:: cuda.bindings.runtime.cudaArraySparseProperties +.. autoclass:: cuda.bindings.runtime.cudaArrayMemoryRequirements +.. autoclass:: cuda.bindings.runtime.cudaPitchedPtr +.. autoclass:: cuda.bindings.runtime.cudaExtent +.. autoclass:: cuda.bindings.runtime.cudaPos +.. autoclass:: cuda.bindings.runtime.cudaMemcpy3DParms +.. autoclass:: cuda.bindings.runtime.cudaMemcpyNodeParams +.. autoclass:: cuda.bindings.runtime.cudaMemcpy3DPeerParms +.. autoclass:: cuda.bindings.runtime.cudaMemsetParams +.. autoclass:: cuda.bindings.runtime.cudaMemsetParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaAccessPolicyWindow +.. autoclass:: cuda.bindings.runtime.cudaHostNodeParams +.. autoclass:: cuda.bindings.runtime.cudaHostNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaResourceDesc +.. autoclass:: cuda.bindings.runtime.cudaResourceViewDesc +.. autoclass:: cuda.bindings.runtime.cudaPointerAttributes +.. autoclass:: cuda.bindings.runtime.cudaFuncAttributes +.. 
autoclass:: cuda.bindings.runtime.cudaMemLocation +.. autoclass:: cuda.bindings.runtime.cudaMemAccessDesc +.. autoclass:: cuda.bindings.runtime.cudaMemPoolProps +.. autoclass:: cuda.bindings.runtime.cudaMemPoolPtrExportData +.. autoclass:: cuda.bindings.runtime.cudaMemAllocNodeParams +.. autoclass:: cuda.bindings.runtime.cudaMemAllocNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaMemFreeNodeParams +.. autoclass:: cuda.bindings.runtime.CUuuid_st +.. autoclass:: cuda.bindings.runtime.cudaDeviceProp +.. autoclass:: cuda.bindings.runtime.cudaIpcEventHandle_st +.. autoclass:: cuda.bindings.runtime.cudaIpcMemHandle_st +.. autoclass:: cuda.bindings.runtime.cudaMemFabricHandle_st +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryHandleDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryBufferDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryMipmappedArrayDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreHandleDesc +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreSignalParams +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreWaitParams +.. autoclass:: cuda.bindings.runtime.cudaKernelNodeParams +.. autoclass:: cuda.bindings.runtime.cudaKernelNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParams +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreSignalNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParams +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreWaitNodeParamsV2 +.. autoclass:: cuda.bindings.runtime.cudaConditionalNodeParams +.. autoclass:: cuda.bindings.runtime.cudaChildGraphNodeParams +.. autoclass:: cuda.bindings.runtime.cudaEventRecordNodeParams +.. autoclass:: cuda.bindings.runtime.cudaEventWaitNodeParams +.. autoclass:: cuda.bindings.runtime.cudaGraphNodeParams +.. autoclass:: cuda.bindings.runtime.cudaGraphEdgeData_st +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateParams_st +.. autoclass:: cuda.bindings.runtime.cudaGraphExecUpdateResultInfo_st +.. autoclass:: cuda.bindings.runtime.cudaGraphKernelNodeUpdate +.. autoclass:: cuda.bindings.runtime.cudaLaunchMemSyncDomainMap_st +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttributeValue +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttribute_st +.. autoclass:: cuda.bindings.runtime.cudaAsyncNotificationInfo +.. autoclass:: cuda.bindings.runtime.cudaTextureDesc +.. autoclass:: cuda.bindings.runtime.cudaEglFrameType + + .. autoattribute:: cuda.bindings.runtime.cudaEglFrameType.cudaEglFrameTypeArray + + + Frame type CUDA array + + + .. autoattribute:: cuda.bindings.runtime.cudaEglFrameType.cudaEglFrameTypePitch + + + Frame type CUDA pointer + +.. autoclass:: cuda.bindings.runtime.cudaEglResourceLocationFlags + + .. autoattribute:: cuda.bindings.runtime.cudaEglResourceLocationFlags.cudaEglResourceLocationSysmem + + + Resource location sysmem + + + .. autoattribute:: cuda.bindings.runtime.cudaEglResourceLocationFlags.cudaEglResourceLocationVidmem + + + Resource location vidmem + +.. autoclass:: cuda.bindings.runtime.cudaEglColorFormat + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422Planar + + + Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar + + + Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatARGB + + + R/G/B/A four channels in one surface with BGRA byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatRGBA + + + R/G/B/A four channels in one surface with ABGR byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatL + + + single luminance channel in one surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatR + + + single color channel in one surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444Planar + + + Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar + + + Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUYV422 + + + Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatUYVY422 + + + Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatABGR + + + R/G/B/A four channels in one surface with RGBA byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBGRA + + + R/G/B/A four channels in one surface with ARGB byte ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatA + + + Alpha color format - one channel in one surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatRG + + + R/G color format - two channels in one surface with GR byte ordering + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatAYUV + + + Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar + + + Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar + + + Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar + + + Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatVYUY_ER + + + Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatUYVY_ER + + + Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUYV_ER + + + Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVYU_ER + + + Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUVA_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatAYUV_ER + + + Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444Planar_ER + + + Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422Planar_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_ER + + + Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV444SemiPlanar_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV422SemiPlanar_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_ER + + + Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444Planar_ER + + + Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422Planar_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_ER + + + Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444SemiPlanar_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422SemiPlanar_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_ER + + + Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerRGGB + + + Bayer format - one channel in one surface with interleaved RGGB ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerBGGR + + + Bayer format - one channel in one surface with interleaved BGGR ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerGRBG + + + Bayer format - one channel in one surface with interleaved GRBG ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerGBRG + + + Bayer format - one channel in one surface with interleaved GBRG ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10RGGB + + + Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10BGGR + + + Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10GRBG + + + Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10GBRG + + + Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12RGGB + + + Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12BGGR + + + Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12GRBG + + + Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12GBRG + + + Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14RGGB + + + Bayer14 format - one channel in one surface with interleaved RGGB ordering. 
Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14BGGR + + + Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14GRBG + + + Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer14GBRG + + + Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20RGGB + + + Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20BGGR + + + Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20GRBG + + + Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer20GBRG + + + Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU444Planar + + + Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU422Planar + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspRGGB + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspBGGR + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspGRBG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerIspGBRG + + + Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerBCCR + + + Bayer format - one channel in one surface with interleaved BCCR ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerRCCB + + + Bayer format - one channel in one surface with interleaved RCCB ordering. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerCRBC + + + Bayer format - one channel in one surface with interleaved CRBC ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayerCBRC + + + Bayer format - one channel in one surface with interleaved CBRC ordering. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer10CCCC + + + Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12BCCR + + + Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12RCCB + + + Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12CRBC + + + Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12CBRC + + + Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatBayer12CCCC + + + Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY + + + Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_2020 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_2020 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_2020 + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_2020 + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420SemiPlanar_709 + + + Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420SemiPlanar_709 + + + Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUV420Planar_709 + + + Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVU420Planar_709 + + + Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_2020 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_2020 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_422SemiPlanar_709 + + + Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY_ER + + + Extended Range Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY_709_ER + + + Extended Range Color format for single Y plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10_709_ER + + + Extended Range Color format for single Y10 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12_709_ER + + + Extended Range Color format for single Y12 plane. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYUVA + + + Y, U, V, A four channels in one surface, interleaved as AVUY. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatYVYU + + + Y, U, V in one surface, interleaved as YVYU in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatVYUY + + + Y, U, V in one surface, interleaved as VYUY in one channel. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER + + + Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + + + .. autoattribute:: cuda.bindings.runtime.cudaEglColorFormat.cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER + + + Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + +.. autoclass:: cuda.bindings.runtime.cudaError_t + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaSuccess + + + The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`). + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidValue + + + This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMemoryAllocation + + + The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInitializationError + + + The API call failed because the CUDA driver and runtime could not be initialized. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCudartUnloading + + + This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shut down, at a point in time after CUDA driver has been unloaded. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerDisabled + + + This indicates profiler is not initialized for this run. This can happen when the application is running with external profiling tools like visual profiler. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerNotInitialized + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerAlreadyStarted + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorProfilerAlreadyStopped + + + [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidConfiguration + + + This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See :py:obj:`~.cudaDeviceProp` for more device limitations. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidPitchValue + + + This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidSymbol + + + This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidHostPointer + + + This indicates that at least one host pointer passed to the API call is not a valid host pointer. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidDevicePointer + + + This indicates that at least one device pointer passed to the API call is not a valid device pointer. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidTexture + + + This indicates that the texture passed to the API call is not a valid texture. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidTextureBinding + + + This indicates that the texture binding is not valid. This occurs if you call :py:obj:`~.cudaGetTextureAlignmentOffset()` with an unbound texture. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidChannelDescriptor + + + This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by :py:obj:`~.cudaChannelFormatKind`, or if one of the dimensions is invalid. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidMemcpyDirection + + + This indicates that the direction of the memcpy passed to the API call is not one of the types specified by :py:obj:`~.cudaMemcpyKind`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAddressOfConstant + + + This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTextureFetchFailed + + + This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTextureNotBound + + + This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSynchronizationError + + + This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidFilterSetting + + + This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidNormSetting + + + This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMixedDeviceExecution + + + Mixing of device and device emulation code was not allowed. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotYetImplemented + + + This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMemoryValueTooLarge + + + This indicated that an emulated device pointer exceeded the 32-bit address range. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStubLibrary + + + This indicates that the CUDA driver that the application has loaded is a stub library. 
Applications that run with the stub rather than a real driver loaded will result in the CUDA API returning this error. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInsufficientDriver + + + This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCallRequiresNewerDriver + + + This indicates that the API call requires a newer CUDA driver than the one currently installed. Users should install an updated NVIDIA CUDA driver to allow the API call to succeed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidSurface + + + This indicates that the surface passed to the API call is not a valid surface. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDuplicateVariableName + + + This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDuplicateTextureName + + + This indicates that multiple textures (across separate CUDA source files in the application) share the same string name. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDuplicateSurfaceName + + + This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDevicesUnavailable + + + This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of :py:obj:`~.cudaComputeModeProhibited`, :py:obj:`~.cudaComputeModeExclusiveProcess`, or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIncompatibleDriverContext + + + This indicates that the current context is not compatible with the CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see "Interactions with the CUDA Driver API" for more information. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMissingConfiguration + + + The device function being invoked (usually via :py:obj:`~.cudaLaunchKernel()`) was not previously configured via the :py:obj:`~.cudaConfigureCall()` function. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPriorLaunchFailure + + + This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. [Deprecated] + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchMaxDepthExceeded + + + This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches. + + + .. 
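admonition:: Usage sketch
+
+      A hedged, editorial sketch for the driver/runtime mismatch cases above (:py:obj:`~.cudaErrorInsufficientDriver`, :py:obj:`~.cudaErrorCallRequiresNewerDriver`): both versions can be queried and compared; they are encoded as 1000*major + 10*minor.
+
+      .. code-block:: python
+
+         from cuda.bindings import runtime
+
+         err, driver_version = runtime.cudaDriverGetVersion()
+         err, runtime_version = runtime.cudaRuntimeGetVersion()
+         # A driver older than the runtime is the configuration that
+         # produces cudaErrorInsufficientDriver.
+         if driver_version < runtime_version:
+             print("NVIDIA driver update required for this CUDA runtime")
+
+   ..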
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchFileScopedTex + + + This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchFileScopedSurf + + + This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSyncDepthExceeded + + + This error indicates that a call to :py:obj:`~.cudaDeviceSynchronize` made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or user-specified device limit :py:obj:`~.cudaLimitDevRuntimeSyncDepth`. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which :py:obj:`~.cudaDeviceSynchronize` will be called must be specified with the :py:obj:`~.cudaLimitDevRuntimeSyncDepth` limit to the :py:obj:`~.cudaDeviceSetLimit` API before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. Note that :py:obj:`~.cudaDeviceSynchronize` made from device runtime is only supported on devices of compute capability < 9.0. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchPendingCountExceeded + + + This error indicates that a device runtime grid launch failed because the launch would exceed the limit :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount`. For this launch to proceed successfully, :py:obj:`~.cudaDeviceSetLimit` must be called to set the :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount` to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidDeviceFunction + + + The requested device function does not exist or is not compiled for the proper device architecture. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNoDevice + + + This indicates that no CUDA-capable devices were detected by the installed CUDA driver. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidDevice + + + This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDeviceNotLicensed + + + This indicates that the device doesn't have a valid Grid License. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSoftwareValidityNotEstablished + + + By default, the CUDA runtime may perform a minimal set of self-tests, as well as CUDA driver tests, to establish the validity of both. Introduced in CUDA 11.2, this error return indicates that at least one of these tests has failed and the validity of either the runtime or the driver could not be established. + + + .. 
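admonition:: Usage sketch
+
+      An editorial illustration of the device-runtime limits described above; the limit value here is arbitrary, chosen only for the example.
+
+      .. code-block:: python
+
+         from cuda.bindings import runtime
+
+         # Must happen before the host-side launch of a kernel that uses
+         # the device runtime; 4096 is an illustrative upper bound.
+         err, = runtime.cudaDeviceSetLimit(
+             runtime.cudaLimit.cudaLimitDevRuntimePendingLaunchCount, 4096)
+         err, value = runtime.cudaDeviceGetLimit(
+             runtime.cudaLimit.cudaLimitDevRuntimePendingLaunchCount)
+
+   ..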
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStartupFailure + + + This indicates an internal startup failure in the CUDA runtime. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidKernelImage + + + This indicates that the device kernel image is invalid. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDeviceUninitialized + + + This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had :py:obj:`~.cuCtxDestroy()` invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See :py:obj:`~.cuCtxGetApiVersion()` for more details. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMapBufferObjectFailed + + + This indicates that the buffer object could not be mapped. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnmapBufferObjectFailed + + + This indicates that the buffer object could not be unmapped. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorArrayIsMapped + + + This indicates that the specified array is currently mapped and thus cannot be destroyed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAlreadyMapped + + + This indicates that the resource is already mapped. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNoKernelImageForDevice + + + This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAlreadyAcquired + + + This indicates that a resource has already been acquired. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotMapped + + + This indicates that a resource is not mapped. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotMappedAsArray + + + This indicates that a mapped resource is not available for access as an array. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotMappedAsPointer + + + This indicates that a mapped resource is not available for access as a pointer. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorECCUncorrectable + + + This indicates that an uncorrectable ECC error was detected during execution. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedLimit + + + This indicates that the :py:obj:`~.cudaLimit` passed to the API call is not supported by the active device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorDeviceAlreadyInUse + + + This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPeerAccessUnsupported + + + This error indicates that P2P access is not supported across the given devices. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidPtx + + + A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidGraphicsContext + + + This indicates an error with the OpenGL or DirectX context. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNvlinkUncorrectable + + + This indicates that an uncorrectable NVLink error was detected during the execution. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorJitCompilerNotFound + + + This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedPtxVersion + + + This indicates that the provided PTX was compiled with an unsupported toolchain. The most common reason for this is that the PTX was generated by a compiler newer than what is supported by the CUDA driver and PTX JIT compiler. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorJitCompilationDisabled + + + This indicates that the JIT compilation was disabled. The JIT compilation compiles PTX. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedExecAffinity + + + This indicates that the provided execution affinity is not supported by the device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnsupportedDevSideSync + + + This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidSource + + + This indicates that the device kernel source is invalid. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorFileNotFound + + + This indicates that the file specified was not found. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSharedObjectSymbolNotFound + + + This indicates that a link to a shared object failed to resolve. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSharedObjectInitFailed + + + This indicates that initialization of a shared object failed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorOperatingSystem + + + This error indicates that an OS call failed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidResourceHandle + + + This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like :py:obj:`~.cudaStream_t` and :py:obj:`~.cudaEvent_t`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIllegalState + + + This indicates that a resource required by the API call is not in a valid state to perform the requested operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLossyQuery + + + This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or the omission of optional return arguments. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSymbolNotFound + + + This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotReady + + + This indicates that asynchronous operations issued previously have not completed yet. 
This result is not actually an error, but must be indicated differently than :py:obj:`~.cudaSuccess` (which indicates completion). Calls that may return this value include :py:obj:`~.cudaEventQuery()` and :py:obj:`~.cudaStreamQuery()`. A short polling sketch follows the entries below. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIllegalAddress + + + The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchOutOfResources + + + This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to :py:obj:`~.cudaErrorInvalidConfiguration`, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchTimeout + + + This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property :py:obj:`~.kernelExecTimeoutEnabled` for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchIncompatibleTexturing + + + This error indicates a kernel launch that uses an incompatible texturing mode. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPeerAccessAlreadyEnabled + + + This error indicates that a call to :py:obj:`~.cudaDeviceEnablePeerAccess()` is trying to re-enable peer addressing from a context which has already had peer addressing enabled. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorPeerAccessNotEnabled + + + This error indicates that :py:obj:`~.cudaDeviceDisablePeerAccess()` is trying to disable peer addressing which has not been enabled yet via :py:obj:`~.cudaDeviceEnablePeerAccess()`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSetOnActiveProcess + + + This indicates that the user has called :py:obj:`~.cudaSetValidDevices()`, :py:obj:`~.cudaSetDeviceFlags()`, :py:obj:`~.cudaD3D9SetDirect3DDevice()`, :py:obj:`~.cudaD3D10SetDirect3DDevice`, :py:obj:`~.cudaD3D11SetDirect3DDevice()`, or :py:obj:`~.cudaVDPAUSetVDPAUDevice()` after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing :py:obj:`~.CUcontext` active on the host thread. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorContextIsDestroyed + + + This error indicates that the context current to the calling thread has been destroyed using :py:obj:`~.cuCtxDestroy`, or is a primary context which has not yet been initialized. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorAssert + + + An assert triggered in device code during kernel execution. The device cannot be used again. All existing allocations are invalid. To continue using CUDA, the process must be terminated and relaunched. + + + .. 
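admonition:: Usage sketch
+
+      The polling sketch referenced under :py:obj:`~.cudaErrorNotReady` above; editorial, and it assumes that passing ``0`` selects the default stream.
+
+      .. code-block:: python
+
+         from cuda.bindings import runtime
+
+         err, event = runtime.cudaEventCreate()
+         err, = runtime.cudaEventRecord(event, 0)  # 0 = default stream
+         # cudaErrorNotReady means "still running", not failure.
+         while runtime.cudaEventQuery(event)[0] == runtime.cudaError_t.cudaErrorNotReady:
+             pass  # a real application would yield or do other work here
+         err, = runtime.cudaEventDestroy(event)
+
+   ..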
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTooManyPeers + + + This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to :py:obj:`~.cudaEnablePeerAccess()`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorHostMemoryAlreadyRegistered + + + This error indicates that the memory range passed to :py:obj:`~.cudaHostRegister()` has already been registered. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorHostMemoryNotRegistered + + + This error indicates that the pointer passed to :py:obj:`~.cudaHostUnregister()` does not correspond to any currently registered memory region. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorHardwareStackError + + + The device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorIllegalInstruction + + + The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMisalignedAddress + + + The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidAddressSpace + + + While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidPc + + + The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorLaunchFailure + + + An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCooperativeLaunchTooLarge + + + This error indicates that the number of blocks launched per grid for a kernel that was launched via either :py:obj:`~.cudaLaunchCooperativeKernel` or :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` exceeds the maximum number of blocks as allowed by :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor` or :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags` times the number of multiprocessors as specified by the device attribute :py:obj:`~.cudaDevAttrMultiProcessorCount`. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotPermitted + + + This error indicates the attempted operation is not permitted. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorNotSupported + + + This error indicates the attempted operation is not supported on the current system or device. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSystemNotReady + + + This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorSystemDriverMismatch + + + This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCompatNotSupportedOnDevice + + + This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsConnectionFailed + + + This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsRpcFailure + + + This error indicates that the remote procedure call between the MPS server and the MPS client failed. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsServerNotReady + + + This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsMaxClientsReached + + + This error indicates that the hardware resources required to create an MPS client have been exhausted. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsMaxConnectionsReached + + + This error indicates that the hardware resources required to support device connections have been exhausted. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorMpsClientTerminated + + + This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. + + + .. 
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCdpNotSupported + + + This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCdpVersionMismatch + + + This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureUnsupported + + + The operation is not permitted when the stream is capturing. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureInvalidated + + + The current capture sequence on the stream has been invalidated due to a previous error. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureMerge + + + The operation would have resulted in a merge of two independent capture sequences. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureUnmatched + + + The capture was not initiated in this stream. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureUnjoined + + + The capture sequence contains a fork that was not joined to the primary stream. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureIsolation + + + A dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureImplicit + + + The operation would have resulted in a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorCapturedEvent + + + The operation is not permitted on an event which was last recorded in a capturing stream. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorStreamCaptureWrongThread + + + A stream capture sequence not initiated with the :py:obj:`~.cudaStreamCaptureModeRelaxed` argument to :py:obj:`~.cudaStreamBeginCapture` was passed to :py:obj:`~.cudaStreamEndCapture` in a different thread. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorTimeout + + + This indicates that the wait operation has timed out. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorGraphExecUpdateFailure + + + This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorExternalDevice + + + This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device's signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidClusterSize + + + This indicates that a kernel launch error has occurred due to cluster misconfiguration. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorFunctionNotLoaded + + + Indicates that a function handle is not loaded when calling an API that requires a loaded function. + + + .. 
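admonition:: Usage sketch
+
+      A minimal, editorial capture sequence tying the stream-capture errors above together; no work is enqueued here, whereas a real capture would record asynchronous calls between begin and end.
+
+      .. code-block:: python
+
+         from cuda.bindings import runtime
+
+         err, stream = runtime.cudaStreamCreate()
+         err, = runtime.cudaStreamBeginCapture(
+             stream, runtime.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal)
+         # ... enqueue asynchronous work on `stream` here; a disallowed
+         # operation would invalidate the capture
+         # (cudaErrorStreamCaptureInvalidated) ...
+         err, graph = runtime.cudaStreamEndCapture(stream)  # same thread as begin
+         err, = runtime.cudaGraphDestroy(graph)
+         err, = runtime.cudaStreamDestroy(stream)
+
+   ..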
autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidResourceType + + + This error indicates one or more resources passed in are not valid resource types for the operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorInvalidResourceConfiguration + + + This error indicates one or more resources are insufficient or non-applicable for the operation. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorUnknown + + + This indicates that an unknown internal error has occurred. + + + .. autoattribute:: cuda.bindings.runtime.cudaError_t.cudaErrorApiFailureBase + +.. autoclass:: cuda.bindings.runtime.cudaChannelFormatKind + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSigned + + + Signed channel format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsigned + + + Unsigned channel format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindFloat + + + Float channel format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindNone + + + No channel format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindNV12 + + + Unsigned 8-bit integers, planar 4:2:0 YUV format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X1 + + + 1 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X2 + + + 2 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized8X4 + + + 4 channel unsigned 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X1 + + + 1 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X2 + + + 2 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedNormalized16X4 + + + 4 channel unsigned 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X1 + + + 1 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X2 + + + 2 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized8X4 + + + 4 channel signed 8-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X1 + + + 1 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X2 + + + 2 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedNormalized16X4 + + + 4 channel signed 16-bit normalized integer + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1 + + + 4 channel unsigned normalized block-compressed (BC1 compression) format + + + .. 
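admonition:: Usage sketch
+
+      A short editorial sketch of where :py:obj:`~.cudaChannelFormatKind` is consumed, assuming this package's status-first return convention for :py:obj:`~.cudaCreateChannelDesc`.
+
+      .. code-block:: python
+
+         from cuda.bindings import runtime
+
+         # Descriptor for a 4-channel, 8-bit-per-channel unsigned format
+         # (RGBA8-style).
+         err, desc = runtime.cudaCreateChannelDesc(
+             8, 8, 8, 8, runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsigned)
+
+   ..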
autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed1SRGB + + + 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2 + + + 4 channel unsigned normalized block-compressed (BC2 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed2SRGB + + + 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3 + + + 4 channel unsigned normalized block-compressed (BC3 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed3SRGB + + + 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed4 + + + 1 channel unsigned normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed4 + + + 1 channel signed normalized block-compressed (BC4 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed5 + + + 2 channel unsigned normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed5 + + + 2 channel signed normalized block-compressed (BC5 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed6H + + + 3 channel unsigned half-float block-compressed (BC6H compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindSignedBlockCompressed6H + + + 3 channel signed half-float block-compressed (BC6H compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7 + + + 4 channel unsigned normalized block-compressed (BC7 compression) format + + + .. autoattribute:: cuda.bindings.runtime.cudaChannelFormatKind.cudaChannelFormatKindUnsignedBlockCompressed7SRGB + + + 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding + +.. autoclass:: cuda.bindings.runtime.cudaMemoryType + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeUnregistered + + + Unregistered memory + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeHost + + + Host memory + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeDevice + + + Device memory + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryType.cudaMemoryTypeManaged + + + Managed memory + +.. autoclass:: cuda.bindings.runtime.cudaMemcpyKind + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyHostToHost + + + Host -> Host + + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice + + + Host -> Device + + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost + + + Device -> Host + + + .. autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice + + + Device -> Device + + + .. 
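admonition:: Usage sketch
+
+      An editorial round-trip sketch for the transfer directions listed here; host pointers are passed as integers via ``ctypes``, as in this package's examples.
+
+      .. code-block:: python
+
+         import numpy as np
+         from cuda.bindings import runtime
+
+         host = np.arange(16, dtype=np.float32)
+         err, dev = runtime.cudaMalloc(host.nbytes)
+         err, = runtime.cudaMemcpy(dev, host.ctypes.data, host.nbytes,
+                                   runtime.cudaMemcpyKind.cudaMemcpyHostToDevice)
+         err, = runtime.cudaMemcpy(host.ctypes.data, dev, host.nbytes,
+                                   runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost)
+         err, = runtime.cudaFree(dev)
+
+   ..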
autoattribute:: cuda.bindings.runtime.cudaMemcpyKind.cudaMemcpyDefault + + + Direction of the transfer is inferred from the pointer values. Requires unified virtual addressing + +.. autoclass:: cuda.bindings.runtime.cudaAccessProperty + + .. autoattribute:: cuda.bindings.runtime.cudaAccessProperty.cudaAccessPropertyNormal + + + Normal cache persistence. + + + .. autoattribute:: cuda.bindings.runtime.cudaAccessProperty.cudaAccessPropertyStreaming + + + Streaming access is less likely to persist in cache. + + + .. autoattribute:: cuda.bindings.runtime.cudaAccessProperty.cudaAccessPropertyPersisting + + + Persisting access is more likely to persist in cache. + +.. autoclass:: cuda.bindings.runtime.cudaStreamCaptureStatus + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusNone + + + Stream is not capturing + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusActive + + + Stream is actively capturing + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureStatus.cudaStreamCaptureStatusInvalidated + + + Stream is part of a capture sequence that has been invalidated, but not terminated + +.. autoclass:: cuda.bindings.runtime.cudaStreamCaptureMode + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureMode.cudaStreamCaptureModeThreadLocal + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamCaptureMode.cudaStreamCaptureModeRelaxed + +.. autoclass:: cuda.bindings.runtime.cudaSynchronizationPolicy + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicyAuto + + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicySpin + + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicyYield + + + .. autoattribute:: cuda.bindings.runtime.cudaSynchronizationPolicy.cudaSyncPolicyBlockingSync + +.. autoclass:: cuda.bindings.runtime.cudaClusterSchedulingPolicy + + .. autoattribute:: cuda.bindings.runtime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyDefault + + + the default policy + + + .. autoattribute:: cuda.bindings.runtime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicySpread + + + spread the blocks within a cluster to the SMs + + + .. autoattribute:: cuda.bindings.runtime.cudaClusterSchedulingPolicy.cudaClusterSchedulingPolicyLoadBalancing + + + allow the hardware to load-balance the blocks in a cluster to the SMs + +.. autoclass:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags + + .. autoattribute:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamAddCaptureDependencies + + + Add new nodes to the dependency set + + + .. autoattribute:: cuda.bindings.runtime.cudaStreamUpdateCaptureDependenciesFlags.cudaStreamSetCaptureDependencies + + + Replace the dependency set with the new nodes + +.. autoclass:: cuda.bindings.runtime.cudaUserObjectFlags + + .. autoattribute:: cuda.bindings.runtime.cudaUserObjectFlags.cudaUserObjectNoDestructorSync + + + Indicates the destructor execution is not synchronized by any CUDA handle. + +.. autoclass:: cuda.bindings.runtime.cudaUserObjectRetainFlags + + .. autoattribute:: cuda.bindings.runtime.cudaUserObjectRetainFlags.cudaGraphUserObjectMove + + + Transfer references from the caller rather than creating new references. + +.. autoclass:: cuda.bindings.runtime.cudaGraphicsRegisterFlags + + ..
autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone + + + Default + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsReadOnly + + + CUDA will not write to this resource + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard + + + CUDA will only write to and will not read from this resource + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsSurfaceLoadStore + + + CUDA will bind this resource to a surface reference + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsTextureGather + + + CUDA will perform texture gather operations on this resource + +.. autoclass:: cuda.bindings.runtime.cudaGraphicsMapFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsNone + + + Default; Assume resource can be read/written + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsReadOnly + + + CUDA will not write to this resource + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsMapFlags.cudaGraphicsMapFlagsWriteDiscard + + + CUDA will only write to and will not read from this resource + +.. autoclass:: cuda.bindings.runtime.cudaGraphicsCubeFace + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveX + + + Positive X face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeX + + + Negative X face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveY + + + Positive Y face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeY + + + Negative Y face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFacePositiveZ + + + Positive Z face of cubemap + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphicsCubeFace.cudaGraphicsCubeFaceNegativeZ + + + Negative Z face of cubemap + +.. autoclass:: cuda.bindings.runtime.cudaResourceType + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypeArray + + + Array resource + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypeMipmappedArray + + + Mipmapped array resource + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypeLinear + + + Linear resource + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceType.cudaResourceTypePitch2D + + + Pitch 2D resource + +.. autoclass:: cuda.bindings.runtime.cudaResourceViewFormat + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatNone + + + No resource view format (use underlying resource format) + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar1 + + + 1 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar2 + + + 2 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedChar4 + + + 4 channel unsigned 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedChar1 + + + 1 channel signed 8-bit integers + + + .. 
autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedChar2 + + + 2 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedChar4 + + + 4 channel signed 8-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort1 + + + 1 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort2 + + + 2 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedShort4 + + + 4 channel unsigned 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedShort1 + + + 1 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedShort2 + + + 2 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedShort4 + + + 4 channel signed 16-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt1 + + + 1 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt2 + + + 2 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedInt4 + + + 4 channel unsigned 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedInt1 + + + 1 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedInt2 + + + 2 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedInt4 + + + 4 channel signed 32-bit integers + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatHalf1 + + + 1 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatHalf2 + + + 2 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatHalf4 + + + 4 channel 16-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatFloat1 + + + 1 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatFloat2 + + + 2 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatFloat4 + + + 4 channel 32-bit floating point + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed1 + + + Block compressed 1 + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed2 + + + Block compressed 2 + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed3 + + + Block compressed 3 + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed4 + + + Block compressed 4 unsigned + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed4 + + + Block compressed 4 signed + + + .. 
autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed5 + + + Block compressed 5 unsigned + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed5 + + + Block compressed 5 signed + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed6H + + + Block compressed 6 unsigned half-float + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatSignedBlockCompressed6H + + + Block compressed 6 signed half-float + + + .. autoattribute:: cuda.bindings.runtime.cudaResourceViewFormat.cudaResViewFormatUnsignedBlockCompressed7 + + + Block compressed 7 + +.. autoclass:: cuda.bindings.runtime.cudaFuncAttribute + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeMaxDynamicSharedMemorySize + + + Maximum dynamic shared memory size + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributePreferredSharedMemoryCarveout + + + Preferred shared memory-L1 cache split + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeClusterDimMustBeSet + + + Indicator to enforce valid cluster dimension specification on kernel launch + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterWidth + + + Required cluster width + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterHeight + + + Required cluster height + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeRequiredClusterDepth + + + Required cluster depth + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeNonPortableClusterSizeAllowed + + + Whether non-portable cluster scheduling policy is supported + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeClusterSchedulingPolicyPreference + + + Required cluster scheduling policy preference + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncAttribute.cudaFuncAttributeMax + +.. autoclass:: cuda.bindings.runtime.cudaFuncCache + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferNone + + + Default function cache configuration, no preference + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferShared + + + Prefer larger shared memory and smaller L1 cache + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferL1 + + + Prefer larger L1 cache and smaller shared memory + + + .. autoattribute:: cuda.bindings.runtime.cudaFuncCache.cudaFuncCachePreferEqual + + + Prefer equal size L1 cache and shared memory + +.. autoclass:: cuda.bindings.runtime.cudaSharedMemConfig + + .. autoattribute:: cuda.bindings.runtime.cudaSharedMemConfig.cudaSharedMemBankSizeDefault + + + .. autoattribute:: cuda.bindings.runtime.cudaSharedMemConfig.cudaSharedMemBankSizeFourByte + + + .. autoattribute:: cuda.bindings.runtime.cudaSharedMemConfig.cudaSharedMemBankSizeEightByte + +.. autoclass:: cuda.bindings.runtime.cudaSharedCarveout + + .. autoattribute:: cuda.bindings.runtime.cudaSharedCarveout.cudaSharedmemCarveoutDefault + + + No preference for shared memory or L1 (default) + + + .. autoattribute:: cuda.bindings.runtime.cudaSharedCarveout.cudaSharedmemCarveoutMaxShared + + + Prefer maximum available shared memory, minimum L1 cache + + + .. 
autoattribute:: cuda.bindings.runtime.cudaSharedCarveout.cudaSharedmemCarveoutMaxL1 + + + Prefer maximum available L1 cache, minimum shared memory + +.. autoclass:: cuda.bindings.runtime.cudaComputeMode + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeDefault + + + Default compute mode (Multiple threads can use :py:obj:`~.cudaSetDevice()` with this device) + + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeExclusive + + + Compute-exclusive-thread mode (Only one thread in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) + + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeProhibited + + + Compute-prohibited mode (No threads can use :py:obj:`~.cudaSetDevice()` with this device) + + + .. autoattribute:: cuda.bindings.runtime.cudaComputeMode.cudaComputeModeExclusiveProcess + + + Compute-exclusive-process mode (Many threads in one process will be able to use :py:obj:`~.cudaSetDevice()` with this device) + +.. autoclass:: cuda.bindings.runtime.cudaLimit + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitStackSize + + + GPU thread stack size + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitPrintfFifoSize + + + GPU printf FIFO size + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitMallocHeapSize + + + GPU malloc heap size + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitDevRuntimeSyncDepth + + + GPU device runtime synchronize depth + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitDevRuntimePendingLaunchCount + + + GPU device runtime pending launch count + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitMaxL2FetchGranularity + + + A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint + + + .. autoattribute:: cuda.bindings.runtime.cudaLimit.cudaLimitPersistingL2CacheSize + + + A size in bytes for L2 persisting lines cache size + +.. autoclass:: cuda.bindings.runtime.cudaMemoryAdvise + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseSetReadMostly + + + Data will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseUnsetReadMostly + + + Undo the effect of :py:obj:`~.cudaMemAdviseSetReadMostly` + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseSetPreferredLocation + + + Set the preferred location for the data as the specified device + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseUnsetPreferredLocation + + + Clear the preferred location for the data + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseSetAccessedBy + + + Data will be accessed by the specified device, so prevent page faults as much as possible + + + .. autoattribute:: cuda.bindings.runtime.cudaMemoryAdvise.cudaMemAdviseUnsetAccessedBy + + + Let the Unified Memory subsystem decide on the page faulting policy for the specified device + +.. autoclass:: cuda.bindings.runtime.cudaMemRangeAttribute + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeReadMostly + + + Whether the range will mostly be read and only occasionally be written to + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocation + + + The preferred location of the range + + + ..
autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeAccessedBy + + + Memory range has :py:obj:`~.cudaMemAdviseSetAccessedBy` set for specified device + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocation + + + The last location to which the range was prefetched + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationType + + + The preferred location type of the range + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributePreferredLocationId + + + The preferred location id of the range + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationType + + + The last location type to which the range was prefetched + + + .. autoattribute:: cuda.bindings.runtime.cudaMemRangeAttribute.cudaMemRangeAttributeLastPrefetchLocationId + + + The last location id to which the range was prefetched + +.. autoclass:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionHost + + + :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` and its CUDA Driver API counterpart are supported on the device. + + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesOptions.cudaFlushGPUDirectRDMAWritesOptionMemOps + + + The :py:obj:`~.CU_STREAM_WAIT_VALUE_FLUSH` flag and the :py:obj:`~.CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES` MemOp are supported on the CUDA device. + +.. autoclass:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering + + .. autoattribute:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingNone + + + The device does not natively support ordering of GPUDirect RDMA writes. :py:obj:`~.cudaFlushGPUDirectRDMAWrites()` can be leveraged if supported. + + + .. autoattribute:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingOwner + + + Natively, the device can consistently consume GPUDirect RDMA writes, although other CUDA devices may not. + + + .. autoattribute:: cuda.bindings.runtime.cudaGPUDirectRDMAWritesOrdering.cudaGPUDirectRDMAWritesOrderingAllDevices + + + Any CUDA device in the system can consistently consume GPUDirect RDMA writes to this device. + +.. autoclass:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToOwner + + + Blocks until remote writes are visible to the CUDA device context owning the data. + + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesScope.cudaFlushGPUDirectRDMAWritesToAllDevices + + + Blocks until remote writes are visible to all CUDA device contexts. + +.. autoclass:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesTarget + + .. autoattribute:: cuda.bindings.runtime.cudaFlushGPUDirectRDMAWritesTarget.cudaFlushGPUDirectRDMAWritesTargetCurrentDevice + + + Sets the target for :py:obj:`~.cudaDeviceFlushGPUDirectRDMAWrites()` to the currently active CUDA device context. + +.. autoclass:: cuda.bindings.runtime.cudaDeviceAttr + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerBlock + + + Maximum number of threads per block + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlockDimX + + + Maximum block dimension X + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlockDimY + + + Maximum block dimension Y + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlockDimZ + + + Maximum block dimension Z + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxGridDimX + + + Maximum grid dimension X + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxGridDimY + + + Maximum grid dimension Y + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxGridDimZ + + + Maximum grid dimension Z + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlock + + + Maximum shared memory available per block in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTotalConstantMemory + + + Memory available on device for constant variables in a CUDA C kernel in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrWarpSize + + + Warp size in threads + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxPitch + + + Maximum pitch in bytes allowed by memory copies + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxRegistersPerBlock + + + Maximum number of 32-bit registers available per block + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrClockRate + + + Peak clock frequency in kilohertz + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTextureAlignment + + + Alignment requirement for textures + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGpuOverlap + + + Device can possibly copy memory and execute a kernel concurrently + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMultiProcessorCount + + + Number of multiprocessors on device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrKernelExecTimeout + + + Specifies whether there is a run time limit on kernels + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrIntegrated + + + Device is integrated with host memory + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCanMapHostMemory + + + Device can map host memory into CUDA address space + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputeMode + + + Compute mode (See :py:obj:`~.cudaComputeMode` for details) + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DWidth + + + Maximum 1D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DWidth + + + Maximum 2D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DHeight + + + Maximum 2D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidth + + + Maximum 3D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeight + + + Maximum 3D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepth + + + Maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredWidth + + + Maximum 2D layered texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredHeight + + + Maximum 2D layered texture height + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLayeredLayers + + + Maximum layers in a 2D layered texture + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrSurfaceAlignment + + + Alignment requirement for surfaces + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrConcurrentKernels + + + Device can possibly execute multiple kernels concurrently + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrEccEnabled + + + Device has ECC support enabled + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPciBusId + + + PCI bus ID of the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPciDeviceId + + + PCI device ID of the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTccDriver + + + Device is using TCC driver model + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemoryClockRate + + + Peak memory clock frequency in kilohertz + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGlobalMemoryBusWidth + + + Global memory bus width in bits + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrL2CacheSize + + + Size of L2 cache in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxThreadsPerMultiProcessor + + + Maximum resident threads per multiprocessor + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrAsyncEngineCount + + + Number of asynchronous engines + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrUnifiedAddressing + + + Device shares a unified address space with the host + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredWidth + + + Maximum 1D layered texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLayeredLayers + + + Maximum layers in a 1D layered texture + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherWidth + + + Maximum 2D texture width if cudaArrayTextureGather is set + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DGatherHeight + + + Maximum 2D texture height if cudaArrayTextureGather is set + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DWidthAlt + + + Alternate maximum 3D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DHeightAlt + + + Alternate maximum 3D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture3DDepthAlt + + + Alternate maximum 3D texture depth + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPciDomainId + + + PCI domain ID of the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTexturePitchAlignment + + + Pitch alignment requirement for textures + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapWidth + + + Maximum cubemap texture width/height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredWidth + + + Maximum cubemap layered texture width/height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTextureCubemapLayeredLayers + + + Maximum layers in a cubemap layered texture + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface1DWidth + + + Maximum 1D surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DWidth + + + Maximum 2D surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DHeight + + + Maximum 2D surface height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface3DWidth + + + Maximum 3D surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface3DHeight + + + Maximum 3D surface height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface3DDepth + + + Maximum 3D surface depth + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredWidth + + + Maximum 1D layered surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface1DLayeredLayers + + + Maximum layers in a 1D layered surface + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredWidth + + + Maximum 2D layered surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredHeight + + + Maximum 2D layered surface height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurface2DLayeredLayers + + + Maximum layers in a 2D layered surface + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapWidth + + + Maximum cubemap surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredWidth + + + Maximum cubemap layered surface width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSurfaceCubemapLayeredLayers + + + Maximum layers in a cubemap layered surface + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DLinearWidth + + + Maximum 1D linear texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearWidth + + + Maximum 2D linear texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearHeight + + + Maximum 2D linear texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DLinearPitch + + + Maximum 2D linear texture pitch in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedWidth + + + Maximum mipmapped 2D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture2DMipmappedHeight + + + Maximum mipmapped 2D texture height + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor + + + Major compute capability version number + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor + + + Minor compute capability version number + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTexture1DMipmappedWidth + + + Maximum mipmapped 1D texture width + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrStreamPrioritiesSupported + + + Device supports stream priorities + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGlobalL1CacheSupported + + + Device supports caching globals in L1 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrLocalL1CacheSupported + + + Device supports caching locals in L1 + + + .. 
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerMultiprocessor + + + Maximum shared memory available per multiprocessor in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxRegistersPerMultiprocessor + + + Maximum number of 32-bit registers available per multiprocessor + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrManagedMemory + + + Device can allocate managed memory on this system + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrIsMultiGpuBoard + + + Device is on a multi-GPU board + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMultiGpuBoardGroupID + + + Unique identifier for a group of devices on the same multi-GPU board + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostNativeAtomicSupported + + + Link between the device and the host supports native atomic operations + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrSingleToDoublePrecisionPerfRatio + + + Ratio of single precision performance (in floating-point operations per second) to double precision performance + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPageableMemoryAccess + + + Device supports coherently accessing pageable memory without calling cudaHostRegister on it + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrConcurrentManagedAccess + + + Device can coherently access managed memory concurrently with the CPU + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrComputePreemptionSupported + + + Device supports Compute Preemption + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCanUseHostPointerForRegisteredMem + + + Device can access host registered memory at the same virtual address as the CPU + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved92 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved93 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved94 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCooperativeLaunch + + + Device supports launching cooperative kernels via :py:obj:`~.cudaLaunchCooperativeKernel` + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCooperativeMultiDeviceLaunch + + + Deprecated: cudaLaunchCooperativeKernelMultiDevice is deprecated. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxSharedMemoryPerBlockOptin + + + The maximum opt-in shared memory per block. This value may vary by chip. See :py:obj:`~.cudaFuncSetAttribute` + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrCanFlushRemoteWrites + + + Device supports flushing of outstanding remote writes. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostRegisterSupported + + + Device supports host memory registration via :py:obj:`~.cudaHostRegister`. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrPageableMemoryAccessUsesHostPageTables + + + Device accesses pageable memory via the host's page tables. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrDirectManagedMemAccessFromHost + + + Host can directly access managed memory on the device without migration. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxBlocksPerMultiprocessor + + + Maximum number of blocks per multiprocessor + + + ..
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxPersistingL2CacheSize + + + Maximum L2 persisting lines capacity setting in bytes. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxAccessPolicyWindowSize + + + Maximum value of :py:obj:`~.cudaAccessPolicyWindow.num_bytes`. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReservedSharedMemoryPerBlock + + + Shared memory reserved by CUDA driver per block in bytes + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrSparseCudaArraySupported + + + Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostRegisterReadOnlySupported + + + Device supports using the :py:obj:`~.cudaHostRegister` flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrTimelineSemaphoreInteropSupported + + + External timeline semaphore interop is supported on the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMaxTimelineSemaphoreInteropSupported + + + Deprecated. External timeline semaphore interop is supported on the device + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemoryPoolsSupported + + + Device supports using the :py:obj:`~.cudaMallocAsync` and :py:obj:`~.cudaMemPool` family of APIs + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMASupported + + + Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAFlushWritesOptions + + + The returned attribute shall be interpreted as a bitmask, where the individual bits are listed in the :py:obj:`~.cudaFlushGPUDirectRDMAWritesOptions` enum + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrGPUDirectRDMAWritesOrdering + + + GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See :py:obj:`~.cudaGPUDirectRDMAWritesOrdering` for the numerical values returned here. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemoryPoolSupportedHandleTypes + + + Handle types supported with mempool-based IPC + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrClusterLaunch + + + Indicates device supports cluster launch + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrDeferredMappingCudaArraySupported + + + Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved122 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved123 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved124 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrIpcEventSupport + + + Device supports IPC Events. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMemSyncDomainCount + + + Number of memory synchronization domains the device supports. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved127 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved128 + + + ..
autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved129 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrNumaConfig + + + NUMA configuration of a device: value is of type :py:obj:`~.cudaDeviceNumaConfig` enum + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrNumaId + + + NUMA node ID of the GPU memory + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrReserved132 + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMpsEnabled + + + Contexts created on this device will be shared via MPS + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrHostNumaId + + + NUMA ID of the host node closest to the device. Returns -1 when the system does not support NUMA. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrD3D12CigSupported + + + Device supports CIG with D3D12. + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceAttr.cudaDevAttrMax + +.. autoclass:: cuda.bindings.runtime.cudaMemPoolAttr + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolReuseFollowEventDependencies + + + (value type = int) Allow cuMemAllocAsync to use memory asynchronously freed in another stream as long as a stream ordering dependency of the allocating stream on the free action exists. CUDA events and null stream interactions can create the required stream ordered dependencies. (default enabled) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolReuseAllowOpportunistic + + + (value type = int) Allow reuse of already completed frees when there is no dependency between the free and allocation. (default enabled) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolReuseAllowInternalDependencies + + + (value type = int) Allow cuMemAllocAsync to insert new stream dependencies in order to establish the stream ordering required to reuse a piece of memory released by cuFreeAsync (default enabled). + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrReleaseThreshold + + + (value type = cuuint64_t) Amount of reserved memory in bytes to hold onto before trying to release memory back to the OS. When more than the release threshold bytes of memory are held by the memory pool, the allocator will try to release memory back to the OS on the next call to stream, event or context synchronize. (default 0) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrReservedMemCurrent + + + (value type = cuuint64_t) Amount of backing memory currently allocated for the mempool. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrReservedMemHigh + + + (value type = cuuint64_t) High watermark of backing memory allocated for the mempool since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrUsedMemCurrent + + + (value type = cuuint64_t) Amount of memory from the pool that is currently in use by the application. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemPoolAttr.cudaMemPoolAttrUsedMemHigh + + + (value type = cuuint64_t) High watermark of the amount of memory from the pool that was in use by the application since the last time it was reset. High watermark can only be reset to zero. + +.. autoclass:: cuda.bindings.runtime.cudaMemLocationType + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeInvalid + + + ..
autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeDevice + + + Location is a device location, thus id is a device ordinal + + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeHost + + + Location is host, id is ignored + + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeHostNuma + + + Location is a host NUMA node, thus id is a host NUMA node id + + + .. autoattribute:: cuda.bindings.runtime.cudaMemLocationType.cudaMemLocationTypeHostNumaCurrent + + + Location is the host NUMA node closest to the current thread's CPU, id is ignored + +.. autoclass:: cuda.bindings.runtime.cudaMemAccessFlags + + .. autoattribute:: cuda.bindings.runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtNone + + + Default, make the address range not accessible + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtRead + + + Make the address range read accessible + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAccessFlags.cudaMemAccessFlagsProtReadWrite + + + Make the address range read-write accessible + +.. autoclass:: cuda.bindings.runtime.cudaMemAllocationType + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationType.cudaMemAllocationTypeInvalid + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationType.cudaMemAllocationTypePinned + + + This allocation type is 'pinned', i.e. cannot migrate from its current location while the application is actively using it + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationType.cudaMemAllocationTypeMax + +.. autoclass:: cuda.bindings.runtime.cudaMemAllocationHandleType + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeNone + + + Does not allow any export mechanism. + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypePosixFileDescriptor + + + Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeWin32 + + + Allows a Win32 NT handle to be used for exporting. (HANDLE) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeWin32Kmt + + + Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) + + + .. autoattribute:: cuda.bindings.runtime.cudaMemAllocationHandleType.cudaMemHandleTypeFabric + + + Allows a fabric handle to be used for exporting. (cudaMemFabricHandle_t) + +.. autoclass:: cuda.bindings.runtime.cudaGraphMemAttributeType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemCurrent + + + (value type = cuuint64_t) Amount of memory, in bytes, currently associated with graphs. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemHigh + + + (value type = cuuint64_t) High watermark of memory, in bytes, associated with graphs since the last time it was reset. High watermark can only be reset to zero. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemCurrent + + + (value type = cuuint64_t) Amount of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphMemAttributeType.cudaGraphMemAttrReservedMemHigh + + + (value type = cuuint64_t) High watermark of memory, in bytes, currently allocated for use by the CUDA graphs asynchronous allocator. + +
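+ These attributes are plain Python enums, and querying them follows the bindings' usual pattern of returning an ``(error, value)`` tuple. A minimal sketch, assuming a CUDA-capable device 0 and the standard auto-generated signature of :py:obj:`~.cudaDeviceGetGraphMemAttribute` (illustrative only, not part of the generated reference):
+ 
+ .. code-block:: python
+ 
+     # Query one of the graph-memory counters documented above.
+     # Assumes device 0 exists; the attribute's value semantics are as
+     # described in the cudaGraphMemAttributeType entries.
+     from cuda.bindings import runtime
+ 
+     err, used = runtime.cudaDeviceGetGraphMemAttribute(
+         0, runtime.cudaGraphMemAttributeType.cudaGraphMemAttrUsedMemCurrent)
+     if err == runtime.cudaError_t.cudaSuccess:
+         print(f"Bytes currently associated with graphs on device 0: {used}")
+ 
+..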
autoclass:: cuda.bindings.runtime.cudaDeviceP2PAttr + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrPerformanceRank + + + A relative value indicating the performance of the link between two devices + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrAccessSupported + + + Peer access is enabled + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrNativeAtomicSupported + + + Native atomic operation over the link supported + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceP2PAttr.cudaDevP2PAttrCudaArrayAccessSupported + + + Accessing CUDA arrays over the link supported + +.. autoclass:: cuda.bindings.runtime.cudaExternalMemoryHandleType + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueFd + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeOpaqueWin32Kmt + + + Handle is an opaque, globally shared handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Heap + + + Handle is a D3D12 heap object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D12Resource + + + Handle is a D3D12 committed resource + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11Resource + + + Handle is a shared NT handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeD3D11ResourceKmt + + + Handle is a globally shared handle to a D3D11 resource + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryHandleType.cudaExternalMemoryHandleTypeNvSciBuf + + + Handle is an NvSciBuf object + +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueFd + + + Handle is an opaque file descriptor + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32 + + + Handle is an opaque shared NT handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt + + + Handle is an opaque, globally shared handle + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D12Fence + + + Handle is a shared NT handle referencing a D3D12 fence object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeD3D11Fence + + + Handle is a shared NT handle referencing a D3D11 fence object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeNvSciSync + + + Opaque handle to NvSciSync Object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutex + + + Handle is a shared NT handle referencing a D3D11 keyed mutex object + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeKeyedMutexKmt + + + Handle is a shared KMT handle referencing a D3D11 keyed mutex object + + + .. 
autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd + + + Handle is an opaque file descriptor referencing a timeline semaphore + + + .. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreHandleType.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32 + + + Handle is an opaque shared NT handle referencing a timeline semaphore + +.. autoclass:: cuda.bindings.runtime.cudaCGScope + + .. autoattribute:: cuda.bindings.runtime.cudaCGScope.cudaCGScopeInvalid + + + Invalid cooperative group scope + + + .. autoattribute:: cuda.bindings.runtime.cudaCGScope.cudaCGScopeGrid + + + Scope represented by a grid_group + + + .. autoattribute:: cuda.bindings.runtime.cudaCGScope.cudaCGScopeMultiGrid + + + Scope represented by a multi_grid_group + +.. autoclass:: cuda.bindings.runtime.cudaGraphConditionalHandleFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphConditionalHandleFlags.cudaGraphCondAssignDefault + + + Apply default handle value when graph is launched. + +.. autoclass:: cuda.bindings.runtime.cudaGraphConditionalNodeType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphConditionalNodeType.cudaGraphCondTypeIf + + + Conditional 'if' Node. Body executed once if condition value is non-zero. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphConditionalNodeType.cudaGraphCondTypeWhile + + + Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. + +.. autoclass:: cuda.bindings.runtime.cudaGraphNodeType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeKernel + + + GPU kernel node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemcpy + + + Memcpy node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemset + + + Memset node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeHost + + + Host (executable) node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeGraph + + + Node which executes an embedded graph + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeEmpty + + + Empty (no-op) node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeWaitEvent + + + External event wait node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeEventRecord + + + External event record node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreSignal + + + External semaphore signal node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeExtSemaphoreWait + + + External semaphore wait node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemAlloc + + + Memory allocation node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeMemFree + + + Memory free node + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeConditional + + + Conditional node. May be used to implement a conditional execution path or loop + + inside of a graph. The graph(s) contained within the body of the conditional node + + can be selectively executed or iterated upon based on the value of a conditional + + variable. + + + + Handles must be created in advance of creating the node + + using :py:obj:`~.cudaGraphConditionalHandleCreate`.
+ + + + The following restrictions apply to graphs which contain conditional nodes: + + The graph cannot be used in a child node. + + Only one instantiation of the graph may exist at any point in time. + + The graph cannot be cloned. + + + + To set the control value, supply a default value when creating the handle and/or + + call :py:obj:`~.cudaGraphSetConditional` from device code. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphNodeType.cudaGraphNodeTypeCount + +.. autoclass:: cuda.bindings.runtime.cudaGraphDependencyType + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDependencyType.cudaGraphDependencyTypeDefault + + + This is an ordinary dependency. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDependencyType.cudaGraphDependencyTypeProgrammatic + + + This dependency type allows the downstream node to use `cudaGridDependencySynchronize()`. It may only be used between kernel nodes, and must be used with either the :py:obj:`~.cudaGraphKernelNodePortProgrammatic` or :py:obj:`~.cudaGraphKernelNodePortLaunchCompletion` outgoing port. + +.. autoclass:: cuda.bindings.runtime.cudaGraphExecUpdateResult + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateSuccess + + + The update succeeded + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateError + + + The update failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorTopologyChanged + + + The update failed because the topology changed + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNodeTypeChanged + + + The update failed because a node type changed + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorFunctionChanged + + + The update failed because the function of a kernel node changed (CUDA driver < 11.2) + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorParametersChanged + + + The update failed because the parameters changed in a way that is not supported + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorNotSupported + + + The update failed because something about the node is not supported + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorUnsupportedFunctionChange + + + The update failed because the function of a kernel node changed in an unsupported way + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphExecUpdateResult.cudaGraphExecUpdateErrorAttributesChanged + + + The update failed because the node attributes changed in a way that is not supported + +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateResult + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateSuccess + + + Instantiation succeeded + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateError + + + Instantiation failed for an unexpected reason which is described in the return value of the function + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateInvalidStructure + + + Instantiation failed due to invalid structure, such as cycles + + + .. 
autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateNodeOperationNotSupported + + + Instantiation for device launch failed because the graph contained an unsupported operation + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateResult.cudaGraphInstantiateMultipleDevicesNotSupported + + + Instantiation for device launch failed due to the nodes belonging to different contexts + +.. autoclass:: cuda.bindings.runtime.cudaGraphKernelNodeField + + .. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldInvalid + + + Invalid field + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldGridDim + + + Grid dimension update + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldParam + + + Kernel parameter update + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodeField.cudaGraphKernelNodeFieldEnabled + + + Node enable/disable + +.. autoclass:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags.cudaEnableDefault + + + Default search mode for driver symbols. + + + .. autoattribute:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags.cudaEnableLegacyStream + + + Search for legacy versions of driver symbols. + + + .. autoattribute:: cuda.bindings.runtime.cudaGetDriverEntryPointFlags.cudaEnablePerThreadDefaultStream + + + Search for per-thread versions of driver symbols. + +.. autoclass:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult + + .. autoattribute:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSuccess + + + Search for symbol found a match + + + .. autoattribute:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointSymbolNotFound + + + Search for symbol did not find a match + + + .. autoattribute:: cuda.bindings.runtime.cudaDriverEntryPointQueryResult.cudaDriverEntryPointVersionNotSufficent + + + Search for symbol found a match, but its version wasn't high enough + +.. autoclass:: cuda.bindings.runtime.cudaGraphDebugDotFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsVerbose + + + Output all debug data as if every debug flag is enabled + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeParams + + + Adds :py:obj:`~.cudaKernelNodeParams` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemcpyNodeParams + + + Adds :py:obj:`~.cudaMemcpy3DParms` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsMemsetNodeParams + + + Adds :py:obj:`~.cudaMemsetParams` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHostNodeParams + + + Adds :py:obj:`~.cudaHostNodeParams` to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsEventNodeParams + + + Adds cudaEvent_t handle from record and wait nodes to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasSignalNodeParams + + + Adds :py:obj:`~.cudaExternalSemaphoreSignalNodeParams` values to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsExtSemasWaitNodeParams + + + Adds :py:obj:`~.cudaExternalSemaphoreWaitNodeParams` to output + + + ..
autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsKernelNodeAttributes + + + Adds cudaKernelNodeAttrID values to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsHandles + + + Adds node handles and every kernel function handle to output + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphDebugDotFlags.cudaGraphDebugDotFlagsConditionalNodeParams + + + Adds :py:obj:`~.cudaConditionalNodeParams` to output + +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateFlags + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagAutoFreeOnLaunch + + + Automatically free memory allocated in a graph before relaunching. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUpload + + + Automatically upload the graph after instantiation. Only supported by + + :py:obj:`~.cudaGraphInstantiateWithParams`. The upload will be performed using the + + stream provided in `instantiateParams`. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagDeviceLaunch + + + Instantiate the graph to be launchable from the device. This flag can only + + be used on platforms which support unified addressing. This flag cannot be + + used in conjunction with cudaGraphInstantiateFlagAutoFreeOnLaunch. + + + .. autoattribute:: cuda.bindings.runtime.cudaGraphInstantiateFlags.cudaGraphInstantiateFlagUseNodePriority + + + Run the graph using the per-node priority attributes rather than the priority of the stream it is launched into. + +.. autoclass:: cuda.bindings.runtime.cudaLaunchMemSyncDomain + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainDefault + + + Launch kernels in the default domain + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchMemSyncDomain.cudaLaunchMemSyncDomainRemote + + + Launch kernels in the remote domain + +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttributeID + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeIgnore + + + Ignored entry, for convenient composition + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeAccessPolicyWindow + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.accessPolicyWindow`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeCooperative + + + Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.cooperative`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeSynchronizationPolicy + + + Valid for streams. See :py:obj:`~.cudaLaunchAttributeValue.syncPolicy`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeClusterDimension + + + Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterDim`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeClusterSchedulingPolicyPreference + + + Valid for graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticStreamSerialization + + + Valid for launches. 
Setting :py:obj:`~.cudaLaunchAttributeValue.programmaticStreamSerializationAllowed` to non-0 signals that the kernel will use programmatic means to resolve its stream dependency, so that the CUDA runtime should opportunistically allow the grid's execution to overlap with the previous kernel in the stream, if that kernel requests the overlap. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeProgrammaticEvent + + + Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.programmaticEvent` to record the event. An event recorded through this launch attribute is guaranteed to trigger only after all blocks in the associated kernel trigger the event. A block will be able to trigger the event programmatically in a future CUDA release. A trigger can also be inserted at the beginning of each block's execution if triggerAtBlockStart is set to non-0. The dependent launches can choose to wait on the dependency using the programmatic sync (cudaGridDependencySynchronize() or equivalent PTX instructions). Note that dependents (including the CPU thread calling :py:obj:`~.cudaEventSynchronize()`) are not guaranteed to observe the release precisely when it is released. For example, :py:obj:`~.cudaEventSynchronize()` may only observe the event trigger long after the associated kernel has completed. This recording type is primarily meant for establishing programmatic dependency between device tasks. Note also this type of dependency allows, but does not guarantee, concurrent execution of tasks. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set). + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributePriority + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.priority`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomainMap + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomainMap`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeMemSyncDomain + + + Valid for streams, graph nodes, launches. See :py:obj:`~.cudaLaunchAttributeValue.memSyncDomain`. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeLaunchCompletionEvent + + + Valid for launches. Set :py:obj:`~.cudaLaunchAttributeValue.launchCompletionEvent` to record the event. + + Nominally, the event is triggered once all blocks of the kernel have begun execution. Currently this is a best effort. If a kernel B has a launch completion dependency on a kernel A, B may wait until A is complete. Alternatively, blocks of B may begin before all blocks of A have begun, for example if B can claim execution resources unavailable to A (e.g. they run on different GPUs) or if B has a higher priority than A. Exercise caution if such an ordering inversion could lead to deadlock. + + A launch completion event is nominally similar to a programmatic event with `triggerAtBlockStart` set except that it is not visible to `cudaGridDependencySynchronize()` and can be used with compute capability less than 9.0. + + The event supplied must not be an interprocess or interop event. The event must disable timing (i.e. must be created with the :py:obj:`~.cudaEventDisableTiming` flag set).
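Both the programmatic event and the launch completion event described above require an event created with timing disabled and without interprocess support. A small, hedged sketch of creating such an event with these bindings (error handling reduced to asserts; the launch-attribute wiring itself is omitted):

```python
# Sketch: create an event satisfying the restrictions above
# (timing disabled, not an interprocess or interop event).
from cuda.bindings import runtime

err, event = runtime.cudaEventCreateWithFlags(runtime.cudaEventDisableTiming)
assert err == runtime.cudaError_t.cudaSuccess

# The handle can then be stored in cudaLaunchAttributeValue.programmaticEvent
# or cudaLaunchAttributeValue.launchCompletionEvent when assembling
# launch attributes.
err, = runtime.cudaEventDestroy(event)
assert err == runtime.cudaError_t.cudaSuccess
```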
.. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributeDeviceUpdatableKernelNode + + + Valid for graph nodes, launches. This attribute is graphs-only, and passing it to a launch in a non-capturing stream will result in an error. + + :py:obj:`~.cudaLaunchAttributeValue`::deviceUpdatableKernelNode::deviceUpdatable can only be set to 0 or 1. Setting the field to 1 indicates that the corresponding kernel node should be device-updatable. On success, a handle will be returned via :py:obj:`~.cudaLaunchAttributeValue`::deviceUpdatableKernelNode::devNode which can be passed to the various device-side update functions to update the node's kernel parameters from within another kernel. For more information on the types of device updates that can be made, as well as the relevant limitations thereof, see :py:obj:`~.cudaGraphKernelNodeUpdatesApply`. + + Nodes which are device-updatable have additional restrictions compared to regular kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via :py:obj:`~.cudaGraphDestroyNode`. Additionally, once opted in to this functionality, a node cannot opt out, and any attempt to set the deviceUpdatable attribute to 0 will result in an error. Device-updatable kernel nodes also cannot have their attributes copied to/from another kernel node via :py:obj:`~.cudaGraphKernelNodeCopyAttributes`. Graphs containing one or more device-updatable nodes also do not allow multiple instantiation, and neither the graph nor its instantiated version can be passed to :py:obj:`~.cudaGraphExecUpdate`. + + If a graph contains device-updatable nodes and updates those nodes from the device from within the graph, the graph must be uploaded with :py:obj:`~.cudaGraphUpload` before it is launched. For such a graph, if host-side executable graph updates are made to the device-updatable nodes, the graph must be uploaded before it is launched again. + + + .. autoattribute:: cuda.bindings.runtime.cudaLaunchAttributeID.cudaLaunchAttributePreferredSharedMemoryCarveout + + + Valid for launches. On devices where the L1 cache and shared memory use the same hardware resources, setting :py:obj:`~.cudaLaunchAttributeValue.sharedMemCarveout` to a percentage between 0 and 100 sets the shared memory carveout preference in percent of the total shared memory for that kernel launch. This attribute takes precedence over :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout`. This is only a hint, and the driver can choose a different configuration if required for the launch. + +.. autoclass:: cuda.bindings.runtime.cudaDeviceNumaConfig + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceNumaConfig.cudaDeviceNumaConfigNone + + + The GPU is not a NUMA node + + + .. autoattribute:: cuda.bindings.runtime.cudaDeviceNumaConfig.cudaDeviceNumaConfigNumaNode + + + The GPU is a NUMA node, cudaDevAttrNumaId contains its NUMA ID + +.. autoclass:: cuda.bindings.runtime.cudaAsyncNotificationType + + .. autoattribute:: cuda.bindings.runtime.cudaAsyncNotificationType.cudaAsyncNotificationTypeOverBudget + +.. autoclass:: cuda.bindings.runtime.cudaSurfaceBoundaryMode + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceBoundaryMode.cudaBoundaryModeZero + + + Zero boundary mode + + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceBoundaryMode.cudaBoundaryModeClamp + + + Clamp boundary mode + + + .. 
autoattribute:: cuda.bindings.runtime.cudaSurfaceBoundaryMode.cudaBoundaryModeTrap + + + Trap boundary mode + +.. autoclass:: cuda.bindings.runtime.cudaSurfaceFormatMode + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceFormatMode.cudaFormatModeForced + + + Forced format mode + + + .. autoattribute:: cuda.bindings.runtime.cudaSurfaceFormatMode.cudaFormatModeAuto + + + Auto format mode + +.. autoclass:: cuda.bindings.runtime.cudaTextureAddressMode + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeWrap + + + Wrapping address mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeClamp + + + Clamp to edge address mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeMirror + + + Mirror address mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureAddressMode.cudaAddressModeBorder + + + Border address mode + +.. autoclass:: cuda.bindings.runtime.cudaTextureFilterMode + + .. autoattribute:: cuda.bindings.runtime.cudaTextureFilterMode.cudaFilterModePoint + + + Point filter mode + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureFilterMode.cudaFilterModeLinear + + + Linear filter mode + +.. autoclass:: cuda.bindings.runtime.cudaTextureReadMode + + .. autoattribute:: cuda.bindings.runtime.cudaTextureReadMode.cudaReadModeElementType + + + Read texture as specified element type + + + .. autoattribute:: cuda.bindings.runtime.cudaTextureReadMode.cudaReadModeNormalizedFloat + + + Read texture as normalized float + +.. autoclass:: cuda.bindings.runtime.cudaEglPlaneDesc +.. autoclass:: cuda.bindings.runtime.cudaEglFrame +.. autoclass:: cuda.bindings.runtime.cudaEglStreamConnection +.. autoclass:: cuda.bindings.runtime.cudaArray_t +.. autoclass:: cuda.bindings.runtime.cudaArray_const_t +.. autoclass:: cuda.bindings.runtime.cudaMipmappedArray_t +.. autoclass:: cuda.bindings.runtime.cudaMipmappedArray_const_t +.. autoclass:: cuda.bindings.runtime.cudaHostFn_t +.. autoclass:: cuda.bindings.runtime.CUuuid +.. autoclass:: cuda.bindings.runtime.cudaUUID_t +.. autoclass:: cuda.bindings.runtime.cudaIpcEventHandle_t +.. autoclass:: cuda.bindings.runtime.cudaIpcMemHandle_t +.. autoclass:: cuda.bindings.runtime.cudaMemFabricHandle_t +.. autoclass:: cuda.bindings.runtime.cudaStream_t +.. autoclass:: cuda.bindings.runtime.cudaEvent_t +.. autoclass:: cuda.bindings.runtime.cudaGraphicsResource_t +.. autoclass:: cuda.bindings.runtime.cudaExternalMemory_t +.. autoclass:: cuda.bindings.runtime.cudaExternalSemaphore_t +.. autoclass:: cuda.bindings.runtime.cudaGraph_t +.. autoclass:: cuda.bindings.runtime.cudaGraphNode_t +.. autoclass:: cuda.bindings.runtime.cudaUserObject_t +.. autoclass:: cuda.bindings.runtime.cudaGraphConditionalHandle +.. autoclass:: cuda.bindings.runtime.cudaFunction_t +.. autoclass:: cuda.bindings.runtime.cudaKernel_t +.. autoclass:: cuda.bindings.runtime.cudaMemPool_t +.. autoclass:: cuda.bindings.runtime.cudaGraphEdgeData +.. autoclass:: cuda.bindings.runtime.cudaGraphExec_t +.. autoclass:: cuda.bindings.runtime.cudaGraphInstantiateParams +.. autoclass:: cuda.bindings.runtime.cudaGraphExecUpdateResultInfo +.. autoclass:: cuda.bindings.runtime.cudaGraphDeviceNode_t +.. autoclass:: cuda.bindings.runtime.cudaLaunchMemSyncDomainMap +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttributeValue +.. autoclass:: cuda.bindings.runtime.cudaLaunchAttribute +.. autoclass:: cuda.bindings.runtime.cudaAsyncCallbackHandle_t +.. 
autoclass:: cuda.bindings.runtime.cudaAsyncNotificationInfo_t +.. autoclass:: cuda.bindings.runtime.cudaAsyncCallback +.. autoclass:: cuda.bindings.runtime.cudaSurfaceObject_t +.. autoclass:: cuda.bindings.runtime.cudaTextureObject_t +.. autoattribute:: cuda.bindings.runtime.CUDA_EGL_MAX_PLANES + + Maximum number of planes per frame + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocDefault + + Default page-locked allocation flag + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocPortable + + Pinned memory accessible by all CUDA contexts + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocMapped + + Map allocation into device space + +.. autoattribute:: cuda.bindings.runtime.cudaHostAllocWriteCombined + + Write-combined memory + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterDefault + + Default host memory registration flag + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterPortable + + Pinned memory accessible by all CUDA contexts + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterMapped + + Map registered memory into device space + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterIoMemory + + Memory-mapped I/O space + +.. autoattribute:: cuda.bindings.runtime.cudaHostRegisterReadOnly + + Memory-mapped read-only + +.. autoattribute:: cuda.bindings.runtime.cudaPeerAccessDefault + + Default peer addressing enable flag + +.. autoattribute:: cuda.bindings.runtime.cudaStreamDefault + + Default stream flag + +.. autoattribute:: cuda.bindings.runtime.cudaStreamNonBlocking + + Stream does not synchronize with stream 0 (the NULL stream) + +.. autoattribute:: cuda.bindings.runtime.cudaStreamLegacy + + Legacy stream handle + + + + Stream handle that can be passed as a cudaStream_t to use an implicit stream with legacy synchronization behavior. + + + + See the details of the synchronization behavior. + +.. autoattribute:: cuda.bindings.runtime.cudaStreamPerThread + + Per-thread stream handle + + + + Stream handle that can be passed as a cudaStream_t to use an implicit stream with per-thread synchronization behavior. + + + + See the details of the synchronization behavior. + +.. autoattribute:: cuda.bindings.runtime.cudaEventDefault + + Default event flag + +.. autoattribute:: cuda.bindings.runtime.cudaEventBlockingSync + + Event uses blocking synchronization + +.. autoattribute:: cuda.bindings.runtime.cudaEventDisableTiming + + Event will not record timing data + +.. autoattribute:: cuda.bindings.runtime.cudaEventInterprocess + + Event is suitable for interprocess use. cudaEventDisableTiming must be set + +.. autoattribute:: cuda.bindings.runtime.cudaEventRecordDefault + + Default event record flag + +.. autoattribute:: cuda.bindings.runtime.cudaEventRecordExternal + + Event is captured in the graph as an external event node when performing stream capture + +.. autoattribute:: cuda.bindings.runtime.cudaEventWaitDefault + + Default event wait flag + +.. autoattribute:: cuda.bindings.runtime.cudaEventWaitExternal + + Event is captured in the graph as an external event node when performing stream capture
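The allocation, stream, and event flags above are plain integer constants that combine with bitwise OR. As a brief, hedged sketch using the host-allocation flags:

```python
# Sketch: a one-megabyte page-locked allocation that is portable across
# CUDA contexts and mapped into the device address space.
from cuda.bindings import runtime

err, hptr = runtime.cudaHostAlloc(
    1 << 20, runtime.cudaHostAllocPortable | runtime.cudaHostAllocMapped)
assert err == runtime.cudaError_t.cudaSuccess
err, = runtime.cudaFreeHost(hptr)
```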
.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleAuto + + Device flag - Automatic scheduling + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleSpin + + Device flag - Spin default scheduling + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleYield + + Device flag - Yield default scheduling + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleBlockingSync + + Device flag - Use blocking synchronization + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceBlockingSync + + Device flag - Use blocking synchronization [Deprecated] + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceScheduleMask + + Device schedule flags mask + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceMapHost + + Device flag - Support mapped pinned allocations + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceLmemResizeToMax + + Device flag - Keep local memory allocation after launch + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceSyncMemops + + Device flag - Ensure synchronous memory operations on this context will synchronize + +.. autoattribute:: cuda.bindings.runtime.cudaDeviceMask + + Device flags mask + +.. autoattribute:: cuda.bindings.runtime.cudaArrayDefault + + Default CUDA array allocation flag + +.. autoattribute:: cuda.bindings.runtime.cudaArrayLayered + + Must be set in cudaMalloc3DArray to create a layered CUDA array + +.. autoattribute:: cuda.bindings.runtime.cudaArraySurfaceLoadStore + + Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayCubemap + + Must be set in cudaMalloc3DArray to create a cubemap CUDA array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayTextureGather + + Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayColorAttachment + + Must be set in cudaExternalMemoryGetMappedMipmappedArray if the mipmapped array is used as a color target in a graphics API + +.. autoattribute:: cuda.bindings.runtime.cudaArraySparse + + Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a sparse CUDA array or CUDA mipmapped array + +.. autoattribute:: cuda.bindings.runtime.cudaArrayDeferredMapping + + Must be set in cudaMallocArray, cudaMalloc3DArray or cudaMallocMipmappedArray in order to create a deferred mapping CUDA array or CUDA mipmapped array + +.. autoattribute:: cuda.bindings.runtime.cudaIpcMemLazyEnablePeerAccess + + Automatically enable peer access between remote devices as needed + +.. autoattribute:: cuda.bindings.runtime.cudaMemAttachGlobal + + Memory can be accessed by any stream on any device + +.. autoattribute:: cuda.bindings.runtime.cudaMemAttachHost + + Memory cannot be accessed by any stream on any device + +.. autoattribute:: cuda.bindings.runtime.cudaMemAttachSingle + + Memory can only be accessed by a single stream on the associated device + +.. autoattribute:: cuda.bindings.runtime.cudaOccupancyDefault + + Default behavior + +.. autoattribute:: cuda.bindings.runtime.cudaOccupancyDisableCachingOverride + + Assume global caching is enabled and cannot be automatically turned off + +.. autoattribute:: cuda.bindings.runtime.cudaCpuDeviceId + + Device id that represents the CPU + +.. autoattribute:: cuda.bindings.runtime.cudaInvalidDeviceId + + Device id that represents an invalid device + +.. autoattribute:: cuda.bindings.runtime.cudaInitDeviceFlagsAreValid + + Tell the CUDA runtime that DeviceFlags is being set in cudaInitDevice call + +.. autoattribute:: cuda.bindings.runtime.cudaCooperativeLaunchMultiDeviceNoPreSync + + If set, each kernel launched as part of :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` only waits for prior work in the stream corresponding to that GPU to complete before the kernel begins execution. + +.. 
autoattribute:: cuda.bindings.runtime.cudaCooperativeLaunchMultiDeviceNoPostSync + + If set, any subsequent work pushed in a stream that participated in a call to :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice` will only wait for the kernel launched on the GPU corresponding to that stream to complete before it begins execution. + +.. autoattribute:: cuda.bindings.runtime.cudaArraySparsePropertiesSingleMipTail + + Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers + +.. autoattribute:: cuda.bindings.runtime.CUDART_CB +.. autoattribute:: cuda.bindings.runtime.CU_UUID_HAS_BEEN_DEFINED + + CUDA UUID types + +.. autoattribute:: cuda.bindings.runtime.CUDA_IPC_HANDLE_SIZE + + CUDA IPC Handle Size + +.. autoattribute:: cuda.bindings.runtime.cudaExternalMemoryDedicated + + Indicates that the external memory object is a dedicated resource + +.. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreSignalSkipNvSciBufMemSync + + When the `flags` parameter of :py:obj:`~.cudaExternalSemaphoreSignalParams` contains this flag, it indicates that signaling an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.runtime.cudaExternalSemaphoreWaitSkipNvSciBufMemSync + + When the `flags` parameter of :py:obj:`~.cudaExternalSemaphoreWaitParams` contains this flag, it indicates that waiting on an external semaphore object should skip performing appropriate memory synchronization operations over all the external memory objects that are imported as :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, which otherwise are performed by default to ensure data coherency with other importers of the same NvSciBuf memory objects. + +.. autoattribute:: cuda.bindings.runtime.cudaNvSciSyncAttrSignal + + When the `flags` parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs a signaler-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.runtime.cudaNvSciSyncAttrWait + + When the `flags` parameter of :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` is set to this, it indicates that the application needs a waiter-specific NvSciSyncAttr to be filled by :py:obj:`~.cudaDeviceGetNvSciSyncAttributes`. + +.. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodePortDefault + + This port activates when the kernel has finished executing. + +.. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodePortProgrammatic + + This port activates when all blocks of the kernel have performed cudaTriggerProgrammaticLaunchCompletion() or have terminated. It must be used with edge type :py:obj:`~.cudaGraphDependencyTypeProgrammatic`. See also :py:obj:`~.cudaLaunchAttributeProgrammaticEvent`. + +.. autoattribute:: cuda.bindings.runtime.cudaGraphKernelNodePortLaunchCompletion + + This port activates when all blocks of the kernel have begun execution. See also :py:obj:`~.cudaLaunchAttributeLaunchCompletionEvent`. + +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttrID +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributeAccessPolicyWindow +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributeSynchronizationPolicy +.. 
autoattribute:: cuda.bindings.runtime.cudaStreamAttributeMemSyncDomainMap +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributeMemSyncDomain +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttributePriority +.. autoattribute:: cuda.bindings.runtime.cudaStreamAttrValue +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttrID +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeAccessPolicyWindow +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeCooperative +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributePriority +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeClusterDimension +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeClusterSchedulingPolicyPreference +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeMemSyncDomainMap +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeMemSyncDomain +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributePreferredSharedMemoryCarveout +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttributeDeviceUpdatableKernelNode +.. autoattribute:: cuda.bindings.runtime.cudaKernelNodeAttrValue +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType1D +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType2D +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType3D +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceTypeCubemap +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType1DLayered +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceType2DLayered +.. autoattribute:: cuda.bindings.runtime.cudaSurfaceTypeCubemapLayered +.. autoattribute:: cuda.bindings.runtime.cudaTextureType1D +.. autoattribute:: cuda.bindings.runtime.cudaTextureType2D +.. autoattribute:: cuda.bindings.runtime.cudaTextureType3D +.. autoattribute:: cuda.bindings.runtime.cudaTextureTypeCubemap +.. autoattribute:: cuda.bindings.runtime.cudaTextureType1DLayered +.. autoattribute:: cuda.bindings.runtime.cudaTextureType2DLayered +.. autoattribute:: cuda.bindings.runtime.cudaTextureTypeCubemapLayered diff --git a/docs_src/source/overview.md b/docs_src/source/overview.md index d7b068b3..155be761 100644 --- a/docs_src/source/overview.md +++ b/docs_src/source/overview.md @@ -49,7 +49,7 @@ Python package. In this example, you copy data from the host to device. You need [NumPy](https://numpy.org/doc/stable/contents.html) to store data on the host. ```{code-cell} python -from cuda import cuda, nvrtc +from cuda.bindings import driver, nvrtc import numpy as np ``` @@ -60,9 +60,9 @@ object model. 
```{code-cell} python def _cudaGetErrorEnum(error): - if isinstance(error, cuda.CUresult): - err, name = cuda.cuGetErrorName(error) - return name if err == cuda.CUresult.CUDA_SUCCESS else "" + if isinstance(error, driver.CUresult): + err, name = driver.cuGetErrorName(error) + return name if err == driver.CUresult.CUDA_SUCCESS else "" elif isinstance(error, nvrtc.nvrtcResult): return nvrtc.nvrtcGetErrorString(error)[1] else: @@ -110,14 +110,14 @@ the program is compiled to target our local compute capability architecture with ```{code-cell} python # Initialize CUDA Driver API -checkCudaErrors(cuda.cuInit(0)) +checkCudaErrors(driver.cuInit(0)) # Retrieve handle for device 0 -cuDevice = checkCudaErrors(cuda.cuDeviceGet(0)) +cuDevice = checkCudaErrors(driver.cuDeviceGet(0)) # Derive target architecture for device 0 -major = checkCudaErrors(cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevice)) -minor = checkCudaErrors(cuda.cuDeviceGetAttribute(cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevice)) +major = checkCudaErrors(driver.cuDeviceGetAttribute(driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevice)) +minor = checkCudaErrors(driver.cuDeviceGetAttribute(driver.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevice)) arch_arg = bytes(f'--gpu-architecture=compute_{major}{minor}', 'ascii') # Create program @@ -140,7 +140,7 @@ following code example, a handle for compute device 0 is passed to ```{code-cell} python # Create context -context = checkCudaErrors(cuda.cuCtxCreate(0, cuDevice)) +context = checkCudaErrors(driver.cuCtxCreate(0, cuDevice)) ``` With a CUDA context created on device 0, load the PTX generated earlier into a @@ -152,8 +152,8 @@ After loading into the module, extract a specific kernel with # Load PTX as module data and retrieve function ptx = np.char.array(ptx) # Note: Incompatible --gpu-architecture would be detected here -module = checkCudaErrors(cuda.cuModuleLoadData(ptx.ctypes.data)) -kernel = checkCudaErrors(cuda.cuModuleGetFunction(module, b"saxpy")) +module = checkCudaErrors(driver.cuModuleLoadData(ptx.ctypes.data)) +kernel = checkCudaErrors(driver.cuModuleGetFunction(module, b"saxpy")) ``` Next, get all your data prepared and transferred to the GPU. For increased @@ -185,16 +185,16 @@ Python doesn’t have a natural concept of pointers, yet `cuMemcpyHtoDAsync` exp XX. 
```{code-cell} python -dXclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) -dYclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) -dOutclass = checkCudaErrors(cuda.cuMemAlloc(bufferSize)) +dXclass = checkCudaErrors(driver.cuMemAlloc(bufferSize)) +dYclass = checkCudaErrors(driver.cuMemAlloc(bufferSize)) +dOutclass = checkCudaErrors(driver.cuMemAlloc(bufferSize)) + -stream = checkCudaErrors(cuda.cuStreamCreate(0)) +stream = checkCudaErrors(driver.cuStreamCreate(0)) + -checkCudaErrors(cuda.cuMemcpyHtoDAsync( +checkCudaErrors(driver.cuMemcpyHtoDAsync( dXclass, hX.ctypes.data, bufferSize, stream )) -checkCudaErrors(cuda.cuMemcpyHtoDAsync( +checkCudaErrors(driver.cuMemcpyHtoDAsync( dYclass, hY.ctypes.data, bufferSize, stream )) ``` @@ -223,7 +223,7 @@ args = np.array([arg.ctypes.data for arg in args], dtype=np.uint64) Now the kernel can be launched: ```{code-cell} python -checkCudaErrors(cuda.cuLaunchKernel( +checkCudaErrors(driver.cuLaunchKernel( kernel, NUM_BLOCKS, # grid x dim 1, # grid y dim @@ -237,10 +237,10 @@ checkCudaErrors(cuda.cuLaunchKernel( 0, # extra (ignore) )) -checkCudaErrors(cuda.cuMemcpyDtoHAsync( +checkCudaErrors(driver.cuMemcpyDtoHAsync( hOut.ctypes.data, dOutclass, bufferSize, stream )) -checkCudaErrors(cuda.cuStreamSynchronize(stream)) +checkCudaErrors(driver.cuStreamSynchronize(stream)) ``` The `cuLaunchKernel` function takes the compiled module kernel and execution @@ -262,12 +262,12 @@ Perform verification of the data to ensure correctness and finish the code with memory clean up. ```{code-cell} python -checkCudaErrors(cuda.cuStreamDestroy(stream)) -checkCudaErrors(cuda.cuMemFree(dXclass)) -checkCudaErrors(cuda.cuMemFree(dYclass)) -checkCudaErrors(cuda.cuMemFree(dOutclass)) -checkCudaErrors(cuda.cuModuleUnload(module)) -checkCudaErrors(cuda.cuCtxDestroy(context)) +checkCudaErrors(driver.cuStreamDestroy(stream)) +checkCudaErrors(driver.cuMemFree(dXclass)) +checkCudaErrors(driver.cuMemFree(dYclass)) +checkCudaErrors(driver.cuMemFree(dOutclass)) +checkCudaErrors(driver.cuModuleUnload(module)) +checkCudaErrors(driver.cuCtxDestroy(context)) ``` ## Performance diff --git a/docs_src/source/release.md b/docs_src/source/release.md index 03f9ab20..c3ae5a30 100644 --- a/docs_src/source/release.md +++ b/docs_src/source/release.md @@ -5,6 +5,7 @@ maxdepth: 3 --- + 12.6.1 12.6.0 12.5.0 12.4.0 @@ -13,6 +14,7 @@ maxdepth: 3 12.2.0 12.1.0 12.0.0 + 11.8.4 11.8.3 11.8.2 11.8.1 diff --git a/docs_src/source/release/11.8.4-notes.md b/docs_src/source/release/11.8.4-notes.md new file mode 100644 index 00000000..9cae2915 --- /dev/null +++ b/docs_src/source/release/11.8.4-notes.md @@ -0,0 +1,32 @@ +# CUDA Python 11.8.4 Release notes + +Released on October 7, 2024 + +## Highlights +- Resolve [Issue #89](https://github.com/NVIDIA/cuda-python/issues/89): Fix getLocalRuntimeVersion searching for wrong libcudart version +- Resolve [Issue #90](https://github.com/NVIDIA/cuda-python/issues/90): Use new layout in preparation for cuda-python becoming a metapackage + +## Limitations + +### CUDA Functions Not Supported in this Release + +- Symbol APIs + - cudaGraphExecMemcpyNodeSetParamsFromSymbol + - cudaGraphExecMemcpyNodeSetParamsToSymbol + - cudaGraphAddMemcpyNodeToSymbol + - cudaGraphAddMemcpyNodeFromSymbol + - cudaGraphMemcpyNodeSetParamsToSymbol + - cudaGraphMemcpyNodeSetParamsFromSymbol + - cudaMemcpyToSymbol + - cudaMemcpyFromSymbol + - cudaMemcpyToSymbolAsync + - cudaMemcpyFromSymbolAsync + - cudaGetSymbolAddress + - cudaGetSymbolSize + - cudaGetFuncBySymbol +- Launch Options + - cudaLaunchKernel + - cudaLaunchCooperativeKernel + - cudaLaunchCooperativeKernelMultiDevice +- cudaSetValidDevices +- cudaVDPAUSetVDPAUDevice
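The symbol APIs listed above have a driver-level workaround: resolve the symbol's device address with `cuModuleGetGlobal` and copy to it directly. A hedged sketch under the assumption that `module` is a CUmodule loaded as in the overview (e.g. via `cuModuleLoadData`) from source declaring `__device__ float coeff;` — both names are illustrative only:

```python
# Hedged stand-in for the unsupported cudaMemcpyToSymbol, via the driver API.
# Assumes `module` is a loaded CUmodule whose source declared:
#     __device__ float coeff;
import numpy as np
from cuda.bindings import driver

hCoeff = np.array([1.5], dtype=np.float32)

# Resolve the symbol's device address and size within the module.
err, dptr, nbytes = driver.cuModuleGetGlobal(module, b"coeff")
assert err == driver.CUresult.CUDA_SUCCESS

# A plain synchronous host-to-device copy replaces cudaMemcpyToSymbol.
err, = driver.cuMemcpyHtoD(dptr, hCoeff.ctypes.data, nbytes)
assert err == driver.CUresult.CUDA_SUCCESS
```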
diff --git a/docs_src/source/release/12.6.1-notes.md b/docs_src/source/release/12.6.1-notes.md new file mode 100644 index 00000000..d5fe82ac --- /dev/null +++ b/docs_src/source/release/12.6.1-notes.md @@ -0,0 +1,33 @@ +# CUDA Python 12.6.1 Release notes + +Released on October 7, 2024 + +## Highlights +- Resolve [Issue #90](https://github.com/NVIDIA/cuda-python/issues/90): Use new layout in preparation for cuda-python becoming a metapackage + +## Limitations + +### CUDA Functions Not Supported in this Release + +- Symbol APIs + - cudaGraphExecMemcpyNodeSetParamsFromSymbol + - cudaGraphExecMemcpyNodeSetParamsToSymbol + - cudaGraphAddMemcpyNodeToSymbol + - cudaGraphAddMemcpyNodeFromSymbol + - cudaGraphMemcpyNodeSetParamsToSymbol + - cudaGraphMemcpyNodeSetParamsFromSymbol + - cudaMemcpyToSymbol + - cudaMemcpyFromSymbol + - cudaMemcpyToSymbolAsync + - cudaMemcpyFromSymbolAsync + - cudaGetSymbolAddress + - cudaGetSymbolSize + - cudaGetFuncBySymbol +- Launch Options + - cudaLaunchKernel + - cudaLaunchCooperativeKernel + - cudaLaunchCooperativeKernelMultiDevice +- cudaSetValidDevices +- cudaVDPAUSetVDPAUDevice +- cudaFuncGetName +- cudaFuncGetParamInfo
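The layout change at the heart of this patch can be summarized in one snippet. A short sketch of the two import styles it supports side by side; the legacy spellings keep working through the compatibility shims added under `cuda/cuda/`:

```python
# New module layout introduced by this patch:
from cuda.bindings import driver, runtime, nvrtc

# Legacy spellings, preserved for backwards compatibility:
from cuda import cuda, cudart

err, = driver.cuInit(0)  # new path
err, = cuda.cuInit(0)    # legacy alias resolving to the same binding
```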