#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2023 Herman Ye @Auromix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Herman Ye @Auromix
# Description: Test CUDA and PyTorch environment
import torch
# import torchvision


def main():
    # Check PyTorch version
    torch_version = torch.__version__
    print(f"Torch Version: {torch_version}")

    # Check torchvision version
    # torchvision_version = torchvision.__version__
    # print(f"Torchvision Version: {torchvision_version}")

    # Check if CUDA is available
    cuda_available = torch.cuda.is_available()
    print(f"CUDA with torch available: {cuda_available}")

    # Check CUDA version
    cuda_version = torch.version.cuda if cuda_available else "N/A (CUDA not available)"
    print(f"CUDA Version: {cuda_version}")

    # Check cuDNN version
    print(f"cuDNN Version: {str(torch.backends.cudnn.version())}")

    if cuda_available:
        # Get the current CUDA device
        current_device = torch.cuda.current_device()
        print(f"Current CUDA device: {current_device}")
        # Get the number of CUDA devices
        num_devices = torch.cuda.device_count()
        print(f"Number of CUDA devices: {num_devices}")
        # Get the name of the current CUDA device
        device_name = torch.cuda.get_device_name(current_device)
        print(f"Name of current CUDA device: {device_name}")

    # TEST
    print("")
    print("TESTING CUDA WITH PYTORCH")
    print("========================")

    # Generate a random matrix on CPU
    x = torch.rand(5, 3)
    print("Random matrix on CPU:")
    print(x)

    if cuda_available:
        # Move the matrix to CUDA (GPU)
        x = x.cuda()
        print("\nRandom matrix on CUDA (GPU):")
        print(x)

    if cuda_available:
        print("\nTensor operations:")
        a = torch.cuda.FloatTensor(2).zero_()
        b = torch.randn(2).cuda()
        c = a + b
        print('Tensor a = ' + str(a))
        print('Tensor b = ' + str(b))
        print('Tensor c = ' + str(c))


# Add this function to your code if you want to check the environment
# before running your code:
# cuda_pytorch_environment_check()
def cuda_pytorch_environment_check(print_info=True, print_test=False):
    try:
        # Check PyTorch version
        torch_version = torch.__version__
        # # Check torchvision version
        # torchvision_version = torchvision.__version__
        # Check if CUDA is available
        cuda_available = torch.cuda.is_available()
        # Check CUDA version
        cuda_version = torch.version.cuda if cuda_available else "N/A (CUDA not available)"
        # Check cuDNN version
        cudnn_version = str(torch.backends.cudnn.version())
        # Get the current CUDA device
        current_device = torch.cuda.current_device()
        # Get the number of CUDA devices
        num_devices = torch.cuda.device_count()
        # Get the name of the current CUDA device
        device_name = torch.cuda.get_device_name(current_device)

        # Print info
        if print_info:
            print("CUDA PYTORCH ENVIRONMENT CHECK")
            print("##############################")
            print(f"Torch Version: {torch_version}")
            # print(f"Torchvision Version: {torchvision_version}")
            print(f"CUDA with torch available: {cuda_available}")
            print(f"CUDA Version: {cuda_version}")
            print(f"cuDNN Version: {cudnn_version}")
            print(f"Current CUDA device: {current_device}")
            print(f"Number of CUDA devices: {num_devices}")
            print(f"Name of current CUDA device: {device_name}")
            print("##############################")

        # Test CUDA tensors
        test_cuda_a = torch.cuda.FloatTensor(2).zero_()
        test_cuda_b = torch.randn(2).cuda()
        test_cuda_c = test_cuda_a + test_cuda_b
        test_cuda_d = torch.zeros(4, device="cuda:0")

        # Print test results
        if print_test:
            print("\nCUDA TENSOR TEST")
            print("##############################")
            print("Tensor operations:")
            print("Tensor a (float 2) = " + str(test_cuda_a))
            print("Tensor b (randn 2) = " + str(test_cuda_b))
            print("Tensor c ( a + b ) = " + str(test_cuda_c))
            print("Tensor d (zeros 4) = " + str(test_cuda_d))
            print("##############################")
    except Exception as e:
        print("CUDA PYTORCH CHECK FAILED")
        print(f"Error: {e}")
    else:
        print("\nCUDA PYTORCH CHECK DONE\n")


if __name__ == "__main__":
    main()
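
# Example usage from another script (a minimal sketch; the module name
# `test_cuda` below is an assumption based on this file's name and is not
# part of the original script -- adjust it to wherever you save this file):
#
#     from test_cuda import cuda_pytorch_environment_check
#
#     # Print environment info and run the CUDA tensor test before your own code
#     cuda_pytorch_environment_check(print_info=True, print_test=True)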