simpleP2P.cu
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * This sample demonstrates a combination of the Peer-to-Peer (P2P) and
 * Unified Virtual Address Space (UVA) features introduced in CUDA 4.0.
 */
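/*
 * With UVA, host and device allocations share a single virtual address
 * space, so cudaMemcpy can infer the copy direction from the pointer
 * values (cudaMemcpyDefault); with P2P enabled, a kernel running on one
 * GPU can dereference memory resident on another GPU directly.
 *
 * Build sketch (the include path is an assumption; helper_cuda.h and
 * helper_functions.h ship in the CUDA Samples "Common" directory):
 *   nvcc -I<path-to-cuda-samples>/Common simpleP2P.cu -o simpleP2P
 */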
// includes, system
#include <stdlib.h>
#include <stdio.h>
// CUDA includes
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h>  // helper functions shared across the CUDA Samples
__global__ void SimpleKernel(float *src, float *dst) {
  // Just a dummy kernel: it doubles each element, which is enough to
  // verify that everything worked
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] * 2.0f;
}
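// UVA is only available to 64-bit processes, so the sample is waived on
// 32-bit builds.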
inline bool IsAppBuiltAs64() { return sizeof(void *) == 8; }
int main(int argc, char **argv) {
printf("[%s] - Starting...\n", argv[0]);
if (!IsAppBuiltAs64()) {
printf(
"%s is only supported with on 64-bit OSs and the application must be "
"built as a 64-bit target. Test is being waived.\n",
argv[0]);
exit(EXIT_WAIVED);
}
// Number of GPUs
printf("Checking for multiple GPUs...\n");
int gpu_n;
checkCudaErrors(cudaGetDeviceCount(&gpu_n));
printf("CUDA-capable device count: %i\n", gpu_n);
if (gpu_n < 2) {
printf(
"Two or more GPUs with Peer-to-Peer access capability are required for "
"%s.\n",
argv[0]);
printf("Waiving test.\n");
exit(EXIT_WAIVED);
}
// Query device properties
cudaDeviceProp prop[64];
  int gpuid[2];  // we want to find the first two GPUs that support P2P
for (int i = 0; i < gpu_n; i++) {
checkCudaErrors(cudaGetDeviceProperties(&prop[i], i));
}
// Check possibility for peer access
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
int can_access_peer;
int p2pCapableGPUs[2]; // We take only 1 pair of P2P capable GPUs
p2pCapableGPUs[0] = p2pCapableGPUs[1] = -1;
// Show all the combinations of supported P2P GPUs
for (int i = 0; i < gpu_n; i++) {
for (int j = 0; j < gpu_n; j++) {
if (i == j) {
continue;
}
checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer, i, j));
printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[i].name,
i, prop[j].name, j, can_access_peer ? "Yes" : "No");
if (can_access_peer && p2pCapableGPUs[0] == -1) {
p2pCapableGPUs[0] = i;
p2pCapableGPUs[1] = j;
}
}
}
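  // Capability is reported per direction; the code below assumes the first
  // capable (i, j) pair also supports the reverse direction, which holds on
  // typical hardware (otherwise cudaDeviceEnablePeerAccess would fail).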
if (p2pCapableGPUs[0] == -1 || p2pCapableGPUs[1] == -1) {
printf(
"Two or more GPUs with Peer-to-Peer access capability are required for "
"%s.\n",
argv[0]);
printf(
"Peer to Peer access is not available amongst GPUs in the system, "
"waiving test.\n");
exit(EXIT_WAIVED);
}
// Use first pair of p2p capable GPUs detected.
gpuid[0] = p2pCapableGPUs[0];
gpuid[1] = p2pCapableGPUs[1];
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0],
gpuid[1]);
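  // cudaDeviceEnablePeerAccess is unidirectional: it grants the current
  // device access to the peer, so each direction must be enabled separately.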
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[1], 0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[0], 0));
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 16 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n",
int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
float *g0;
checkCudaErrors(cudaMalloc(&g0, buf_size));
checkCudaErrors(cudaSetDevice(gpuid[1]));
float *g1;
checkCudaErrors(cudaMalloc(&g1, buf_size));
float *h0;
checkCudaErrors(
cudaMallocHost(&h0, buf_size)); // Automatically portable with UVA
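  // Under UVA the pinned host allocation shares the unified address space
  // with device memory, so h0 also works with cudaMemcpyDefault.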
// Create CUDA event handles
printf("Creating event handles...\n");
cudaEvent_t start_event, stop_event;
float time_memcpy;
int eventflags = cudaEventBlockingSync;
checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags));
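  // cudaEventBlockingSync makes cudaEventSynchronize() yield the CPU
  // instead of spin-waiting while the copies complete.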
// P2P memcopy() benchmark
checkCudaErrors(cudaEventRecord(start_event, 0));
for (int i = 0; i < 100; i++) {
    // With UVA we don't need to specify source and target devices; the
    // runtime infers them from the pointer values.
// Ping-pong copy between GPUs
if (i % 2 == 0) {
checkCudaErrors(cudaMemcpy(g1, g0, buf_size, cudaMemcpyDefault));
} else {
checkCudaErrors(cudaMemcpy(g0, g1, buf_size, cudaMemcpyDefault));
}
}
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event));
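  // Effective bandwidth: 100 iterations of buf_size bytes each, with
  // time_memcpy in milliseconds, scaled to GB/s.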
printf("cudaMemcpyPeer / cudaMemcpy between GPU%d and GPU%d: %.2fGB/s\n",
gpuid[0], gpuid[1],
(1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f /
1024.0f / 1024.0f);
// Prepare host buffer and copy to GPU 0
printf("Preparing host buffer and memcpy to GPU%d...\n", gpuid[0]);
for (int i = 0; i < buf_size / sizeof(float); i++) {
h0[i] = float(i % 4096);
}
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaMemcpy(g0, h0, buf_size, cudaMemcpyDefault));
// Kernel launch configuration
const dim3 threads(512, 1);
const dim3 blocks((buf_size / sizeof(float)) / threads.x, 1);
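  // buf_size holds 16M floats, which divides evenly into 512-thread blocks
  // (32768 of them), so SimpleKernel needs no bounds check.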
// Run kernel on GPU 1, reading input from the GPU 0 buffer, writing
// output to the GPU 1 buffer
printf(
"Run kernel on GPU%d, taking source data from GPU%d and writing to "
"GPU%d...\n",
gpuid[1], gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[1]));
SimpleKernel<<<blocks, threads>>>(g0, g1);
checkCudaErrors(cudaDeviceSynchronize());
// Run kernel on GPU 0, reading input from the GPU 1 buffer, writing
// output to the GPU 0 buffer
printf(
"Run kernel on GPU%d, taking source data from GPU%d and writing to "
"GPU%d...\n",
gpuid[0], gpuid[1], gpuid[0]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
SimpleKernel<<<blocks, threads>>>(g1, g0);
checkCudaErrors(cudaDeviceSynchronize());
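  // Each element of g0 now holds its original value times 4.0f (one
  // '* 2.0f' per kernel run); the host verifies this below.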
// Copy data back to host and verify
printf("Copy data back to host from GPU%d and verify results...\n", gpuid[0]);
checkCudaErrors(cudaMemcpy(h0, g0, buf_size, cudaMemcpyDefault));
int error_count = 0;
for (int i = 0; i < buf_size / sizeof(float); i++) {
    // Re-generate the input data and apply the '* 2.0f' scaling from both
    // kernel runs
if (h0[i] != float(i % 4096) * 2.0f * 2.0f) {
printf("Verification error @ element %i: val = %f, ref = %f\n", i, h0[i],
(float(i % 4096) * 2.0f * 2.0f));
if (error_count++ > 10) {
break;
}
}
}
// Disable peer access (also unregisters memory for non-UVA cases)
printf("Disabling peer access...\n");
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[1]));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[0]));
// Cleanup and shutdown
printf("Shutting down...\n");
checkCudaErrors(cudaEventDestroy(start_event));
checkCudaErrors(cudaEventDestroy(stop_event));
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaFree(g0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaFree(g1));
checkCudaErrors(cudaFreeHost(h0));
for (int i = 0; i < gpu_n; i++) {
checkCudaErrors(cudaSetDevice(i));
}
if (error_count != 0) {
printf("Test failed!\n");
exit(EXIT_FAILURE);
} else {
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
}