# Environment Setup
On Windows, install CUDA and Visual Studio 2013 first.
Create an empty console project and add a new file named "test.cu" to it.
## Configure the include and library directories
Add the CUDA include and library directories to the project's search paths:
```
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0\include
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0\lib\x64
```
Then add the following libraries to the linker's additional dependencies:
```
cublas.lib
cublas_device.lib
cuda.lib
cudadevrt.lib
cudart.lib
cudart_static.lib
cufft.lib
cufftw.lib
curand.lib
cusolver.lib
cusparse.lib
nppc.lib
nppial.lib
nppicc.lib
nppicom.lib
nppidei.lib
nppif.lib
nppig.lib
nppim.lib
nppist.lib
nppisu.lib
nppitc.lib
npps.lib
nvblas.lib
nvcuvid.lib
nvgraph.lib
nvml.lib
nvrtc.lib
OpenCL.lib
kernel32.lib
user32.lib
gdi32.lib
winspool.lib
comdlg32.lib
advapi32.lib
shell32.lib
ole32.lib
oleaut32.lib
uuid.lib
odbc32.lib
odbccp32.lib
```
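These libraries are normally listed in the project's linker settings. As a sketch of an MSVC-specific alternative (not part of the original setup), the same effect can be achieved with `#pragma comment` directives in the source file; for the sample program below, the CUDA runtime library is the only one that is actually required:

```
// MSVC-specific alternative to editing the linker settings:
// request libraries directly from source code.
#pragma comment(lib, "cudart.lib")     // CUDA runtime -- all this sample needs
//#pragma comment(lib, "cublas.lib")   // only if you call cuBLAS
//#pragma comment(lib, "curand.lib")   // only if you call cuRAND
```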
## Configure the project and file properties
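With the standard CUDA 9.0 Visual Studio integration, the usual steps are: right-click the project, open Build Dependencies → Build Customizations, and enable "CUDA 9.0(.targets, .props)"; then open the properties of test.cu and set its Item Type to "CUDA C/C++", so the file is compiled by nvcc rather than by the MSVC C++ compiler.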
# The First CUDA Program
```
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
__global__ void addKernel(int *c, int *a, int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
cudaError_t addWithCuda(int *c, const int *a, const int *b, const int size);
int main()
{
const int arraysize = 5;
const int a[arraysize] = { 1, 2, 3, 4, 5 };
const int b[arraysize] = { 10, 20, 30, 40, 50};
int c[arraysize] = { 0 };
cudaError_t cudaStatus = addWithCuda(c, a, b, arraysize);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{ 1, 2, 3, 4, 5 } + { 10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d} \n",
c[0], c[1], c[2], c[3], c[4]);
cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}
return 0;
}
cudaError_t addWithCuda(int *c, const int *a, const int *b, const int size)
{
int * dev_a = NULL;
int * dev_b = NULL;
int * dev_c = NULL;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr,"cudaSetDevice failed!");
return cudaStatus;
}
cudaStatus = cudaMalloc((void**)&dev_c, size*sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size*sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size*sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a,a, size*sizeof(int),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b,b, size*sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed");
goto Error;
}
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadSynchronize failed");
goto Error;
}
cudaStatus = cudaMemcpy(c,dev_c,size*sizeof(int),cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_b);
cudaFree(dev_a);
return cudaStatus;
}
```
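Two calls above, cudaThreadSynchronize and cudaThreadExit, still work under CUDA 9.0 but are deprecated; cudaDeviceSynchronize and cudaDeviceReset are their current equivalents. A minimal sketch of how the launch-and-synchronize step inside addWithCuda could be written with the current APIs, also using cudaGetLastError to report why a launch failed:

```
// Launch the kernel, then check for launch errors and kernel errors.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

cudaError_t err = cudaGetLastError();      // errors from the launch itself
if (err != cudaSuccess)
    fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));

err = cudaDeviceSynchronize();             // errors raised while the kernel ran
if (err != cudaSuccess)
    fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
```

cudaDeviceReset would likewise replace the cudaThreadExit call at the end of main.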
Run the program:
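If the build succeeds and a CUDA-capable GPU is present, it should print the element-wise sum:

```
{ 1, 2, 3, 4, 5 } + { 10, 20, 30, 40, 50} = {11, 22, 33, 44, 55}
```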
Original article: https://www.cnblogs.com/xiaojianliu/p/11129812.html