CUDA Convolution
Published: 2019-03-01


C++ and CUDA Implementation

The code below shows a plain C++ (CPU) implementation of 2D convolution and its CUDA-accelerated counterpart, and compares their running times.
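To make the operation explicit (this restatement is mine, not part of the original post): both versions compute a 2D convolution with zero padding, i.e. for every output position,

c[row * size + col] = sum over 0 <= i, j < kernelSize of b[i * kernelSize + j] * a[(row - kernelSize/2 + i) * size + (col - kernelSize/2 + j)],

where any input index that falls outside the size x size matrix contributes 0.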

Main Function and CUDA Host Code

In the main function we set the matrix size, allocate the input matrix a, the GPU result buffer c, and the CPU result buffer d, and fill a with random values via rand; the 5x5 filter b is filled with ones. The convolution is then run once on the CPU and once on the GPU (through the host wrapper mulWithCuda, which allocates device memory, copies the data, and launches the kernel), and the two timings are compared.

#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cuda_runtime.h>

// dim1, dim2 and N are used but never defined in the original post.
// The values below are assumptions: a dim1 x dim2 thread block, with each
// thread computing N consecutive output rows.
#define dim1 16
#define dim2 16
#define N 2

// Kernel defined in the next section.
__global__ void conv(float* a, float* b, float* c, int size, int kernelSize);

// Host-side wrapper: allocates device memory, copies the inputs,
// launches the kernel and copies the result back.
extern "C" int mulWithCuda(float* c, float* a, float* b, int size, int kernelSize) {
    float* dev_a = 0;
    float* dev_b = 0;
    float* dev_c = 0;
    cudaError_t cudaStatus;
    // Launch configuration (declared up front so the error gotos below
    // do not jump over an initialization).
    dim3 block(dim1, dim2);
    dim3 grid((size - 1) / dim1 / N + 1, (size - 1) / dim2 + 1);

    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_c, size * size * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * size * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, kernelSize * kernelSize * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // The kernel accumulates into dev_c, so it must start from zero.
    cudaStatus = cudaMemset(dev_c, 0, size * size * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemset failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_a, a, size * size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    conv<<<grid, block>>>(dev_a, dev_b, dev_c, size, kernelSize);

    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "conv launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching conv!\n", cudaStatus);
        goto Error;
    }
    cudaStatus = cudaMemcpy(c, dev_c, size * size * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}

int main() {
    int size = 10;
    float* a = (float*)malloc(size * size * sizeof(float));
    float* c = (float*)malloc(size * size * sizeof(float));  // GPU result
    float* d = (float*)malloc(size * size * sizeof(float));  // CPU result
    srand(time(NULL));
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            a[col + row * size] = (float)rand() / (RAND_MAX / 10);
            c[row * size + col] = 0;
            d[row * size + col] = 0;  // d is accumulated into below, so it must also start from zero
        }
    }
    int kernelSize = 5;
    float* b = (float*)malloc(kernelSize * kernelSize * sizeof(float));
    for (int i = 0; i < kernelSize * kernelSize; ++i) {
        b[i] = 1;
    }

    // CPU convolution with zero padding at the borders.
    clock_t start = clock();
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            for (int i = 0; i < kernelSize; ++i) {
                for (int j = 0; j < kernelSize; ++j) {
                    float v = 0;
                    int curRow = row - kernelSize / 2 + i;
                    int curCol = col - kernelSize / 2 + j;
                    if (curRow >= 0 && curCol >= 0 && curRow < size && curCol < size) {
                        v = a[curRow * size + curCol];
                    }
                    d[row * size + col] += b[i * kernelSize + j] * v;
                }
            }
        }
    }
    clock_t end = clock();
    double interval = double(end - start) / CLOCKS_PER_SEC;
    printf("CPU time: %lf\n", interval);

    // GPU convolution.
    clock_t start1 = clock();
    mulWithCuda(c, a, b, size, kernelSize);
    clock_t end1 = clock();
    double interval1 = double(end1 - start1) / CLOCKS_PER_SEC;
    printf("GPU time: %lf\n", interval1);
    printf("Speedup: %lf\n", interval / interval1);

    // Print the input, the filter, and both results.
    printf("\n");
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            printf("%f ", a[col + row * size]);
        }
        printf("\n");
    }
    printf("\n");
    for (int row = 0; row < kernelSize; ++row) {
        for (int col = 0; col < kernelSize; ++col) {
            printf("%f ", b[col + row * kernelSize]);
        }
        printf("\n");
    }
    printf("\n");
    printf("GPU result:\n");
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            printf("%f ", c[col + row * size]);
        }
        printf("\n");
    }
    printf("\n");
    printf("CPU result:\n");
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            printf("%f ", d[col + row * size]);  // the original printed c here again
        }
        printf("\n");
    }
    printf("\n");
    return 0;
}
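Since both results are printed side by side, an element-wise comparison makes the check less error-prone. The snippet below is not part of the original post; it is a minimal sketch that could be dropped in just before the final return, assuming c holds the GPU result and d the CPU result as above:

    // Optional: compare the CPU result d against the GPU result c element-wise.
    float maxDiff = 0.0f;
    for (int idx = 0; idx < size * size; ++idx) {
        float diff = c[idx] - d[idx];
        if (diff < 0) diff = -diff;
        if (diff > maxDiff) maxDiff = diff;
    }
    printf("max |CPU - GPU| difference: %f\n", maxDiff);

With the all-ones 5x5 filter used here, any nonzero difference points to an indexing or launch-configuration problem.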

Kernel Function

The CUDA kernel performs the same convolution as the CPU loop above. It uses threadIdx and blockIdx to work out which output column and which group of rows the current thread is responsible for, slides the filter over the corresponding positions of the input matrix, and accumulates the products into the output.

__global__ void conv(float* a, float* b, float* c, int size, int kernelSize) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;  // index of this thread's group of N rows
    int col = blockIdx.y * blockDim.y + threadIdx.y;  // output column
    // Out-of-range threads do nothing (the original test used && instead of ||).
    if (row * N >= size || col >= size) {
        return;
    }
    // Each thread computes N consecutive output rows of one column.
    for (int r = row * N; r < row * N + N && r < size; r++) {
        for (int i = 0; i < kernelSize; ++i) {
            for (int j = 0; j < kernelSize; ++j) {
                float v = 0;
                int cR = r - kernelSize / 2 + i;
                int cC = col - kernelSize / 2 + j;
                if (cR >= 0 && cC >= 0 && cR < size && cC < size) {
                    v = a[cR * size + cC];  // zero padding outside the matrix
                }
                c[r * size + col] += b[i * kernelSize + j] * v;
            }
        }
    }
}
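Because each thread accumulates directly into global memory, dev_c has to be zeroed before the launch (the cudaMemset in the host code above). A common alternative, sketched below (not from the original post; the name convPerElement is illustrative), assigns one thread per output element and accumulates in a register, so the output buffer does not need to be cleared first:

    // Alternative sketch: one thread per output element, accumulating in a register.
    __global__ void convPerElement(const float* a, const float* b, float* c,
                                   int size, int kernelSize) {
        int row = blockIdx.x * blockDim.x + threadIdx.x;
        int col = blockIdx.y * blockDim.y + threadIdx.y;
        if (row >= size || col >= size) {
            return;
        }
        float sum = 0.0f;
        for (int i = 0; i < kernelSize; ++i) {
            for (int j = 0; j < kernelSize; ++j) {
                int cR = row - kernelSize / 2 + i;
                int cC = col - kernelSize / 2 + j;
                if (cR >= 0 && cC >= 0 && cR < size && cC < size) {
                    sum += b[i * kernelSize + j] * a[cR * size + cC];
                }
            }
        }
        c[row * size + col] = sum;  // single write per output element
    }

This variant would be launched with dim3((size - 1) / dim1 + 1, (size - 1) / dim2 + 1) blocks of dim3(dim1, dim2) threads, i.e. without the extra division by N.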

The code above accelerates the convolution with CUDA, reducing the computation time from about 1000 ms on the CPU to about 150 ms on the GPU, a speedup of roughly 7x.

Reprinted from: http://qxio.baihongyu.com/
