CUDA 矩阵相乘

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include "cublas_v2.h"

#define BLOCK_SIZE 16

/***************/

用 cuBLAS 的内置函数 API:cublasSgemm

// Compute c = a * b on the GPU with cuBLAS single-precision GEMM.
//
// a is aH x aW, b is bH x bW, both row-major in host memory; c receives
// the aH x bW product, row-major, in host memory. Requires aW == bH
// (not checked here).
//
// cuBLAS assumes COLUMN-major storage. For row-major data the standard
// trick is to compute C^T = B^T * A^T, i.e. call cublasSgemm with the
// operands swapped; the device result then reads back directly as
// row-major C. (The original call passed A first with column-major
// leading dimensions, which yields B*A for row-major inputs.)
//
// Returns cudaSuccess on success, or the first CUDA error encountered;
// cuBLAS failures are mapped to cudaErrorUnknown.
cudaError_t multiWithcuBlase(float *c, float *a, float *b, unsigned int aH, unsigned int aW, unsigned int bH, unsigned int bW)
{
  float *gpu_a = 0;
  float *gpu_b = 0;
  float *gpu_c = 0;
  cudaError_t cudaStatus;
  cublasHandle_t handle = 0;
  cublasStatus_t ret;
  const float alpha = 1.0f;   // C = alpha*A*B + beta*C
  const float beta  = 0.0f;

  // Device buffers for the two inputs and the result.
  cudaStatus = cudaMalloc((void**)&gpu_a, (size_t)aH * aW * sizeof(float));
  if (cudaStatus != cudaSuccess) goto Error;
  cudaStatus = cudaMalloc((void**)&gpu_b, (size_t)bH * bW * sizeof(float));
  if (cudaStatus != cudaSuccess) goto Error;
  cudaStatus = cudaMalloc((void**)&gpu_c, (size_t)aH * bW * sizeof(float));
  if (cudaStatus != cudaSuccess) goto Error;

  cudaStatus = cudaMemcpy(gpu_a, a, (size_t)aH * aW * sizeof(float), cudaMemcpyHostToDevice);
  if (cudaStatus != cudaSuccess) goto Error;
  cudaStatus = cudaMemcpy(gpu_b, b, (size_t)bH * bW * sizeof(float), cudaMemcpyHostToDevice);
  if (cudaStatus != cudaSuccess) goto Error;

  ret = cublasCreate(&handle);
  if (ret != CUBLAS_STATUS_SUCCESS) { cudaStatus = cudaErrorUnknown; goto Error; }

  // Row-major C = A*B  <=>  column-major C^T = B^T * A^T, so B is passed
  // first: m = bW (rows of B^T), n = aH (cols of A^T), k = aW; the leading
  // dimensions are the row widths of the row-major arrays.
  ret = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                    bW, aH, aW,
                    &alpha, gpu_b, bW, gpu_a, aW,
                    &beta, gpu_c, bW);
  if (ret != CUBLAS_STATUS_SUCCESS) { cudaStatus = cudaErrorUnknown; goto Error; }

  // cublasSgemm is asynchronous; wait for it (and surface any execution
  // error) before copying the result back.
  cudaStatus = cudaDeviceSynchronize();
  if (cudaStatus != cudaSuccess) goto Error;

  cudaStatus = cudaMemcpy(c, gpu_c, (size_t)aH * bW * sizeof(float), cudaMemcpyDeviceToHost);

Error:
  // cudaFree(NULL) is a no-op, so unconditional frees are safe here.
  if (handle) cublasDestroy(handle);
  cudaFree(gpu_a);
  cudaFree(gpu_b);
  cudaFree(gpu_c);
  return cudaStatus;
}

/***************/

用shared内存辅助

// Tiled matrix-multiply kernel: c = a * b using shared-memory tiles.
//
// Preconditions (not checked):
//   - blockDim.x == blockDim.y == BLOCK_SIZE
//   - aW (columns of a == rows of b) is a multiple of BLOCK_SIZE
//   - the grid exactly covers c: gridDim.x == bW/BLOCK_SIZE and
//     gridDim.y == rows(a)/BLOCK_SIZE (no bounds checks are performed)
// Each thread computes one element of c.
__global__ void multiKernel(float *c, float *a, float *b, unsigned int aW, unsigned int bW)
{
  int xBlock = blockIdx.x;
  int yBlock = blockIdx.y;

  int xThread = threadIdx.x;
  int yThread = threadIdx.y;

  float Cvalue = 0.0f;

  // Slide a pair of BLOCK_SIZE x BLOCK_SIZE tiles along the shared
  // (aW-long) dimension, accumulating the partial dot product.
  for (int i = 0; i < aW / BLOCK_SIZE; ++i)
  {
    // BUG FIX: the tiles must be float — the original declared them int,
    // which silently truncated every element loaded from a and b.
    __shared__ float aSub[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float bSub[BLOCK_SIZE][BLOCK_SIZE];

    // Each thread stages one element of each tile; consecutive xThread
    // values read consecutive addresses, so the loads coalesce.
    aSub[yThread][xThread] = a[(yBlock*blockDim.y + yThread)*aW + i*blockDim.x + xThread];
    bSub[yThread][xThread] = b[(i*blockDim.y + yThread)*bW + xBlock*blockDim.x + xThread];

    __syncthreads();  // whole tile loaded before anyone reads it

    for (int e = 0; e < BLOCK_SIZE; ++e)
    {
      Cvalue += aSub[yThread][e] * bSub[e][xThread];
    }
    __syncthreads();  // everyone done reading before next iteration overwrites
  }

  int cIndex = (yBlock*blockDim.y + yThread)*bW + xBlock*blockDim.x + xThread;
  c[cIndex] = Cvalue;
}

/***************/

用shared内存辅助,并将循环打开

// Tiled matrix-multiply kernel with the inner dot-product loop manually
// unrolled (the point of this variant). c = a * b.
//
// Preconditions (not checked):
//   - BLOCK_SIZE == 16: the hand-unrolled sum below has exactly 16 terms
//     and must be edited if BLOCK_SIZE changes
//   - blockDim.x == blockDim.y == BLOCK_SIZE
//   - aW is a multiple of BLOCK_SIZE; the grid exactly covers c
// Each thread computes one element of c.
__global__ void multiKernel_NoLoop(float *c, float *a, float *b, unsigned int aW, unsigned int bW)
{
  int xBlock = blockIdx.x;
  int yBlock = blockIdx.y;

  int xThread = threadIdx.x;
  int yThread = threadIdx.y;

  float Cvalue = 0.0f;

  // Slide a pair of BLOCK_SIZE x BLOCK_SIZE tiles along the shared dimension.
  for (int i = 0; i < aW / BLOCK_SIZE; ++i)
  {
    // BUG FIX: the tiles must be float — the original declared them int,
    // which silently truncated every element loaded from a and b.
    __shared__ float aSub[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float bSub[BLOCK_SIZE][BLOCK_SIZE];

    aSub[yThread][xThread] = a[(yBlock*blockDim.y + yThread)*aW + i*blockDim.x + xThread];
    bSub[yThread][xThread] = b[(i*blockDim.y + yThread)*bW + xBlock*blockDim.x + xThread];

    __syncthreads();  // whole tile loaded before anyone reads it

    // 16-term dot product, fully unrolled by hand (BLOCK_SIZE == 16).
    Cvalue += aSub[yThread][0]*bSub[0][xThread]   + aSub[yThread][1]*bSub[1][xThread]
            + aSub[yThread][2]*bSub[2][xThread]   + aSub[yThread][3]*bSub[3][xThread]
            + aSub[yThread][4]*bSub[4][xThread]   + aSub[yThread][5]*bSub[5][xThread]
            + aSub[yThread][6]*bSub[6][xThread]   + aSub[yThread][7]*bSub[7][xThread]
            + aSub[yThread][8]*bSub[8][xThread]   + aSub[yThread][9]*bSub[9][xThread]
            + aSub[yThread][10]*bSub[10][xThread] + aSub[yThread][11]*bSub[11][xThread]
            + aSub[yThread][12]*bSub[12][xThread] + aSub[yThread][13]*bSub[13][xThread]
            + aSub[yThread][14]*bSub[14][xThread] + aSub[yThread][15]*bSub[15][xThread];

    __syncthreads();  // everyone done reading before next iteration overwrites
  }

  int cIndex = (yBlock*blockDim.y + yThread)*bW + xBlock*blockDim.x + xThread;
  c[cIndex] = Cvalue;
}

矩阵大小:320*320:

512*512:

ps 机器太差没办法

CUDA 矩阵相乘,布布扣,bubuko.com

时间: 2024-08-02 21:09:19

CUDA 矩阵相乘的相关文章

CUDA 矩阵相乘完整代码

#include "cuda_runtime.h"#include "device_launch_parameters.h" #include <stdio.h>#include <stdlib.h>#include <time.h>#include "cublas_v2.h" #define BLOCK_SIZE 16 cudaError_t multiCuda(float *c, float *a, flo

【CUDA并行编程之四】矩阵相乘

前面介绍了基本的Cuda编程的相关知识,那么这一篇在此基础之上来看看GPU在处理数据计算上的高效能,我们拿矩阵相乘来作为例子. 1.CPU上执行矩阵相乘以及性能. 在CPU上进行矩阵相乘运算的代码: mat_mul.cc: <span style="font-family:Microsoft YaHei;font-size:18px;">//a[i]*b[i] + c[i] = d[i] #include<iostream> #include<vector

CUDA范例精解通用GPU架构-(2)其实写个矩阵相乘并不是那么难

程序代码及图解析: #include <iostream> #include "book.h" __global__ void add( int a, int b, int *c ) { *c = a + b; } int main( void ) { int c; int *dev_c; HANDLE_ERROR( cudaMalloc( (void**)&dev_c, sizeof(int) ) ); add<<<1,1>>>

使用cublas 矩阵库函数实现矩阵相乘

2014-08-10 cublas中执行矩阵乘法运算的函数 首先要注意的是cublas使用的是以列为主的存储方式,和c/c++中的以行为主的方式是不一样的.处理方法可参考下面的注释代码 // SOME PRECAUTIONS: // IF WE WANT TO CALCULATE ROW-MAJOR MATRIX MULTIPLY C = A * B, // WE JUST NEED CALL CUBLAS API IN A REVERSE ORDER: cublasSgemm(B, A)!

C语言 · 矩阵相乘 · 算法提高

算法提高 矩阵相乘 时间限制:1.0s   内存限制:256.0MB 问题描述 小明最近在为线性代数而头疼,线性代数确实很抽象(也很无聊),可惜他的老师正在讲这矩阵乘法这一段内容. 当然,小明上课打瞌睡也没问题,但线性代数的习题可是很可怕的. 小明希望你来帮他完成这个任务. 现在给你一个ai行aj列的矩阵和一个bi行bj列的矩阵, 要你求出他们相乘的积(当然也是矩阵). (输入数据保证aj=bi,不需要判断) 输入格式 输入文件共有ai+bi+2行,并且输入的所有数为整数(long long范围

矩阵相乘

有一个x*y的矩阵和一个y*z矩阵相乘,元素均为整数,求两个矩阵相乘得到的矩阵.这是一道华为OJ题,具体描述忘记了,大致内容如此.并且要求实现的函数参数为指针. 例如矩阵1为 int m1[1][3]={2,-6,3}; 矩阵2为 int m2[3][1]={4,-2,-4}; 乘积矩阵初始化为 int r[1][1]=0; 要求实现的函数为 void matrix_multiple(int *m1,int *m2,int *r,int x,int y,int z) 代码如下: void mat

稀疏矩阵的三元组顺序表存储及矩阵相乘算法小结

稀疏矩阵的三元组顺序表存储及矩阵相乘算法小结 巧若拙(欢迎转载,但请注明出处:http://blog.csdn.net/qiaoruozhuo) 一:稀疏矩阵的三元组顺序表数据结构 typedef int ElemType; typedef struct { intx, y;  //该非零元素的行下标和列下标 ElemTypee; //该非零元素的值 } Triple; typedef struct { Tripledata[MAXSIZE]; //非零元素三元组顺序表 intmu, nu, t

ObjC语法练习 冒泡排序、选择排序、矩阵相乘

用OC实现的冒泡排序.选择排序.矩阵相乘,纯粹是用来练习语法. 冒泡排序,程序如下: void bubbleSort() { //初始化数组 NSMutableArray *array1 = [[NSMutableArray alloc] initWithCapacity:8]; [array1 addObject:@"5"]; [array1 addObject:@"10"]; [array1 addObject:@"8"]; [array1

cublas 矩阵相乘API详解

#include "cuda_runtime.h"#include "device_launch_parameters.h" #include <stdio.h>#include <stdlib.h>#include "cublas_v2.h" void multiCPU(float *c, float *a, float *b, unsigned int aH, unsigned int aW, unsigned int