#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <complex.h>
#include "cuComplex.h"
#include <typeinfo> // header needed to print variable types with typeid
#include <time.h>
typedef cuDoubleComplex complexd;
using namespace std;
#define pi acos(-1)
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
int N = 70; // number of array elements
int Len = 2400; // number of samples
int theta_N = 1800; // number of angle steps
__device__ __host__ complexd operator*(complexd a, complexd b) { return cuCmul(a,b); }
__device__ __host__ complexd operator+(complexd a, complexd b) { return cuCadd(a,b); }
__device__ __host__ complexd operator/(complexd a, complexd b) { return cuCdiv(a,b); }
__device__ __host__ complexd exp_(complexd arg)
{
complexd res; // result of exp(x + iy) = e^x * (cos y + i sin y)
double s, c;
double e = exp(arg.x);
sincos(arg.y, &s, &c);
res.x = c * e;
res.y = s * e;
return res;
}
/* CUDA kernel */
__global__ void beamforming_nb(complexd* sig_out, complexd* sig_in,complexd* time_delay,int theta_N,int Len,int N)
{
int row = threadIdx.y + blockDim.y * blockIdx.y;
int col = threadIdx.x + blockDim.x * blockIdx.x;
complexd temp;
complexd add_;
if (row < theta_N && col < Len)
{
for (int i = 0; i < N; i++)
{
//sig_out is a theta_N x Len matrix: element (row, col) accumulates time_delay(row, i) * sig_in(i, col)
add_ = time_delay[row * N + i] * sig_in[col + i * Len];
__syncthreads();
temp = temp + add_;
__syncthreads();
}
sig_out[row * Len + col] = temp;
temp.x = 0;
temp.y = 0;
}
}
int main(int argc, char ** argv)
{
/* Initial parameter definitions */
const double c = 1500; // speed of sound in the medium (m/s)
const double T = 1; // sampling duration (s)
const int FS = 2400; // sampling frequency (Hz)
auto LEN = T*FS; // number of samples
double t[Len]; // time axis
for(int i = 0;i<Len;i++)
{
t[i] = i/LEN;
//cout <<t[i]<<endl; // verified
}
const double f0 = 300; // signal frequency 300 Hz
const double d = 0.27; // sensor spacing 0.27 m
const double deg2rad = pi/180; // cos() expects radians
const double theta = 60; // target bearing in degrees
const double theta_rad = theta * deg2rad; // target bearing in radians
cudaError_t res;
/* Source signal definition */
complexd sig_[Len]; // source signal buffer, length = sampling frequency * sampling duration
for(int i = 0;i<Len;i++)
{
complexd temp{0,2*pi*f0*t[i]};
sig_[i] = exp_(temp); // source signal sample
//cout << cuCreal(sig_[i]) << '+' << cuCimag(sig_[i])<< 'i' <<'\n'; // verified
}
/* Apply the steering vector */
complexd sig[N*Len]; // array signal without noise
for(int i = 0;i<N;i++)
{
complexd steer{0,2*pi*f0*cos(theta_rad)*i*d/c}; // steering-vector phase for element i
// cout << cuCreal(exp_(steer)) << '+' << cuCimag(exp_(steer))<< 'i' << endl; // verified
for(int j = 0;j<Len;j++)
{
sig[i*Len+j] = sig_[j] * exp_(steer); // noise-free array signal, N x Len
// cout << cuCreal(sig[i*Len+j]) << '+' << cuCimag(sig[i*Len+j])<< 'i' <<'\n'; // verified
}
}
/* Compute the delay compensation for each scan angle (theta step) */
double theta_n = 1800; // number of angle steps as a double (for the 180/theta_n step size)
complexd t_delay[theta_N*N];
for(int i = 0;i<theta_N;i++)
{
for(int j = 0 ;j<N;j++)
{
complexd tao{0,j*2*pi*f0*cos(i*(180/theta_n)*deg2rad)*d/c};
t_delay[i*N + j] = exp_(cuConj(tao)); // N compensation terms for the i-th angle; overall a theta_N x N matrix
}
}
/* CPU reference computation */
double ttt;
clock_t at, bt;
at = clock();
complexd *h_pt;
h_pt = (complexd *)malloc(theta_N*Len*sizeof(complexd));
complexd h_temp;
for(int i = 0 ; i < theta_N; i ++ )
{
for(int j = 0; j < Len ; j ++)
{
for(int k = 0 ; k <N; k ++)
{
h_temp = h_temp + t_delay[i * N + k] * sig[k * Len + j];
}
h_pt[i * Len + j] = h_temp;
h_temp.x = 0;
h_temp.y = 0;
}
}
bt = clock();
ttt = double(bt-at)/CLOCKS_PER_SEC;
cout << ttt << "s" << endl;
/* CUDA-accelerated computation */
complexd *sig_in;
complexd *time_delay;
complexd *sig_out;
res = cudaMalloc((void**)&sig_in,N*Len*sizeof(complexd));CHECK(res)
res = cudaMalloc((void**)&sig_out,theta_N*Len*sizeof(complexd));CHECK(res)
res = cudaMalloc((void**)&time_delay,theta_N*N*sizeof(complexd));CHECK(res)
res = cudaMemcpy(sig_in,sig,N*Len*sizeof(complexd),cudaMemcpyHostToDevice);CHECK(res)
res = cudaMemcpy(time_delay,t_delay,theta_N*N*sizeof(complexd),cudaMemcpyHostToDevice);CHECK(res)
dim3 threadsPerBlocks(32,32); // first try the case where the number of angle steps matches the sampling rate
dim3 numBlocks((Len)/threadsPerBlocks.x,(theta_N)/threadsPerBlocks.y);
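// With 32x32 threads per block this launches 2400/32 = 75 blocks in x (samples) and 1800/32 = 56 blocks in y (angles, integer division)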
/*
The Jetson Orin module includes an NVIDIA Ampere-architecture GPU
with up to 2048 CUDA cores and up to 64 Tensor cores, plus up to 12 Arm Cortex-A78AE CPU cores
*/
double tt;
clock_t a, b;
a = clock();
beamforming_nb<<<numBlocks,threadsPerBlocks>>>(sig_out,sig_in,time_delay,theta_N,Len,N);
cudaDeviceSynchronize();
b = clock();
tt = double(b-a)/CLOCKS_PER_SEC;
cout << tt << "s" << endl;
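// Note: clock() above measures host wall time around the kernel launch plus cudaDeviceSynchronize();
// cudaEventRecord()/cudaEventElapsedTime() would be an alternative for timing the kernel itself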
complexd *pt_sig = NULL; // host buffer for the GPU output
pt_sig = (complexd*)malloc(theta_N*Len*sizeof(complexd)); // allocate the output buffer
res = cudaMemcpy(pt_sig,sig_out,theta_N*Len*sizeof(complexd),cudaMemcpyDeviceToHost);CHECK(res)
cout << cuCreal(pt_sig[(2399)])<< '+' << cuCimag(pt_sig[(2399)]) << 'i' << '\n';
cout << cuCreal(h_pt[(2400-1)])<< '+' << cuCimag(h_pt[(2400-1)]) << 'i' << '\n';
// -3.74895500 + 1.3897567i
cudaFree(sig_in);
cudaFree(time_delay);
cudaFree(sig_out);
free(pt_sig);
free(h_pt);
return 0;
}
Output:
24.4865s
0.000353s
-3.74896+13.8978i
-3.74896+1.38976i
My question: why do the results computed with GPU acceleration differ from the results computed on the CPU? By checking individual values I found that some of the data match, but, for example, the 2400th output sample does not. I am confident that the CPU-side result is correct, but I cannot find where the GPU computation goes wrong. Any help would be appreciated!
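For reference, below is a rough sketch of a full element-wise comparison between h_pt and pt_sig that could be run right after the cudaMemcpy back to the host, to show exactly which rows (angle indices) and columns (sample indices) disagree. The helper name compare_results and the tolerance 1e-6 are arbitrary choices, not part of the original program.

#include <cstdio>
#include <cmath>
#include "cuComplex.h"

// Compare the CPU and GPU result buffers element by element and report mismatches.
static void compare_results(const cuDoubleComplex* cpu, const cuDoubleComplex* gpu,
                            int rows, int cols, double tol = 1e-6)
{
    long long mismatches = 0;
    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            const int idx = i * cols + j;
            const double d_re = fabs(cuCreal(cpu[idx]) - cuCreal(gpu[idx]));
            const double d_im = fabs(cuCimag(cpu[idx]) - cuCimag(gpu[idx]));
            if (d_re > tol || d_im > tol) {
                if (mismatches < 10) // print only the first few offenders
                    printf("mismatch at row %d, col %d: CPU %f%+fi  GPU %f%+fi\n",
                           i, j, cuCreal(cpu[idx]), cuCimag(cpu[idx]),
                           cuCreal(gpu[idx]), cuCimag(gpu[idx]));
                ++mismatches;
            }
        }
    }
    printf("total mismatches: %lld of %lld\n", mismatches, (long long)rows * (long long)cols);
}
// Usage after the device-to-host copy: compare_results(h_pt, pt_sig, theta_N, Len);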