1
0
Fork 2

优化代码对齐,Review和调整部分注释

LRD_Develop
詹力 2024-01-06 23:56:40 +08:00
parent ed6a09bf8b
commit ce2f195559
4 changed files with 233 additions and 186 deletions

View File

@ -56,7 +56,6 @@ unsigned char * Detection::load_model(const char *filename, int *model_size)
Detection::Detection()
{
/* Create the neural network */
printf("Loading mode...\n");
int model_data_size = 0;
@ -127,7 +126,6 @@ Detection::Detection()
printf("model input height=%d, width=%d, channel=%d\n", height, width, channel);
memset(inputs, 0, sizeof(inputs));
inputs[0].index = 0;
inputs[0].type = RKNN_TENSOR_UINT8;
@ -137,6 +135,7 @@ Detection::Detection()
}
// 这个地方采用直接传值,会影响性能
detect_result_group_t Detection::outputParse(cv::Mat netInputImg)
{
@ -157,7 +156,8 @@ detect_result_group_t Detection::outputParse(cv::Mat netInputImg)
memset(&pads, 0, sizeof(BOX_RECT));
cv::Size target_size(width, height);
cv::Mat resized_img(target_size.height, target_size.width, CV_8UC3);
// 计算缩放比例
// 需要将输入图像分辨率转换到检测模型指定的分辨率
float scale_w = (float)target_size.width / img.cols;
float scale_h = (float)target_size.height / img.rows;

View File

@ -39,7 +39,6 @@ void *ranging(void *args) // ranging线程
int main(int argc, char **argv)
{
pthread_t tids[1]; // 执行ranging线程
int ret = pthread_create(&tids[0], NULL, ranging, NULL);

View File

@ -1,17 +1,18 @@
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <stdio.h>
#include "ranging.h"
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <dlfcn.h>
// OpenCV相关
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <sys/stat.h>
#include <sys/types.h>
@ -19,19 +20,22 @@
#include <vector>
#include <tuple>
#include <string>
#include <string.h>
#include <algorithm>
// 上面重复无用的头文件应该删除
// #include <ros/ros.h>
using namespace cv;
void Ranging::rectifyImage(Mat &oriImgL, Mat &oriImgR) //重映射函数
// 重映射函数
void Ranging::rectifyImage(Mat &oriImgL, Mat &oriImgR)
{
remap(oriImgL, oriImgL, mapX1, mapX2, cv::INTER_LINEAR);
remap(oriImgR, oriImgR, mapY1, mapY2, cv::INTER_LINEAR);
}
std::vector<float> Ranging::pic2cam(int u, int v) //像素坐标转相机坐标
// 像素坐标转相机坐标
std::vector<float> Ranging::pic2cam(int u, int v)
{
//(u,v)->(x,y)"(loc[0],loc[1])"
std::vector<float> loc;
@ -40,28 +44,38 @@ std::vector<float> Ranging::pic2cam(int u, int v) //像素坐标转相机坐标
return loc;
}
std::vector<int> Ranging::muban(Mat &left_image, Mat &right_image, const int *coordinates) //模板匹配
// 模板匹配
std::vector<int> Ranging::muban(Mat &left_image, Mat &right_image, const int *coordinates)
{
int x1 = coordinates[0], y1 = coordinates[1], x2 = coordinates[2], y2 = coordinates[3];
Mat tpl = right_image.rowRange(max(y1 - 2, 0), min(y2 + 2, 479)).colRange(x1, x2); //获取目标框
Mat target = left_image.rowRange(max(y1 - 20, 0), min(y2 + 20, 479)).colRange(0, 639); //待匹配图像,极线约束,只需要同水平区域
// 获取目标框
Mat tpl = right_image.rowRange(max(y1 - 2, 0), min(y2 + 2, 479)).colRange(x1, x2);
// 待匹配图像,极线约束,只需要同水平区域
Mat target = left_image.rowRange(max(y1 - 20, 0), min(y2 + 20, 479)).colRange(0, 639);
int th = tpl.rows, tw = tpl.cols;
Mat result;
matchTemplate(target, tpl, result, TM_CCOEFF_NORMED); //匹配方法:归一化相关系数即零均值归一化互相关
// 匹配方法:归一化相关系数即零均值归一化互相关
matchTemplate(target, tpl, result, TM_CCOEFF_NORMED);
double minVal, maxVal;
Point minLoc, maxLoc;
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc); //得到匹配点坐标
// 得到匹配点坐标
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
Point tl = maxLoc, br;
// 这个地方应该修改为宏
// 转为像素坐标系 640 - 1
br.x = min(maxLoc.x + tw, 639);
// 转为像素坐标系 480 - 1
br.y = min(maxLoc.y + th, 479);
br.x = min(maxLoc.x + tw, 639); //转为像素坐标系
br.y = min(maxLoc.y + th, 479); //转为像素坐标系
////展示匹配结果
// br.x = min(maxLoc.x + tw, 319);
// br.y = min(maxLoc.y + th, 239);
//rectangle(target, tl, br, (0, 255, 0), 3);
//imshow("match-", target);
//waitKey(2);
/*
// 展示匹配结果
br.x = min(maxLoc.x + tw, 319);
br.y = min(maxLoc.y + th, 239);
rectangle(target, tl, br, (0, 255, 0), 3);
imshow("match-", target);
waitKey(2);
*/
std::vector<int> maxloc;
maxloc.push_back(maxLoc.x);
@ -69,6 +83,7 @@ std::vector<int> Ranging::muban(Mat &left_image, Mat &right_image, const int *co
return maxloc;
}
// 这个函数暂时不知道作用
void Ranging::horizon_estimate(Mat& img, Mat& bboxs,int k)
{
//保证摄像头与地面平行
@ -138,49 +153,70 @@ void Ranging::getInfo(Mat &imgL, Mat &imgR, Mat &detBoxes, Mat &info)
Mat imgGrayL, imgGrayR;
cvtColor(imgL, imgGrayL, COLOR_BGR2GRAY);
cvtColor(imgR, imgGrayR, COLOR_BGR2GRAY);
// 优化这个地方的imgR_weight没有起到任何作用
// 纯粹浪费CPU计算了
Mat imgR_weight = imgR.clone();
Mat infoRow;
for (uchar i = 0; i < detBoxes.rows; i++)
{
// 获得位置信息
int x1 = detBoxes.at<float>(i, 0);
int y1 = detBoxes.at<float>(i, 1);
int x2 = detBoxes.at<float>(i, 2);
int y2 = detBoxes.at<float>(i, 3);
// 获取置信度信息
float Conf = detBoxes.at<float>(i, 4);
int cls = detBoxes.at<float>(i, 5);
if (x1 > 600 || x2 < 50 || y1 < 5 || y2 > 475 || x1 < 2 || x2 > 590 || abs(x2 - x1) > 550) //当目标框偏左、偏右或者过大,略去该物体
{
// 当目标框偏左、偏右或者过大,略去该物体
if (x1 > 600 || x2 < 50 || y1 < 5 || y2 > 475 || x1 < 2 || x2 > 590 || abs(x2 - x1) > 550) {
continue;
}
rectangle(imgR, Point(int(x1), int(y1)),
Point(int(x2), int(y2)), Scalar(0, 0, 255)); //绘制目标框
// 绘制目标框
rectangle(imgR, Point(int(x1), int(y1)),Point(int(x2), int(y2)), Scalar(0, 0, 255));
// 检测的是右图,利用模板匹配去找左图的位置
int coordinates[4] = {x1, y1, x2, y2};
std::vector<int> disp_pixel = muban(imgGrayL, imgGrayR, coordinates); //模板匹配
float disp_pixel_x = disp_pixel[0] - x1; //计算水平视差
float disp_pixel_y = disp_pixel[1] - y1; //
disp_pixel_x = (int)(disp_pixel_x + disp_pixel_x * 0.12); //0.12为模板匹配产生的误差,为经验值,通过拟合得到
// 模板匹配
std::vector<int> disp_pixel = muban(imgGrayL, imgGrayR, coordinates);
// 这里用 disp_pixel_x 命名有点奇怪,建议改为 dispDiffX
// 计算水平视差 左图匹配的x坐标和右图的x坐标之差
float disp_pixel_x = disp_pixel[0] - x1;
// 垂直的误差没有起到作用
float disp_pixel_y = disp_pixel[1] - y1;
// 0.12为模板匹配产生的误差,为经验值,通过拟合得到
disp_pixel_x = (int)(disp_pixel_x + disp_pixel_x * 0.12);
//Mat disp_matrix = Mat(1, 1, CV_32F, Scalar(disp_pixel_x)), disp_pixel_xyz;
Mat disp_matrix = Mat(imgGrayL.rows, imgGrayL.cols, CV_32F, Scalar(disp_pixel_x)); //定义视差矩阵,所有值均为水平视差,方便转换为三维坐标,并具有水平距离信息
// 定义视差矩阵,所有值均为水平视差,方便转换为三维坐标,并具有水平距离信息
Mat disp_matrix = Mat(imgGrayL.rows, imgGrayL.cols, CV_32F, Scalar(disp_pixel_x));
Mat threed_pixel_xyz, threedImage;
reprojectImageTo3D(disp_matrix, threedImage, q, false); //2d->3d
threed_pixel_xyz = threedImage.mul(threedImage); //每一像素点求平方,
// 通过重投影矩阵 q(stereoRectify 输出的 Q 矩阵)将视差从2d映射到3d
reprojectImageTo3D(disp_matrix, threedImage, q, false);
// 图像的3个通道每一像素点求平方
threed_pixel_xyz = threedImage.mul(threedImage);
std::vector<Mat> channels;
// 求平方后的结果分离为3个通道然后求和即是欧式距离
split(threed_pixel_xyz.clone(), channels);
threed_pixel_xyz = channels[0] + channels[1] + channels[2]; //计算欧式距离
// 计算欧式距离 = r^2 + g^2 + b^2
threed_pixel_xyz = channels[0] + channels[1] + channels[2];
threed_pixel_xyz.forEach<float>([](float &value, const int *position) { value = sqrt(value); }); // 获得距离d
int mid_pixel = int((x1 + x2) / 2);
std::vector<float> mid = pic2cam(imgGrayR.cols / 2, imgGrayR.rows); //计算角度,从像素坐标转为相机坐标
// 计算角度,从像素坐标转为相机坐标
// 这里用结构体比用vector更加节省CPU运算
std::vector<float> mid = pic2cam(imgGrayR.cols / 2, imgGrayR.rows);
std::vector<float> loc_tar = pic2cam(mid_pixel, imgGrayR.rows);
float alfa = atan((loc_tar[0] - mid[0]) / q.at<double>(2, 3));
if (disp_pixel_x > 240) // 距离太近,视差过大
// 距离太近,视差过大
if (disp_pixel_x > 240)
{
char cm[15];
//sprintf(cm, "cannot match !");
@ -223,7 +259,7 @@ void Ranging::getInfo(Mat &imgL, Mat &imgR, Mat &detBoxes, Mat &info)
infoRow.copyTo(info.row(i));
continue;
}
// 在图上绘制信息(原注释为 GBK 乱码,按上下文还原)
// char dc[50], wh[50];
// std::string cname = className[cls + 1];
// sprintf(dc, "dis:%.2fcm %d", median, cls);
@ -231,7 +267,6 @@ void Ranging::getInfo(Mat &imgL, Mat &imgR, Mat &detBoxes, Mat &info)
// putText(imgR, dc, Point(x1, y2), FONT_HERSHEY_PLAIN, 1.5, Scalar(0, 0, 255), 2);
// putText(imgR, wh, Point(x1, y1), FONT_HERSHEY_PLAIN, 1.5, Scalar(0, 0, 255), 1.5);
//返回数据
infoRow = (Mat_<float>(1, 4) << median, w1, h1, alfa);
infoRow.copyTo(info.row(i));
@ -239,7 +274,6 @@ void Ranging::getInfo(Mat &imgL, Mat &imgR, Mat &detBoxes, Mat &info)
}
//cv::imshow("kk",imgR);
//cv::waitKey(1);
}
Ranging::Ranging(int index, int imgw, int imgh) : //初始化
@ -266,38 +300,40 @@ Ranging::Ranging(int index, int imgw, int imgh) : //初始化
stereoRectify(cam_matrix_left.t(), distortion_l, cam_matrix_right.t(), distortion_r,
imgSize, rotate.t(), trans, r1, r2, p1, p2, q);//立体校正
initUndistortRectifyMap(cam_matrix_left.t(), distortion_l, r1, p1, imgSize, CV_32F, mapX1, mapX2);//计算无畸变和修正转换映射
initUndistortRectifyMap(cam_matrix_right.t(), distortion_r, r2, p2, imgSize, CV_32F, mapY1, mapY2);//计算无畸变和修正转换映射
// 计算无畸变和修正转换映射
initUndistortRectifyMap(cam_matrix_left.t(), distortion_l, r1, p1, imgSize, CV_32F, mapX1, mapX2);
// 计算无畸变和修正转换映射
initUndistortRectifyMap(cam_matrix_right.t(), distortion_r, r2, p2, imgSize, CV_32F, mapY1, mapY2);
// RKNN_Create(&hdx, modelPath); // 初始化检测模型
std::cout<< " ******************* CAMERA initialization ********************" << std::endl;
}
std::vector<Mat> Ranging::get_range()
{
double rang_old, rang_now;
// rang_old = ros::Time::now().toSec(); //测试运行时间
Mat frame, lframe, rframe;
// vcapture.read(frame); //获取视频帧
// 获取视频帧
vcapture >> frame;
// 显示原始图像
//cv::imshow("frame",frame);
if (!frame.empty())
{
int64 t = getTickCount();
Mat lframe(frame.colRange(0, imgw).clone()); //拷贝左图
Mat rframe(frame.colRange(imgw, imgw * 2).clone()); //拷贝右图
// 拷贝左图
Mat lframe(frame.colRange(0, imgw).clone());
// 拷贝右图
Mat rframe(frame.colRange(imgw, imgw * 2).clone());
rectifyImage(lframe, rframe); //
rectifyImage(lframe, rframe);
// 这里拷贝一次然后再yolo里面再拷贝一次以及函数传参也拷贝了一次?
cv::Mat Rframe = rframe.clone();
// yolov5s.outputParse修改为detectImage()
detect_result_group = yolov5s.outputParse(Rframe);
if (detect_result_group.count<=0)
@ -305,13 +341,17 @@ std::vector<Mat> Ranging::get_range()
std::cout<<"detect nothing"<<std::endl;
}
// detction box transfor to our format
Mat detBoxes(detect_result_group.count, 5, CV_32F); //定义矩阵,存储目标检测内容,存储格式(x,y,x,y,conf,cls)
// 定义矩阵,存储目标检测内容,存储格式(x,y,x,y,conf,cls)
Mat detBoxes(detect_result_group.count, 5, CV_32F);
char text[256];
for (int i = 0; i < detect_result_group.count; i++) //存储目标检测内容 (x,y,x,y,conf,cls)
// 存储目标检测内容 (x,y,x,y,conf,cls)
// 这里应该写一个函数drawDetectBobox()
for (int i = 0; i < detect_result_group.count; i++)
{
detect_result_t *det_result = &(detect_result_group.results[i]);
sprintf(text, "%s %.1f%%", det_result->name, det_result->prop * 100);
// printf("%s @ (%d %d %d %d) %f\n", det_result->name, det_result->box.left, det_result->box.top,
// det_result->box.right, det_result->box.bottom, det_result->prop);
@ -338,9 +378,15 @@ std::vector<Mat> Ranging::get_range()
// }
}
Mat info(detect_result_group.count, 4, CV_32F); // 存储测距信息存储格式距离d宽w高h角度α
// 存储测距信息存储格式距离d宽w高h角度α
Mat info(detect_result_group.count, 4, CV_32F);
// 如果有检测目标
if (detect_result_group.count)
{
// getInfo()修改为calObjectRanging()
// 在这里测量目标的距离并存储到info中
// 优化: 这个地方重复的将左右视图转换为灰度图浪费CPU的算力
getInfo(lframe, rframe, detBoxes, info);
//show stereo distance
@ -367,6 +413,7 @@ std::vector<Mat> Ranging::get_range()
// }
}
// 这个地方应该整为一个函数,负责显示帧率图像
t = getTickCount() - t;
char fps[50];
sprintf(fps, "fps: %d", int(1 / (t / getTickFrequency())));
@ -374,6 +421,7 @@ std::vector<Mat> Ranging::get_range()
cv::imshow("k",Rframe);
cv::waitKey(1);
return std::vector<Mat>{rframe, detBoxes, info};
}
return std::vector<Mat>{rframe};