#include <iostream>
#include <vector>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <algorithm>

#include "ranging.h"
#include "rknn_sdk.h"

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

#include <ros/ros.h>

using namespace cv;
using namespace std;

void Ranging::rectifyImage(Mat &oriImgL, Mat &oriImgR) // remap both views with the rectification maps
{
    remap(oriImgL, oriImgL, mapX1, mapX2, INTER_LINEAR);
    remap(oriImgR, oriImgR, mapY1, mapY2, INTER_LINEAR);
}

std::vector<float> Ranging::pic2cam(int u, int v) // pixel coordinates -> camera coordinates
{
    // (u,v) -> (x,y) stored as (loc[0], loc[1])
    std::vector<float> loc;
    loc.push_back((u - cam_matrix_right.at<double>(0, 2)) * q.at<double>(2, 3) / cam_matrix_right.at<double>(0, 0));
    loc.push_back((v - cam_matrix_right.at<double>(1, 2)) * q.at<double>(2, 3) / cam_matrix_right.at<double>(1, 1));
    return loc;
}

std::vector<float> Ranging::muban(Mat &left_image, Mat &right_image, const int *coordinates) // template matching
{
    int x1 = coordinates[0], y1 = coordinates[1], x2 = coordinates[2], y2 = coordinates[3];

    // cv::Mat right_image_, left_image_;
    // cv::pyrDown(right_image, right_image_);
    // cv::pyrDown(left_image, left_image_);

    Mat tpl = right_image.rowRange(max(y1 - 2, 0), min(y2 + 2, 479)).colRange(x1, x2);      // template: the detected box in the right image
    Mat target = left_image.rowRange(max(y1 - 20, 0), min(y2 + 20, 479)).colRange(0, 639);  // search image: epipolar constraint, only the same horizontal band is needed
    // Mat tpl = right_image_.rowRange(max(int(0.5*y1 - 2), 0), min(int(0.5*y2 + 2), 479)).colRange(int(0.5*x1), int(0.5*x2));
    // Mat target = left_image_.rowRange(max(int(0.5*y1 - 10), 0), min(int(0.5*y2 + 10), 239)).colRange(0, 319);

    int th = tpl.rows, tw = tpl.cols;
    Mat result;
    // methods: TM_CCOEFF_NORMED, TM_SQDIFF_NORMED, TM_CCORR_NORMED
    matchTemplate(target, tpl, result, TM_CCOEFF_NORMED); // matching method: normalized correlation coefficient (zero-mean NCC)
    double minVal, maxVal;
    Point minLoc, maxLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc); // best-match location
    Point tl = maxLoc, br;
    br.x = min(maxLoc.x + tw, 639); // clamp to pixel coordinates
    br.y = min(maxLoc.y + th, 479); // clamp to pixel coordinates

    //// show the match result
    // br.x = min(maxLoc.x + tw, 319);
    // br.y = min(maxLoc.y + th, 239);
    //rectangle(target, tl, br, (0, 255, 0), 3);
    //imshow("match-", target);
    //waitKey(2);

    std::vector<float> maxloc;
    maxloc.push_back(maxLoc.x);
    maxloc.push_back(maxLoc.y);
    return maxloc;
}
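// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original implementation). muban() only
// returns the best-match pixel location; getInfo() later turns the horizontal
// offset (match_x - x1) into metric depth through reprojectImageTo3D() and the
// Q matrix from stereoRectify(). The helper below restates that relation in
// closed form, Z = f * B / d, assuming the standard OpenCV Q layout with
// Q(2,3) = f (pixels) and Q(3,2) = -1/B, and ignoring the principal-point
// difference term. Names and the sample values are hypothetical.
// ---------------------------------------------------------------------------
#if 0
static float disparityToDepth(const cv::Mat &Q, float disparity)
{
    double f = Q.at<double>(2, 3);                 // rectified focal length, pixels
    double B = 1.0 / std::abs(Q.at<double>(3, 2)); // baseline, in the calibration unit (mm here)
    return static_cast<float>(f * B / disparity);  // Z = f * B / d
}
// Example: f = 470 px, B = 60 mm, disparity = 47 px  ->  Z = 600 mm.
#endif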
void Ranging::horizon_estimate(Mat &img, Mat &bboxs, int k)
{
    // assumes the camera is mounted parallel to the ground
    int x1 = bboxs.at<float>(k, 0);
    int x2 = bboxs.at<float>(k, 2);
    int y1 = bboxs.at<float>(k, 1);
    int y2 = bboxs.at<float>(k, 3);
    float Conf = bboxs.at<float>(k, 4);
    int cls = bboxs.at<float>(k, 5);
    float Y_B, Y_H;
    Mat edge, grayImage;
    vector<Point> idx;
    Mat tpl = img.rowRange(y1, min(y2 + 5, 479)).colRange(x1, x2); // region of interest
    //Mat target = left_image.rowRange(max(y1 - 20, 0), min(y2 + 20, 479)).colRange(0, 639);
    Mat Z = Mat::zeros(2, tpl.cols, CV_32FC1);
    cvtColor(tpl, grayImage, COLOR_BGR2GRAY);
    GaussianBlur(grayImage, grayImage, Size(5, 5), 0);
    Canny(grayImage, edge, 120, 180, 3); // extract edges to find the ground-contact point
    //cv::imshow("1", edge);
    //cv::waitKey(1);
    float cluster[650];
    for (int i = 0; i < 650; i++)
    {
        cluster[i] = 0;
    }
    int y_b, y_h;
    int j = 0;
    for (int i = 0; i < x2 - x1; i++)
    {
        //Mat temp = edge.rowRange(max(y1, 0), min(y2 + 4, 479)).colRange(x1, x2);
        Mat temp = edge.col(i); // take the i-th column
        // std::cout << "temp: " << temp << std::endl;
        findNonZero(temp, idx); // edge pixels in this column
        std::vector<float> point_b = pic2cam(x1 + i, 240); // convert to camera coordinates
        std::vector<float> point_H = pic2cam(320, 240);
        float alfa = atan((point_b[0] - point_H[0]) / q.at<double>(2, 3));
        if (idx.size() < 1)
        {
            Z.at<float>(0, i) = 0;
            Z.at<float>(1, i) = alfa;
            continue;
        }
        int y_b = idx[idx.size() - 1].y + y1; // lowest edge pixel: the ground-contact row
        // y_b = int(y_b + y_b*0.03);
        int y_h = 240;
        point_b = pic2cam(x1 + i, y_b); // convert to camera coordinates
        point_H = pic2cam(320, y_h);
        Y_B = point_b[1];
        Y_H = point_H[1];
        // std::cout << "y_b: " << y_b << std::endl;
        float H_c = 60;  // camera height above the ground, mm
        float theta = 0; // camera pitch relative to the ground, radians
        float d = (1 / (cos(theta) * cos(theta))) * q.at<double>(2, 3) * H_c / (Y_B - Y_H) - H_c * tan(theta);
        alfa = atan((point_b[0] - point_H[0]) / q.at<double>(2, 3));
        //cout << "d: " << d << endl;
        if (d > 700) { d = 0; }
        Z.at<float>(0, i) = d / cos(alfa);
        Z.at<float>(1, i) = alfa;
    }
    /* disabled clustering step: keep only the largest cluster of per-column distances
    if (i > 0)
    {
        if (abs(Z.at<float>(0, i) - Z.at<float>(0, i - 1)) < 40) // cluster by distance jump
        {
            cluster[j] = cluster[j] + 1;
        }
        else
        {
            j = j + 1;
            cluster[j] = 1;
            //std::cout << "j : " << j << std::endl;
        }
    }
    }
    //int max_loc = distance(cluster, max_element(cluster, cluster + sizeof(cluster)/sizeof(cluster[0]))); // keep only the most populated cluster, zero the rest
    //std::cout << " max_loc : " << max_loc << std::endl;
    ... Z.at<float>(0, t) = 0; } } temp = temp + cluster[i]; }
    */
    // std::cout << "z : " << Z << std::endl;
    this->Z = Z.clone();
}
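// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original implementation). For a camera
// at height H_c looking parallel to the ground (theta = 0), similar triangles
// give the ground distance of a contact point as d = f * H_c / dY, where dY is
// the vertical offset (in rectified focal-plane units, as produced by
// pic2cam()) between the ground-contact row and the optical-axis row. This is
// the per-column formula horizon_estimate() evaluates; the helper and numbers
// below are hypothetical.
// ---------------------------------------------------------------------------
#if 0
static float groundDistance(float f, float H_c, float dY, float theta = 0.f)
{
    // general tilted-camera form used above; reduces to f * H_c / dY when theta == 0
    return (1.f / (std::cos(theta) * std::cos(theta))) * f * H_c / dY - H_c * std::tan(theta);
}
// Example: f = 470 px, H_c = 60 mm, dY = 47 px  ->  d = 600 mm.
#endif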
void Ranging::getInfo(Mat &imgL, Mat &imgR, Mat &detBoxes, Mat &info)
{
    // grayscale conversion for template matching
    Mat imgGrayL, imgGrayR;
    cvtColor(imgL, imgGrayL, COLOR_BGR2GRAY);
    cvtColor(imgR, imgGrayR, COLOR_BGR2GRAY);
    Mat imgR_weight = imgR.clone();
    Mat infoRow;
    for (uchar i = 0; i < detBoxes.rows; i++)
    {
        int x1 = detBoxes.at<float>(i, 0);
        int y1 = detBoxes.at<float>(i, 1);
        int x2 = detBoxes.at<float>(i, 2);
        int y2 = detBoxes.at<float>(i, 3);
        //std::cout << "x1: " << x1 << std::endl;
        float Conf = detBoxes.at<float>(i, 4);
        int cls = detBoxes.at<float>(i, 5);

        if (cls == 6 || cls == 10 || cls == 11) // weighing scale, carpet and towel use the monocular ranging method
        {
            horizon_estimate(imgR_weight, detBoxes, i);
        }

        if (x1 > 600 || x2 < 50 || y1 < 5 || y2 > 475 || x1 < 2 || x2 > 590 || abs(x2 - x1) > 550) // skip boxes that sit too far left/right or are too large
        {
            char cm[15];
            //sprintf(cm, "cannot match !");
            sprintf(cm, "%d , %.2f", cls, Conf);
            putText(imgR, cm, Point((x1), (y1)), FONT_HERSHEY_PLAIN, 2.2, Scalar(0, 0, 255), 2);
            infoRow = (Mat_<float>(1, 4) << -1, -1, -1, -1);
            infoRow.copyTo(info.row(i));
            rectangle(imgR, Point(int(x1), int(y1)), Point(int(x2), int(y2)), Scalar(0, 0, 255));
            continue;
        }

        if (cls != 0 && cls != 1 && cls != 2 && cls != 6 && cls != 7 && cls != 10 && cls != 11 && cls != 14 && cls != 15) // large objects are not ranged
        {
            if (x1 < 30 || x2 < 80 || x1 > 580 || x2 > 610) // drop boxes at the image border
            {
                detBoxes.at<float>(i, 5) = -1;
                cls = detBoxes.at<float>(i, 5);
            }
            char cm[15];
            //sprintf(cm, "cannot match !");
            sprintf(cm, "%d , %.2f", cls, Conf);
            putText(imgR, cm, Point((x1), (y1)), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255), 2);
            infoRow = (Mat_<float>(1, 4) << -1, -1, -1, -1);
            infoRow.copyTo(info.row(i));
            rectangle(imgR, Point(int(x1), int(y1)), Point(int(x2), int(y2)), Scalar(0, 0, 255));
            continue;
        }

        rectangle(imgR, Point(int(x1), int(y1)), Point(int(x2), int(y2)), Scalar(0, 0, 255)); // draw the detection box
        int coordinates[4] = {x1, y1, x2, y2};
        std::vector<float> disp_pixel = muban(imgGrayL, imgGrayR, coordinates); // template matching
        float disp_pixel_x = disp_pixel[0] - x1; // horizontal disparity
        float disp_pixel_y = disp_pixel[1] - y1;
        // disp_pixel_x = (int)(disp_pixel_x + disp_pixel_x * 0.12); // 0.12: empirical template-matching bias, obtained by fitting

        //Mat disp_matrix = Mat(1, 1, CV_32F, Scalar(disp_pixel_x)), disp_pixel_xyz;
        Mat disp_matrix = Mat(imgGrayL.rows, imgGrayL.cols, CV_32F, Scalar(disp_pixel_x)); // disparity matrix filled with the horizontal disparity, so the whole frame can be reprojected to 3D
        Mat threed_pixel_xyz, threedImage;
        reprojectImageTo3D(disp_matrix, threedImage, q, false); // 2D -> 3D
        threed_pixel_xyz = threedImage.mul(threedImage);        // square each element
        std::vector<Mat> channels;
        split(threed_pixel_xyz.clone(), channels);
        threed_pixel_xyz = channels[0] + channels[1] + channels[2]; // sum of squares -> squared Euclidean distance
        threed_pixel_xyz.forEach<float>([](float &value, const int *position) { value = sqrt(value); }); // take the square root to get the distance d

        int mid_pixel = int((x1 + x2) / 2);
        std::vector<float> mid = pic2cam(imgGrayR.cols / 2, imgGrayR.rows); // bearing angle: convert pixel to camera coordinates
        std::vector<float> loc_tar = pic2cam(mid_pixel, imgGrayR.rows);
        float alfa = atan((loc_tar[0] - mid[0]) / q.at<double>(2, 3));

        if (disp_pixel_x > 240) // too close: disparity too large
        {
            char cm[15];
            //sprintf(cm, "cannot match !");
            sprintf(cm, "%d , %.2f", cls, Conf);
            putText(imgR, cm, Point((x1), (y1)), FONT_HERSHEY_PLAIN, 2.2, Scalar(0, 0, 255), 2);
            infoRow = (Mat_<float>(1, 4) << -1, -1, -1, -1);
            infoRow.copyTo(info.row(i));
            continue;
        }
        else
        {
            float median = threed_pixel_xyz.at<float>((int)(y1 + y2) / 2, (int)(x1 + x2) / 2);
            std::vector<float> ltPoint = pic2cam(x1, y1);
            std::vector<float> rbPoint = pic2cam(x2, y2);
            float xx1 = ltPoint[0], yy1 = ltPoint[1], xx2 = rbPoint[0], yy2 = rbPoint[1]; // compute width and height
            float f = q.at<double>(2, 3);
            float f1 = sqrt(xx1 * xx1 + yy1 * yy1 + f * f); // derived from the pinhole model
            //float w1 = median * sqrt((xx1 - xx2) * (xx1 - xx2) / 4) / f1;
            float h1 = median * sqrt((yy1 - yy2) * (yy1 - yy2) / 4) / f1;
            float f2 = sqrt(xx2 * xx2 + yy2 * yy2 + f * f);
            //float w2 = median * sqrt((xx2 - xx1) * (xx2 - xx1) / 4) / f2;
            float h2 = median * sqrt((yy2 - yy1) * (yy2 - yy1) / 4) / f2;
            float w1 = sqrt(pow((threedImage.at<Vec3f>(y2, x1)[0] - threedImage.at<Vec3f>(y2, x2)[0]), 2) +
                            pow((threedImage.at<Vec3f>(y2, x1)[1] - threedImage.at<Vec3f>(y2, x2)[1]), 2) +
                            pow((threedImage.at<Vec3f>(y2, x1)[2] - threedImage.at<Vec3f>(y2, x2)[2]), 2));
            w1 = w1 / 10;
            h1 = (h1 + h2) / 10;
            median /= 10;

            if (median > 120) // too far: ranging error grows
            {
                //char tf[9];
                //sprintf(tf, "Too far!");
                char cm[15];
                //sprintf(cm, "cannot match !");
                sprintf(cm, "%d , %.2f", cls, Conf);
                putText(imgR, cm, Point((x1), (y1)), FONT_HERSHEY_PLAIN, 2.2, Scalar(0, 0, 255), 2);
                infoRow = (Mat_<float>(1, 4) << -1, -1, -1, -1);
                infoRow.copyTo(info.row(i));
                continue;
            }

            // draw the results on the image
            char dc[50], wh[50];
            std::string cname = className[cls + 1];
            sprintf(dc, "dis:%.2fcm %d", median, cls);
            sprintf(wh, "W: %.2fcm H: %.2fcm alfa: %.2f", w1, h1, alfa);
            putText(imgR, dc, Point(x1, y2), FONT_HERSHEY_PLAIN, 1.5, Scalar(0, 0, 255), 2);
            putText(imgR, wh, Point(x1, y1), FONT_HERSHEY_PLAIN, 1.5, Scalar(0, 0, 255), 1.5);
            // return data through info
            infoRow = (Mat_<float>(1, 4) << median, w1, h1, alfa);
            infoRow.copyTo(info.row(i));
        }
    }
}
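// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original implementation). Both
// getInfo() and horizon_estimate() report the bearing "alfa" of a target as
// the angle of the ray through the target column relative to the optical
// axis: alfa = atan((u - u_center) / f). The helper restates that relation
// with hypothetical names; f would come from the rectified camera matrix.
// ---------------------------------------------------------------------------
#if 0
static float bearingFromColumn(float u, float u_center, float f)
{
    return std::atan((u - u_center) / f); // radians; positive to the right of the axis
}
// Example: u = 480, u_center = 320, f = 470 px  ->  alfa ~ 0.33 rad (about 18.8 deg).
#endif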
Ranging::Ranging(int index, int imgw, int imgh) : // initialization
    mapX1(imgh, imgw, CV_64F), // rectification maps for undistortion / rectification
    mapX2(imgh, imgw, CV_64F),
    mapY1(imgh, imgw, CV_64F),
    mapY2(imgh, imgw, CV_64F),
    q(4, 4, CV_64F),
    imgw(imgw),
    imgh(imgh)
{
    // Z = Mat::zeros(2, 1, CV_32FC1);
    if (!vcapture.open(index))
    {
        std::cout << "Open camera failed !" << std::endl;
        exit(EXIT_FAILURE);
    }
    else
    {
        //vcapture.set(CAP_PROP_FOURCC, CAP_OPENCV_MJPEG);
        vcapture.set(CAP_PROP_FPS, 30);
        vcapture.set(CAP_PROP_FRAME_WIDTH, imgw * 2);
        vcapture.set(CAP_PROP_FRAME_HEIGHT, imgh);
        vcapture.set(CAP_PROP_BUFFERSIZE, 1);

        auto imgSize = Size(imgw, imgh);
        Mat r1(3, 3, CV_64F), r2(3, 3, CV_64F), p1(3, 4, CV_64F), p2(3, 4, CV_64F);
        stereoRectify(cam_matrix_left.t(), distortion_l, cam_matrix_right.t(), distortion_r,
                      imgSize, rotate.t(), trans, r1, r2, p1, p2, q);                         // stereo rectification
        initUndistortRectifyMap(cam_matrix_left.t(), distortion_l, r1, p1, imgSize, CV_32F, mapX1, mapX2);  // compute the undistortion / rectification maps
        initUndistortRectifyMap(cam_matrix_right.t(), distortion_r, r2, p2, imgSize, CV_32F, mapY1, mapY2); // compute the undistortion / rectification maps

        RKNN_Create(&hdx, modelPath); // initialize the detection model
        std::cout << " ******************* CAMERA initialization ********************" << std::endl;
    }
}

// std::vector<float> Ranging::pic2cam(int u, int v) // pixel coordinates -> camera coordinates
// {
//     std::vector<float> loc;
//     loc.push_back((u - cam_matrix_right.at<double>(0, 2)) * q.at<double>(2, 3) / cam_matrix_right.at<double>(0, 0));
//     loc.push_back((v - cam_matrix_right.at<double>(1, 2)) * q.at<double>(2, 3) / cam_matrix_right.at<double>(1, 1));
//     return loc;
// }

std::vector<Mat> Ranging::get_range()
{
    double rang_old, rang_now;
    rang_old = ros::Time::now().toSec(); // timing
    Mat frame, lframe, rframe;
    vcapture.read(frame); // grab a frame
    if (!frame.empty())
    {
        int64 t = getTickCount();
        Mat lframe(frame.colRange(0, imgw).clone());          // copy the left image
        Mat rframe(frame.colRange(imgw, imgw * 2).clone());   // copy the right image
        //imshow("lframe", lframe);
        //waitKey(1);
        rectifyImage(lframe, rframe);

        // DetRet *det_ret = NULL;
        // InputData input_data; // input data
        int shape[4] = {1, 3, rframe.rows, rframe.cols}, det_num = 0;
        memcpy(input_data.shape, shape, sizeof(shape));
        Mat Rframe = rframe.clone();

        double detect_old, detect_now;
        detect_old = ros::Time::now().toSec();
        input_data.data = Rframe.data;
        //std::cout << "Rframe.data.shape" << ...
        // (RKNN inference call omitted here: it consumes input_data and fills det_ret / det_num)
        if (det_num == 0)
        {
            return std::vector<Mat>{rframe}; // no detection boxes: return the frame only
        }
        std::cout << "det_num: " << det_num << std::endl;

        // detection boxes transformed to our format
        Mat detBoxes(det_num, 6, CV_32F); // detection results, one row per box: (x, y, x, y, conf, cls)
        for (int i = 0; i < det_num; i++)
        {
            DetRet det_result = det_ret[i];
            // xyxy conf cls
            float xmin = rframe.cols * det_result.fLeft;
            float ymin = rframe.rows * det_result.fTop;
            float xmax = rframe.cols * det_result.fRight;
            float ymax = rframe.rows * det_result.fBottom;
            detBoxes.at<float>(i, 0) = xmin;
            detBoxes.at<float>(i, 1) = ymin;
            detBoxes.at<float>(i, 2) = xmax;
            detBoxes.at<float>(i, 3) = ymax;
            detBoxes.at<float>(i, 4) = det_result.fConf;
            // empirical filter: drop oversized false detections
            float ratio = (xmax - xmin) * (ymax - ymin) / 308480.;
            if (ratio > 0.7)
            {
                detBoxes.at<float>(i, 5) = -1;
                continue;
            }
            detBoxes.at<float>(i, 5) = det_result.nLabel;
            if (det_result.nLabel == 1 || det_result.nLabel == 2) // for weighing scales and fan bases, raise the box top so lidar points on the pole fall inside the box
            {
                detBoxes.at<float>(i, 1) = 100;
            }
        }

        Mat info(det_num, 4, CV_32F); // ranging info per box: distance d, width w, height h, angle alfa
        if (det_num)
        {
            // double rang_old, rang_now;
            // rang_old = ros::Time::now().toSec();
            getInfo(lframe, rframe, detBoxes, info);
            // rang_now = ros::Time::now().toSec();
            // cout << "data_dis_time: " << rang_now - rang_old << endl;
        }

        t = getTickCount() - t;
        char fps[50];
        sprintf(fps, "fps: %d", int(1 / (t / getTickFrequency())));
        putText(rframe, fps, Point(20, 20), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255), 1.5);
        // rang_now = ros::Time::now().toSec();
        // cout << "data_rang_time" << rang_now - rang_old << endl;
        return std::vector<Mat>{rframe, detBoxes, info};
    }
    return std::vector<Mat>{rframe};
}
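// ---------------------------------------------------------------------------
// Usage sketch (not part of the original file): a minimal loop that drives
// Ranging::get_range() and consumes its three outputs. The camera index, the
// 640x480 per-eye resolution, the node name and the window handling are
// assumptions for illustration; the class itself only requires a ROS time
// source to be available.
// ---------------------------------------------------------------------------
#if 0
#include <iostream>
#include <vector>
#include <ros/ros.h>
#include "opencv2/highgui.hpp"
#include "ranging.h"

int main(int argc, char **argv)
{
    ros::init(argc, argv, "ranging_demo"); // assumed node name
    Ranging ranging(0, 640, 480);          // assumed: /dev/video0, 640x480 per eye

    while (ros::ok())
    {
        std::vector<cv::Mat> out = ranging.get_range();
        if (out.size() == 3) // frame + detection boxes + ranging info
        {
            const cv::Mat &boxes = out[1]; // rows: (x1, y1, x2, y2, conf, cls)
            const cv::Mat &info  = out[2]; // rows: (distance cm, width cm, height cm, angle rad)
            for (int i = 0; i < info.rows; i++)
            {
                float d = info.at<float>(i, 0);
                int cls = (int)boxes.at<float>(i, 5);
                if (d > 0) // rows with -1 could not be ranged
                    std::cout << "class " << cls << ": " << d << " cm" << std::endl;
            }
        }
        if (!out.empty() && !out[0].empty())
        {
            cv::imshow("ranging", out[0]);
            if (cv::waitKey(1) == 27) break; // Esc to quit
        }
    }
    return 0;
}
#endif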