In a middle-school physics class, most of us saw the candle projection experiment: a lit candle is placed in front of a dark box, its light passes through a small hole in the box's front face, and an inverted image of the candle forms on the plane at the back of the box. In this process the pinhole model projects a candle in the three-dimensional world onto a two-dimensional imaging plane. By the same token, this simple model can explain how a camera forms an image.
We can first transform a world point into the camera frame and then divide out its last component (the point's depth in front of the camera's imaging plane). This amounts to normalizing the last dimension, and yields the projection of a point $P = [X, Y, Z]^T$ (in camera coordinates) onto the camera's normalized plane:

$$P_c = \left[ \frac{X}{Z},\ \frac{Y}{Z},\ 1 \right]^T$$
The normalized coordinates can be regarded as a point on the plane at $z = 1$ in front of the camera, and this $z = 1$ plane is called the normalized plane. Left-multiplying the normalized coordinates by the intrinsic matrix gives the pixel coordinates, so we can view the pixel coordinates $[u, v]^T$ as the result of a quantized measurement of a point on the normalized plane. The model also shows that multiplying the camera coordinates by any nonzero constant leaves the normalized coordinates unchanged: the point's depth is lost during projection, which is why monocular vision cannot recover the depth of a pixel.
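To make the model concrete, here is a minimal sketch of the projection pipeline. The intrinsic values (fx, fy, cx, cy) and the test point are illustrative assumptions, not taken from any real calibration:

```cpp
#include <Eigen/Core>
#include <iostream>

int main() {
  // Hypothetical intrinsics, for illustration only.
  double fx = 500.0, fy = 500.0, cx = 320.0, cy = 240.0;

  // A point in the camera frame: P = [X, Y, Z]^T.
  Eigen::Vector3d P(1.0, 2.0, 5.0);

  // Divide by the depth Z to get the point on the z = 1 normalized plane.
  double x = P.x() / P.z();  // X / Z
  double y = P.y() / P.z();  // Y / Z

  // Apply the intrinsics to get the pixel coordinates [u, v]^T.
  double u = fx * x + cx;
  double v = fy * y + cy;
  std::cout << "pixel: (" << u << ", " << v << ")" << std::endl;

  // Scaling P by any nonzero constant leaves (x, y) unchanged,
  // which is exactly why depth is lost in a single image.
  return 0;
}
```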
```cpp
//
// Created by Qian.
//
#include <opencv2/opencv.hpp>
#include <string>

using namespace std;

string image_file = "../test.png";  // make sure this path is correct

int main(int argc, char **argv) {
  // This program asks you to implement the undistortion yourself. OpenCV has a
  // built-in undistortion routine, but implementing it once helps understanding.

  // Distortion coefficients
  double k1 = -0.28340811, k2 = 0.07395907, p1 = 0.00019359, p2 = 1.76187114e-05;
  // Intrinsics
  double fx = 458.654, fy = 457.296, cx = 367.215, cy = 248.375;

  cv::Mat image = cv::imread(image_file, cv::IMREAD_GRAYSCALE);  // grayscale image, CV_8UC1
  int rows = image.rows, cols = image.cols;
  cv::Mat image_undistort = cv::Mat(rows, cols, CV_8UC1);  // the undistorted image

  // Compute the content of the undistorted image
  for (int v = 0; v < rows; v++) {
    for (int u = 0; u < cols; u++) {
      double u_distorted = 0, v_distorted = 0;
      // TODO: following the formulas, compute the coordinates (u_distorted, v_distorted)
      // in the distorted image that correspond to the point (u, v) (~6 lines)
      // start your code here

      // First convert to normalized coordinates
      double x = (u - cx) / fx;
      double y = (v - cy) / fy;
      double r2 = x * x + y * y;
      // Radial (k1, k2) plus tangential (p1, p2) distortion
      double x_distorted = x * (1 + k1 * r2 + k2 * r2 * r2) + 2 * p1 * x * y + p2 * (r2 + 2 * x * x);
      double y_distorted = y * (1 + k1 * r2 + k2 * r2 * r2) + p1 * (r2 + 2 * y * y) + 2 * p2 * x * y;
      // Radial-only variant, for comparison:
      // double x_distorted = x * (1 + k1 * r2 + k2 * r2 * r2);
      // double y_distorted = y * (1 + k1 * r2 + k2 * r2 * r2);

      // Back to pixel coordinates
      u_distorted = fx * x_distorted + cx;
      v_distorted = fy * y_distorted + cy;

      // end your code here

      // Assign the value (nearest-neighbor interpolation)
      if (u_distorted >= 0 && v_distorted >= 0 && u_distorted < cols && v_distorted < rows) {
        image_undistort.at<uchar>(v, u) = image.at<uchar>((int) v_distorted, (int) u_distorted);
      } else {
        image_undistort.at<uchar>(v, u) = 0;
      }
    }
  }

  // Show the undistorted image
  cv::imshow("image undistorted", image_undistort);
  cv::waitKey();
  return 0;
}
```
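As a sanity check, the hand-written loop should agree (up to rounding) with OpenCV's built-in routine. A minimal sketch, assuming it is pasted into `main` above after `image` is loaded and reusing the same variables:

```cpp
// Cross-check with OpenCV's built-in undistortion.
// Distortion coefficient order expected by OpenCV: (k1, k2, p1, p2).
cv::Mat K = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);
cv::Mat dist_coeffs = (cv::Mat_<double>(4, 1) << k1, k2, p1, p2);
cv::Mat image_undistort_cv;
cv::undistort(image, image_undistort_cv, K, dist_coeffs);
cv::imshow("image undistorted (OpenCV)", image_undistort_cv);
cv::waitKey();
```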
Stereo Vision

A stereo camera recovers the depth that a single image loses: with focal length f (in pixels) and baseline b (in meters), a pixel with disparity d between the left and right images lies at depth z = f·b / d. The program below computes a disparity map with OpenCV's SGBM (semi-global block matching) and back-projects each valid pixel into a 3D point cloud.
```cpp
#include <opencv2/opencv.hpp>
#include <vector>
#include <string>
#include <Eigen/Core>
#include <pangolin/pangolin.h>
#include <unistd.h>

using namespace std;
using namespace Eigen;

// File paths
string left_file = "/home/qian/slambook2/ch5/stereo/left.png";
string right_file = "/home/qian/slambook2/ch5/stereo/right.png";

// Draws the point cloud in pangolin; already written, no need to modify.
// Note: STL containers of Eigen types (Vector, Matrix, ...) must be declared
// with the aligned allocator, as below.
void showPointCloud(
    const vector<Vector4d, Eigen::aligned_allocator<Vector4d>> &pointcloud);

int main(int argc, char **argv) {
  // Intrinsics
  double fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157;
  // Baseline
  double b = 0.573;

  // Read the images
  cv::Mat left = cv::imread(left_file, 0);
  cv::Mat right = cv::imread(right_file, 0);
  cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
      0, 96, 9, 8 * 9 * 9, 32 * 9 * 9, 1, 63, 10, 100, 32);  // empirically tuned ("magic") parameters
  cv::Mat disparity_sgbm, disparity;
  sgbm->compute(left, right, disparity_sgbm);
  disparity_sgbm.convertTo(disparity, CV_32F, 1.0 / 16.0f);

  // Generate the point cloud
  vector<Vector4d, Eigen::aligned_allocator<Vector4d>> pointcloud;

  // If your machine is slow, change v++ and u++ below to v += 2, u += 2
  for (int v = 0; v < left.rows; v++)
    for (int u = 0; u < left.cols; u++) {
      if (disparity.at<float>(v, u) <= 0.0 || disparity.at<float>(v, u) >= 96.0) continue;

      Vector4d point(0, 0, 0, left.at<uchar>(v, u) / 255.0);  // first three dims are xyz, fourth is color

      // Compute the position of the point from the stereo model
      double x = (u - cx) / fx;
      double y = (v - cy) / fy;
      double depth = fx * b / (disparity.at<float>(v, u));
      point[0] = x * depth;
      point[1] = y * depth;
      point[2] = depth;

      pointcloud.push_back(point);
    }

  cv::imshow("disparity", disparity / 96.0);
  cv::waitKey(0);
  // Draw the point cloud
  showPointCloud(pointcloud);
  return 0;
}

void showPointCloud(const vector<Vector4d, Eigen::aligned_allocator<Vector4d>> &pointcloud) {

  if (pointcloud.empty()) {
    cerr << "Point cloud is empty!" << endl;
    return;
  }

  pangolin::CreateWindowAndBind("Point Cloud Viewer", 1024, 768);
  glEnable(GL_DEPTH_TEST);
  glEnable(GL_BLEND);
  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

  pangolin::OpenGlRenderState s_cam(
      pangolin::ProjectionMatrix(1024, 768, 500, 500, 512, 389, 0.1, 1000),
      pangolin::ModelViewLookAt(0, -0.1, -1.8, 0, 0, 0, 0.0, -1.0, 0.0)
  );

  pangolin::View &d_cam = pangolin::CreateDisplay()
      .SetBounds(0.0, 1.0, pangolin::Attach::Pix(175), 1.0, -1024.0f / 768.0f)
      .SetHandler(new pangolin::Handler3D(s_cam));

  while (pangolin::ShouldQuit() == false) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    d_cam.Activate(s_cam);
    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
    glPointSize(2);
    glBegin(GL_POINTS);
    for (auto &p: pointcloud) {
      glColor3f(p[3], p[3], p[3]);
      glVertex3d(p[0], p[1], p[2]);
    }
    glEnd();
    pangolin::FinishFrame();
    usleep(5000);  // sleep 5 ms
  }
  return;
}
```
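The key line above is `depth = fx * b / disparity`. As a quick check of the scale: with fx = 718.856 px and b = 0.573 m, a disparity of 30 px corresponds to a depth of 718.856 × 0.573 / 30 ≈ 13.7 m. A hypothetical helper (not part of the original program) factoring this out:

```cpp
// Depth from disparity under the stereo model z = f * b / d.
// fx is the focal length in pixels, b the baseline in meters,
// d the disparity in pixels; returns depth in meters.
inline double depthFromDisparity(double fx, double b, double d) {
  return fx * b / d;
}
// Example: depthFromDisparity(718.856, 0.573, 30.0) ≈ 13.73 m.
```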
Depth Camera Point Cloud Stitching

An RGB-D camera measures depth directly, so no disparity computation is needed. The program below reads five color/depth image pairs together with the corresponding camera poses from pose.txt, back-projects each pixel into the camera frame using the intrinsics, transforms it into the world frame with the pose T, and accumulates everything into a single colored point cloud.
```cpp
#include <iostream>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <boost/format.hpp>  // for formatting strings
#include <sophus/se3.hpp>
#include <pangolin/pangolin.h>
#include <unistd.h>          // for usleep

using namespace std;

typedef vector<Sophus::SE3d, Eigen::aligned_allocator<Sophus::SE3d>> TrajectoryType;
typedef Eigen::Matrix<double, 6, 1> Vector6d;

// Draws the point cloud in pangolin; already written, no need to modify.
void showPointCloud(
    const vector<Vector6d, Eigen::aligned_allocator<Vector6d>> &pointcloud);

int main(int argc, char **argv) {
  vector<cv::Mat> colorImgs, depthImgs;  // color and depth images
  TrajectoryType poses;                  // camera poses

  ifstream fin("./pose.txt");
  if (!fin) {
    cerr << "Please run this program in the directory that contains pose.txt" << endl;
    return 1;
  }

  for (int i = 0; i < 5; i++) {
    boost::format fmt("./%s/%d.%s");  // image file name format
    colorImgs.push_back(cv::imread((fmt % "color" % (i + 1) % "png").str()));
    depthImgs.push_back(cv::imread((fmt % "depth" % (i + 1) % "pgm").str(), -1));  // -1 reads the raw image

    double data[7] = {0};
    for (auto &d:data) fin >> d;
    Sophus::SE3d pose(Eigen::Quaterniond(data[6], data[3], data[4], data[5]),
                      Eigen::Vector3d(data[0], data[1], data[2]));
    poses.push_back(pose);
  }

  // Compute the point cloud and stitch it together
  // Camera intrinsics
  double cx = 325.5;
  double cy = 253.5;
  double fx = 518.0;
  double fy = 519.0;
  double depthScale = 1000.0;
  vector<Vector6d, Eigen::aligned_allocator<Vector6d>> pointcloud;
  pointcloud.reserve(1000000);

  for (int i = 0; i < 5; i++) {
    cout << "Converting image: " << i + 1 << endl;
    cv::Mat color = colorImgs[i];
    cv::Mat depth = depthImgs[i];
    Sophus::SE3d T = poses[i];
    for (int v = 0; v < color.rows; v++)
      for (int u = 0; u < color.cols; u++) {
        unsigned int d = depth.ptr<unsigned short>(v)[u];  // depth value
        if (d == 0) continue;  // 0 means no measurement
        Eigen::Vector3d point;
        point[2] = double(d) / depthScale;
        point[0] = (u - cx) * point[2] / fx;
        point[1] = (v - cy) * point[2] / fy;
        Eigen::Vector3d pointWorld = T * point;

        Vector6d p;
        p.head<3>() = pointWorld;
        p[5] = color.data[v * color.step + u * color.channels()];      // blue
        p[4] = color.data[v * color.step + u * color.channels() + 1];  // green
        p[3] = color.data[v * color.step + u * color.channels() + 2];  // red
        pointcloud.push_back(p);
      }
  }

  cout << "The point cloud has " << pointcloud.size() << " points." << endl;
  showPointCloud(pointcloud);
  return 0;
}

void showPointCloud(const vector<Vector6d, Eigen::aligned_allocator<Vector6d>> &pointcloud) {

  if (pointcloud.empty()) {
    cerr << "Point cloud is empty!" << endl;
    return;
  }

  pangolin::CreateWindowAndBind("Point Cloud Viewer", 1024, 768);
  glEnable(GL_DEPTH_TEST);
  glEnable(GL_BLEND);
  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

  pangolin::OpenGlRenderState s_cam(
      pangolin::ProjectionMatrix(1024, 768, 500, 500, 512, 389, 0.1, 1000),
      pangolin::ModelViewLookAt(0, -0.1, -1.8, 0, 0, 0, 0.0, -1.0, 0.0)
  );

  pangolin::View &d_cam = pangolin::CreateDisplay()
      .SetBounds(0.0, 1.0, pangolin::Attach::Pix(175), 1.0, -1024.0f / 768.0f)
      .SetHandler(new pangolin::Handler3D(s_cam));

  while (pangolin::ShouldQuit() == false) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    d_cam.Activate(s_cam);
    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
    glPointSize(2);
    glBegin(GL_POINTS);
    for (auto &p: pointcloud) {
      glColor3d(p[3] / 255.0, p[4] / 255.0, p[5] / 255.0);
      glVertex3d(p[0], p[1], p[2]);
    }
    glEnd();
    pangolin::FinishFrame();
    usleep(5000);  // sleep 5 ms
  }
  return;
}
```
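One detail worth flagging in the code above: each line of pose.txt is read as seven numbers, which (judging from the read order in the code) are laid out as [tx ty tz qx qy qz qw], while Eigen::Quaterniond's constructor takes the real part w first. That is why data[6] leads in the pose construction:

```cpp
// pose.txt line layout (assumed from the read order): tx ty tz qx qy qz qw
// Eigen::Quaterniond(w, x, y, z) takes w FIRST, hence data[6] leads.
Sophus::SE3d pose(Eigen::Quaterniond(data[6], data[3], data[4], data[5]),
                  Eigen::Vector3d(data[0], data[1], data[2]));
```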