OpenCV: re-distort points with camera intrinsics/extrinsics

Asked by t30tvxxf on 2022-11-24

Given a set of 2D points, how can I apply the inverse of undistortPoints?
I have the camera intrinsics and distCoeffs and would like (for example) to create a square and distort it the way the camera would see it through its lens.
I found a 'distort' patch here: http://code.opencv.org/issues/1387, but it seems it is only good for images, whereas I want to work on sparse points.

alen0pnh #1

This question is rather old, but since I ended up here from a Google search without finding a neat answer, I decided to answer it anyway.
There is a function called projectPoints that does exactly this. OpenCV uses its C version internally when estimating camera parameters with functions like calibrateCamera and stereoCalibrate.
EDIT:
To use 2D points as input, we can set all z coordinates to 1 with convertPointsToHomogeneous and use projectPoints with no rotation and no translation.

cv::Mat points2d = ...;  // input 2D points (see the note below)
cv::Mat points3d;
cv::Mat distorted_points2d;
convertPointsToHomogeneous(points2d, points3d);  // append z = 1 to every point
projectPoints(points3d, cv::Vec3f(0,0,0), cv::Vec3f(0,0,0),  // zero rotation and translation
              camera_matrix, dist_coeffs, distorted_points2d);
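
Note that convertPointsToHomogeneous only appends z = 1 to whatever coordinates it is given, so points2d must already be in normalized (ideal) camera coordinates; otherwise projectPoints applies camera_matrix a second time. A minimal sketch of the pixel-to-ideal conversion, assuming the names from the snippet above:

// Normalize pixel coordinates first (no distortion removal here, since
// the goal is to distort ideal points, not to undo an earlier distortion).
cv::Mat ideal_points2d;
cv::undistortPoints(points2d, ideal_points2d, camera_matrix, cv::Mat());
convertPointsToHomogeneous(ideal_points2d, points3d);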

w6mmgewl #2

A straightforward solution is to use initUndistortRectifyMap to obtain a map from undistorted to distorted coordinates:

cv::Mat K = ...; // 3x3 intrinsic parameters
cv::Mat D = ...; // 4x1 or similar distortion parameters
int W = 640; // image width
int H = 480; // image height

cv::Mat mapx, mapy;
cv::initUndistortRectifyMap(K, D, cv::Mat(), K, cv::Size(W, H), 
  CV_32F, mapx, mapy);

// (x, y) is an integer pixel position in the undistorted image
float distorted_x = mapx.at<float>(y, x);
float distorted_y = mapy.at<float>(y, x);

Edit, to clarify that the code is correct. Quoting the documentation of initUndistortRectifyMap:
"For each pixel (u, v) in the destination (corrected and rectified) image, the function computes the corresponding coordinates in the source image (that is, in the original image from the camera)."

map_x(u, v) = x'' * f_x + c_x
map_y(u, v) = y'' * f_y + c_y
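
Note that mapx and mapy only hold values at integer pixel positions of the undistorted image. For sub-pixel input points, one can bilinearly interpolate the two maps; a minimal sketch (distortSubPixel is a hypothetical helper, not an OpenCV API):

cv::Point2f distortSubPixel(const cv::Mat &mapx, const cv::Mat &mapy, cv::Point2f p)
{
  // getRectSubPix extracts a 1x1 patch centered at p using bilinear interpolation
  cv::Mat patchx, patchy;
  cv::getRectSubPix(mapx, cv::Size(1, 1), p, patchx);
  cv::getRectSubPix(mapy, cv::Size(1, 1), p, patchy);
  return cv::Point2f(patchx.at<float>(0, 0), patchy.at<float>(0, 0));
}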

brqmpdu1 #3

undistortPoints is essentially an inverted version of projectPoints.
In my case, I wanted to do the following:

Undistort points:

int undistortPoints(const vector<cv::Point2f> &uv, vector<cv::Point2f> &xy, const cv::Mat &M, const cv::Mat &d)
{
    // Passing M again as the new camera matrix (last argument P) keeps the
    // output in pixel coordinates instead of normalized coordinates.
    cv::undistortPoints(uv, xy, M, d, cv::Mat(), M);
    return 0;
}

This brings the points back to coordinates very close to the original image coordinates, but without the distortion. This matches the default behavior of the cv::undistort() function.

Re-distort points:

int distortPoints(const vector<cv::Point2f> &xy, vector<cv::Point2f> &uv, const cv::Mat &M, const cv::Mat &d)
{
    vector<cv::Point2f> xy2;
    vector<cv::Point3f> xyz;
    // Normalize the pixel coordinates with a linear (distortion-free) model
    cv::undistortPoints(xy, xy2, M, cv::Mat());
    // Lift the normalized points onto the z = 1 plane
    for (cv::Point2f p : xy2) xyz.push_back(cv::Point3f(p.x, p.y, 1));
    cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1);
    cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1);
    // Re-project with the full camera model, applying the distortion
    cv::projectPoints(xyz, rvec, tvec, M, d, uv);
    return 0;
}

The small trick here is to first project the points onto the z=1 plane with a linear camera model, and then project them with the original camera model.
I found these helpers useful; I hope they work for you too.
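
For reference, a small round-trip check built on the two helpers above (the sample points are assumptions; M and d come from your calibration):

std::vector<cv::Point2f> uv = { cv::Point2f(320.f, 240.f), cv::Point2f(10.5f, 200.25f) };
std::vector<cv::Point2f> xy, uv2;
undistortPoints(uv, xy, M, d);  // distorted pixels -> undistorted pixels
distortPoints(xy, uv2, M, d);   // undistorted pixels -> back to distorted pixels
for (size_t i = 0; i < uv.size(); i++)
{
    cv::Point2f diff = uv[i] - uv2[i];
    std::cout << std::sqrt(diff.dot(diff)) << std::endl;  // should be close to 0
}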

yruzcnhs #4

I had the same need. Here is a possible solution:

void MyDistortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst, 
                     const cv::Mat & cameraMatrix, const cv::Mat & distorsionMatrix)
{
  dst.clear();
  double fx = cameraMatrix.at<double>(0,0);
  double fy = cameraMatrix.at<double>(1,1);
  double ux = cameraMatrix.at<double>(0,2);
  double uy = cameraMatrix.at<double>(1,2);

  double k1 = distorsionMatrix.at<double>(0, 0);
  double k2 = distorsionMatrix.at<double>(0, 1);
  double p1 = distorsionMatrix.at<double>(0, 2);
  double p2 = distorsionMatrix.at<double>(0, 3);
  double k3 = distorsionMatrix.at<double>(0, 4);
  //BOOST_FOREACH(const cv::Point2d &p, src)
  for (unsigned int i = 0; i < src.size(); i++)
  {
    const cv::Point2d &p = src[i];
    double x = p.x;
    double y = p.y;
    double xCorrected, yCorrected;
    //Step 1 : apply the distortion model
    {     
      double r2 = x*x + y*y;
      //radial distortion
      xCorrected = x * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
      yCorrected = y * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);

      //tangential distortion
      //Note: the equations in the "Learning OpenCV" book are wrong here:
      //xCorrected = xCorrected + (2. * p1 * y + p2 * (r2 + 2. * x * x)); 
      //yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x);
      //Correct formulae found at: http://www.vision.caltech.edu/bouguetj/calib_doc/htmls/parameters.html
      xCorrected = xCorrected + (2. * p1 * x * y + p2 * (r2 + 2. * x * x));
      yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x * y);
    }
    //Step 2 : ideal coordinates => actual coordinates
    {
      xCorrected = xCorrected * fx + ux;
      yCorrected = yCorrected * fy + uy;
    }
    dst.push_back(cv::Point2d(xCorrected, yCorrected));
  }

}

void MyDistortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst, 
                     const cv::Matx33d & cameraMatrix, const cv::Matx<double, 1, 5> & distorsionMatrix)
{
  cv::Mat cameraMatrix2(cameraMatrix);
  cv::Mat distorsionMatrix2(distorsionMatrix);
  return MyDistortPoints(src, dst, cameraMatrix2, distorsionMatrix2);
}

void TestDistort()
{
  cv::Matx33d cameraMatrix = cv::Matx33d::zeros();
  {
    //cameraMatrix Init
    double fx = 1000., fy = 950.;
    double ux = 324., uy = 249.;
    cameraMatrix(0, 0) = fx;
    cameraMatrix(1, 1) = fy;
    cameraMatrix(0, 2) = ux;
    cameraMatrix(1, 2) = uy;
    cameraMatrix(2, 2) = 1.;
  }

  cv::Matx<double, 1, 5> distorsionMatrix;
  {
    //distorsion Init
    const double k1 = 0.5, k2 = -0.5, k3 = 0.000005, p1 = 0.07, p2 = -0.05;

    distorsionMatrix(0, 0) = k1;
    distorsionMatrix(0, 1) = k2;
    distorsionMatrix(0, 2) = p1;
    distorsionMatrix(0, 3) = p2;
    distorsionMatrix(0, 4) = k3;
  }

  std::vector<cv::Point2d> distortedPoints;
  std::vector<cv::Point2d> undistortedPoints;
  std::vector<cv::Point2d> redistortedPoints;
  distortedPoints.push_back(cv::Point2d(324., 249.)); // equal to the optical center
  distortedPoints.push_back(cv::Point2d(340., 200));
  distortedPoints.push_back(cv::Point2d(785., 345.));
  distortedPoints.push_back(cv::Point2d(0., 0.));
  cv::undistortPoints(distortedPoints, undistortedPoints, cameraMatrix, distorsionMatrix);  
  MyDistortPoints(undistortedPoints, redistortedPoints, cameraMatrix, distorsionMatrix);
  cv::undistortPoints(redistortedPoints, undistortedPoints, cameraMatrix, distorsionMatrix);  

  //Poor man's unit test ensuring we have an accuracy that is better than 0.001 pixel
  for (unsigned int i = 0; i < undistortedPoints.size(); i++)
  {
    cv::Point2d dist = redistortedPoints[i] - distortedPoints[i];
    double norm = sqrt(dist.dot(dist));
    std::cout << "norm = " << norm << std::endl;
    assert(norm < 1E-3);
  }
}

lf3rwulv #5

For those still searching, here is a simple Python function that distorts the points back:

import cv2
import numpy as np

def distortPoints(undistortedPoints, k, d):
    # reshape to (N, 1, 2), the layout cv2.fisheye.distortPoints expects
    undistorted = np.float32(undistortedPoints[:, np.newaxis, :])

    # convert pixel coordinates to normalized (ideal) coordinates
    kInv = np.linalg.inv(k)
    for i in range(len(undistorted)):
        srcv = np.array([undistorted[i][0][0], undistorted[i][0][1], 1])
        dstv = kInv.dot(srcv)
        undistorted[i][0][0] = dstv[0]
        undistorted[i][0][1] = dstv[1]

    # apply the fisheye distortion model and map back to pixel coordinates
    distorted = cv2.fisheye.distortPoints(undistorted, k, d)
    return distorted

Example:

undistorted = np.array([(639.64, 362.09), (234, 567)])
distorted = distortPoints(undistorted, camK, camD)
print(distorted)
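
Note that cv2.fisheye.distortPoints applies OpenCV's fisheye distortion model, so this snippet only matches cameras calibrated with the cv2.fisheye functions (camK and camD above are assumed to come from such a calibration). For the standard pinhole model, project the normalized points with cv2.projectPoints and the usual distortion coefficients, as in the earlier answers.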

bqjvbblv #6

This question and its related questions on SO have been around for nearly a decade, but there still isn't an answer that satisfies the criteria below, so I'm proposing a new answer that

  • uses methods readily available in OpenCV,
  • works for points, not images, (and also points at subpixel locations),
  • can be used beyond fisheye distortion models,
  • does not involve manual interpolation or maps and
  • can be used in the context of rectification

Preliminaries

It is important to distinguish between ideal coordinates (also called 'normalized' or 'sensor' coordinates), which are the input variables to the distortion model, or 'x' and 'y' in the OpenCV docs, vs. observed coordinates (also called 'image' coordinates), or 'u' and 'v' in the OpenCV docs. Ideal coordinates have been normalized by the intrinsic parameters: they are divided by the focal length and taken relative to the principal point at (cx,cy). This is important to point out because the undistortPoints() method can return either ideal or observed coordinates depending on the input arguments.
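For reference, the pinhole relation between the two coordinate systems (as in the OpenCV docs, with focal lengths f_x, f_y and principal point (c_x, c_y)):

u = f_x * x + c_x
v = f_y * y + c_y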
undistortPoints() can essentially do any combination of two things: remove distortions and apply a rotational transformation, with the output either in ideal or observed coordinates, depending on whether a projection mat ( InputArray P ) is provided in the input. The input coordinates ( InputArray src ) for undistortPoints() are always in observed or image coordinates.
At a high level, undistortPoints() converts the input coordinates from observed to ideal coordinates and uses an iterative process to remove distortions from the ideal or normalized points. The process is iterative because the OpenCV distortion model is not easy to invert analytically (a sketch of the iteration follows these preliminaries).
In the example below, we use undistortPoints() twice. First, we apply a reverse rotational transformation to undo image rectification. This step can be skipped if you are not working with rectified images. The output of this first step is in observed coordinates so we use undistortPoints() again to convert these to ideal coordinates. The conversion to ideal coordinates makes setting up the input for projectPoints() easier (which we use to apply the distortions). With the ideal coordinates, we can simply convert them to homogeneous by appending a 1 to each point. This is equivalent to projecting the points to a plane in 3D world coordinates with a linear camera model.
Currently, there isn't a method in OpenCV to apply distortions to a set of ideal coordinates (with the exception of fisheye distortions using distort() ), so we employ the projectPoints() method, which can apply distortions as well as transformations as part of its projection algorithm. The tricky part about using projectPoints() is that its input is in terms of world or model coordinates in 3D, which is why we homogenized the output of the second use of undistortPoints() . By using projectPoints() with a dummy, zero-valued rotation vector ( InputArray rvec ) and translation vector ( InputArray tvec ), the result is simply a distorted set of coordinates, conveniently output in observed or image coordinates.
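
As noted above, the distortion removal is iterative. A minimal sketch of the fixed-point iteration (radial terms only; k1, k2, k3 and the distorted ideal point (xd, yd) are assumed given):

// Conceptual sketch of the iteration inside undistortPoints(): repeatedly
// divide the distorted coordinates by the radial factor evaluated at the
// current estimate of the undistorted point.
double x = xd, y = yd;  // initial guess: the distorted coordinates
for (int iter = 0; iter < 5; ++iter)
{
    double r2 = x*x + y*y;
    double icdist = 1.0 / (1.0 + k1*r2 + k2*r2*r2 + k3*r2*r2*r2);
    x = xd * icdist;
    y = yd * icdist;
}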

Some helpful links

  • Difference between undistortPoints() and projectPoints() in OpenCV
  • https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c
  • https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html#ga55c716492470bfe86b0ee9bf3a1f0f7e
  • Re-distort points with camera intrinsics/extrinsics
  • https://stackoverflow.com/questions/28678985/exact-definition-of-the-matrices-in-opencv-stereorectify#:~:text=Normally%20the%20definition%20of%20a,matrix%20with%20the%20extrinsic%20parameters
  • https://docs.opencv.org/4.x/db/d58/group__calib3d__fisheye.html#ga75d8877a98e38d0b29b6892c5f8d7765
  • https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga617b1685d4059c6040827800e72ad2b6
  • Does OpenCV's undistortPoints also rectify them?

Removing distortions in rectified image coordinates

Before presenting the solution for recovering the original distorted image coordinates, here is a short snippet that converts from the original distorted image coordinates to the corresponding rectified, undistorted coordinates; its output can be used for testing the reverse solution below.
The rotation matrix R1 and the projection matrix P1 come from stereoRectify() . The intrinsic parameters M1 and distortion parameters D1 come from stereoCalibrate() .

const size_t img_w = 2448;
const size_t img_h = 2048;
const size_t num_rand_pts = 100;

// observed coordinates of the points in the original 
// distorted image (used as a benchmark for testing)
std::vector<cv::Point2f> benchmark_obs_dist_points;

// undistorted and rectified observed coordinates
std::vector<cv::Point2f> obs_rect_undist_points;

// initialize with uniform random numbers
cv::RNG rng( 0xFFFFFFFF );
for(size_t i =0;i<num_rand_pts;++i)
    benchmark_obs_dist_points.push_back(
        cv::Point2f(rng.uniform(0.0,(double)img_w),
        rng.uniform(0.0,(double)img_h))
    ); 

// undistort and rectify
cv::undistortPoints(benchmark_obs_dist_points,obs_rect_undist_points,
    M1,D1,R1,P1);

Re-distorting and unrectifying points to recover the original image coordinates

We will need three mats to reverse the rectification: the inverse of the rectification rotation matrix R1 from stereoRectify , and two others to 'swap' the P1 and M1 projections that happen in undistortPoints() . P1_prime is the 3x3 sub-portion of the projection matrix P1 , and M1_prime embeds the intrinsic matrix M1 in a 3x4 projection matrix with no translation. Note this only works if the output of stereoRectify has no translation, i.e. the last column of P1 is zero, which can be easily verified.

assert(cv::norm(P1(cv::Rect(3,0,1,3))) == 0.0);

// create a 3x3 shallow copy of the rotation matrix portion of the projection P1
cv::Mat P1_prime = P1(cv::Rect(0,0,3,3));

// create a 3x4 projection matrix that embeds the intrinsic matrix M1
// (no translation)
cv::Mat M1_prime = cv::Mat::zeros(3,4,CV_64F);
M1.copyTo(M1_prime(cv::Rect(0,0,3,3)));

With these mats, the reversal can proceed as follows

// reverse the image rectification transformation 
// (result will still be undistorted)
std::vector<cv::Point2f> obs_undist_points;
cv::undistortPoints(obs_rect_undist_points,obs_undist_points,
    P1_prime,cv::Mat(),R1.inv(),M1_prime);

// convert the image coordinates into sensor or normalized or ideal coordinates
// (again, still undistorted)
std::vector<cv::Point2f> ideal_undist_points;
cv::undistortPoints(obs_undist_points,ideal_undist_points,M1,cv::Mat());

// artificially project the ideal 2d points to a plane in world coordinates 
// using a linear camera model (z=1)
std::vector<cv::Point3f> world_undist_points;
for (cv::Point2f pt : ideal_undist_points)
    world_undist_points.push_back(cv::Point3f(pt.x,pt.y,1));

// add the distortions back in to get the original coordinates
cv::Mat rvec = cv::Mat::zeros(3,1,CV_64FC1); // dummy zero rotation vec
cv::Mat tvec = cv::Mat::zeros(3,1,CV_64FC1); // dummy zero translation vec
std::vector<cv::Point2f> obs_dist_points;
cv::projectPoints(world_undist_points,rvec,tvec,M1,D1,obs_dist_points);

To test the results, we can compare them to the benchmark values

for(size_t i=0;i<num_rand_pts;++i)
    std::cout << "benchmark_x: " << benchmark_obs_dist_points[i].x
      << " benchmark_y: " << benchmark_obs_dist_points[i].y 
      << " computed_x: " << obs_dist_points[i].x 
      << " computed_y: " << obs_dist_points[i].y 
      << " diff_x: " 
      << std::abs(benchmark_obs_dist_points[i].x-obs_dist_points[i].x) 
      << " diff_y: " 
      << std::abs(benchmark_obs_dist_points[i].y-obs_dist_points[i].y) 
      << std::endl;
uqjltbpv

uqjltbpv7#

Here is main.cpp. It is self-contained and needs nothing but OpenCV. I don't remember where I found it; it works, and I use it in my project. The program consumes a standard set of chessboard images and generates a JSON/XML file with all of the camera's distortion parameters.

#include <iostream>
#include <sstream>
#include <time.h>
#include <stdio.h>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif

using namespace cv;
using namespace std;

static void help()
{
        cout <<  "This is a camera calibration sample." << endl
        <<  "Usage: calibration configurationFile"  << endl
        <<  "Near the sample file you'll find the configuration file, which has detailed help of "
        "how to edit it.  It may be any OpenCV supported file format XML/YAML." << endl;
}
class Settings
{
public:
        Settings() : goodInput(false) {}
        enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
        enum InputType {INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST};

        void write(FileStorage& fs) const                        //Write serialization for this class
        {
                fs << "{" << "BoardSize_Width"  << boardSize.width
                << "BoardSize_Height" << boardSize.height
                << "Square_Size"         << squareSize
                << "Calibrate_Pattern" << patternToUse
                << "Calibrate_NrOfFrameToUse" << nrFrames
                << "Calibrate_FixAspectRatio" << aspectRatio
                << "Calibrate_AssumeZeroTangentialDistortion" << calibZeroTangentDist
                << "Calibrate_FixPrincipalPointAtTheCenter" << calibFixPrincipalPoint

                << "Write_DetectedFeaturePoints" << bwritePoints
                << "Write_extrinsicParameters"   << bwriteExtrinsics
                << "Write_outputFileName"  << outputFileName

                << "Show_UndistortedImage" << showUndistorsed

                << "Input_FlipAroundHorizontalAxis" << flipVertical
                << "Input_Delay" << delay
                << "Input" << input
                << "}";
        }
        void read(const FileNode& node)                          //Read serialization for this class
        {
                node["BoardSize_Width" ] >> boardSize.width;
                node["BoardSize_Height"] >> boardSize.height;
                node["Calibrate_Pattern"] >> patternToUse;
                node["Square_Size"]  >> squareSize;
                node["Calibrate_NrOfFrameToUse"] >> nrFrames;
                node["Calibrate_FixAspectRatio"] >> aspectRatio;
                node["Write_DetectedFeaturePoints"] >> bwritePoints;
                node["Write_extrinsicParameters"] >> bwriteExtrinsics;
                node["Write_outputFileName"] >> outputFileName;
                node["Calibrate_AssumeZeroTangentialDistortion"] >> calibZeroTangentDist;
                node["Calibrate_FixPrincipalPointAtTheCenter"] >> calibFixPrincipalPoint;
                node["Input_FlipAroundHorizontalAxis"] >> flipVertical;
                node["Show_UndistortedImage"] >> showUndistorsed;
                node["Input"] >> input;
                node["Input_Delay"] >> delay;
                interprate();
        }
        void interprate()
        {
                goodInput = true;
                if (boardSize.width <= 0 || boardSize.height <= 0)
                {
                        cerr << "Invalid Board size: " << boardSize.width << " " << boardSize.height << endl;
                        goodInput = false;
                }
                if (squareSize <= 10e-6)
                {
                        cerr << "Invalid square size " << squareSize << endl;
                        goodInput = false;
                }
                if (nrFrames <= 0)
                {
                        cerr << "Invalid number of frames " << nrFrames << endl;
                        goodInput = false;
                }

                if (input.empty())      // Check for valid input
                        inputType = INVALID;
                else
                {
                        if (input[0] >= '0' && input[0] <= '9')
                        {
                                stringstream ss(input);
                                ss >> cameraID;
                                inputType = CAMERA;
                        }
                        else
                        {
                                if (readStringList(input, imageList))
                                {
                                        inputType = IMAGE_LIST;
                                        nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
                                }
                                else
                                        inputType = VIDEO_FILE;
                        }
                        if (inputType == CAMERA)
                                inputCapture.open(cameraID);
                        if (inputType == VIDEO_FILE)
                                inputCapture.open(input);
                        if (inputType != IMAGE_LIST && !inputCapture.isOpened())
                                inputType = INVALID;
                }
                if (inputType == INVALID)
                {
                        cerr << " Inexistent input: " << input << endl;
                        goodInput = false;
                }

                flag = 0;
                if(calibFixPrincipalPoint) flag |= CV_CALIB_FIX_PRINCIPAL_POINT;
                if(calibZeroTangentDist)   flag |= CV_CALIB_ZERO_TANGENT_DIST;
                if(aspectRatio)            flag |= CV_CALIB_FIX_ASPECT_RATIO;

                calibrationPattern = NOT_EXISTING;
                if (!patternToUse.compare("CHESSBOARD")) calibrationPattern = CHESSBOARD;
                if (!patternToUse.compare("CIRCLES_GRID")) calibrationPattern = CIRCLES_GRID;
                if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) calibrationPattern = ASYMMETRIC_CIRCLES_GRID;
                if (calibrationPattern == NOT_EXISTING)
                {
                        cerr << " Inexistent camera calibration mode: " << patternToUse << endl;
                        goodInput = false;
                }
                atImageList = 0;

        }
        Mat nextImage()
        {
                Mat result;
                if( inputCapture.isOpened() )
                {
                        Mat view0;
                        inputCapture >> view0;
                        view0.copyTo(result);
                }
                else if( atImageList < (int)imageList.size() )
                        result = imread(imageList[atImageList++], CV_LOAD_IMAGE_COLOR);

                return result;
        }

        static bool readStringList( const string& filename, vector<string>& l )
        {
                l.clear();
                FileStorage fs(filename, FileStorage::READ);
                if( !fs.isOpened() )
                        return false;
                FileNode n = fs.getFirstTopLevelNode();
                if( n.type() != FileNode::SEQ )
                        return false;
                FileNodeIterator it = n.begin(), it_end = n.end();
                for( ; it != it_end; ++it )
                        l.push_back((string)*it);
                return true;
        }
public:
        Size boardSize;            // The size of the board -> Number of items by width and height
        Pattern calibrationPattern;// One of the Chessboard, circles, or asymmetric circle pattern
        float squareSize;          // The size of a square in your defined unit (point, millimeter,etc).
        int nrFrames;              // The number of frames to use from the input for calibration
        float aspectRatio;         // The aspect ratio
        int delay;                 // In case of a video input
        bool bwritePoints;         //  Write detected feature points
        bool bwriteExtrinsics;     // Write extrinsic parameters
        bool calibZeroTangentDist; // Assume zero tangential distortion
        bool calibFixPrincipalPoint;// Fix the principal point at the center
        bool flipVertical;          // Flip the captured images around the horizontal axis
        string outputFileName;      // The name of the file where to write
        bool showUndistorsed;       // Show undistorted images after calibration
        string input;               // The input ->


        int cameraID;
        vector<string> imageList;
        int atImageList;
        VideoCapture inputCapture;
        InputType inputType;
        bool goodInput;
        int flag;

private:
        string patternToUse;

};

static void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
{
        if(node.empty())
                x = default_value;
        else
                x.read(node);
}

enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };

bool runCalibrationAndSave(Settings& s, Size imageSize, Mat&  cameraMatrix, Mat& distCoeffs,
                           vector<vector<Point2f> > imagePoints );

int main(int argc, char* argv[])
{
//        help();
        Settings s;
        const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml";
        FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
        if (!fs.isOpened())
        {
                cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
                return -1;
        }
        fs["Settings"] >> s;
        fs.release();                                         // close Settings file

        if (!s.goodInput)
        {
                cout << "Invalid input detected. Application stopping. " << endl;
                return -1;
        }

        vector<vector<Point2f> > imagePoints;
        Mat cameraMatrix, distCoeffs;
        Size imageSize;
        int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
        clock_t prevTimestamp = 0;
        const Scalar RED(0,0,255), GREEN(0,255,0);
        const char ESC_KEY = 27;

        for(int i = 0;;++i)
        {
                Mat view;
                bool blinkOutput = false;

                view = s.nextImage();

                //-----  If no more image, or got enough, then stop calibration and show result -------------
                if( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
                {
                        if( runCalibrationAndSave(s, imageSize,  cameraMatrix, distCoeffs, imagePoints))
                                mode = CALIBRATED;
                        else
                                mode = DETECTION;
                }
                if(view.empty())          // If no more images then run calibration, save and stop loop.
                {
                        if( imagePoints.size() > 0 )
                                runCalibrationAndSave(s, imageSize,  cameraMatrix, distCoeffs, imagePoints);
                        break;
                }

                imageSize = view.size();  // Format input image.
                if( s.flipVertical )    flip( view, view, 0 );

                vector<Point2f> pointBuf;

                bool found;
                switch( s.calibrationPattern ) // Find feature points on the input format
                {
                        case Settings::CHESSBOARD:
                                found = findChessboardCorners( view, s.boardSize, pointBuf,
                                                              CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
                                break;
                        case Settings::CIRCLES_GRID:
                                found = findCirclesGrid( view, s.boardSize, pointBuf );
                                break;
                        case Settings::ASYMMETRIC_CIRCLES_GRID:
                                found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
                                break;
                        default:
                                found = false;
                                break;
                }

                if ( found)                // If done with success,
                {
                        // improve the found corners' coordinate accuracy for chessboard
                        if( s.calibrationPattern == Settings::CHESSBOARD)
                        {
                                Mat viewGray;
                                cvtColor(view, viewGray, COLOR_BGR2GRAY);
                                cornerSubPix( viewGray, pointBuf, Size(11,11),
                                             Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
                        }

                        if( mode == CAPTURING &&  // For camera only take new samples after delay time
                           (!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
                        {
                                imagePoints.push_back(pointBuf);
                                prevTimestamp = clock();
                                blinkOutput = s.inputCapture.isOpened();
                        }

                        // Draw the corners.
                        drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
                }

                //----------------------------- Output Text ------------------------------------------------
                string msg = (mode == CAPTURING) ? "100/100" :
                mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
                int baseLine = 0;
                Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
                Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);

                if( mode == CAPTURING )
                {
                        if(s.showUndistorsed)
                                msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
                        else
                                msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
                }

                putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ?  GREEN : RED);

                if( blinkOutput )
                        bitwise_not(view, view);

                //------------------------- Video capture  output  undistorted ------------------------------
                if( mode == CALIBRATED && s.showUndistorsed )
                {
                        Mat temp = view.clone();
                        undistort(temp, view, cameraMatrix, distCoeffs);
                }

                //------------------------------ Show image and check for input commands -------------------
                imshow("Image View", view);
                char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);

                if( key  == ESC_KEY )
                        break;

                if( key == 'u' && mode == CALIBRATED )
                        s.showUndistorsed = !s.showUndistorsed;

                if( s.inputCapture.isOpened() && key == 'g' )
                {
                        mode = CAPTURING;
                        imagePoints.clear();
                }
        }

        // -----------------------Show the undistorted image for the image list ------------------------
        if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
        {
                Mat view, rview, map1, map2;
                initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                        getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                        imageSize, CV_16SC2, map1, map2);

                for(int i = 0; i < (int)s.imageList.size(); i++ )
                {
                        view = imread(s.imageList[i], 1);
                        if(view.empty())
                                continue;
                        remap(view, rview, map1, map2, INTER_LINEAR);
                        imshow("Image View", rview);
                        char c = (char)waitKey();
                        if( c  == ESC_KEY || c == 'q' || c == 'Q' )
                                break;
                }
        }

        return 0;
}

static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
                                        const vector<vector<Point2f> >& imagePoints,
                                        const vector<Mat>& rvecs, const vector<Mat>& tvecs,
                                        const Mat& cameraMatrix , const Mat& distCoeffs,
                                        vector<float>& perViewErrors)
{
        vector<Point2f> imagePoints2;
        int i, totalPoints = 0;
        double totalErr = 0, err;
        perViewErrors.resize(objectPoints.size());

        for( i = 0; i < (int)objectPoints.size(); ++i )
        {
                projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
                              distCoeffs, imagePoints2);
                err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);

                int n = (int)objectPoints[i].size();
                perViewErrors[i] = (float) std::sqrt(err*err/n);
                totalErr        += err*err;
                totalPoints     += n;
        }

        return std::sqrt(totalErr/totalPoints);
}

static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
                                     Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
        corners.clear();

        switch(patternType)
        {
                case Settings::CHESSBOARD:
                case Settings::CIRCLES_GRID:
                        for( int i = 0; i < boardSize.height; ++i )
                                for( int j = 0; j < boardSize.width; ++j )
                                        corners.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
                        break;

                case Settings::ASYMMETRIC_CIRCLES_GRID:
                        for( int i = 0; i < boardSize.height; i++ )
                                for( int j = 0; j < boardSize.width; j++ )
                                        corners.push_back(Point3f(float((2*j + i % 2)*squareSize), float(i*squareSize), 0));
                        break;
                default:
                        break;
        }
}

static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
                           vector<vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs,
                           vector<float>& reprojErrs,  double& totalAvgErr)
{

        cameraMatrix = Mat::eye(3, 3, CV_64F);
        if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
                cameraMatrix.at<double>(0,0) = 1.0;

        distCoeffs = Mat::zeros(8, 1, CV_64F);

        vector<vector<Point3f> > objectPoints(1);
        calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern);

        objectPoints.resize(imagePoints.size(),objectPoints[0]);

        //Find intrinsic and extrinsic camera parameters
        double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
                                     distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);

        cout << "Re-projection error reported by calibrateCamera: "<< rms << endl;

        bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);

        totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
                                                rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);

        return ok;
}

// Print camera parameters to the output file
static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
                             const vector<Mat>& rvecs, const vector<Mat>& tvecs,
                             const vector<float>& reprojErrs, const vector<vector<Point2f> >& imagePoints,
                             double totalAvgErr )
{
        FileStorage fs( s.outputFileName, FileStorage::WRITE );

        time_t tm;
        time( &tm );
        struct tm *t2 = localtime( &tm );
        char buf[1024];
        strftime( buf, sizeof(buf)-1, "%c", t2 );

        fs << "calibration_Time" << buf;

        if( !rvecs.empty() || !reprojErrs.empty() )
                fs << "nrOfFrames" << (int)std::max(rvecs.size(), reprojErrs.size());
        fs << "image_Width" << imageSize.width;
        fs << "image_Height" << imageSize.height;
        fs << "board_Width" << s.boardSize.width;
        fs << "board_Height" << s.boardSize.height;
        fs << "square_Size" << s.squareSize;

        if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
                fs << "FixAspectRatio" << s.aspectRatio;

        if( s.flag )
        {
                sprintf( buf, "flags: %s%s%s%s",
                        s.flag & CV_CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
                        s.flag & CV_CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "",
                        s.flag & CV_CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
                        s.flag & CV_CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "" );
                cvWriteComment( *fs, buf, 0 );

        }

        fs << "flagValue" << s.flag;

        fs << "Camera_Matrix" << cameraMatrix;
        fs << "Distortion_Coefficients" << distCoeffs;

        fs << "Avg_Reprojection_Error" << totalAvgErr;
        if( !reprojErrs.empty() )
                fs << "Per_View_Reprojection_Errors" << Mat(reprojErrs);

        if( !rvecs.empty() && !tvecs.empty() )
        {
                CV_Assert(rvecs[0].type() == tvecs[0].type());
                Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
                for( int i = 0; i < (int)rvecs.size(); i++ )
                {
                        Mat r = bigmat(Range(i, i+1), Range(0,3));
                        Mat t = bigmat(Range(i, i+1), Range(3,6));

                        CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
                        CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
                        //*.t() is MatExpr (not Mat) so we can use assignment operator
                        r = rvecs[i].t();
                        t = tvecs[i].t();
                }
                cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
                fs << "Extrinsic_Parameters" << bigmat;
        }

        if( !imagePoints.empty() )
        {
                Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
                for( int i = 0; i < (int)imagePoints.size(); i++ )
                {
                        Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
                        Mat imgpti(imagePoints[i]);
                        imgpti.copyTo(r);
                }
                fs << "Image_points" << imagePtMat;
        }
}

bool runCalibrationAndSave(Settings& s, Size imageSize, Mat&  cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
{
        vector<Mat> rvecs, tvecs;
        vector<float> reprojErrs;
        double totalAvgErr = 0;

        bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
                                 reprojErrs, totalAvgErr);
        cout << (ok ? "Calibration succeeded" : "Calibration failed")
        << ". avg re projection error = "  << totalAvgErr ;

        if( ok )
                saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
                                 imagePoints, totalAvgErr);
        return ok;
}
