diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..3d7c418
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,74 @@
+cmake_minimum_required(VERSION 2.8.12)
+
+# Project name
+project(gv_tools)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -MP")
+add_compile_options(-MP)
+
+add_definitions(-DGLOG_NO_ABBREVIATED_SEVERITIES)
+add_definitions(-DNOMINMAX)
+
+if(NOT WIN32)
+ string(ASCII 27 Esc)
+ set(ColourReset "${Esc}[m")
+ set(Red "${Esc}[31m")
+endif()
+
+if(DEFINED CATKIN_DEVEL_PREFIX)
+ message("${Red}!!!!!!!ROS workspace detected, building ROS node...!!!!!!!${ColourReset}")
+else()
+ message("${Red}!!!!!!!Building non-ROS libraries and examples...!!!!!!! ${ColourReset}")
+endif()
+
+find_package(OpenCV)
+if(NOT OpenCV_FOUND)
+  message(FATAL_ERROR "OpenCV not found.")
+endif()
+find_package(Eigen3 REQUIRED)
+message(STATUS "OPENCV: " ${OpenCV_VERSION} )
+
+include_directories(
+ ./
+ ./include
+ ${EIGEN3_INCLUDE_DIR}
+ ${OpenCV_INCLUDE_DIRS}
+)
+file(GLOB_RECURSE SRC_FILE_LIST "src/*.cpp" "src/*.cc" "src/*.c" "camodocal/*.cpp" "camodocal/*.cc" "camodocal/*.c")
+file(GLOB_RECURSE HEADER_FILE_LIST "include/*.h" "include/*.hpp")
+
+list(APPEND LIBS
+ ${OpenCV_LIBS}
+)
+add_library(gv_tools SHARED ${SRC_FILE_LIST})
+add_executable(track_dataset example/track_dataset.cpp)
+target_link_libraries(track_dataset gv_tools ${LIBS})
+
+if(DEFINED CATKIN_DEVEL_PREFIX)
+ find_package (catkin REQUIRED COMPONENTS
+ roscpp
+ rospy
+ std_msgs
+ cv_bridge
+ image_transport
+ tf
+ sensor_msgs
+ )
+ catkin_package (
+ CATKIN_DEPENDS roscpp rospy std_msgs cv_bridge image_transport tf sensor_msgs
+ LIBRARIES gv_tools
+ )
+ include_directories(
+ ${catkin_INCLUDE_DIRS}
+ )
+ add_executable (ground_tracker_node
+ ros/ground_tracker_node.cpp
+ )
+ target_link_libraries(ground_tracker_node
+ ${LIBS} ${catkin_LIBRARIES} gv_tools
+ )
+endif()
+
+
+
+
diff --git a/README.md b/README.md
index d8d2a2b..f0d23dd 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,109 @@
-# gv_tools
+# Ground-Vision Toolkit
+A toolkit for ground feature processing and inverse perspective mapping (IPM), used in our preprint [Ground-VIO](https://arxiv.org/abs/2306.08341).
+
-Ground-Vision Toolkit for ground feature processing, inverse perspective mapping (IPM) and so on.
+
-# Timeline
-- [ ] Upload code and test data sequences. *Deadline: July 15th*.
+## News
+- [2023/07/11] - Initial release of code and dataset.
+
+## Introduction
+This toolkit fully exploits the **camera-ground geometry** for accurate ground feature tracking. In the preprint, we verify that vehicle state estimation, online calibration of the camera-ground geometry, and stable ground feature tracking can be jointly achieved in a monocular visual-inertial estimator.
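+
+As a minimal illustration of the underlying geometry (not the toolkit's actual implementation): once the camera-ground geometry is known, i.e. the camera height and the ground normal expressed in the camera frame, the depth of a ground pixel follows directly from its normalized ray. The helper names below are hypothetical and only for illustration.
+
+```cpp
+#include <Eigen/Dense>
+
+// Illustrative sketch only. 'ray_c' is the normalized ray of a ground pixel in the
+// camera frame, 'n_c' is the unit ground normal in the camera frame (pointing from
+// the camera towards the road surface), and 'h' is the camera height above the ground.
+// The ground plane is n_c^T * X = h, so a point X = d * ray_c on it has depth
+// d = h / (n_c^T * ray_c).
+double groundDepth(const Eigen::Vector3d& ray_c, const Eigen::Vector3d& n_c, double h)
+{
+    return h / n_c.dot(ray_c);
+}
+
+Eigen::Vector3d groundPoint(const Eigen::Vector3d& ray_c, const Eigen::Vector3d& n_c, double h)
+{
+    return groundDepth(ray_c, n_c, h) * ray_c;  // 3D point on the road surface (camera frame)
+}
+```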
+
+## Dataset
+This repo also provides the urban road visual-inertial dataset used in [Ground-VIO](https://arxiv.org/abs/2306.08341).
+
+The dataset contains both [Carla](https://github.com/carla-simulator/carla) simulated data sequences (S-A, S-B) and real-world data sequences (R-A, R-B, R-C, R-D, R-E and R-F). Detailed information is listed below. For the real-world sequences, the GT poses are obtained from a forward-and-backward smoothed solution of PPK/tactical-grade IMU integration.
+
+### Simulation Data (Carla)
+ | Sequence | Date | Length |Sensors |Features |Image |
+ | :---: | :---: | :---: |:---: |:---: |:---: |
+ | S-A |-| 110 s| IMU/Camera/Semantic |Urban road| |
+ | S-B |-| 135 s| IMU/Camera/Semantic |Urban road| |
+
+### Real-World Data
+ | Sequence | Date| Length |Sensors |Features |Image |
+ | :---: | :---: | :---: |:---: |:---: |:---: |
+ | R-A |2022/10/12| 180 s|IMU/Camera |Urban road| |
+ | R-B |2022/10/12| 180 s|IMU/Camera |Urban road| |
+ | R-C |2022/10/12| 180 s|IMU/Camera |Urban road| |
+ | R-D |2022/10/12| 180 s|IMU/Camera |Urban road| |
+ | R-E |2022/10/12| 270 s|IMU/Camera |Highway | |
+ | R-F |2022/10/12| 270 s|IMU/Camera |Highway | |
+
+The dataset is available at [OneDrive](https://whueducn-my.sharepoint.com/:f:/g/personal/2015301610143_whu_edu_cn/Evbp_Tf3GGdHtjI890ZfuUsByA0VF6DuMAJS7bqsiOUz-g?e=r3NQI9).
+
+## Dependencies
+The dependencies include **Eigen** and **OpenCV**. We use the **[camodocal](https://github.com/hengli/camodocal)** project to handle camera models, modified to a minimal version that does not require **Ceres**.
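+
+For reference, a minimal sketch of how a camera model can be loaded and queried through the bundled camodocal code (assuming the repository root is on the include path; `cam0.yaml` is a placeholder for your camera configuration file):
+
+```cpp
+#include <Eigen/Dense>
+#include "camodocal/camera_models/CameraFactory.h"
+
+int main()
+{
+    // Load any supported model (PINHOLE, MEI, KANNALA_BRANDT, SCARAMUZZA, ...) from YAML.
+    camodocal::CameraPtr cam =
+        camodocal::CameraFactory::instance()->generateCameraFromYamlFile("cam0.yaml");
+    if (!cam) return 1;  // file missing or unknown model type
+
+    // Lift a pixel to its projective ray and project it back to the image plane.
+    Eigen::Vector3d ray;
+    cam->liftProjective(Eigen::Vector2d(320.0, 240.0), ray);
+
+    Eigen::Vector2d reprojected;
+    cam->spaceToPlane(ray, reprojected);
+    return 0;
+}
+```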
+
+## Getting Started
+The project can be built either **with** or **without ROS**.
+
+
+### Building with ROS
+
+Follow these steps to build the project in a catkin workspace:
+```Bash
+mkdir catkin_ws
+mkdir catkin_ws/src
+cd catkin_ws/src
+git clone https://github.com/GREAT-WHU/gv_tools
+cd ..
+catkin_make
+```
+
+To run the ground tracker node, run the following:
+```Bash
+source devel/setup.bash
+roslaunch gv_tools track_carla_example.launch
+```
+An RViz viewer will be launched simultaneously.
+
+Then play the data bag in another terminal:
+```Bash
+rosbag play s_a.bag
+```
+
+### Building without ROS
+To build the project as a plain CMake project:
+```Bash
+git clone https://github.com/GREAT-WHU/gv_tools
+cd gv_tools
+mkdir build && cd build
+cmake ..
+make -j8
+```
+Run the *track_dataset* example as follows:
+```Bash
+./build/track_dataset ./config/realworld/tracker.yaml DATASET_DIR/data_r_a/cam0/ DATASET_DIR/data_r_a/stamp.txt DATASET_DIR/data_r_a/gt_pose.txt
+```
+
+## Acknowledgements
+The toolkit is developed by GREAT (GNSS+ REsearch, Application and Teaching) Group, School of Geodesy and Geomatics, Wuhan University.
+
+
+
+Thanks to [VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion) for its excellent open-source code. Thanks to [msckf-vio](https://github.com/KumarRobotics/msckf_vio) for the inspiring grid-based feature extraction method.
+
+## Credit / Licensing
+
+```txt
+@article{zhou2023groundvio,
+ title={Ground-VIO: Monocular Visual-Inertial Odometry with Online Calibration of Camera-Ground Geometric Parameters},
+  author={Zhou, Yuxuan and Li, Xingxing and Li, Shengyu and Wang, Xuanbin and Shen, Zhiheng},
+  journal={arXiv preprint arXiv:2306.08341},
+ year={2023}
+}
+```
+
+The codebase and documentation are licensed under the [GNU General Public License v3 (GPL-3)](https://www.gnu.org/licenses/gpl-3.0.txt).
\ No newline at end of file
diff --git a/camodocal/camera_models/Camera.cc b/camodocal/camera_models/Camera.cc
new file mode 100644
index 0000000..492ca69
--- /dev/null
+++ b/camodocal/camera_models/Camera.cc
@@ -0,0 +1,252 @@
+#include "../camera_models/Camera.h"
+#include "../camera_models/ScaramuzzaCamera.h"
+
+#include <opencv2/calib3d/calib3d.hpp>
+
+namespace camodocal
+{
+
+Camera::Parameters::Parameters(ModelType modelType)
+ : m_modelType(modelType)
+ , m_imageWidth(0)
+ , m_imageHeight(0)
+{
+ switch (modelType)
+ {
+ case KANNALA_BRANDT:
+ m_nIntrinsics = 8;
+ break;
+ case PINHOLE:
+ m_nIntrinsics = 8;
+ break;
+ case SCARAMUZZA:
+ m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
+ break;
+ case MEI:
+ default:
+ m_nIntrinsics = 9;
+ }
+}
+
+Camera::Parameters::Parameters(ModelType modelType,
+ const std::string& cameraName,
+ int w, int h)
+ : m_modelType(modelType)
+ , m_cameraName(cameraName)
+ , m_imageWidth(w)
+ , m_imageHeight(h)
+{
+ switch (modelType)
+ {
+ case KANNALA_BRANDT:
+ m_nIntrinsics = 8;
+ break;
+ case PINHOLE:
+ m_nIntrinsics = 8;
+ break;
+ case SCARAMUZZA:
+ m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
+ break;
+ case MEI:
+ default:
+ m_nIntrinsics = 9;
+ }
+}
+
+Camera::ModelType&
+Camera::Parameters::modelType(void)
+{
+ return m_modelType;
+}
+
+std::string&
+Camera::Parameters::cameraName(void)
+{
+ return m_cameraName;
+}
+
+int&
+Camera::Parameters::imageWidth(void)
+{
+ return m_imageWidth;
+}
+
+int&
+Camera::Parameters::imageHeight(void)
+{
+ return m_imageHeight;
+}
+
+Camera::ModelType
+Camera::Parameters::modelType(void) const
+{
+ return m_modelType;
+}
+
+const std::string&
+Camera::Parameters::cameraName(void) const
+{
+ return m_cameraName;
+}
+
+int
+Camera::Parameters::imageWidth(void) const
+{
+ return m_imageWidth;
+}
+
+int
+Camera::Parameters::imageHeight(void) const
+{
+ return m_imageHeight;
+}
+
+int
+Camera::Parameters::nIntrinsics(void) const
+{
+ return m_nIntrinsics;
+}
+
+cv::Mat&
+Camera::mask(void)
+{
+ return m_mask;
+}
+
+const cv::Mat&
+Camera::mask(void) const
+{
+ return m_mask;
+}
+
+void
+Camera::estimateExtrinsics(const std::vector<cv::Point3f>& objectPoints,
+                           const std::vector<cv::Point2f>& imagePoints,
+                           cv::Mat& rvec, cv::Mat& tvec) const
+{
+    std::vector<cv::Point2f> Ms(imagePoints.size());
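+    // Lift each pixel to normalized image coordinates so that solvePnP can be called below with identity intrinsics.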
+ for (size_t i = 0; i < Ms.size(); ++i)
+ {
+ Eigen::Vector3d P;
+ liftProjective(Eigen::Vector2d(imagePoints.at(i).x, imagePoints.at(i).y), P);
+
+ P /= P(2);
+
+ Ms.at(i).x = P(0);
+ Ms.at(i).y = P(1);
+ }
+
+ // assume unit focal length, zero principal point, and zero distortion
+ cv::solvePnP(objectPoints, Ms, cv::Mat::eye(3, 3, CV_64F), cv::noArray(), rvec, tvec);
+}
+
+double
+Camera::reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const
+{
+ Eigen::Vector2d p1, p2;
+
+ spaceToPlane(P1, p1);
+ spaceToPlane(P2, p2);
+
+ return (p1 - p2).norm();
+}
+
+double
+Camera::reprojectionError(const std::vector< std::vector<cv::Point3f> >& objectPoints,
+                          const std::vector< std::vector<cv::Point2f> >& imagePoints,
+                          const std::vector<cv::Mat>& rvecs,
+                          const std::vector<cv::Mat>& tvecs,
+ cv::OutputArray _perViewErrors) const
+{
+ int imageCount = objectPoints.size();
+ size_t pointsSoFar = 0;
+ double totalErr = 0.0;
+
+ bool computePerViewErrors = _perViewErrors.needed();
+ cv::Mat perViewErrors;
+ if (computePerViewErrors)
+ {
+ _perViewErrors.create(imageCount, 1, CV_64F);
+ perViewErrors = _perViewErrors.getMat();
+ }
+
+ for (int i = 0; i < imageCount; ++i)
+ {
+ size_t pointCount = imagePoints.at(i).size();
+
+ pointsSoFar += pointCount;
+
+        std::vector<cv::Point2f> estImagePoints;
+ projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i),
+ estImagePoints);
+
+ double err = 0.0;
+ for (size_t j = 0; j < imagePoints.at(i).size(); ++j)
+ {
+ err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j));
+ }
+
+ if (computePerViewErrors)
+ {
+            perViewErrors.at<double>(i) = err / pointCount;
+ }
+
+ totalErr += err;
+ }
+
+ return totalErr / pointsSoFar;
+}
+
+double
+Camera::reprojectionError(const Eigen::Vector3d& P,
+ const Eigen::Quaterniond& camera_q,
+ const Eigen::Vector3d& camera_t,
+ const Eigen::Vector2d& observed_p) const
+{
+ Eigen::Vector3d P_cam = camera_q.toRotationMatrix() * P + camera_t;
+
+ Eigen::Vector2d p;
+ spaceToPlane(P_cam, p);
+
+ return (p - observed_p).norm();
+}
+
+void
+Camera::projectPoints(const std::vector<cv::Point3f>& objectPoints,
+                      const cv::Mat& rvec,
+                      const cv::Mat& tvec,
+                      std::vector<cv::Point2f>& imagePoints) const
+{
+ // project 3D object points to the image plane
+ imagePoints.reserve(objectPoints.size());
+
+ //double
+ cv::Mat R0;
+ cv::Rodrigues(rvec, R0);
+
+    Eigen::MatrixXd R(3,3);
+    R << R0.at<double>(0,0), R0.at<double>(0,1), R0.at<double>(0,2),
+         R0.at<double>(1,0), R0.at<double>(1,1), R0.at<double>(1,2),
+         R0.at<double>(2,0), R0.at<double>(2,1), R0.at<double>(2,2);
+
+    Eigen::Vector3d t;
+    t << tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2);
+
+ for (size_t i = 0; i < objectPoints.size(); ++i)
+ {
+ const cv::Point3f& objectPoint = objectPoints.at(i);
+
+ // Rotate and translate
+ Eigen::Vector3d P;
+ P << objectPoint.x, objectPoint.y, objectPoint.z;
+
+ P = R * P + t;
+
+ Eigen::Vector2d p;
+ spaceToPlane(P, p);
+
+ imagePoints.push_back(cv::Point2f(p(0), p(1)));
+ }
+}
+
+}
diff --git a/camodocal/camera_models/Camera.h b/camodocal/camera_models/Camera.h
new file mode 100644
index 0000000..6322252
--- /dev/null
+++ b/camodocal/camera_models/Camera.h
@@ -0,0 +1,149 @@
+#ifndef CAMERA_H
+#define CAMERA_H
+
+//#include <boost/shared_ptr.hpp>
+#include <eigen3/Eigen/Dense>
+#include <memory>
+#include <opencv2/core/core.hpp>
+#include <vector>
+
+namespace camodocal
+{
+
+class Camera
+{
+ public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ enum ModelType
+ {
+ KANNALA_BRANDT,
+ MEI,
+ PINHOLE,
+ PINHOLE_FULL,
+ SCARAMUZZA
+ };
+
+ class Parameters
+ {
+ public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ Parameters( ModelType modelType );
+
+ Parameters( ModelType modelType, const std::string& cameraName, int w, int h );
+
+ ModelType& modelType( void );
+ std::string& cameraName( void );
+ int& imageWidth( void );
+ int& imageHeight( void );
+
+ ModelType modelType( void ) const;
+ const std::string& cameraName( void ) const;
+ int imageWidth( void ) const;
+ int imageHeight( void ) const;
+
+ int nIntrinsics( void ) const;
+
+ virtual bool readFromYamlFile( const std::string& filename ) = 0;
+ virtual void writeToYamlFile( const std::string& filename ) const = 0;
+
+ protected:
+ ModelType m_modelType;
+ int m_nIntrinsics;
+ std::string m_cameraName;
+ int m_imageWidth;
+ int m_imageHeight;
+ };
+
+ virtual ModelType modelType( void ) const = 0;
+ virtual const std::string& cameraName( void ) const = 0;
+ virtual int imageWidth( void ) const = 0;
+ virtual int imageHeight( void ) const = 0;
+
+ virtual cv::Mat& mask( void );
+ virtual const cv::Mat& mask( void ) const;
+
+ virtual void estimateIntrinsics( const cv::Size& boardSize,
+ const std::vector< std::vector< cv::Point3f > >& objectPoints,
+ const std::vector< std::vector< cv::Point2f > >& imagePoints )
+ = 0;
+ virtual void estimateExtrinsics( const std::vector< cv::Point3f >& objectPoints,
+ const std::vector< cv::Point2f >& imagePoints,
+ cv::Mat& rvec,
+ cv::Mat& tvec ) const;
+
+ // Lift points from the image plane to the sphere
+ virtual void liftSphere( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const = 0;
+ //%output P
+
+ // Lift points from the image plane to the projective space
+ virtual void liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const = 0;
+ //%output P
+
+ // Projects 3D points to the image plane (Pi function)
+ virtual void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p ) const = 0;
+ //%output p
+
+ // Projects 3D points to the image plane (Pi function)
+ // and calculates jacobian
+    // virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
+    //                           Eigen::Matrix<double, 2, 3>& J) const = 0;
+ //%output p
+ //%output J
+
+ virtual void undistToPlane( const Eigen::Vector2d& p_u, Eigen::Vector2d& p ) const = 0;
+ //%output p
+
+ // virtual void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0)
+ // const = 0;
+ virtual cv::Mat initUndistortRectifyMap( cv::Mat& map1,
+ cv::Mat& map2,
+ float fx = -1.0f,
+ float fy = -1.0f,
+ cv::Size imageSize = cv::Size( 0, 0 ),
+ float cx = -1.0f,
+ float cy = -1.0f,
+ cv::Mat rmat = cv::Mat::eye( 3, 3, CV_32F ) ) const = 0;
+
+ virtual int parameterCount( void ) const = 0;
+
+ virtual void readParameters( const std::vector< double >& parameters ) = 0;
+ virtual void writeParameters( std::vector< double >& parameters ) const = 0;
+
+ virtual void writeParametersToYamlFile( const std::string& filename ) const = 0;
+
+ virtual std::string parametersToString( void ) const = 0;
+
+ /**
+ * \brief Calculates the reprojection distance between points
+ *
+ * \param P1 first 3D point coordinates
+ * \param P2 second 3D point coordinates
+ * \return euclidean distance in the plane
+ */
+ double reprojectionDist( const Eigen::Vector3d& P1, const Eigen::Vector3d& P2 ) const;
+
+ double reprojectionError( const std::vector< std::vector< cv::Point3f > >& objectPoints,
+ const std::vector< std::vector< cv::Point2f > >& imagePoints,
+ const std::vector< cv::Mat >& rvecs,
+ const std::vector< cv::Mat >& tvecs,
+ cv::OutputArray perViewErrors = cv::noArray( ) ) const;
+
+ double reprojectionError( const Eigen::Vector3d& P,
+ const Eigen::Quaterniond& camera_q,
+ const Eigen::Vector3d& camera_t,
+ const Eigen::Vector2d& observed_p ) const;
+
+ void projectPoints( const std::vector< cv::Point3f >& objectPoints,
+ const cv::Mat& rvec,
+ const cv::Mat& tvec,
+ std::vector< cv::Point2f >& imagePoints ) const;
+
+ protected:
+ cv::Mat m_mask;
+};
+
+typedef std::shared_ptr< Camera > CameraPtr;
+typedef std::shared_ptr< const Camera > CameraConstPtr;
+}
+
+#endif
diff --git a/camodocal/camera_models/CameraFactory.cc b/camodocal/camera_models/CameraFactory.cc
new file mode 100644
index 0000000..3848795
--- /dev/null
+++ b/camodocal/camera_models/CameraFactory.cc
@@ -0,0 +1,191 @@
+#include "../camera_models/CameraFactory.h"
+
+//#include
+
+#include "../camera_models/CataCamera.h"
+#include "../camera_models/EquidistantCamera.h"
+#include "../camera_models/PinholeCamera.h"
+#include "../camera_models/PinholeFullCamera.h"
+#include "../camera_models/ScaramuzzaCamera.h"
+
+
+namespace camodocal
+{
+
+std::shared_ptr< CameraFactory > CameraFactory::m_instance;
+
+CameraFactory::CameraFactory( ) {}
+
+std::shared_ptr< CameraFactory >
+CameraFactory::instance( void )
+{
+ if ( m_instance.get( ) == 0 )
+ {
+ m_instance.reset( new CameraFactory );
+ }
+
+ return m_instance;
+}
+
+CameraPtr
+CameraFactory::generateCamera( Camera::ModelType modelType, const std::string& cameraName, cv::Size imageSize ) const
+{
+ switch ( modelType )
+ {
+ case Camera::KANNALA_BRANDT:
+ {
+ EquidistantCameraPtr camera( new EquidistantCamera );
+
+ EquidistantCamera::Parameters params = camera->getParameters( );
+ params.cameraName( ) = cameraName;
+ params.imageWidth( ) = imageSize.width;
+ params.imageHeight( ) = imageSize.height;
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::PINHOLE:
+ {
+ PinholeCameraPtr camera( new PinholeCamera );
+
+ PinholeCamera::Parameters params = camera->getParameters( );
+ params.cameraName( ) = cameraName;
+ params.imageWidth( ) = imageSize.width;
+ params.imageHeight( ) = imageSize.height;
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::PINHOLE_FULL:
+ {
+ PinholeFullCameraPtr camera( new PinholeFullCamera );
+
+ PinholeFullCamera::Parameters params = camera->getParameters( );
+ params.cameraName( ) = cameraName;
+ params.imageWidth( ) = imageSize.width;
+ params.imageHeight( ) = imageSize.height;
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::SCARAMUZZA:
+ {
+ OCAMCameraPtr camera( new OCAMCamera );
+
+ OCAMCamera::Parameters params = camera->getParameters( );
+ params.cameraName( ) = cameraName;
+ params.imageWidth( ) = imageSize.width;
+ params.imageHeight( ) = imageSize.height;
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::MEI:
+ default:
+ {
+ CataCameraPtr camera( new CataCamera );
+
+ CataCamera::Parameters params = camera->getParameters( );
+ params.cameraName( ) = cameraName;
+ params.imageWidth( ) = imageSize.width;
+ params.imageHeight( ) = imageSize.height;
+ camera->setParameters( params );
+ return camera;
+ }
+ }
+}
+
+CameraPtr
+CameraFactory::generateCameraFromYamlFile( const std::string& filename )
+{
+ cv::FileStorage fs( filename, cv::FileStorage::READ );
+
+ if ( !fs.isOpened( ) )
+ {
+ return CameraPtr( );
+ }
+
+ Camera::ModelType modelType = Camera::MEI;
+ if ( !fs["model_type"].isNone( ) )
+ {
+ std::string sModelType;
+ fs["model_type"] >> sModelType;
+
+ std::transform(sModelType.begin(), sModelType.end(), sModelType.begin(), ::tolower);
+
+ if ( sModelType== "kannala_brandt" )
+ {
+ modelType = Camera::KANNALA_BRANDT;
+ }
+ else if ( sModelType=="mei" )
+ {
+ modelType = Camera::MEI;
+ }
+ else if ( sModelType=="scaramuzza" )
+ {
+ modelType = Camera::SCARAMUZZA;
+ }
+ else if ( sModelType=="pinhole" )
+ {
+ modelType = Camera::PINHOLE;
+ }
+        else if ( sModelType=="pinhole_full" )
+ {
+ modelType = Camera::PINHOLE_FULL;
+ }
+ else
+ {
+ std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl;
+ return CameraPtr( );
+ }
+ }
+
+ switch ( modelType )
+ {
+ case Camera::KANNALA_BRANDT:
+ {
+ EquidistantCameraPtr camera( new EquidistantCamera );
+
+ EquidistantCamera::Parameters params = camera->getParameters( );
+ params.readFromYamlFile( filename );
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::PINHOLE:
+ {
+ PinholeCameraPtr camera( new PinholeCamera );
+
+ PinholeCamera::Parameters params = camera->getParameters( );
+ params.readFromYamlFile( filename );
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::PINHOLE_FULL:
+ {
+ PinholeFullCameraPtr camera( new PinholeFullCamera );
+
+ PinholeFullCamera::Parameters params = camera->getParameters( );
+ params.readFromYamlFile( filename );
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::SCARAMUZZA:
+ {
+ OCAMCameraPtr camera( new OCAMCamera );
+
+ OCAMCamera::Parameters params = camera->getParameters( );
+ params.readFromYamlFile( filename );
+ camera->setParameters( params );
+ return camera;
+ }
+ case Camera::MEI:
+ default:
+ {
+ CataCameraPtr camera( new CataCamera );
+
+ CataCamera::Parameters params = camera->getParameters( );
+ params.readFromYamlFile( filename );
+ camera->setParameters( params );
+ return camera;
+ }
+ }
+
+ return CameraPtr( );
+}
+}
diff --git a/camodocal/camera_models/CameraFactory.h b/camodocal/camera_models/CameraFactory.h
new file mode 100644
index 0000000..db726ec
--- /dev/null
+++ b/camodocal/camera_models/CameraFactory.h
@@ -0,0 +1,33 @@
+#ifndef CAMERAFACTORY_H
+#define CAMERAFACTORY_H
+
+#include <memory>
+#include <opencv2/core/core.hpp>
+#include <string>
+
+#include "../camera_models/Camera.h"
+
+namespace camodocal
+{
+
+class CameraFactory
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+ CameraFactory();
+
+    static std::shared_ptr<CameraFactory> instance(void);
+
+ CameraPtr generateCamera(Camera::ModelType modelType,
+ const std::string& cameraName,
+ cv::Size imageSize) const;
+
+ CameraPtr generateCameraFromYamlFile(const std::string& filename);
+
+private:
+    static std::shared_ptr<CameraFactory> m_instance;
+};
+
+}
+
+#endif
diff --git a/camodocal/camera_models/CataCamera.cc b/camodocal/camera_models/CataCamera.cc
new file mode 100644
index 0000000..100979c
--- /dev/null
+++ b/camodocal/camera_models/CataCamera.cc
@@ -0,0 +1,1005 @@
+#include "../camera_models/CataCamera.h"
+
+#include <cmath>
+#include <cstdio>
+#include <eigen3/Eigen/Dense>
+#include <iomanip>
+#include <iostream>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/core/eigen.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+
+#include "../gpl/gpl.h"
+
+namespace camodocal
+{
+
+CataCamera::Parameters::Parameters()
+ : Camera::Parameters(MEI)
+ , m_xi(0.0)
+ , m_k1(0.0)
+ , m_k2(0.0)
+ , m_p1(0.0)
+ , m_p2(0.0)
+ , m_gamma1(0.0)
+ , m_gamma2(0.0)
+ , m_u0(0.0)
+ , m_v0(0.0)
+{
+
+}
+
+CataCamera::Parameters::Parameters(const std::string& cameraName,
+ int w, int h,
+ double xi,
+ double k1, double k2,
+ double p1, double p2,
+ double gamma1, double gamma2,
+ double u0, double v0)
+ : Camera::Parameters(MEI, cameraName, w, h)
+ , m_xi(xi)
+ , m_k1(k1)
+ , m_k2(k2)
+ , m_p1(p1)
+ , m_p2(p2)
+ , m_gamma1(gamma1)
+ , m_gamma2(gamma2)
+ , m_u0(u0)
+ , m_v0(v0)
+{
+}
+
+double&
+CataCamera::Parameters::xi(void)
+{
+ return m_xi;
+}
+
+double&
+CataCamera::Parameters::k1(void)
+{
+ return m_k1;
+}
+
+double&
+CataCamera::Parameters::k2(void)
+{
+ return m_k2;
+}
+
+double&
+CataCamera::Parameters::p1(void)
+{
+ return m_p1;
+}
+
+double&
+CataCamera::Parameters::p2(void)
+{
+ return m_p2;
+}
+
+double&
+CataCamera::Parameters::gamma1(void)
+{
+ return m_gamma1;
+}
+
+double&
+CataCamera::Parameters::gamma2(void)
+{
+ return m_gamma2;
+}
+
+double&
+CataCamera::Parameters::u0(void)
+{
+ return m_u0;
+}
+
+double&
+CataCamera::Parameters::v0(void)
+{
+ return m_v0;
+}
+
+double
+CataCamera::Parameters::xi(void) const
+{
+ return m_xi;
+}
+
+double
+CataCamera::Parameters::k1(void) const
+{
+ return m_k1;
+}
+
+double
+CataCamera::Parameters::k2(void) const
+{
+ return m_k2;
+}
+
+double
+CataCamera::Parameters::p1(void) const
+{
+ return m_p1;
+}
+
+double
+CataCamera::Parameters::p2(void) const
+{
+ return m_p2;
+}
+
+double
+CataCamera::Parameters::gamma1(void) const
+{
+ return m_gamma1;
+}
+
+double
+CataCamera::Parameters::gamma2(void) const
+{
+ return m_gamma2;
+}
+
+double
+CataCamera::Parameters::u0(void) const
+{
+ return m_u0;
+}
+
+double
+CataCamera::Parameters::v0(void) const
+{
+ return m_v0;
+}
+
+bool
+CataCamera::Parameters::readFromYamlFile(const std::string& filename)
+{
+ cv::FileStorage fs(filename, cv::FileStorage::READ);
+
+ if (!fs.isOpened())
+ {
+ return false;
+ }
+
+ if (!fs["model_type"].isNone())
+ {
+ std::string sModelType;
+ fs["model_type"] >> sModelType;
+
+ if (sModelType.compare("MEI") != 0)
+ {
+ return false;
+ }
+ }
+
+ m_modelType = MEI;
+ fs["camera_name"] >> m_cameraName;
+    m_imageWidth = static_cast<int>(fs["image_width"]);
+    m_imageHeight = static_cast<int>(fs["image_height"]);
+
+    cv::FileNode n = fs["mirror_parameters"];
+    m_xi = static_cast<double>(n["xi"]);
+
+    n = fs["distortion_parameters"];
+    m_k1 = static_cast<double>(n["k1"]);
+    m_k2 = static_cast<double>(n["k2"]);
+    m_p1 = static_cast<double>(n["p1"]);
+    m_p2 = static_cast<double>(n["p2"]);
+
+    n = fs["projection_parameters"];
+    m_gamma1 = static_cast<double>(n["gamma1"]);
+    m_gamma2 = static_cast<double>(n["gamma2"]);
+    m_u0 = static_cast<double>(n["u0"]);
+    m_v0 = static_cast<double>(n["v0"]);
+
+ return true;
+}
+
+void
+CataCamera::Parameters::writeToYamlFile(const std::string& filename) const
+{
+ cv::FileStorage fs(filename, cv::FileStorage::WRITE);
+
+ fs << "model_type" << "MEI";
+ fs << "camera_name" << m_cameraName;
+ fs << "image_width" << m_imageWidth;
+ fs << "image_height" << m_imageHeight;
+
+ // mirror: xi
+ fs << "mirror_parameters";
+ fs << "{" << "xi" << m_xi << "}";
+
+ // radial distortion: k1, k2
+ // tangential distortion: p1, p2
+ fs << "distortion_parameters";
+ fs << "{" << "k1" << m_k1
+ << "k2" << m_k2
+ << "p1" << m_p1
+ << "p2" << m_p2 << "}";
+
+ // projection: gamma1, gamma2, u0, v0
+ fs << "projection_parameters";
+ fs << "{" << "gamma1" << m_gamma1
+ << "gamma2" << m_gamma2
+ << "u0" << m_u0
+ << "v0" << m_v0 << "}";
+
+ fs.release();
+}
+
+CataCamera::Parameters&
+CataCamera::Parameters::operator=(const CataCamera::Parameters& other)
+{
+ if (this != &other)
+ {
+ m_modelType = other.m_modelType;
+ m_cameraName = other.m_cameraName;
+ m_imageWidth = other.m_imageWidth;
+ m_imageHeight = other.m_imageHeight;
+ m_xi = other.m_xi;
+ m_k1 = other.m_k1;
+ m_k2 = other.m_k2;
+ m_p1 = other.m_p1;
+ m_p2 = other.m_p2;
+ m_gamma1 = other.m_gamma1;
+ m_gamma2 = other.m_gamma2;
+ m_u0 = other.m_u0;
+ m_v0 = other.m_v0;
+ }
+
+ return *this;
+}
+
+std::ostream&
+operator<< (std::ostream& out, const CataCamera::Parameters& params)
+{
+ out << "Camera Parameters:" << std::endl;
+ out << " model_type " << "MEI" << std::endl;
+ out << " camera_name " << params.m_cameraName << std::endl;
+ out << " image_width " << params.m_imageWidth << std::endl;
+ out << " image_height " << params.m_imageHeight << std::endl;
+
+ out << "Mirror Parameters" << std::endl;
+ out << std::fixed << std::setprecision(10);
+ out << " xi " << params.m_xi << std::endl;
+
+ // radial distortion: k1, k2
+ // tangential distortion: p1, p2
+ out << "Distortion Parameters" << std::endl;
+ out << " k1 " << params.m_k1 << std::endl
+ << " k2 " << params.m_k2 << std::endl
+ << " p1 " << params.m_p1 << std::endl
+ << " p2 " << params.m_p2 << std::endl;
+
+ // projection: gamma1, gamma2, u0, v0
+ out << "Projection Parameters" << std::endl;
+ out << " gamma1 " << params.m_gamma1 << std::endl
+ << " gamma2 " << params.m_gamma2 << std::endl
+ << " u0 " << params.m_u0 << std::endl
+ << " v0 " << params.m_v0 << std::endl;
+
+ return out;
+}
+
+CataCamera::CataCamera()
+ : m_inv_K11(1.0)
+ , m_inv_K13(0.0)
+ , m_inv_K22(1.0)
+ , m_inv_K23(0.0)
+ , m_noDistortion(true)
+{
+
+}
+
+CataCamera::CataCamera(const std::string& cameraName,
+ int imageWidth, int imageHeight,
+ double xi, double k1, double k2, double p1, double p2,
+ double gamma1, double gamma2, double u0, double v0)
+ : mParameters(cameraName, imageWidth, imageHeight,
+ xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0)
+{
+ if ((mParameters.k1() == 0.0) &&
+ (mParameters.k2() == 0.0) &&
+ (mParameters.p1() == 0.0) &&
+ (mParameters.p2() == 0.0))
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.gamma1();
+ m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
+ m_inv_K22 = 1.0 / mParameters.gamma2();
+ m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
+}
+
+CataCamera::CataCamera(const CataCamera::Parameters& params)
+ : mParameters(params)
+{
+ if ((mParameters.k1() == 0.0) &&
+ (mParameters.k2() == 0.0) &&
+ (mParameters.p1() == 0.0) &&
+ (mParameters.p2() == 0.0))
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.gamma1();
+ m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
+ m_inv_K22 = 1.0 / mParameters.gamma2();
+ m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
+}
+
+Camera::ModelType
+CataCamera::modelType(void) const
+{
+ return mParameters.modelType();
+}
+
+const std::string&
+CataCamera::cameraName(void) const
+{
+ return mParameters.cameraName();
+}
+
+int
+CataCamera::imageWidth(void) const
+{
+ return mParameters.imageWidth();
+}
+
+int
+CataCamera::imageHeight(void) const
+{
+ return mParameters.imageHeight();
+}
+
+void
+CataCamera::estimateIntrinsics(const cv::Size& boardSize,
+                               const std::vector< std::vector<cv::Point3f> >& objectPoints,
+                               const std::vector< std::vector<cv::Point2f> >& imagePoints)
+{
+ Parameters params = getParameters();
+
+ double u0 = params.imageWidth() / 2.0;
+ double v0 = params.imageHeight() / 2.0;
+
+ double gamma0 = 0.0;
+    double minReprojErr = std::numeric_limits<double>::max();
+
+    std::vector<cv::Mat> rvecs, tvecs;
+ rvecs.assign(objectPoints.size(), cv::Mat());
+ tvecs.assign(objectPoints.size(), cv::Mat());
+
+ params.xi() = 1.0;
+ params.k1() = 0.0;
+ params.k2() = 0.0;
+ params.p1() = 0.0;
+ params.p2() = 0.0;
+ params.u0() = u0;
+ params.v0() = v0;
+
+ // Initialize gamma (focal length)
+ // Use non-radial line image and xi = 1
+ for (size_t i = 0; i < imagePoints.size(); ++i)
+ {
+ for (int r = 0; r < boardSize.height; ++r)
+ {
+ cv::Mat P(boardSize.width, 4, CV_64F);
+ for (int c = 0; c < boardSize.width; ++c)
+ {
+ const cv::Point2f& imagePoint = imagePoints.at(i).at(r * boardSize.width + c);
+
+ double u = imagePoint.x - u0;
+ double v = imagePoint.y - v0;
+
+                P.at<double>(c, 0) = u;
+                P.at<double>(c, 1) = v;
+                P.at<double>(c, 2) = 0.5;
+                P.at<double>(c, 3) = -0.5 * (square(u) + square(v));
+ }
+
+ cv::Mat C;
+ cv::SVD::solveZ(P, C);
+
+            double t = square(C.at<double>(0)) + square(C.at<double>(1)) + C.at<double>(2) * C.at<double>(3);
+ if (t < 0.0)
+ {
+ continue;
+ }
+
+ // check that line image is not radial
+ double d = sqrt(1.0 / t);
+            double nx = C.at<double>(0) * d;
+            double ny = C.at<double>(1) * d;
+ if (hypot(nx, ny) > 0.95)
+ {
+ continue;
+ }
+
+            double gamma = sqrt(C.at<double>(2) / C.at<double>(3));
+
+ params.gamma1() = gamma;
+ params.gamma2() = gamma;
+ setParameters(params);
+
+ for (size_t j = 0; j < objectPoints.size(); ++j)
+ {
+ estimateExtrinsics(objectPoints.at(j), imagePoints.at(j), rvecs.at(j), tvecs.at(j));
+ }
+
+ double reprojErr = reprojectionError(objectPoints, imagePoints, rvecs, tvecs, cv::noArray());
+
+ if (reprojErr < minReprojErr)
+ {
+ minReprojErr = reprojErr;
+ gamma0 = gamma;
+ }
+ }
+ }
+
+    if (gamma0 <= 0.0 && minReprojErr >= std::numeric_limits<double>::max())
+ {
+ std::cout << "[" << params.cameraName() << "] "
+ << "# INFO: CataCamera model fails with given data. " << std::endl;
+
+ return;
+ }
+
+ params.gamma1() = gamma0;
+ params.gamma2() = gamma0;
+ setParameters(params);
+}
+
+/**
+ * \brief Lifts a point from the image plane to the unit sphere
+ *
+ * \param p image coordinates
+ * \param P coordinates of the point on the sphere
+ */
+void
+CataCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
+{
+ double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u;
+ double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d;
+ double lambda;
+
+ // Lift points to normalised plane
+ mx_d = m_inv_K11 * p(0) + m_inv_K13;
+ my_d = m_inv_K22 * p(1) + m_inv_K23;
+
+ if (m_noDistortion)
+ {
+ mx_u = mx_d;
+ my_u = my_d;
+ }
+ else
+ {
+ // Apply inverse distortion model
+ if (0)
+ {
+ double k1 = mParameters.k1();
+ double k2 = mParameters.k2();
+ double p1 = mParameters.p1();
+ double p2 = mParameters.p2();
+
+ // Inverse distortion model
+ // proposed by Heikkila
+ mx2_d = mx_d*mx_d;
+ my2_d = my_d*my_d;
+ mxy_d = mx_d*my_d;
+ rho2_d = mx2_d+my2_d;
+ rho4_d = rho2_d*rho2_d;
+ radDist_d = k1*rho2_d+k2*rho4_d;
+ Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d;
+ Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d;
+ inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d);
+
+ mx_u = mx_d - inv_denom_d*Dx_d;
+ my_u = my_d - inv_denom_d*Dy_d;
+ }
+ else
+ {
+ // Recursive distortion model
+ int n = 6;
+ Eigen::Vector2d d_u;
+ distortion(Eigen::Vector2d(mx_d, my_d), d_u);
+ // Approximate value
+ mx_u = mx_d - d_u(0);
+ my_u = my_d - d_u(1);
+
+ for (int i = 1; i < n; ++i)
+ {
+ distortion(Eigen::Vector2d(mx_u, my_u), d_u);
+ mx_u = mx_d - d_u(0);
+ my_u = my_d - d_u(1);
+ }
+ }
+ }
+
+ // Lift normalised points to the sphere (inv_hslash)
+ double xi = mParameters.xi();
+ if (xi == 1.0)
+ {
+ lambda = 2.0 / (mx_u * mx_u + my_u * my_u + 1.0);
+ P << lambda * mx_u, lambda * my_u, lambda - 1.0;
+ }
+ else
+ {
+ lambda = (xi + sqrt(1.0 + (1.0 - xi * xi) * (mx_u * mx_u + my_u * my_u))) / (1.0 + mx_u * mx_u + my_u * my_u);
+ P << lambda * mx_u, lambda * my_u, lambda - xi;
+ }
+}
+
+/**
+ * \brief Lifts a point from the image plane to its projective ray
+ *
+ * \param p image coordinates
+ * \param P coordinates of the projective ray
+ */
+void
+CataCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
+{
+ double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u;
+ double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d;
+ //double lambda;
+
+ // Lift points to normalised plane
+ mx_d = m_inv_K11 * p(0) + m_inv_K13;
+ my_d = m_inv_K22 * p(1) + m_inv_K23;
+
+ if (m_noDistortion)
+ {
+ mx_u = mx_d;
+ my_u = my_d;
+ }
+ else
+ {
+ if (0)
+ {
+ double k1 = mParameters.k1();
+ double k2 = mParameters.k2();
+ double p1 = mParameters.p1();
+ double p2 = mParameters.p2();
+
+ // Apply inverse distortion model
+ // proposed by Heikkila
+ mx2_d = mx_d*mx_d;
+ my2_d = my_d*my_d;
+ mxy_d = mx_d*my_d;
+ rho2_d = mx2_d+my2_d;
+ rho4_d = rho2_d*rho2_d;
+ radDist_d = k1*rho2_d+k2*rho4_d;
+ Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d;
+ Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d;
+ inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d);
+
+ mx_u = mx_d - inv_denom_d*Dx_d;
+ my_u = my_d - inv_denom_d*Dy_d;
+ }
+ else
+ {
+ // Recursive distortion model
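+            // Fixed-point iteration: re-evaluate the distortion at the current undistorted
+            // estimate and subtract it from the distorted coordinates.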
+ int n = 8;
+ Eigen::Vector2d d_u;
+ distortion(Eigen::Vector2d(mx_d, my_d), d_u);
+ // Approximate value
+ mx_u = mx_d - d_u(0);
+ my_u = my_d - d_u(1);
+
+ for (int i = 1; i < n; ++i)
+ {
+ distortion(Eigen::Vector2d(mx_u, my_u), d_u);
+ mx_u = mx_d - d_u(0);
+ my_u = my_d - d_u(1);
+ }
+ }
+ }
+
+ // Obtain a projective ray
+ double xi = mParameters.xi();
+ if (xi == 1.0)
+ {
+ P << mx_u, my_u, (1.0 - mx_u * mx_u - my_u * my_u) / 2.0;
+ }
+ else
+ {
+ // Reuse variable
+ rho2_d = mx_u * mx_u + my_u * my_u;
+ P << mx_u, my_u, 1.0 - xi * (rho2_d + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * rho2_d));
+ }
+}
+
+
+/**
+ * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+CataCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const
+{
+ Eigen::Vector2d p_u, p_d;
+
+ // Project points to the normalised plane
+ double z = P(2) + mParameters.xi() * P.norm();
+ p_u << P(0) / z, P(1) / z;
+
+ if (m_noDistortion)
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion(p_u, d_u);
+ p_d = p_u + d_u;
+ }
+
+ // Apply generalised projection matrix
+ p << mParameters.gamma1() * p_d(0) + mParameters.u0(),
+ mParameters.gamma2() * p_d(1) + mParameters.v0();
+}
+
+#if 0
+/**
+ * \brief Project a 3D point to the image plane and calculate Jacobian
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+CataCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
+                         Eigen::Matrix<double, 2, 3>& J) const
+{
+ double xi = mParameters.xi();
+
+ Eigen::Vector2d p_u, p_d;
+ double norm, inv_denom;
+ double dxdmx, dydmx, dxdmy, dydmy;
+
+ norm = P.norm();
+ // Project points to the normalised plane
+ inv_denom = 1.0 / (P(2) + xi * norm);
+ p_u << inv_denom * P(0), inv_denom * P(1);
+
+ // Calculate jacobian
+ inv_denom = inv_denom * inv_denom / norm;
+ double dudx = inv_denom * (norm * P(2) + xi * (P(1) * P(1) + P(2) * P(2)));
+ double dvdx = -inv_denom * xi * P(0) * P(1);
+ double dudy = dvdx;
+ double dvdy = inv_denom * (norm * P(2) + xi * (P(0) * P(0) + P(2) * P(2)));
+ inv_denom = inv_denom * (-xi * P(2) - norm); // reuse variable
+ double dudz = P(0) * inv_denom;
+ double dvdz = P(1) * inv_denom;
+
+ if (m_noDistortion)
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion(p_u, d_u);
+ p_d = p_u + d_u;
+ }
+
+ double gamma1 = mParameters.gamma1();
+ double gamma2 = mParameters.gamma2();
+
+ // Make the product of the jacobians
+ // and add projection matrix jacobian
+ inv_denom = gamma1 * (dudx * dxdmx + dvdx * dxdmy); // reuse
+ dvdx = gamma2 * (dudx * dydmx + dvdx * dydmy);
+ dudx = inv_denom;
+
+ inv_denom = gamma1 * (dudy * dxdmx + dvdy * dxdmy); // reuse
+ dvdy = gamma2 * (dudy * dydmx + dvdy * dydmy);
+ dudy = inv_denom;
+
+ inv_denom = gamma1 * (dudz * dxdmx + dvdz * dxdmy); // reuse
+ dvdz = gamma2 * (dudz * dydmx + dvdz * dydmy);
+ dudz = inv_denom;
+
+ // Apply generalised projection matrix
+ p << gamma1 * p_d(0) + mParameters.u0(),
+ gamma2 * p_d(1) + mParameters.v0();
+
+ J << dudx, dudy, dudz,
+ dvdx, dvdy, dvdz;
+}
+#endif
+
+/**
+ * \brief Projects an undistorted 2D point p_u to the image plane
+ *
+ * \param p_u 2D point coordinates
+ * \return image point coordinates
+ */
+void
+CataCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const
+{
+ Eigen::Vector2d p_d;
+
+ if (m_noDistortion)
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion(p_u, d_u);
+ p_d = p_u + d_u;
+ }
+
+ // Apply generalised projection matrix
+ p << mParameters.gamma1() * p_d(0) + mParameters.u0(),
+ mParameters.gamma2() * p_d(1) + mParameters.v0();
+}
+
+/**
+ * \brief Apply distortion to input point (from the normalised plane)
+ *
+ * \param p_u undistorted coordinates of point on the normalised plane
+ * \return to obtain the distorted point: p_d = p_u + d_u
+ */
+void
+CataCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const
+{
+ double k1 = mParameters.k1();
+ double k2 = mParameters.k2();
+ double p1 = mParameters.p1();
+ double p2 = mParameters.p2();
+
+ double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
+
+ mx2_u = p_u(0) * p_u(0);
+ my2_u = p_u(1) * p_u(1);
+ mxy_u = p_u(0) * p_u(1);
+ rho2_u = mx2_u + my2_u;
+ rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
+ d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
+ p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
+}
+
+/**
+ * \brief Apply distortion to input point (from the normalised plane)
+ * and calculate Jacobian
+ *
+ * \param p_u undistorted coordinates of point on the normalised plane
+ * \return to obtain the distorted point: p_d = p_u + d_u
+ */
+void
+CataCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
+ Eigen::Matrix2d& J) const
+{
+ double k1 = mParameters.k1();
+ double k2 = mParameters.k2();
+ double p1 = mParameters.p1();
+ double p2 = mParameters.p2();
+
+ double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
+
+ mx2_u = p_u(0) * p_u(0);
+ my2_u = p_u(1) * p_u(1);
+ mxy_u = p_u(0) * p_u(1);
+ rho2_u = mx2_u + my2_u;
+ rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
+ d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
+ p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
+
+ double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u + k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) + 6.0 * p2 * p_u(0);
+ double dydmx = k1 * 2.0 * p_u(0) * p_u(1) + k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) + 2.0 * p2 * p_u(1);
+ double dxdmy = dydmx;
+ double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u + k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) + 2.0 * p2 * p_u(0);
+
+ J << dxdmx, dxdmy,
+ dydmx, dydmy;
+}
+
+void
+CataCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const
+{
+ cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());
+
+ cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F);
+ cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F);
+
+ for (int v = 0; v < imageSize.height; ++v)
+ {
+ for (int u = 0; u < imageSize.width; ++u)
+ {
+ double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
+ double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;
+
+ double xi = mParameters.xi();
+ double d2 = mx_u * mx_u + my_u * my_u;
+
+ Eigen::Vector3d P;
+ P << mx_u, my_u, 1.0 - xi * (d2 + 1.0) / (xi + sqrt(1.0 + (1.0 - xi * xi) * d2));
+
+ Eigen::Vector2d p;
+ spaceToPlane(P, p);
+
+            mapX.at<float>(v,u) = p(0);
+            mapY.at<float>(v,u) = p(1);
+ }
+ }
+
+ cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
+}
+
+cv::Mat
+CataCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
+ float fx, float fy,
+ cv::Size imageSize,
+ float cx, float cy,
+ cv::Mat rmat) const
+{
+ if (imageSize == cv::Size(0, 0))
+ {
+ imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
+ }
+
+ cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
+ cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
+
+ Eigen::Matrix3f K_rect;
+
+ if (cx == -1.0f && cy == -1.0f)
+ {
+ K_rect << fx, 0, imageSize.width / 2,
+ 0, fy, imageSize.height / 2,
+ 0, 0, 1;
+ }
+ else
+ {
+ K_rect << fx, 0, cx,
+ 0, fy, cy,
+ 0, 0, 1;
+ }
+
+ if (fx == -1.0f || fy == -1.0f)
+ {
+ K_rect(0,0) = mParameters.gamma1();
+ K_rect(1,1) = mParameters.gamma2();
+ }
+
+ Eigen::Matrix3f K_rect_inv = K_rect.inverse();
+
+ Eigen::Matrix3f R, R_inv;
+ cv::cv2eigen(rmat, R);
+ R_inv = R.inverse();
+
+ for (int v = 0; v < imageSize.height; ++v)
+ {
+ for (int u = 0; u < imageSize.width; ++u)
+ {
+ Eigen::Vector3f xo;
+ xo << u, v, 1;
+
+ Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
+
+ Eigen::Vector2d p;
+ spaceToPlane(uo.cast(), p);
+
+            mapX.at<float>(v,u) = p(0);
+            mapY.at<float>(v,u) = p(1);
+ }
+ }
+
+ cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
+
+ cv::Mat K_rect_cv;
+ cv::eigen2cv(K_rect, K_rect_cv);
+ return K_rect_cv;
+}
+
+int
+CataCamera::parameterCount(void) const
+{
+ return 9;
+}
+
+const CataCamera::Parameters&
+CataCamera::getParameters(void) const
+{
+ return mParameters;
+}
+
+void
+CataCamera::setParameters(const CataCamera::Parameters& parameters)
+{
+ mParameters = parameters;
+
+ if ((mParameters.k1() == 0.0) &&
+ (mParameters.k2() == 0.0) &&
+ (mParameters.p1() == 0.0) &&
+ (mParameters.p2() == 0.0))
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ m_inv_K11 = 1.0 / mParameters.gamma1();
+ m_inv_K13 = -mParameters.u0() / mParameters.gamma1();
+ m_inv_K22 = 1.0 / mParameters.gamma2();
+ m_inv_K23 = -mParameters.v0() / mParameters.gamma2();
+}
+
+void
+CataCamera::readParameters(const std::vector<double>& parameterVec)
+{
+ if ((int)parameterVec.size() != parameterCount())
+ {
+ return;
+ }
+
+ Parameters params = getParameters();
+
+ params.xi() = parameterVec.at(0);
+ params.k1() = parameterVec.at(1);
+ params.k2() = parameterVec.at(2);
+ params.p1() = parameterVec.at(3);
+ params.p2() = parameterVec.at(4);
+ params.gamma1() = parameterVec.at(5);
+ params.gamma2() = parameterVec.at(6);
+ params.u0() = parameterVec.at(7);
+ params.v0() = parameterVec.at(8);
+
+ setParameters(params);
+}
+
+void
+CataCamera::writeParameters(std::vector<double>& parameterVec) const
+{
+ parameterVec.resize(parameterCount());
+ parameterVec.at(0) = mParameters.xi();
+ parameterVec.at(1) = mParameters.k1();
+ parameterVec.at(2) = mParameters.k2();
+ parameterVec.at(3) = mParameters.p1();
+ parameterVec.at(4) = mParameters.p2();
+ parameterVec.at(5) = mParameters.gamma1();
+ parameterVec.at(6) = mParameters.gamma2();
+ parameterVec.at(7) = mParameters.u0();
+ parameterVec.at(8) = mParameters.v0();
+}
+
+void
+CataCamera::writeParametersToYamlFile(const std::string& filename) const
+{
+ mParameters.writeToYamlFile(filename);
+}
+
+std::string
+CataCamera::parametersToString(void) const
+{
+ std::ostringstream oss;
+ oss << mParameters;
+
+ return oss.str();
+}
+
+}
diff --git a/camodocal/camera_models/CataCamera.h b/camodocal/camera_models/CataCamera.h
new file mode 100644
index 0000000..1a7881f
--- /dev/null
+++ b/camodocal/camera_models/CataCamera.h
@@ -0,0 +1,154 @@
+#ifndef CATACAMERA_H
+#define CATACAMERA_H
+
+#include <opencv2/core/core.hpp>
+#include <string>
+
+#include "Camera.h"
+
+namespace camodocal
+{
+
+/**
+ * C. Mei, and P. Rives, Single View Point Omnidirectional Camera Calibration
+ * from Planar Grids, ICRA 2007
+ */
+
+class CataCamera: public Camera
+{
+public:
+ class Parameters: public Camera::Parameters
+ {
+ public:
+ Parameters();
+ Parameters(const std::string& cameraName,
+ int w, int h,
+ double xi,
+ double k1, double k2, double p1, double p2,
+ double gamma1, double gamma2, double u0, double v0);
+
+ double& xi(void);
+ double& k1(void);
+ double& k2(void);
+ double& p1(void);
+ double& p2(void);
+ double& gamma1(void);
+ double& gamma2(void);
+ double& u0(void);
+ double& v0(void);
+
+ double xi(void) const;
+ double k1(void) const;
+ double k2(void) const;
+ double p1(void) const;
+ double p2(void) const;
+ double gamma1(void) const;
+ double gamma2(void) const;
+ double u0(void) const;
+ double v0(void) const;
+
+ bool readFromYamlFile(const std::string& filename);
+ void writeToYamlFile(const std::string& filename) const;
+
+ Parameters& operator=(const Parameters& other);
+ friend std::ostream& operator<< (std::ostream& out, const Parameters& params);
+
+ private:
+ double m_xi;
+ double m_k1;
+ double m_k2;
+ double m_p1;
+ double m_p2;
+ double m_gamma1;
+ double m_gamma2;
+ double m_u0;
+ double m_v0;
+ };
+
+ CataCamera();
+
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ CataCamera(const std::string& cameraName,
+ int imageWidth, int imageHeight,
+ double xi, double k1, double k2, double p1, double p2,
+ double gamma1, double gamma2, double u0, double v0);
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ CataCamera(const Parameters& params);
+
+ Camera::ModelType modelType(void) const;
+ const std::string& cameraName(void) const;
+ int imageWidth(void) const;
+ int imageHeight(void) const;
+
+    void estimateIntrinsics(const cv::Size& boardSize,
+                            const std::vector< std::vector<cv::Point3f> >& objectPoints,
+                            const std::vector< std::vector<cv::Point2f> >& imagePoints);
+
+ // Lift points from the image plane to the sphere
+ void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
+ //%output P
+
+ // Lift points from the image plane to the projective space
+ void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
+ //%output P
+
+ // Projects 3D points to the image plane (Pi function)
+ void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const;
+ //%output p
+
+ // Projects 3D points to the image plane (Pi function)
+ // and calculates jacobian
+    void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
+                      Eigen::Matrix<double, 2, 3>& J) const;
+ //%output p
+ //%output J
+
+ void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const;
+ //%output p
+
+    template <typename T>
+    static void spaceToPlane(const T* const params,
+                             const T* const q, const T* const t,
+                             const Eigen::Matrix<T, 3, 1>& P,
+                             Eigen::Matrix<T, 2, 1>& p);
+
+ void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const;
+ void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
+ Eigen::Matrix2d& J) const;
+
+ void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const;
+ cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
+ float fx = -1.0f, float fy = -1.0f,
+ cv::Size imageSize = cv::Size(0, 0),
+ float cx = -1.0f, float cy = -1.0f,
+ cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;
+
+ int parameterCount(void) const;
+
+ const Parameters& getParameters(void) const;
+ void setParameters(const Parameters& parameters);
+
+    void readParameters(const std::vector<double>& parameterVec);
+    void writeParameters(std::vector<double>& parameterVec) const;
+
+ void writeParametersToYamlFile(const std::string& filename) const;
+
+ std::string parametersToString(void) const;
+
+private:
+ Parameters mParameters;
+
+ double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
+ bool m_noDistortion;
+};
+
+typedef std::shared_ptr<CataCamera> CataCameraPtr;
+typedef std::shared_ptr<const CataCamera> CataCameraConstPtr;
+
+}
+
+#endif
diff --git a/camodocal/camera_models/EquidistantCamera.cc b/camodocal/camera_models/EquidistantCamera.cc
new file mode 100644
index 0000000..0713845
--- /dev/null
+++ b/camodocal/camera_models/EquidistantCamera.cc
@@ -0,0 +1,820 @@
+#include "../camera_models/EquidistantCamera.h"
+
+#include <cmath>
+#include <cstdio>
+#include <eigen3/Eigen/Dense>
+#include <iomanip>
+#include <iostream>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/core/eigen.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+
+#include "../gpl/gpl.h"
+
+namespace camodocal
+{
+
+EquidistantCamera::Parameters::Parameters()
+ : Camera::Parameters(KANNALA_BRANDT)
+ , m_k2(0.0)
+ , m_k3(0.0)
+ , m_k4(0.0)
+ , m_k5(0.0)
+ , m_mu(0.0)
+ , m_mv(0.0)
+ , m_u0(0.0)
+ , m_v0(0.0)
+{
+
+}
+
+EquidistantCamera::Parameters::Parameters(const std::string& cameraName,
+ int w, int h,
+ double k2, double k3, double k4, double k5,
+ double mu, double mv,
+ double u0, double v0)
+ : Camera::Parameters(KANNALA_BRANDT, cameraName, w, h)
+ , m_k2(k2)
+ , m_k3(k3)
+ , m_k4(k4)
+ , m_k5(k5)
+ , m_mu(mu)
+ , m_mv(mv)
+ , m_u0(u0)
+ , m_v0(v0)
+{
+
+}
+
+double&
+EquidistantCamera::Parameters::k2(void)
+{
+ return m_k2;
+}
+
+double&
+EquidistantCamera::Parameters::k3(void)
+{
+ return m_k3;
+}
+
+double&
+EquidistantCamera::Parameters::k4(void)
+{
+ return m_k4;
+}
+
+double&
+EquidistantCamera::Parameters::k5(void)
+{
+ return m_k5;
+}
+
+double&
+EquidistantCamera::Parameters::mu(void)
+{
+ return m_mu;
+}
+
+double&
+EquidistantCamera::Parameters::mv(void)
+{
+ return m_mv;
+}
+
+double&
+EquidistantCamera::Parameters::u0(void)
+{
+ return m_u0;
+}
+
+double&
+EquidistantCamera::Parameters::v0(void)
+{
+ return m_v0;
+}
+
+double
+EquidistantCamera::Parameters::k2(void) const
+{
+ return m_k2;
+}
+
+double
+EquidistantCamera::Parameters::k3(void) const
+{
+ return m_k3;
+}
+
+double
+EquidistantCamera::Parameters::k4(void) const
+{
+ return m_k4;
+}
+
+double
+EquidistantCamera::Parameters::k5(void) const
+{
+ return m_k5;
+}
+
+double
+EquidistantCamera::Parameters::mu(void) const
+{
+ return m_mu;
+}
+
+double
+EquidistantCamera::Parameters::mv(void) const
+{
+ return m_mv;
+}
+
+double
+EquidistantCamera::Parameters::u0(void) const
+{
+ return m_u0;
+}
+
+double
+EquidistantCamera::Parameters::v0(void) const
+{
+ return m_v0;
+}
+
+bool
+EquidistantCamera::Parameters::readFromYamlFile(const std::string& filename)
+{
+ cv::FileStorage fs(filename, cv::FileStorage::READ);
+
+ if (!fs.isOpened())
+ {
+ return false;
+ }
+
+ if (!fs["model_type"].isNone())
+ {
+ std::string sModelType;
+ fs["model_type"] >> sModelType;
+
+ if (sModelType.compare("KANNALA_BRANDT") != 0)
+ {
+ return false;
+ }
+ }
+
+ m_modelType = KANNALA_BRANDT;
+ fs["camera_name"] >> m_cameraName;
+    m_imageWidth = static_cast<int>(fs["image_width"]);
+    m_imageHeight = static_cast<int>(fs["image_height"]);
+
+    cv::FileNode n = fs["projection_parameters"];
+    m_k2 = static_cast<double>(n["k2"]);
+    m_k3 = static_cast<double>(n["k3"]);
+    m_k4 = static_cast<double>(n["k4"]);
+    m_k5 = static_cast<double>(n["k5"]);
+    m_mu = static_cast<double>(n["mu"]);
+    m_mv = static_cast<double>(n["mv"]);
+    m_u0 = static_cast<double>(n["u0"]);
+    m_v0 = static_cast<double>(n["v0"]);
+
+ return true;
+}
+
+void
+EquidistantCamera::Parameters::writeToYamlFile(const std::string& filename) const
+{
+ cv::FileStorage fs(filename, cv::FileStorage::WRITE);
+
+ fs << "model_type" << "KANNALA_BRANDT";
+ fs << "camera_name" << m_cameraName;
+ fs << "image_width" << m_imageWidth;
+ fs << "image_height" << m_imageHeight;
+
+ // projection: k2, k3, k4, k5, mu, mv, u0, v0
+ fs << "projection_parameters";
+ fs << "{" << "k2" << m_k2
+ << "k3" << m_k3
+ << "k4" << m_k4
+ << "k5" << m_k5
+ << "mu" << m_mu
+ << "mv" << m_mv
+ << "u0" << m_u0
+ << "v0" << m_v0 << "}";
+
+ fs.release();
+}
+
+EquidistantCamera::Parameters&
+EquidistantCamera::Parameters::operator=(const EquidistantCamera::Parameters& other)
+{
+ if (this != &other)
+ {
+ m_modelType = other.m_modelType;
+ m_cameraName = other.m_cameraName;
+ m_imageWidth = other.m_imageWidth;
+ m_imageHeight = other.m_imageHeight;
+ m_k2 = other.m_k2;
+ m_k3 = other.m_k3;
+ m_k4 = other.m_k4;
+ m_k5 = other.m_k5;
+ m_mu = other.m_mu;
+ m_mv = other.m_mv;
+ m_u0 = other.m_u0;
+ m_v0 = other.m_v0;
+ }
+
+ return *this;
+}
+
+std::ostream&
+operator<< (std::ostream& out, const EquidistantCamera::Parameters& params)
+{
+ out << "Camera Parameters:" << std::endl;
+ out << " model_type " << "KANNALA_BRANDT" << std::endl;
+ out << " camera_name " << params.m_cameraName << std::endl;
+ out << " image_width " << params.m_imageWidth << std::endl;
+ out << " image_height " << params.m_imageHeight << std::endl;
+
+ // projection: k2, k3, k4, k5, mu, mv, u0, v0
+ out << "Projection Parameters" << std::endl;
+ out << " k2 " << params.m_k2 << std::endl
+ << " k3 " << params.m_k3 << std::endl
+ << " k4 " << params.m_k4 << std::endl
+ << " k5 " << params.m_k5 << std::endl
+ << " mu " << params.m_mu << std::endl
+ << " mv " << params.m_mv << std::endl
+ << " u0 " << params.m_u0 << std::endl
+ << " v0 " << params.m_v0 << std::endl;
+
+ return out;
+}
+
+EquidistantCamera::EquidistantCamera()
+ : m_inv_K11(1.0)
+ , m_inv_K13(0.0)
+ , m_inv_K22(1.0)
+ , m_inv_K23(0.0)
+{
+
+}
+
+EquidistantCamera::EquidistantCamera(const std::string& cameraName,
+ int imageWidth, int imageHeight,
+ double k2, double k3, double k4, double k5,
+ double mu, double mv,
+ double u0, double v0)
+ : mParameters(cameraName, imageWidth, imageHeight,
+ k2, k3, k4, k5, mu, mv, u0, v0)
+{
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.mu();
+ m_inv_K13 = -mParameters.u0() / mParameters.mu();
+ m_inv_K22 = 1.0 / mParameters.mv();
+ m_inv_K23 = -mParameters.v0() / mParameters.mv();
+}
+
+EquidistantCamera::EquidistantCamera(const EquidistantCamera::Parameters& params)
+ : mParameters(params)
+{
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.mu();
+ m_inv_K13 = -mParameters.u0() / mParameters.mu();
+ m_inv_K22 = 1.0 / mParameters.mv();
+ m_inv_K23 = -mParameters.v0() / mParameters.mv();
+}
+
+Camera::ModelType
+EquidistantCamera::modelType(void) const
+{
+ return mParameters.modelType();
+}
+
+const std::string&
+EquidistantCamera::cameraName(void) const
+{
+ return mParameters.cameraName();
+}
+
+int
+EquidistantCamera::imageWidth(void) const
+{
+ return mParameters.imageWidth();
+}
+
+int
+EquidistantCamera::imageHeight(void) const
+{
+ return mParameters.imageHeight();
+}
+
+void
+EquidistantCamera::estimateIntrinsics(const cv::Size& boardSize,
+                                      const std::vector< std::vector<cv::Point3f> >& objectPoints,
+                                      const std::vector< std::vector<cv::Point2f> >& imagePoints)
+{
+ Parameters params = getParameters();
+
+ double u0 = params.imageWidth() / 2.0;
+ double v0 = params.imageHeight() / 2.0;
+
+    double minReprojErr = std::numeric_limits<double>::max();
+
+    std::vector<cv::Mat> rvecs, tvecs;
+ rvecs.assign(objectPoints.size(), cv::Mat());
+ tvecs.assign(objectPoints.size(), cv::Mat());
+
+ params.k2() = 0.0;
+ params.k3() = 0.0;
+ params.k4() = 0.0;
+ params.k5() = 0.0;
+ params.u0() = u0;
+ params.v0() = v0;
+
+ // Initialize focal length
+ // C. Hughes, P. Denny, M. Glavin, and E. Jones,
+ // Equidistant Fish-Eye Calibration and Rectification by Vanishing Point
+ // Extraction, PAMI 2010
+ // Find circles from rows of chessboard corners, and for each pair
+ // of circles, find vanishing points: v1 and v2.
+ // f = ||v1 - v2|| / PI;
+ double f0 = 0.0;
+ for (size_t i = 0; i < imagePoints.size(); ++i)
+ {
+        std::vector<Eigen::Vector2d> center(boardSize.height);
+        std::vector<double> radius(boardSize.height);
+ for (int r = 0; r < boardSize.height; ++r)
+ {
+            std::vector<cv::Point2d> circle;
+ for (int c = 0; c < boardSize.width; ++c)
+ {
+ circle.push_back(imagePoints.at(i).at(r * boardSize.width + c));
+ }
+
+ fitCircle(circle, center[r](0), center[r](1), radius[r]);
+ }
+
+ for (int j = 0; j < boardSize.height; ++j)
+ {
+ for (int k = j + 1; k < boardSize.height; ++k)
+ {
+ // find distance between pair of vanishing points which
+ // correspond to intersection points of 2 circles
+                std::vector<cv::Point2d> ipts;
+ ipts = intersectCircles(center[j](0), center[j](1), radius[j],
+ center[k](0), center[k](1), radius[k]);
+
+ if (ipts.size() < 2)
+ {
+ continue;
+ }
+
+ double f = cv::norm(ipts.at(0) - ipts.at(1)) / M_PI;
+
+ params.mu() = f;
+ params.mv() = f;
+
+ setParameters(params);
+
+ for (size_t l = 0; l < objectPoints.size(); ++l)
+ {
+ estimateExtrinsics(objectPoints.at(l), imagePoints.at(l), rvecs.at(l), tvecs.at(l));
+ }
+
+ double reprojErr = reprojectionError(objectPoints, imagePoints, rvecs, tvecs, cv::noArray());
+
+ if (reprojErr < minReprojErr)
+ {
+ minReprojErr = reprojErr;
+ f0 = f;
+ }
+ }
+ }
+ }
+
+ if (f0 <= 0.0 && minReprojErr >= std::numeric_limits<double>::max())
+ {
+ std::cout << "[" << params.cameraName() << "] "
+ << "# INFO: kannala-Brandt model fails with given data. " << std::endl;
+
+ return;
+ }
+
+ params.mu() = f0;
+ params.mv() = f0;
+
+ setParameters(params);
+}
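+
+// Illustrative sketch (not part of the library; the helper name is hypothetical):
+// the focal-length hypotheses tested in the loop above come directly from the
+// vanishing-point relation of Hughes et al., f = ||v1 - v2|| / pi, applied to the
+// two intersection points of a pair of fitted corner-row circles.
+#if 0
+static double focalFromVanishingPoints(const cv::Point2d& v1, const cv::Point2d& v2)
+{
+    return cv::norm(v1 - v2) / M_PI;
+}
+#endif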
+
+/**
+ * \brief Lifts a point from the image plane to the unit sphere
+ *
+ * \param p image coordinates
+ * \param P coordinates of the point on the sphere
+ */
+void
+EquidistantCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
+{
+ liftProjective(p, P);
+}
+
+/**
+ * \brief Lifts a point from the image plane to its projective ray
+ *
+ * \param p image coordinates
+ * \param P coordinates of the projective ray
+ */
+void
+EquidistantCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
+{
+ // Lift points to normalised plane
+ Eigen::Vector2d p_u;
+ p_u << m_inv_K11 * p(0) + m_inv_K13,
+ m_inv_K22 * p(1) + m_inv_K23;
+
+ // Obtain a projective ray
+ double theta, phi;
+ backprojectSymmetric(p_u, theta, phi);
+
+ P(0) = sin(theta) * cos(phi);
+ P(1) = sin(theta) * sin(phi);
+ P(2) = cos(theta);
+}
+
+/**
+ * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+EquidistantCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const
+{
+ double theta = acos(P(2) / P.norm());
+ double phi = atan2(P(1), P(0));
+
+ Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(), mParameters.k5(), theta) * Eigen::Vector2d(cos(phi), sin(phi));
+
+ // Apply generalised projection matrix
+ p << mParameters.mu() * p_u(0) + mParameters.u0(),
+ mParameters.mv() * p_u(1) + mParameters.v0();
+}
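+
+// Illustrative sketch (hypothetical free function, not used by the library): the
+// projection above is the Kannala-Brandt model in three steps -- spherical angles
+// (theta, phi) of the ray, the odd radial polynomial r(theta), then the affine
+// parameters (mu, mv, u0, v0).
+#if 0
+static Eigen::Vector2d projectEquidistant(const Eigen::Vector3d& P,
+                                          double k2, double k3, double k4, double k5,
+                                          double mu, double mv, double u0, double v0)
+{
+    double theta = acos(P(2) / P.norm());
+    double phi   = atan2(P(1), P(0));
+    double rad   = theta + k2 * pow(theta, 3) + k3 * pow(theta, 5)
+                 + k4 * pow(theta, 7) + k5 * pow(theta, 9);
+    return Eigen::Vector2d(mu * rad * cos(phi) + u0, mv * rad * sin(phi) + v0);
+}
+#endif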
+
+
+/**
+ * \brief Project a 3D point to the image plane and calculate Jacobian
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+EquidistantCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
+ Eigen::Matrix<double, 2, 3>& J) const
+{
+ double theta = acos(P(2) / P.norm());
+ double phi = atan2(P(1), P(0));
+
+ Eigen::Vector2d p_u = r(mParameters.k2(), mParameters.k3(), mParameters.k4(), mParameters.k5(), theta) * Eigen::Vector2d(cos(phi), sin(phi));
+
+ // Apply generalised projection matrix
+ p << mParameters.mu() * p_u(0) + mParameters.u0(),
+ mParameters.mv() * p_u(1) + mParameters.v0();
+}
+
+/**
+ * \brief Projects an undistorted 2D point p_u to the image plane
+ *
+ * \param p_u 2D point coordinates
+ * \return image point coordinates
+ */
+void
+EquidistantCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const
+{
+// Eigen::Vector2d p_d;
+//
+// if (m_noDistortion)
+// {
+// p_d = p_u;
+// }
+// else
+// {
+// // Apply distortion
+// Eigen::Vector2d d_u;
+// distortion(p_u, d_u);
+// p_d = p_u + d_u;
+// }
+//
+// // Apply generalised projection matrix
+// p << mParameters.gamma1() * p_d(0) + mParameters.u0(),
+// mParameters.gamma2() * p_d(1) + mParameters.v0();
+}
+
+void
+EquidistantCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const
+{
+ cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());
+
+ cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F);
+ cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F);
+
+ for (int v = 0; v < imageSize.height; ++v)
+ {
+ for (int u = 0; u < imageSize.width; ++u)
+ {
+ double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
+ double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;
+
+ double theta, phi;
+ backprojectSymmetric(Eigen::Vector2d(mx_u, my_u), theta, phi);
+
+ Eigen::Vector3d P;
+ P << sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta);
+
+ Eigen::Vector2d p;
+ spaceToPlane(P, p);
+
+ mapX.at<float>(v,u) = p(0);
+ mapY.at<float>(v,u) = p(1);
+ }
+ }
+
+ cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
+}
+
+cv::Mat
+EquidistantCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
+ float fx, float fy,
+ cv::Size imageSize,
+ float cx, float cy,
+ cv::Mat rmat) const
+{
+ if (imageSize == cv::Size(0, 0))
+ {
+ imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
+ }
+
+ cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
+ cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
+
+ Eigen::Matrix3f K_rect;
+
+ if (cx == -1.0f && cy == -1.0f)
+ {
+ K_rect << fx, 0, imageSize.width / 2,
+ 0, fy, imageSize.height / 2,
+ 0, 0, 1;
+ }
+ else
+ {
+ K_rect << fx, 0, cx,
+ 0, fy, cy,
+ 0, 0, 1;
+ }
+
+ if (fx == -1.0f || fy == -1.0f)
+ {
+ K_rect(0,0) = mParameters.mu();
+ K_rect(1,1) = mParameters.mv();
+ }
+
+ Eigen::Matrix3f K_rect_inv = K_rect.inverse();
+
+ Eigen::Matrix3f R, R_inv;
+ cv::cv2eigen(rmat, R);
+ R_inv = R.inverse();
+
+ for (int v = 0; v < imageSize.height; ++v)
+ {
+ for (int u = 0; u < imageSize.width; ++u)
+ {
+ Eigen::Vector3f xo;
+ xo << u, v, 1;
+
+ Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
+
+ Eigen::Vector2d p;
+ spaceToPlane(uo.cast<double>(), p);
+
+ mapX.at<float>(v,u) = p(0);
+ mapY.at<float>(v,u) = p(1);
+ }
+ }
+
+ cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
+
+ cv::Mat K_rect_cv;
+ cv::eigen2cv(K_rect, K_rect_cv);
+ return K_rect_cv;
+}
+
+int
+EquidistantCamera::parameterCount(void) const
+{
+ return 8;
+}
+
+const EquidistantCamera::Parameters&
+EquidistantCamera::getParameters(void) const
+{
+ return mParameters;
+}
+
+void
+EquidistantCamera::setParameters(const EquidistantCamera::Parameters& parameters)
+{
+ mParameters = parameters;
+
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.mu();
+ m_inv_K13 = -mParameters.u0() / mParameters.mu();
+ m_inv_K22 = 1.0 / mParameters.mv();
+ m_inv_K23 = -mParameters.v0() / mParameters.mv();
+}
+
+void
+EquidistantCamera::readParameters(const std::vector<double>& parameterVec)
+{
+ if ((int)parameterVec.size() != parameterCount())
+ {
+ return;
+ }
+
+ Parameters params = getParameters();
+
+ params.k2() = parameterVec.at(0);
+ params.k3() = parameterVec.at(1);
+ params.k4() = parameterVec.at(2);
+ params.k5() = parameterVec.at(3);
+ params.mu() = parameterVec.at(4);
+ params.mv() = parameterVec.at(5);
+ params.u0() = parameterVec.at(6);
+ params.v0() = parameterVec.at(7);
+
+ setParameters(params);
+}
+
+void
+EquidistantCamera::writeParameters(std::vector<double>& parameterVec) const
+{
+ parameterVec.resize(parameterCount());
+ parameterVec.at(0) = mParameters.k2();
+ parameterVec.at(1) = mParameters.k3();
+ parameterVec.at(2) = mParameters.k4();
+ parameterVec.at(3) = mParameters.k5();
+ parameterVec.at(4) = mParameters.mu();
+ parameterVec.at(5) = mParameters.mv();
+ parameterVec.at(6) = mParameters.u0();
+ parameterVec.at(7) = mParameters.v0();
+}
+
+void
+EquidistantCamera::writeParametersToYamlFile(const std::string& filename) const
+{
+ mParameters.writeToYamlFile(filename);
+}
+
+std::string
+EquidistantCamera::parametersToString(void) const
+{
+ std::ostringstream oss;
+ oss << mParameters;
+
+ return oss.str();
+}
+
+void
+EquidistantCamera::fitOddPoly(const std::vector<double>& x, const std::vector<double>& y,
+ int n, std::vector<double>& coeffs) const
+{
+ std::vector<double> pows;
+ for (int i = 1; i <= n; i += 2)
+ {
+ pows.push_back(i);
+ }
+
+ Eigen::MatrixXd X(x.size(), pows.size());
+ Eigen::MatrixXd Y(y.size(), 1);
+ for (size_t i = 0; i < x.size(); ++i)
+ {
+ for (size_t j = 0; j < pows.size(); ++j)
+ {
+ X(i,j) = pow(x.at(i), pows.at(j));
+ }
+ Y(i,0) = y.at(i);
+ }
+
+ Eigen::MatrixXd A = (X.transpose() * X).inverse() * X.transpose() * Y;
+
+ coeffs.resize(A.rows());
+ for (int i = 0; i < A.rows(); ++i)
+ {
+ coeffs.at(i) = A(i,0);
+ }
+}
+
+void
+EquidistantCamera::backprojectSymmetric(const Eigen::Vector2d& p_u,
+ double& theta, double& phi) const
+{
+ double tol = 1e-10;
+ double p_u_norm = p_u.norm();
+
+ if (p_u_norm < 1e-10)
+ {
+ phi = 0.0;
+ }
+ else
+ {
+ phi = atan2(p_u(1), p_u(0));
+ }
+
+ int npow = 9;
+ if (mParameters.k5() == 0.0)
+ {
+ npow -= 2;
+ }
+ if (mParameters.k4() == 0.0)
+ {
+ npow -= 2;
+ }
+ if (mParameters.k3() == 0.0)
+ {
+ npow -= 2;
+ }
+ if (mParameters.k2() == 0.0)
+ {
+ npow -= 2;
+ }
+
+ Eigen::MatrixXd coeffs(npow + 1, 1);
+ coeffs.setZero();
+ coeffs(0) = -p_u_norm;
+ coeffs(1) = 1.0;
+
+ if (npow >= 3)
+ {
+ coeffs(3) = mParameters.k2();
+ }
+ if (npow >= 5)
+ {
+ coeffs(5) = mParameters.k3();
+ }
+ if (npow >= 7)
+ {
+ coeffs(7) = mParameters.k4();
+ }
+ if (npow >= 9)
+ {
+ coeffs(9) = mParameters.k5();
+ }
+
+ if (npow == 1)
+ {
+ theta = p_u_norm;
+ }
+ else
+ {
+ // Get eigenvalues of companion matrix corresponding to polynomial.
+ // Eigenvalues correspond to roots of polynomial.
+ Eigen::MatrixXd A(npow, npow);
+ A.setZero();
+ A.block(1, 0, npow - 1, npow - 1).setIdentity();
+ A.col(npow - 1) = - coeffs.block(0, 0, npow, 1) / coeffs(npow);
+
+ Eigen::EigenSolver<Eigen::MatrixXd> es(A);
+ Eigen::MatrixXcd eigval = es.eigenvalues();
+
+ std::vector<double> thetas;
+ for (int i = 0; i < eigval.rows(); ++i)
+ {
+ if (fabs(eigval(i).imag()) > tol)
+ {
+ continue;
+ }
+
+ double t = eigval(i).real();
+
+ if (t < -tol)
+ {
+ continue;
+ }
+ else if (t < 0.0)
+ {
+ t = 0.0;
+ }
+
+ thetas.push_back(t);
+ }
+
+ if (thetas.empty())
+ {
+ theta = p_u_norm;
+ }
+ else
+ {
+ theta = *std::min_element(thetas.begin(), thetas.end());
+ }
+ }
+}
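+
+// Illustrative sketch (hypothetical helper): backprojectSymmetric() recovers theta
+// by solving r(theta) = ||p_u|| as a polynomial root problem; the companion-matrix
+// construction it uses is the generic one below for c0 + c1*x + ... + cn*x^n.
+#if 0
+static std::vector<double> realPolynomialRoots(const Eigen::VectorXd& c)
+{
+    int n = static_cast<int>(c.size()) - 1;
+    Eigen::MatrixXd A = Eigen::MatrixXd::Zero(n, n);
+    A.block(1, 0, n - 1, n - 1).setIdentity();
+    A.col(n - 1) = -c.head(n) / c(n);
+
+    Eigen::EigenSolver<Eigen::MatrixXd> es(A);
+    std::vector<double> roots;
+    for (int i = 0; i < n; ++i)
+    {
+        if (fabs(es.eigenvalues()(i).imag()) < 1e-10)
+        {
+            roots.push_back(es.eigenvalues()(i).real());
+        }
+    }
+    return roots;
+}
+#endif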
+
+}
diff --git a/camodocal/camera_models/EquidistantCamera.h b/camodocal/camera_models/EquidistantCamera.h
new file mode 100644
index 0000000..347d441
--- /dev/null
+++ b/camodocal/camera_models/EquidistantCamera.h
@@ -0,0 +1,171 @@
+#ifndef EQUIDISTANTCAMERA_H
+#define EQUIDISTANTCAMERA_H
+
+#include <opencv2/core/core.hpp>
+#include <string>
+
+#include "Camera.h"
+
+namespace camodocal
+{
+
+/**
+ * J. Kannala, and S. Brandt, A Generic Camera Model and Calibration Method
+ * for Conventional, Wide-Angle, and Fish-Eye Lenses, PAMI 2006
+ */
+
+class EquidistantCamera: public Camera
+{
+public:
+ class Parameters: public Camera::Parameters
+ {
+ public:
+ Parameters();
+ Parameters(const std::string& cameraName,
+ int w, int h,
+ double k2, double k3, double k4, double k5,
+ double mu, double mv,
+ double u0, double v0);
+
+ double& k2(void);
+ double& k3(void);
+ double& k4(void);
+ double& k5(void);
+ double& mu(void);
+ double& mv(void);
+ double& u0(void);
+ double& v0(void);
+
+ double k2(void) const;
+ double k3(void) const;
+ double k4(void) const;
+ double k5(void) const;
+ double mu(void) const;
+ double mv(void) const;
+ double u0(void) const;
+ double v0(void) const;
+
+ bool readFromYamlFile(const std::string& filename);
+ void writeToYamlFile(const std::string& filename) const;
+
+ Parameters& operator=(const Parameters& other);
+ friend std::ostream& operator<< (std::ostream& out, const Parameters& params);
+
+ private:
+ // projection
+ double m_k2;
+ double m_k3;
+ double m_k4;
+ double m_k5;
+
+ double m_mu;
+ double m_mv;
+ double m_u0;
+ double m_v0;
+ };
+
+ EquidistantCamera();
+
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ EquidistantCamera(const std::string& cameraName,
+ int imageWidth, int imageHeight,
+ double k2, double k3, double k4, double k5,
+ double mu, double mv,
+ double u0, double v0);
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ EquidistantCamera(const Parameters& params);
+
+ Camera::ModelType modelType(void) const;
+ const std::string& cameraName(void) const;
+ int imageWidth(void) const;
+ int imageHeight(void) const;
+
+ void estimateIntrinsics(const cv::Size& boardSize,
+ const std::vector< std::vector<cv::Point3f> >& objectPoints,
+ const std::vector< std::vector<cv::Point2f> >& imagePoints);
+
+ // Lift points from the image plane to the sphere
+ virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
+ //%output P
+
+ // Lift points from the image plane to the projective space
+ void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
+ //%output P
+
+ // Projects 3D points to the image plane (Pi function)
+ void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const;
+ //%output p
+
+ // Projects 3D points to the image plane (Pi function)
+ // and calculates jacobian
+ void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
+ Eigen::Matrix<double, 2, 3>& J) const;
+ //%output p
+ //%output J
+
+ void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const;
+ //%output p
+
+ template <typename T>
+ static void spaceToPlane(const T* const params,
+ const T* const q, const T* const t,
+ const Eigen::Matrix<T, 3, 1>& P,
+ Eigen::Matrix<T, 2, 1>& p);
+
+ void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const;
+ cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
+ float fx = -1.0f, float fy = -1.0f,
+ cv::Size imageSize = cv::Size(0, 0),
+ float cx = -1.0f, float cy = -1.0f,
+ cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;
+
+ int parameterCount(void) const;
+
+ const Parameters& getParameters(void) const;
+ void setParameters(const Parameters& parameters);
+
+ void readParameters(const std::vector<double>& parameterVec);
+ void writeParameters(std::vector<double>& parameterVec) const;
+
+ void writeParametersToYamlFile(const std::string& filename) const;
+
+ std::string parametersToString(void) const;
+
+private:
+ template <typename T>
+ static T r(T k2, T k3, T k4, T k5, T theta);
+
+
+ void fitOddPoly(const std::vector<double>& x, const std::vector<double>& y,
+ int n, std::vector<double>& coeffs) const;
+
+ void backprojectSymmetric(const Eigen::Vector2d& p_u,
+ double& theta, double& phi) const;
+
+ Parameters mParameters;
+
+ double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
+};
+
+typedef std::shared_ptr<EquidistantCamera> EquidistantCameraPtr;
+typedef std::shared_ptr<const EquidistantCamera> EquidistantCameraConstPtr;
+
+template <typename T>
+T
+EquidistantCamera::r(T k2, T k3, T k4, T k5, T theta)
+{
+ // k1 = 1
+ return theta +
+ k2 * theta * theta * theta +
+ k3 * theta * theta * theta * theta * theta +
+ k4 * theta * theta * theta * theta * theta * theta * theta +
+ k5 * theta * theta * theta * theta * theta * theta * theta * theta * theta;
+}
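+
+// Illustrative usage sketch (parameter values below are assumptions, not shipped
+// calibration data): an EquidistantCamera built from k2..k5, mu, mv, u0, v0 maps
+// rays to pixels through r(theta) above and back again via liftProjective().
+#if 0
+static void equidistantUsageExample()
+{
+    camodocal::EquidistantCamera cam("fisheye", 1280, 720,
+                                     -0.01, 0.005, 0.0, 0.0,      // k2..k5
+                                     460.0, 460.0, 640.0, 360.0); // mu, mv, u0, v0
+    Eigen::Vector2d px;
+    cam.spaceToPlane(Eigen::Vector3d(0.1, -0.2, 1.0), px); // project a 3D point
+    Eigen::Vector3d ray;
+    cam.liftProjective(px, ray);                            // back-project to a ray
+}
+#endif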
+
+}
+
+#endif
diff --git a/camodocal/camera_models/PinholeCamera.cc b/camodocal/camera_models/PinholeCamera.cc
new file mode 100644
index 0000000..13a1af2
--- /dev/null
+++ b/camodocal/camera_models/PinholeCamera.cc
@@ -0,0 +1,881 @@
+#include "../camera_models/PinholeCamera.h"
+
+#include <cmath>
+#include <cstdio>
+#include <Eigen/Dense>
+#include <iomanip>
+#include <iostream>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/core/eigen.hpp>
+
+#include "../gpl/gpl.h"
+
+namespace camodocal
+{
+
+PinholeCamera::Parameters::Parameters()
+ : Camera::Parameters(PINHOLE)
+ , m_k1(0.0)
+ , m_k2(0.0)
+ , m_p1(0.0)
+ , m_p2(0.0)
+ , m_fx(0.0)
+ , m_fy(0.0)
+ , m_cx(0.0)
+ , m_cy(0.0)
+{
+
+}
+
+PinholeCamera::Parameters::Parameters(const std::string& cameraName,
+ int w, int h,
+ double k1, double k2,
+ double p1, double p2,
+ double fx, double fy,
+ double cx, double cy)
+ : Camera::Parameters(PINHOLE, cameraName, w, h)
+ , m_k1(k1)
+ , m_k2(k2)
+ , m_p1(p1)
+ , m_p2(p2)
+ , m_fx(fx)
+ , m_fy(fy)
+ , m_cx(cx)
+ , m_cy(cy)
+{
+}
+
+double&
+PinholeCamera::Parameters::k1(void)
+{
+ return m_k1;
+}
+
+double&
+PinholeCamera::Parameters::k2(void)
+{
+ return m_k2;
+}
+
+double&
+PinholeCamera::Parameters::p1(void)
+{
+ return m_p1;
+}
+
+double&
+PinholeCamera::Parameters::p2(void)
+{
+ return m_p2;
+}
+
+double&
+PinholeCamera::Parameters::fx(void)
+{
+ return m_fx;
+}
+
+double&
+PinholeCamera::Parameters::fy(void)
+{
+ return m_fy;
+}
+
+double&
+PinholeCamera::Parameters::cx(void)
+{
+ return m_cx;
+}
+
+double&
+PinholeCamera::Parameters::cy(void)
+{
+ return m_cy;
+}
+
+double
+PinholeCamera::Parameters::k1(void) const
+{
+ return m_k1;
+}
+
+double
+PinholeCamera::Parameters::k2(void) const
+{
+ return m_k2;
+}
+
+double
+PinholeCamera::Parameters::p1(void) const
+{
+ return m_p1;
+}
+
+double
+PinholeCamera::Parameters::p2(void) const
+{
+ return m_p2;
+}
+
+double
+PinholeCamera::Parameters::fx(void) const
+{
+ return m_fx;
+}
+
+double
+PinholeCamera::Parameters::fy(void) const
+{
+ return m_fy;
+}
+
+double
+PinholeCamera::Parameters::cx(void) const
+{
+ return m_cx;
+}
+
+double
+PinholeCamera::Parameters::cy(void) const
+{
+ return m_cy;
+}
+
+bool
+PinholeCamera::Parameters::readFromYamlFile(const std::string& filename)
+{
+ cv::FileStorage fs(filename, cv::FileStorage::READ);
+
+ if (!fs.isOpened())
+ {
+ return false;
+ }
+
+ if (!fs["model_type"].isNone())
+ {
+ std::string sModelType;
+ fs["model_type"] >> sModelType;
+
+ if (sModelType.compare("PINHOLE") != 0)
+ {
+ return false;
+ }
+ }
+
+ m_modelType = PINHOLE;
+ fs["camera_name"] >> m_cameraName;
+ m_imageWidth = static_cast<int>(fs["image_width"]);
+ m_imageHeight = static_cast<int>(fs["image_height"]);
+
+ cv::FileNode n = fs["distortion_parameters"];
+ m_k1 = static_cast<double>(n["k1"]);
+ m_k2 = static_cast<double>(n["k2"]);
+ m_p1 = static_cast<double>(n["p1"]);
+ m_p2 = static_cast<double>(n["p2"]);
+
+ n = fs["projection_parameters"];
+ m_fx = static_cast<double>(n["fx"]);
+ m_fy = static_cast<double>(n["fy"]);
+ m_cx = static_cast<double>(n["cx"]);
+ m_cy = static_cast<double>(n["cy"]);
+
+ return true;
+}
+
+void
+PinholeCamera::Parameters::writeToYamlFile(const std::string& filename) const
+{
+ cv::FileStorage fs(filename, cv::FileStorage::WRITE);
+
+ fs << "model_type" << "PINHOLE";
+ fs << "camera_name" << m_cameraName;
+ fs << "image_width" << m_imageWidth;
+ fs << "image_height" << m_imageHeight;
+
+ // radial distortion: k1, k2
+ // tangential distortion: p1, p2
+ fs << "distortion_parameters";
+ fs << "{" << "k1" << m_k1
+ << "k2" << m_k2
+ << "p1" << m_p1
+ << "p2" << m_p2 << "}";
+
+ // projection: fx, fy, cx, cy
+ fs << "projection_parameters";
+ fs << "{" << "fx" << m_fx
+ << "fy" << m_fy
+ << "cx" << m_cx
+ << "cy" << m_cy << "}";
+
+ fs.release();
+}
+
+PinholeCamera::Parameters&
+PinholeCamera::Parameters::operator=(const PinholeCamera::Parameters& other)
+{
+ if (this != &other)
+ {
+ m_modelType = other.m_modelType;
+ m_cameraName = other.m_cameraName;
+ m_imageWidth = other.m_imageWidth;
+ m_imageHeight = other.m_imageHeight;
+ m_k1 = other.m_k1;
+ m_k2 = other.m_k2;
+ m_p1 = other.m_p1;
+ m_p2 = other.m_p2;
+ m_fx = other.m_fx;
+ m_fy = other.m_fy;
+ m_cx = other.m_cx;
+ m_cy = other.m_cy;
+ }
+
+ return *this;
+}
+
+std::ostream&
+operator<< (std::ostream& out, const PinholeCamera::Parameters& params)
+{
+ out << "Camera Parameters:" << std::endl;
+ out << " model_type " << "PINHOLE" << std::endl;
+ out << " camera_name " << params.m_cameraName << std::endl;
+ out << " image_width " << params.m_imageWidth << std::endl;
+ out << " image_height " << params.m_imageHeight << std::endl;
+
+ // radial distortion: k1, k2
+ // tangential distortion: p1, p2
+ out << "Distortion Parameters" << std::endl;
+ out << " k1 " << params.m_k1 << std::endl
+ << " k2 " << params.m_k2 << std::endl
+ << " p1 " << params.m_p1 << std::endl
+ << " p2 " << params.m_p2 << std::endl;
+
+ // projection: fx, fy, cx, cy
+ out << "Projection Parameters" << std::endl;
+ out << " fx " << params.m_fx << std::endl
+ << " fy " << params.m_fy << std::endl
+ << " cx " << params.m_cx << std::endl
+ << " cy " << params.m_cy << std::endl;
+
+ return out;
+}
+
+PinholeCamera::PinholeCamera()
+ : m_inv_K11(1.0)
+ , m_inv_K13(0.0)
+ , m_inv_K22(1.0)
+ , m_inv_K23(0.0)
+ , m_noDistortion(true)
+{
+
+}
+
+PinholeCamera::PinholeCamera(const std::string& cameraName,
+ int imageWidth, int imageHeight,
+ double k1, double k2, double p1, double p2,
+ double fx, double fy, double cx, double cy)
+ : mParameters(cameraName, imageWidth, imageHeight,
+ k1, k2, p1, p2, fx, fy, cx, cy)
+{
+ if ((mParameters.k1() == 0.0) &&
+ (mParameters.k2() == 0.0) &&
+ (mParameters.p1() == 0.0) &&
+ (mParameters.p2() == 0.0))
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.fx();
+ m_inv_K13 = -mParameters.cx() / mParameters.fx();
+ m_inv_K22 = 1.0 / mParameters.fy();
+ m_inv_K23 = -mParameters.cy() / mParameters.fy();
+}
+
+PinholeCamera::PinholeCamera(const PinholeCamera::Parameters& params)
+ : mParameters(params)
+{
+ if ((mParameters.k1() == 0.0) &&
+ (mParameters.k2() == 0.0) &&
+ (mParameters.p1() == 0.0) &&
+ (mParameters.p2() == 0.0))
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.fx();
+ m_inv_K13 = -mParameters.cx() / mParameters.fx();
+ m_inv_K22 = 1.0 / mParameters.fy();
+ m_inv_K23 = -mParameters.cy() / mParameters.fy();
+}
+
+Camera::ModelType
+PinholeCamera::modelType(void) const
+{
+ return mParameters.modelType();
+}
+
+const std::string&
+PinholeCamera::cameraName(void) const
+{
+ return mParameters.cameraName();
+}
+
+int
+PinholeCamera::imageWidth(void) const
+{
+ return mParameters.imageWidth();
+}
+
+int
+PinholeCamera::imageHeight(void) const
+{
+ return mParameters.imageHeight();
+}
+
+void
+PinholeCamera::estimateIntrinsics(const cv::Size& boardSize,
+ const std::vector< std::vector<cv::Point3f> >& objectPoints,
+ const std::vector< std::vector<cv::Point2f> >& imagePoints)
+{
+ // Z. Zhang, A Flexible New Technique for Camera Calibration, PAMI 2000
+
+ Parameters params = getParameters();
+
+ params.k1() = 0.0;
+ params.k2() = 0.0;
+ params.p1() = 0.0;
+ params.p2() = 0.0;
+
+ double cx = params.imageWidth() / 2.0;
+ double cy = params.imageHeight() / 2.0;
+ params.cx() = cx;
+ params.cy() = cy;
+
+ size_t nImages = imagePoints.size();
+
+ cv::Mat A(nImages * 2, 2, CV_64F);
+ cv::Mat b(nImages * 2, 1, CV_64F);
+
+ for (size_t i = 0; i < nImages; ++i)
+ {
+ const std::vector<cv::Point3f>& oPoints = objectPoints.at(i);
+
+ std::vector<cv::Point2f> M(oPoints.size());
+ for (size_t j = 0; j < M.size(); ++j)
+ {
+ M.at(j) = cv::Point2f(oPoints.at(j).x, oPoints.at(j).y);
+ }
+
+ cv::Mat H = cv::findHomography(M, imagePoints.at(i));
+
+ H.at<double>(0,0) -= H.at<double>(2,0) * cx;
+ H.at<double>(0,1) -= H.at<double>(2,1) * cx;
+ H.at<double>(0,2) -= H.at<double>(2,2) * cx;
+ H.at<double>(1,0) -= H.at<double>(2,0) * cy;
+ H.at<double>(1,1) -= H.at<double>(2,1) * cy;
+ H.at<double>(1,2) -= H.at<double>(2,2) * cy;
+
+ double h[3], v[3], d1[3], d2[3];
+ double n[4] = {0,0,0,0};
+
+ for (int j = 0; j < 3; ++j)
+ {
+ double t0 = H.at<double>(j,0);
+ double t1 = H.at<double>(j,1);
+ h[j] = t0; v[j] = t1;
+ d1[j] = (t0 + t1) * 0.5;
+ d2[j] = (t0 - t1) * 0.5;
+ n[0] += t0 * t0; n[1] += t1 * t1;
+ n[2] += d1[j] * d1[j]; n[3] += d2[j] * d2[j];
+ }
+
+ for (int j = 0; j < 4; ++j)
+ {
+ n[j] = 1.0 / sqrt(n[j]);
+ }
+
+ for (int j = 0; j < 3; ++j)
+ {
+ h[j] *= n[0]; v[j] *= n[1];
+ d1[j] *= n[2]; d2[j] *= n[3];
+ }
+
+ A.at<double>(i * 2, 0) = h[0] * v[0];
+ A.at<double>(i * 2, 1) = h[1] * v[1];
+ A.at<double>(i * 2 + 1, 0) = d1[0] * d2[0];
+ A.at<double>(i * 2 + 1, 1) = d1[1] * d2[1];
+ b.at<double>(i * 2, 0) = -h[2] * v[2];
+ b.at<double>(i * 2 + 1, 0) = -d1[2] * d2[2];
+ }
+
+ cv::Mat f(2, 1, CV_64F);
+ cv::solve(A, b, f, cv::DECOMP_NORMAL | cv::DECOMP_LU);
+
+ params.fx() = sqrt(fabs(1.0 / f.at<double>(0)));
+ params.fy() = sqrt(fabs(1.0 / f.at<double>(1)));
+
+ setParameters(params);
+}
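+
+// Worked note (restating the step above; Zhang, PAMI 2000): once the principal
+// point is subtracted from H, the first two homography columns h and v satisfy
+//   h^T * diag(1/fx^2, 1/fy^2, 1) * v = 0
+//   (h+v)^T * diag(1/fx^2, 1/fy^2, 1) * (h-v) = 0
+// so every view contributes two linear equations in (1/fx^2, 1/fy^2), which is
+// exactly the A * f = b system handed to cv::solve() before taking 1/sqrt(|f|).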
+
+/**
+ * \brief Lifts a point from the image plane to the unit sphere
+ *
+ * \param p image coordinates
+ * \param P coordinates of the point on the sphere
+ */
+void
+PinholeCamera::liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
+{
+ liftProjective(p, P);
+
+ P.normalize();
+}
+
+/**
+ * \brief Lifts a point from the image plane to its projective ray
+ *
+ * \param p image coordinates
+ * \param P coordinates of the projective ray
+ */
+void
+PinholeCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
+{
+ double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u;
+ double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d;
+ //double lambda;
+
+ // Lift points to normalised plane
+ mx_d = m_inv_K11 * p(0) + m_inv_K13;
+ my_d = m_inv_K22 * p(1) + m_inv_K23;
+
+ if (m_noDistortion)
+ {
+ mx_u = mx_d;
+ my_u = my_d;
+ }
+ else
+ {
+ if (0)
+ {
+ double k1 = mParameters.k1();
+ double k2 = mParameters.k2();
+ double p1 = mParameters.p1();
+ double p2 = mParameters.p2();
+
+ // Apply inverse distortion model
+ // proposed by Heikkila
+ mx2_d = mx_d*mx_d;
+ my2_d = my_d*my_d;
+ mxy_d = mx_d*my_d;
+ rho2_d = mx2_d+my2_d;
+ rho4_d = rho2_d*rho2_d;
+ radDist_d = k1*rho2_d+k2*rho4_d;
+ Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d;
+ Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d;
+ inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d);
+
+ mx_u = mx_d - inv_denom_d*Dx_d;
+ my_u = my_d - inv_denom_d*Dy_d;
+ }
+ else
+ {
+ // Recursive distortion model
+ int n = 8;
+ Eigen::Vector2d d_u;
+ distortion(Eigen::Vector2d(mx_d, my_d), d_u);
+ // Approximate value
+ mx_u = mx_d - d_u(0);
+ my_u = my_d - d_u(1);
+
+ for (int i = 1; i < n; ++i)
+ {
+ distortion(Eigen::Vector2d(mx_u, my_u), d_u);
+ mx_u = mx_d - d_u(0);
+ my_u = my_d - d_u(1);
+ }
+ }
+ }
+
+ // Obtain a projective ray
+ P << mx_u, my_u, 1.0;
+}
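+
+// Illustrative sketch (hypothetical helper): the "recursive distortion model"
+// branch above is a fixed-point iteration p_u <- p_d - d(p_u), started at the
+// distorted point p_d on the normalised plane.
+#if 0
+static Eigen::Vector2d undistortFixedPoint(const PinholeCamera& cam,
+                                           const Eigen::Vector2d& p_d,
+                                           int iterations = 8)
+{
+    Eigen::Vector2d p_u = p_d;
+    for (int i = 0; i < iterations; ++i)
+    {
+        Eigen::Vector2d d_u;
+        cam.distortion(p_u, d_u); // distortion offset at the current estimate
+        p_u = p_d - d_u;
+    }
+    return p_u;
+}
+#endif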
+
+
+/**
+ * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+PinholeCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const
+{
+ Eigen::Vector2d p_u, p_d;
+
+ // Project points to the normalised plane
+ p_u << P(0) / P(2), P(1) / P(2);
+
+ if (m_noDistortion)
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion(p_u, d_u);
+ p_d = p_u + d_u;
+ }
+
+ // Apply generalised projection matrix
+ p << mParameters.fx() * p_d(0) + mParameters.cx(),
+ mParameters.fy() * p_d(1) + mParameters.cy();
+}
+
+#if 0
+/**
+ * \brief Project a 3D point to the image plane and calculate Jacobian
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+PinholeCamera::spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
+ Eigen::Matrix<double, 2, 3>& J) const
+{
+ Eigen::Vector2d p_u, p_d;
+ double norm, inv_denom;
+ double dxdmx, dydmx, dxdmy, dydmy;
+
+ norm = P.norm();
+ // Project points to the normalised plane
+ inv_denom = 1.0 / P(2);
+ p_u << inv_denom * P(0), inv_denom * P(1);
+
+ // Calculate jacobian
+ double dudx = inv_denom;
+ double dvdx = 0.0;
+ double dudy = 0.0;
+ double dvdy = inv_denom;
+ inv_denom = - inv_denom * inv_denom;
+ double dudz = P(0) * inv_denom;
+ double dvdz = P(1) * inv_denom;
+
+ if (m_noDistortion)
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion(p_u, d_u);
+ p_d = p_u + d_u;
+ }
+
+ double fx = mParameters.fx();
+ double fy = mParameters.fy();
+
+ // Make the product of the jacobians
+ // and add projection matrix jacobian
+ inv_denom = fx * (dudx * dxdmx + dvdx * dxdmy); // reuse
+ dvdx = fy * (dudx * dydmx + dvdx * dydmy);
+ dudx = inv_denom;
+
+ inv_denom = fx * (dudy * dxdmx + dvdy * dxdmy); // reuse
+ dvdy = fy * (dudy * dydmx + dvdy * dydmy);
+ dudy = inv_denom;
+
+ inv_denom = fx * (dudz * dxdmx + dvdz * dxdmy); // reuse
+ dvdz = fy * (dudz * dydmx + dvdz * dydmy);
+ dudz = inv_denom;
+
+ // Apply generalised projection matrix
+ p << fx * p_d(0) + mParameters.cx(),
+ fy * p_d(1) + mParameters.cy();
+
+ J << dudx, dudy, dudz,
+ dvdx, dvdy, dvdz;
+}
+#endif
+
+/**
+ * \brief Projects an undistorted 2D point p_u to the image plane
+ *
+ * \param p_u 2D point coordinates
+ * \return image point coordinates
+ */
+void
+PinholeCamera::undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const
+{
+ Eigen::Vector2d p_d;
+
+ if (m_noDistortion)
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion(p_u, d_u);
+ p_d = p_u + d_u;
+ }
+
+ // Apply generalised projection matrix
+ p << mParameters.fx() * p_d(0) + mParameters.cx(),
+ mParameters.fy() * p_d(1) + mParameters.cy();
+}
+
+/**
+ * \brief Apply distortion to input point (from the normalised plane)
+ *
+ * \param p_u undistorted coordinates of point on the normalised plane
+ * \return to obtain the distorted point: p_d = p_u + d_u
+ */
+void
+PinholeCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const
+{
+ double k1 = mParameters.k1();
+ double k2 = mParameters.k2();
+ double p1 = mParameters.p1();
+ double p2 = mParameters.p2();
+
+ double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
+
+ mx2_u = p_u(0) * p_u(0);
+ my2_u = p_u(1) * p_u(1);
+ mxy_u = p_u(0) * p_u(1);
+ rho2_u = mx2_u + my2_u;
+ rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
+ d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
+ p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
+}
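+
+// Worked note (restates the code above): with r^2 = x^2 + y^2 this is the
+// plumb-bob / Brown-Conrady model
+//   x_d = x (1 + k1 r^2 + k2 r^4) + 2 p1 x y + p2 (r^2 + 2 x^2)
+//   y_d = y (1 + k1 r^2 + k2 r^4) + 2 p2 x y + p1 (r^2 + 2 y^2)
+// and distortion() returns the offset d_u = (x_d - x, y_d - y).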
+
+/**
+ * \brief Apply distortion to input point (from the normalised plane)
+ * and calculate Jacobian
+ *
+ * \param p_u undistorted coordinates of point on the normalised plane
+ * \return to obtain the distorted point: p_d = p_u + d_u
+ */
+void
+PinholeCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
+ Eigen::Matrix2d& J) const
+{
+ double k1 = mParameters.k1();
+ double k2 = mParameters.k2();
+ double p1 = mParameters.p1();
+ double p2 = mParameters.p2();
+
+ double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
+
+ mx2_u = p_u(0) * p_u(0);
+ my2_u = p_u(1) * p_u(1);
+ mxy_u = p_u(0) * p_u(1);
+ rho2_u = mx2_u + my2_u;
+ rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
+ d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
+ p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
+
+ double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u + k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) + 6.0 * p2 * p_u(0);
+ double dydmx = k1 * 2.0 * p_u(0) * p_u(1) + k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) + 2.0 * p2 * p_u(1);
+ double dxdmy = dydmx;
+ double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u + k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) + 2.0 * p2 * p_u(0);
+
+ J << dxdmx, dxdmy,
+ dydmx, dydmy;
+}
+
+void
+PinholeCamera::initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale) const
+{
+ cv::Size imageSize(mParameters.imageWidth(), mParameters.imageHeight());
+
+ cv::Mat mapX = cv::Mat::zeros(imageSize, CV_32F);
+ cv::Mat mapY = cv::Mat::zeros(imageSize, CV_32F);
+
+ for (int v = 0; v < imageSize.height; ++v)
+ {
+ for (int u = 0; u < imageSize.width; ++u)
+ {
+ double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
+ double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;
+
+ Eigen::Vector3d P;
+ P << mx_u, my_u, 1.0;
+
+ Eigen::Vector2d p;
+ spaceToPlane(P, p);
+
+ mapX.at<float>(v,u) = p(0);
+ mapY.at<float>(v,u) = p(1);
+ }
+ }
+
+ cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
+}
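+
+// Illustrative usage sketch (caller-side code, assumes <opencv2/imgproc> is
+// available): the two maps computed above are intended for cv::remap.
+#if 0
+static cv::Mat undistortImage(const PinholeCamera& cam, const cv::Mat& raw)
+{
+    cv::Mat map1, map2, undistorted;
+    cam.initUndistortMap(map1, map2);
+    cv::remap(raw, undistorted, map1, map2, cv::INTER_LINEAR);
+    return undistorted;
+}
+#endif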
+
+cv::Mat
+PinholeCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
+ float fx, float fy,
+ cv::Size imageSize,
+ float cx, float cy,
+ cv::Mat rmat) const
+{
+ if (imageSize == cv::Size(0, 0))
+ {
+ imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
+ }
+
+ cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
+ cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
+
+ Eigen::Matrix3f R, R_inv;
+ cv::cv2eigen(rmat, R);
+ R_inv = R.inverse();
+
+ // assume no skew
+ Eigen::Matrix3f K_rect;
+
+ if (cx == -1.0f || cy == -1.0f)
+ {
+ K_rect << fx, 0, imageSize.width / 2,
+ 0, fy, imageSize.height / 2,
+ 0, 0, 1;
+ }
+ else
+ {
+ K_rect << fx, 0, cx,
+ 0, fy, cy,
+ 0, 0, 1;
+ }
+
+ if (fx == -1.0f || fy == -1.0f)
+ {
+ K_rect(0,0) = mParameters.fx();
+ K_rect(1,1) = mParameters.fy();
+ }
+
+ Eigen::Matrix3f K_rect_inv = K_rect.inverse();
+
+ for (int v = 0; v < imageSize.height; ++v)
+ {
+ for (int u = 0; u < imageSize.width; ++u)
+ {
+ Eigen::Vector3f xo;
+ xo << u, v, 1;
+
+ Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
+
+ Eigen::Vector2d p;
+ spaceToPlane(uo.cast<double>(), p);
+
+ mapX.at<float>(v,u) = p(0);
+ mapY.at<float>(v,u) = p(1);
+ }
+ }
+
+ cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
+
+ cv::Mat K_rect_cv;
+ cv::eigen2cv(K_rect, K_rect_cv);
+ return K_rect_cv;
+}
+
+int
+PinholeCamera::parameterCount(void) const
+{
+ return 8;
+}
+
+const PinholeCamera::Parameters&
+PinholeCamera::getParameters(void) const
+{
+ return mParameters;
+}
+
+void
+PinholeCamera::setParameters(const PinholeCamera::Parameters& parameters)
+{
+ mParameters = parameters;
+
+ if ((mParameters.k1() == 0.0) &&
+ (mParameters.k2() == 0.0) &&
+ (mParameters.p1() == 0.0) &&
+ (mParameters.p2() == 0.0))
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ m_inv_K11 = 1.0 / mParameters.fx();
+ m_inv_K13 = -mParameters.cx() / mParameters.fx();
+ m_inv_K22 = 1.0 / mParameters.fy();
+ m_inv_K23 = -mParameters.cy() / mParameters.fy();
+}
+
+void
+PinholeCamera::readParameters(const std::vector<double>& parameterVec)
+{
+ if ((int)parameterVec.size() != parameterCount())
+ {
+ return;
+ }
+
+ Parameters params = getParameters();
+
+ params.k1() = parameterVec.at(0);
+ params.k2() = parameterVec.at(1);
+ params.p1() = parameterVec.at(2);
+ params.p2() = parameterVec.at(3);
+ params.fx() = parameterVec.at(4);
+ params.fy() = parameterVec.at(5);
+ params.cx() = parameterVec.at(6);
+ params.cy() = parameterVec.at(7);
+
+ setParameters(params);
+}
+
+void
+PinholeCamera::writeParameters(std::vector<double>& parameterVec) const
+{
+ parameterVec.resize(parameterCount());
+ parameterVec.at(0) = mParameters.k1();
+ parameterVec.at(1) = mParameters.k2();
+ parameterVec.at(2) = mParameters.p1();
+ parameterVec.at(3) = mParameters.p2();
+ parameterVec.at(4) = mParameters.fx();
+ parameterVec.at(5) = mParameters.fy();
+ parameterVec.at(6) = mParameters.cx();
+ parameterVec.at(7) = mParameters.cy();
+}
+
+void
+PinholeCamera::writeParametersToYamlFile(const std::string& filename) const
+{
+ mParameters.writeToYamlFile(filename);
+}
+
+std::string
+PinholeCamera::parametersToString(void) const
+{
+ std::ostringstream oss;
+ oss << mParameters;
+
+ return oss.str();
+}
+
+}
diff --git a/camodocal/camera_models/PinholeCamera.h b/camodocal/camera_models/PinholeCamera.h
new file mode 100644
index 0000000..a292280
--- /dev/null
+++ b/camodocal/camera_models/PinholeCamera.h
@@ -0,0 +1,146 @@
+#ifndef PINHOLECAMERA_H
+#define PINHOLECAMERA_H
+
+#include <opencv2/core/core.hpp>
+#include <string>
+
+#include "Camera.h"
+
+namespace camodocal
+{
+
+class PinholeCamera: public Camera
+{
+public:
+ class Parameters: public Camera::Parameters
+ {
+ public:
+ Parameters();
+ Parameters(const std::string& cameraName,
+ int w, int h,
+ double k1, double k2, double p1, double p2,
+ double fx, double fy, double cx, double cy);
+
+ double& k1(void);
+ double& k2(void);
+ double& p1(void);
+ double& p2(void);
+ double& fx(void);
+ double& fy(void);
+ double& cx(void);
+ double& cy(void);
+
+ double xi(void) const;
+ double k1(void) const;
+ double k2(void) const;
+ double p1(void) const;
+ double p2(void) const;
+ double fx(void) const;
+ double fy(void) const;
+ double cx(void) const;
+ double cy(void) const;
+
+ bool readFromYamlFile(const std::string& filename);
+ void writeToYamlFile(const std::string& filename) const;
+
+ Parameters& operator=(const Parameters& other);
+ friend std::ostream& operator<< (std::ostream& out, const Parameters& params);
+
+ private:
+ double m_k1;
+ double m_k2;
+ double m_p1;
+ double m_p2;
+ double m_fx;
+ double m_fy;
+ double m_cx;
+ double m_cy;
+ };
+
+ PinholeCamera();
+
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ PinholeCamera(const std::string& cameraName,
+ int imageWidth, int imageHeight,
+ double k1, double k2, double p1, double p2,
+ double fx, double fy, double cx, double cy);
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ PinholeCamera(const Parameters& params);
+
+ Camera::ModelType modelType(void) const;
+ const std::string& cameraName(void) const;
+ int imageWidth(void) const;
+ int imageHeight(void) const;
+
+ void estimateIntrinsics(const cv::Size& boardSize,
+ const std::vector< std::vector<cv::Point3f> >& objectPoints,
+ const std::vector< std::vector<cv::Point2f> >& imagePoints);
+
+ // Lift points from the image plane to the sphere
+ virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
+ //%output P
+
+ // Lift points from the image plane to the projective space
+ void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const;
+ //%output P
+
+ // Projects 3D points to the image plane (Pi function)
+ void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const;
+ //%output p
+
+ // Projects 3D points to the image plane (Pi function)
+ // and calculates jacobian
+ void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p,
+ Eigen::Matrix<double, 2, 3>& J) const;
+ //%output p
+ //%output J
+
+ void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const;
+ //%output p
+
+ template <typename T>
+ static void spaceToPlane(const T* const params,
+ const T* const q, const T* const t,
+ const Eigen::Matrix<T, 3, 1>& P,
+ Eigen::Matrix<T, 2, 1>& p);
+
+ void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const;
+ void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
+ Eigen::Matrix2d& J) const;
+
+ void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const;
+ cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
+ float fx = -1.0f, float fy = -1.0f,
+ cv::Size imageSize = cv::Size(0, 0),
+ float cx = -1.0f, float cy = -1.0f,
+ cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const;
+
+ int parameterCount(void) const;
+
+ const Parameters& getParameters(void) const;
+ void setParameters(const Parameters& parameters);
+
+ void readParameters(const std::vector<double>& parameterVec);
+ void writeParameters(std::vector<double>& parameterVec) const;
+
+ void writeParametersToYamlFile(const std::string& filename) const;
+
+ std::string parametersToString(void) const;
+
+private:
+ Parameters mParameters;
+
+ double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
+ bool m_noDistortion;
+};
+
+typedef std::shared_ptr<PinholeCamera> PinholeCameraPtr;
+typedef std::shared_ptr<const PinholeCamera> PinholeCameraConstPtr;
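+
+// Illustrative usage sketch (the YAML path is hypothetical): a PinholeCamera is
+// typically built from a calibration file via Parameters::readFromYamlFile().
+#if 0
+static PinholeCameraPtr loadPinholeFromYaml(const std::string& path)
+{
+    PinholeCamera::Parameters params;
+    if (!params.readFromYamlFile(path)) // e.g. "config/cam0_pinhole.yaml"
+    {
+        return PinholeCameraPtr();
+    }
+    return PinholeCameraPtr(new PinholeCamera(params));
+}
+#endif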
+
+}
+
+#endif
diff --git a/camodocal/camera_models/PinholeFullCamera.cc b/camodocal/camera_models/PinholeFullCamera.cc
new file mode 100644
index 0000000..a6dcec3
--- /dev/null
+++ b/camodocal/camera_models/PinholeFullCamera.cc
@@ -0,0 +1,1000 @@
+#include "../camera_models/PinholeFullCamera.h"
+
+#include <cmath>
+#include <cstdio>
+#include <Eigen/Dense>
+#include <iomanip>
+#include <iostream>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/core/eigen.hpp>
+
+#include "../gpl/gpl.h"
+
+namespace camodocal
+{
+
+PinholeFullCamera::Parameters::Parameters( )
+: Camera::Parameters( PINHOLE_FULL )
+, m_k1( 0.0 )
+, m_k2( 0.0 )
+, m_k3( 0.0 )
+, m_k4( 0.0 )
+, m_k5( 0.0 )
+, m_k6( 0.0 )
+, m_p1( 0.0 )
+, m_p2( 0.0 )
+, m_fx( 0.0 )
+, m_fy( 0.0 )
+, m_cx( 0.0 )
+, m_cy( 0.0 )
+{
+}
+
+PinholeFullCamera::Parameters::Parameters( const std::string& cameraName, //
+ int w,
+ int h,
+ double k1,
+ double k2,
+ double k3,
+ double k4,
+ double k5,
+ double k6,
+ double p1,
+ double p2,
+ double fx,
+ double fy,
+ double cx,
+ double cy )
+: Camera::Parameters( PINHOLE_FULL, cameraName, w, h )
+, m_k1( k1 )
+, m_k2( k2 )
+, m_k3( k3 )
+, m_k4( k4 )
+, m_k5( k5 )
+, m_k6( k6 )
+, m_p1( p1 )
+, m_p2( p2 )
+, m_fx( fx )
+, m_fy( fy )
+, m_cx( cx )
+, m_cy( cy )
+{
+}
+
+double&
+PinholeFullCamera::Parameters::k1( void )
+{
+ return m_k1;
+}
+
+double&
+PinholeFullCamera::Parameters::k2( void )
+{
+ return m_k2;
+}
+
+double&
+PinholeFullCamera::Parameters::k3( )
+{
+ return m_k3;
+}
+
+double&
+PinholeFullCamera::Parameters::k4( )
+{
+ return m_k4;
+}
+
+double&
+PinholeFullCamera::Parameters::k5( )
+{
+ return m_k5;
+}
+
+double&
+PinholeFullCamera::Parameters::k6( )
+{
+ return m_k6;
+}
+
+double&
+PinholeFullCamera::Parameters::p1( void )
+{
+ return m_p1;
+}
+
+double&
+PinholeFullCamera::Parameters::p2( void )
+{
+ return m_p2;
+}
+
+double&
+PinholeFullCamera::Parameters::fx( void )
+{
+ return m_fx;
+}
+
+double&
+PinholeFullCamera::Parameters::fy( void )
+{
+ return m_fy;
+}
+
+double&
+PinholeFullCamera::Parameters::cx( void )
+{
+ return m_cx;
+}
+
+double&
+PinholeFullCamera::Parameters::cy( void )
+{
+ return m_cy;
+}
+
+double
+PinholeFullCamera::Parameters::k1( void ) const
+{
+ return m_k1;
+}
+
+double
+PinholeFullCamera::Parameters::k2( void ) const
+{
+ return m_k2;
+}
+
+double
+PinholeFullCamera::Parameters::p1( void ) const
+{
+ return m_p1;
+}
+
+double
+PinholeFullCamera::Parameters::p2( void ) const
+{
+ return m_p2;
+}
+
+double
+PinholeFullCamera::Parameters::fx( void ) const
+{
+ return m_fx;
+}
+
+double
+PinholeFullCamera::Parameters::fy( void ) const
+{
+ return m_fy;
+}
+
+double
+PinholeFullCamera::Parameters::cx( void ) const
+{
+ return m_cx;
+}
+
+double
+PinholeFullCamera::Parameters::cy( void ) const
+{
+ return m_cy;
+}
+
+double
+PinholeFullCamera::Parameters::k3( ) const
+{
+ return m_k3;
+}
+
+double
+PinholeFullCamera::Parameters::k4( ) const
+{
+ return m_k4;
+}
+
+double
+PinholeFullCamera::Parameters::k5( ) const
+{
+ return m_k5;
+}
+
+double
+PinholeFullCamera::Parameters::k6( ) const
+{
+ return m_k6;
+}
+
+bool
+PinholeFullCamera::Parameters::readFromYamlFile( const std::string& filename )
+{
+ cv::FileStorage fs( filename, cv::FileStorage::READ );
+
+ if ( !fs.isOpened( ) )
+ {
+ return false;
+ }
+
+ if ( !fs["model_type"].isNone( ) )
+ {
+ std::string sModelType;
+ fs["model_type"] >> sModelType;
+
+ if ( sModelType.compare( "PINHOLE_FULL" ) != 0 )
+ {
+ return false;
+ }
+ }
+
+ m_modelType = PINHOLE_FULL;
+ fs["camera_name"] >> m_cameraName;
+ m_imageWidth = static_cast< int >( fs["image_width"] );
+ m_imageHeight = static_cast< int >( fs["image_height"] );
+
+ cv::FileNode n = fs["distortion_parameters"];
+ m_k1 = static_cast< double >( n["k1"] );
+ m_k2 = static_cast< double >( n["k2"] );
+ m_k3 = static_cast< double >( n["k3"] );
+ m_k4 = static_cast< double >( n["k4"] );
+ m_k5 = static_cast< double >( n["k5"] );
+ m_k6 = static_cast< double >( n["k6"] );
+ m_p1 = static_cast< double >( n["p1"] );
+ m_p2 = static_cast< double >( n["p2"] );
+
+ n = fs["projection_parameters"];
+ m_fx = static_cast< double >( n["fx"] );
+ m_fy = static_cast< double >( n["fy"] );
+ m_cx = static_cast< double >( n["cx"] );
+ m_cy = static_cast< double >( n["cy"] );
+
+ return true;
+}
+
+void
+PinholeFullCamera::Parameters::writeToYamlFile( const std::string& filename ) const
+{
+ cv::FileStorage fs( filename, cv::FileStorage::WRITE );
+
+ fs << "model_type"
+ << "PINHOLE_FULL";
+ fs << "camera_name" << m_cameraName;
+ fs << "image_width" << m_imageWidth;
+ fs << "image_height" << m_imageHeight;
+
+ // radial distortion: k1, k2
+ // tangential distortion: p1, p2
+ fs << "distortion_parameters";
+ fs << "{"
+ << "k1" << m_k1 << "k2" << m_k2 << "k3" << m_k3 << "k4" << m_k4 << "k5" << m_k5
+ << "k6" << m_k6 << "p1" << m_p1 << "p2" << m_p2 << "}";
+
+ // projection: fx, fy, cx, cy
+ fs << "projection_parameters";
+ fs << "{"
+ << "fx" << m_fx << "fy" << m_fy << "cx" << m_cx << "cy" << m_cy << "}";
+
+ fs.release( );
+}
+
+PinholeFullCamera::Parameters&
+PinholeFullCamera::Parameters::operator=( const PinholeFullCamera::Parameters& other )
+{
+ if ( this != &other )
+ {
+ m_modelType = other.m_modelType;
+ m_cameraName = other.m_cameraName;
+ m_imageWidth = other.m_imageWidth;
+ m_imageHeight = other.m_imageHeight;
+ m_k1 = other.m_k1;
+ m_k2 = other.m_k2;
+ m_k3 = other.m_k3;
+ m_k4 = other.m_k4;
+ m_k5 = other.m_k5;
+ m_k6 = other.m_k6;
+ m_p1 = other.m_p1;
+ m_p2 = other.m_p2;
+ m_fx = other.m_fx;
+ m_fy = other.m_fy;
+ m_cx = other.m_cx;
+ m_cy = other.m_cy;
+ }
+
+ return *this;
+}
+
+std::ostream&
+operator<<( std::ostream& out, const PinholeFullCamera::Parameters& params )
+{
+ out << "Camera Parameters:" << std::endl;
+ out << " model_type "
+ << "PINHOLE_FULL" << std::endl;
+ out << " camera_name " << params.m_cameraName << std::endl;
+ out << " image_width " << params.m_imageWidth << std::endl;
+ out << " image_height " << params.m_imageHeight << std::endl;
+
+ // radial distortion: k1, k2
+ // tangential distortion: p1, p2
+ out << "Distortion Parameters" << std::endl;
+ out << " k1 " << params.m_k1 << std::endl
+ << " k2 " << params.m_k2 << std::endl
+ << " k3 " << params.m_k3 << std::endl
+ << " k4 " << params.m_k4 << std::endl
+ << " k5 " << params.m_k5 << std::endl
+ << " k6 " << params.m_k6 << std::endl
+ << " p1 " << params.m_p1 << std::endl
+ << " p2 " << params.m_p2 << std::endl;
+
+ // projection: fx, fy, cx, cy
+ out << "Projection Parameters" << std::endl;
+ out << " fx " << params.m_fx << std::endl
+ << " fy " << params.m_fy << std::endl
+ << " cx " << params.m_cx << std::endl
+ << " cy " << params.m_cy << std::endl;
+
+ return out;
+}
+
+PinholeFullCamera::PinholeFullCamera( )
+: m_inv_K11( 1.0 )
+, m_inv_K13( 0.0 )
+, m_inv_K22( 1.0 )
+, m_inv_K23( 0.0 )
+, m_noDistortion( true )
+{
+}
+
+PinholeFullCamera::PinholeFullCamera( const std::string& cameraName,
+ int imageWidth,
+ int imageHeight,
+ double k1,
+ double k2,
+ double k3,
+ double k4,
+ double k5,
+ double k6,
+ double p1,
+ double p2,
+ double fx,
+ double fy,
+ double cx,
+ double cy )
+: mParameters( cameraName, imageWidth, imageHeight, k1, k2, k3, k4, k5, k6, p1, p2, fx, fy, cx, cy )
+{
+ if ( ( mParameters.k1( ) == 0.0 ) && ( mParameters.k2( ) == 0.0 )
+ && ( mParameters.p1( ) == 0.0 ) && ( mParameters.p2( ) == 0.0 ) )
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.fx( );
+ m_inv_K13 = -mParameters.cx( ) / mParameters.fx( );
+ m_inv_K22 = 1.0 / mParameters.fy( );
+ m_inv_K23 = -mParameters.cy( ) / mParameters.fy( );
+}
+
+PinholeFullCamera::PinholeFullCamera( const PinholeFullCamera::Parameters& params )
+: mParameters( params )
+{
+ if ( ( mParameters.k1( ) == 0.0 ) && ( mParameters.k2( ) == 0.0 )
+ && ( mParameters.p1( ) == 0.0 ) && ( mParameters.p2( ) == 0.0 ) )
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ // Inverse camera projection matrix parameters
+ m_inv_K11 = 1.0 / mParameters.fx( );
+ m_inv_K13 = -mParameters.cx( ) / mParameters.fx( );
+ m_inv_K22 = 1.0 / mParameters.fy( );
+ m_inv_K23 = -mParameters.cy( ) / mParameters.fy( );
+}
+
+Camera::ModelType
+PinholeFullCamera::modelType( void ) const
+{
+ return mParameters.modelType( );
+}
+
+const std::string&
+PinholeFullCamera::cameraName( void ) const
+{
+ return mParameters.cameraName( );
+}
+
+int
+PinholeFullCamera::imageWidth( void ) const
+{
+ return mParameters.imageWidth( );
+}
+
+int
+PinholeFullCamera::imageHeight( void ) const
+{
+ return mParameters.imageHeight( );
+}
+
+void
+PinholeFullCamera::estimateIntrinsics( const cv::Size& boardSize,
+ const std::vector< std::vector< cv::Point3f > >& objectPoints,
+ const std::vector< std::vector< cv::Point2f > >& imagePoints )
+{
+ // Z. Zhang, A Flexible New Technique for Camera Calibration, PAMI 2000
+
+ Parameters params = getParameters( );
+
+ params.k1( ) = 0.0;
+ params.k2( ) = 0.0;
+ params.p1( ) = 0.0;
+ params.p2( ) = 0.0;
+
+ double cx = params.imageWidth( ) / 2.0;
+ double cy = params.imageHeight( ) / 2.0;
+ params.cx( ) = cx;
+ params.cy( ) = cy;
+
+ size_t nImages = imagePoints.size( );
+
+ cv::Mat A( nImages * 2, 2, CV_64F );
+ cv::Mat b( nImages * 2, 1, CV_64F );
+
+ for ( size_t i = 0; i < nImages; ++i )
+ {
+ const std::vector< cv::Point3f >& oPoints = objectPoints.at( i );
+
+ std::vector< cv::Point2f > M( oPoints.size( ) );
+ for ( size_t j = 0; j < M.size( ); ++j )
+ {
+ M.at( j ) = cv::Point2f( oPoints.at( j ).x, oPoints.at( j ).y );
+ }
+
+ cv::Mat H = cv::findHomography( M, imagePoints.at( i ) );
+
+ H.at< double >( 0, 0 ) -= H.at< double >( 2, 0 ) * cx;
+ H.at< double >( 0, 1 ) -= H.at< double >( 2, 1 ) * cx;
+ H.at< double >( 0, 2 ) -= H.at< double >( 2, 2 ) * cx;
+ H.at< double >( 1, 0 ) -= H.at< double >( 2, 0 ) * cy;
+ H.at< double >( 1, 1 ) -= H.at< double >( 2, 1 ) * cy;
+ H.at< double >( 1, 2 ) -= H.at< double >( 2, 2 ) * cy;
+
+ double h[3], v[3], d1[3], d2[3];
+ double n[4] = { 0, 0, 0, 0 };
+
+ for ( int j = 0; j < 3; ++j )
+ {
+ double t0 = H.at< double >( j, 0 );
+ double t1 = H.at< double >( j, 1 );
+ h[j] = t0;
+ v[j] = t1;
+ d1[j] = ( t0 + t1 ) * 0.5;
+ d2[j] = ( t0 - t1 ) * 0.5;
+ n[0] += t0 * t0;
+ n[1] += t1 * t1;
+ n[2] += d1[j] * d1[j];
+ n[3] += d2[j] * d2[j];
+ }
+
+ for ( int j = 0; j < 4; ++j )
+ {
+ n[j] = 1.0 / sqrt( n[j] );
+ }
+
+ for ( int j = 0; j < 3; ++j )
+ {
+ h[j] *= n[0];
+ v[j] *= n[1];
+ d1[j] *= n[2];
+ d2[j] *= n[3];
+ }
+
+ A.at< double >( i * 2, 0 ) = h[0] * v[0];
+ A.at< double >( i * 2, 1 ) = h[1] * v[1];
+ A.at< double >( i * 2 + 1, 0 ) = d1[0] * d2[0];
+ A.at< double >( i * 2 + 1, 1 ) = d1[1] * d2[1];
+ b.at< double >( i * 2, 0 ) = -h[2] * v[2];
+ b.at< double >( i * 2 + 1, 0 ) = -d1[2] * d2[2];
+ }
+
+ cv::Mat f( 2, 1, CV_64F );
+ cv::solve( A, b, f, cv::DECOMP_NORMAL | cv::DECOMP_LU );
+
+ params.fx( ) = sqrt( fabs( 1.0 / f.at< double >( 0 ) ) );
+ params.fy( ) = sqrt( fabs( 1.0 / f.at< double >( 1 ) ) );
+
+ setParameters( params );
+}
+
+/**
+ * \brief Lifts a point from the image plane to the unit sphere
+ *
+ * \param p image coordinates
+ * \param P coordinates of the point on the sphere
+ */
+void
+PinholeFullCamera::liftSphere( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const
+{
+ liftProjective( p, P );
+
+ P.normalize( );
+}
+
+/**
+ * \brief Lifts a point from the image plane to its projective ray
+ *
+ * \param p image coordinates
+ * \param P coordinates of the projective ray
+ */
+void
+PinholeFullCamera::liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const
+{
+ double k1 = mParameters.k1( );
+ double k2 = mParameters.k2( );
+ double k3 = mParameters.k3( );
+ double k4 = mParameters.k4( );
+ double k5 = mParameters.k5( );
+ double k6 = mParameters.k6( );
+ double p1 = mParameters.p1( );
+ double p2 = mParameters.p2( );
+
+ double fx = mParameters.fx( );
+ double fy = mParameters.fy( );
+ double ifx = 1. / fx;
+ double ify = 1. / fy;
+ double cx = mParameters.cx( );
+ double cy = mParameters.cy( );
+
+ // Lift points to normalised plane
+ double mx_d = ifx * p( 0 ) + m_inv_K13;
+ double my_d = ify * p( 1 ) + m_inv_K23;
+ double u = p( 0 );
+ double v = p( 1 );
+ double x = mx_d;
+ double y = my_d;
+ double x0 = x;
+ double y0 = y;
+
+ double error = std::numeric_limits< double >::max( );
+
+ int max_cnt = 8; // 5
+ double min_error = 0.01;
+ for ( int j = 0;; j++ )
+ {
+ if ( j > max_cnt )
+ break;
+ if ( error < min_error )
+ break;
+
+ double r2 = x * x + y * y;
+ double icdist = ( 1 + ( ( k6 * r2 + k5 ) * r2 + k4 ) * r2 )
+ / ( 1 + ( ( k3 * r2 + k2 ) * r2 + k1 ) * r2 );
+ double deltaX = 2 * p1 * x * y + p2 * ( r2 + 2 * x * x );
+ double deltaY = p1 * ( r2 + 2 * y * y ) + 2 * p2 * x * y;
+
+ x = ( x0 - deltaX ) * icdist;
+ y = ( y0 - deltaY ) * icdist;
+
+ if ( 1 )
+ {
+ double r4, r6, a1, a2, a3, cdist, icdist2;
+ double xd0, yd0;
+
+ r2 = x * x + y * y;
+ r4 = r2 * r2;
+ r6 = r4 * r2;
+ a1 = 2 * x * y;
+ a2 = r2 + 2 * x * x;
+ a3 = r2 + 2 * y * y;
+ cdist = 1 + k1 * r2 + k2 * r4 + k3 * r6;
+ icdist2 = 1. / ( 1 + k4 * r2 + k5 * r4 + k6 * r6 );
+ xd0 = x * cdist * icdist2 + p1 * a1 + p2 * a2;
+ yd0 = y * cdist * icdist2 + p1 * a3 + p2 * a1;
+
+ double x_proj = xd0 * fx + cx;
+ double y_proj = yd0 * fy + cy;
+
+ error = sqrt( pow( x_proj - u, 2 ) + pow( y_proj - v, 2 ) );
+ }
+ }
+
+ P << x, y, 1.0;
+}
+
+void
+PinholeFullCamera::liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P, float image_scale ) const
+{
+ Eigen::Vector2d p_tmp = p / image_scale; // p_tmp is without resize, p is with resize
+ liftProjective( p_tmp, P ); // p_tmp is without resize
+}
+
+/**
+ * \brief Project a 3D point (\a x,\a y,\a z) to the image plane in (\a u,\a v)
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+PinholeFullCamera::spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p ) const
+{
+ Eigen::Vector2d p_u, p_d;
+
+ // Project points to the normalised plane
+ p_u << P( 0 ) / P( 2 ), P( 1 ) / P( 2 );
+
+ if ( m_noDistortion )
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion( p_u, d_u );
+ p_d = p_u + d_u;
+ }
+
+ // Apply generalised projection matrix
+ p << mParameters.fx( ) * p_d( 0 ) + mParameters.cx( ),
+ mParameters.fy( ) * p_d( 1 ) + mParameters.cy( );
+}
+
+void
+PinholeFullCamera::spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p, float image_scale ) const
+{
+ Eigen::Vector2d p_tmp;
+ spaceToPlane( P, p_tmp );
+ p = p_tmp * image_scale;
+}
+
+/**
+ * \brief Project a 3D point to the image plane and calculate Jacobian
+ *
+ * \param P 3D point coordinates
+ * \param p return value, contains the image point coordinates
+ */
+void
+PinholeFullCamera::spaceToPlane( const Eigen::Vector3d& P,
+ Eigen::Vector2d& p,
+ Eigen::Matrix< double, 2, 3 >& J ) const
+{
+ Eigen::Vector2d p_u, p_d;
+ double norm, inv_denom;
+ double dxdmx, dydmx, dxdmy, dydmy;
+
+ norm = P.norm( );
+ // Project points to the normalised plane
+ inv_denom = 1.0 / P( 2 );
+ p_u << inv_denom * P( 0 ), inv_denom * P( 1 );
+
+ // Calculate jacobian
+ double dudx = inv_denom;
+ double dvdx = 0.0;
+ double dudy = 0.0;
+ double dvdy = inv_denom;
+ inv_denom = -inv_denom * inv_denom;
+ double dudz = P( 0 ) * inv_denom;
+ double dvdz = P( 1 ) * inv_denom;
+
+ if ( m_noDistortion )
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion( p_u, d_u );
+ p_d = p_u + d_u;
+ }
+
+ double fx = mParameters.fx( );
+ double fy = mParameters.fy( );
+
+ // Make the product of the jacobians
+ // and add projection matrix jacobian
+ inv_denom = fx * ( dudx * dxdmx + dvdx * dxdmy ); // reuse
+ dvdx = fy * ( dudx * dydmx + dvdx * dydmy );
+ dudx = inv_denom;
+
+ inv_denom = fx * ( dudy * dxdmx + dvdy * dxdmy ); // reuse
+ dvdy = fy * ( dudy * dydmx + dvdy * dydmy );
+ dudy = inv_denom;
+
+ inv_denom = fx * ( dudz * dxdmx + dvdz * dxdmy ); // reuse
+ dvdz = fy * ( dudz * dydmx + dvdz * dydmy );
+ dudz = inv_denom;
+
+ // Apply generalised projection matrix
+ p << fx * p_d( 0 ) + mParameters.cx( ), fy * p_d( 1 ) + mParameters.cy( );
+
+ J << dudx, dudy, dudz, dvdx, dvdy, dvdz;
+}
+
+/**
+ * \brief Projects an undistorted 2D point p_u to the image plane
+ *
+ * \param p_u 2D point coordinates
+ * \return image point coordinates
+ */
+void
+PinholeFullCamera::undistToPlane( const Eigen::Vector2d& p_u, Eigen::Vector2d& p ) const
+{
+ Eigen::Vector2d p_d;
+
+ if ( m_noDistortion )
+ {
+ p_d = p_u;
+ }
+ else
+ {
+ // Apply distortion
+ Eigen::Vector2d d_u;
+ distortion( p_u, d_u );
+ p_d = p_u + d_u;
+ }
+
+ // Apply generalised projection matrix
+ p << mParameters.fx( ) * p_d( 0 ) + mParameters.cx( ),
+ mParameters.fy( ) * p_d( 1 ) + mParameters.cy( );
+}
+
+/**
+ * \brief Apply distortion to input point (from the normalised plane)
+ *
+ * \param p_u undistorted coordinates of point on the normalised plane
+ * \return to obtain the distorted point: p_d = p_u + d_u
+ */
+void
+PinholeFullCamera::distortion( const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u ) const
+{
+ // project 3D object point to the image plane
+ double k1 = mParameters.k1( );
+ double k2 = mParameters.k2( );
+ double k3 = mParameters.k3( );
+ double k4 = mParameters.k4( );
+ double k5 = mParameters.k5( );
+ double k6 = mParameters.k6( );
+ double p1 = mParameters.p1( );
+ double p2 = mParameters.p2( );
+
+ // Transform to model plane
+ double x = p_u( 0 );
+ double y = p_u( 1 );
+
+ double r2 = x * x + y * y;
+ double r4 = r2 * r2;
+ double r6 = r4 * r2;
+ double a1 = 2 * x * y;
+ double a2 = r2 + 2 * x * x;
+ double a3 = r2 + 2 * y * y;
+ double cdist = 1 + k1 * r2 + k2 * r4 + k3 * r6;
+ double icdist2 = 1. / ( 1 + k4 * r2 + k5 * r4 + k6 * r6 );
+
+ d_u << x * cdist * icdist2 + p1 * a1 + p2 * a2 - x, //
+ y * cdist * icdist2 + p1 * a3 + p2 * a1 - y;
+}
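+
+// Worked note (restates the code above): PINHOLE_FULL uses OpenCV's rational
+// model. With r^2 = x^2 + y^2,
+//   scale = (1 + k1 r^2 + k2 r^4 + k3 r^6) / (1 + k4 r^2 + k5 r^4 + k6 r^6)
+//   x_d = x * scale + 2 p1 x y + p2 (r^2 + 2 x^2)
+//   y_d = y * scale + 2 p2 x y + p1 (r^2 + 2 y^2)
+// and, as in PinholeCamera, d_u is the offset (x_d - x, y_d - y).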
+
+/**
+ * \brief Apply distortion to input point (from the normalised plane)
+ * and calculate Jacobian
+ *
+ * \param p_u undistorted coordinates of point on the normalised plane
+ * \return to obtain the distorted point: p_d = p_u + d_u
+ */
+void
+PinholeFullCamera::distortion( const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, Eigen::Matrix2d& J ) const
+{
+ // project 3D object point to the image plane
+ double k1 = mParameters.k1( );
+ double k2 = mParameters.k2( );
+ double k3 = mParameters.k3( );
+ double k4 = mParameters.k4( );
+ double k5 = mParameters.k5( );
+ double k6 = mParameters.k6( );
+ double p1 = mParameters.p1( );
+ double p2 = mParameters.p2( );
+
+ // Transform to model plane
+ double x = p_u( 0 );
+ double y = p_u( 1 );
+
+ double r2 = x * x + y * y;
+ double r4 = r2 * r2;
+ double r6 = r4 * r2;
+ double a1 = 2 * x * y;
+ double a2 = r2 + 2 * x * x;
+ double a3 = r2 + 2 * y * y;
+ double cdist = 1 + k1 * r2 + k2 * r4 + k3 * r6;
+ double icdist2 = 1. / ( 1 + k4 * r2 + k5 * r4 + k6 * r6 );
+
+ d_u << x * cdist * icdist2 + p1 * a1 + p2 * a2 - x, //
+ y * cdist * icdist2 + p1 * a3 + p2 * a1 - y; //
+}
+
+void
+PinholeFullCamera::initUndistortMap( cv::Mat& map1, cv::Mat& map2, double fScale ) const
+{
+ cv::Size imageSize( mParameters.imageWidth( ), mParameters.imageHeight( ) );
+
+ cv::Mat mapX = cv::Mat::zeros( imageSize, CV_32F );
+ cv::Mat mapY = cv::Mat::zeros( imageSize, CV_32F );
+
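+ // For each pixel (u, v): lift it to the normalised plane with the (optionally
+ // scaled) inverse intrinsics, then re-project it through the distortion model.
+ // The resulting maps can be passed to cv::remap to undistort whole images.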
+ for ( int v = 0; v < imageSize.height; ++v )
+ {
+ for ( int u = 0; u < imageSize.width; ++u )
+ {
+ double mx_u = m_inv_K11 / fScale * u + m_inv_K13 / fScale;
+ double my_u = m_inv_K22 / fScale * v + m_inv_K23 / fScale;
+
+ Eigen::Vector3d P;
+ P << mx_u, my_u, 1.0;
+
+ Eigen::Vector2d p;
+ spaceToPlane( P, p );
+
+ mapX.at< float >( v, u ) = p( 0 );
+ mapY.at< float >( v, u ) = p( 1 );
+ }
+ }
+
+ cv::convertMaps( mapX, mapY, map1, map2, CV_32FC1, false );
+}
+
+cv::Mat
+PinholeFullCamera::initUndistortRectifyMap(
+cv::Mat& map1, cv::Mat& map2, float fx, float fy, cv::Size imageSize, float cx, float cy, cv::Mat rmat ) const
+{
+ if ( imageSize == cv::Size( 0, 0 ) )
+ {
+ imageSize = cv::Size( mParameters.imageWidth( ), mParameters.imageHeight( ) );
+ }
+
+ cv::Mat mapX = cv::Mat::zeros( imageSize.height, imageSize.width, CV_32F );
+ cv::Mat mapY = cv::Mat::zeros( imageSize.height, imageSize.width, CV_32F );
+
+ Eigen::Matrix3f R, R_inv;
+ cv::cv2eigen( rmat, R );
+ R_inv = R.inverse( );
+
+ // assume no skew
+ Eigen::Matrix3f K_rect;
+
+ if ( cx == -1.0f || cy == -1.0f )
+ {
+ K_rect << fx, 0, imageSize.width / 2, 0, fy, imageSize.height / 2, 0, 0, 1;
+ }
+ else
+ {
+ K_rect << fx, 0, cx, 0, fy, cy, 0, 0, 1;
+ }
+
+ if ( fx == -1.0f || fy == -1.0f )
+ {
+ K_rect( 0, 0 ) = mParameters.fx( );
+ K_rect( 1, 1 ) = mParameters.fy( );
+ }
+
+ Eigen::Matrix3f K_rect_inv = K_rect.inverse( );
+
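+ // Back-project every rectified pixel through K_rect^-1 and R^-1 to a viewing
+ // ray, then project the ray with the distorted camera model to find where to
+ // sample the original image.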
+ for ( int v = 0; v < imageSize.height; ++v )
+ {
+ for ( int u = 0; u < imageSize.width; ++u )
+ {
+ Eigen::Vector3f xo;
+ xo << u, v, 1;
+
+ Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
+
+ Eigen::Vector2d p;
+ spaceToPlane( uo.cast< double >( ), p );
+
+ mapX.at< float >( v, u ) = p( 0 );
+ mapY.at< float >( v, u ) = p( 1 );
+ }
+ }
+
+ cv::convertMaps( mapX, mapY, map1, map2, CV_32FC1, false );
+
+ cv::Mat K_rect_cv;
+ cv::eigen2cv( K_rect, K_rect_cv );
+ return K_rect_cv;
+}
+
+int
+PinholeFullCamera::parameterCount( void ) const
+{
+ return 12;
+}
+
+const PinholeFullCamera::Parameters&
+PinholeFullCamera::getParameters( void ) const
+{
+ return mParameters;
+}
+
+void
+PinholeFullCamera::setParameters( const PinholeFullCamera::Parameters& parameters )
+{
+ mParameters = parameters;
+
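+ // Note: only k1, k2, p1 and p2 are inspected here; a camera with non-zero
+ // k3..k6 but zero k1/k2/p1/p2 would still be flagged as distortion-free.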
+ if ( ( mParameters.k1( ) == 0.0 ) && ( mParameters.k2( ) == 0.0 )
+ && ( mParameters.p1( ) == 0.0 ) && ( mParameters.p2( ) == 0.0 ) )
+ {
+ m_noDistortion = true;
+ }
+ else
+ {
+ m_noDistortion = false;
+ }
+
+ m_inv_K11 = 1.0 / mParameters.fx( );
+ m_inv_K13 = -mParameters.cx( ) / mParameters.fx( );
+ m_inv_K22 = 1.0 / mParameters.fy( );
+ m_inv_K23 = -mParameters.cy( ) / mParameters.fy( );
+}
+
+void
+PinholeFullCamera::readParameters( const std::vector< double >& parameterVec )
+{
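+ // Expected layout (12 values): k1 k2 k3 k4 k5 k6 p1 p2 fx fy cx cy.
+ // Vectors of any other size are silently ignored.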
+ if ( ( int )parameterVec.size( ) != parameterCount( ) )
+ {
+ return;
+ }
+
+ Parameters params = getParameters( );
+
+ params.k1( ) = parameterVec.at( 0 );
+ params.k2( ) = parameterVec.at( 1 );
+ params.k3( ) = parameterVec.at( 2 );
+ params.k4( ) = parameterVec.at( 3 );
+ params.k5( ) = parameterVec.at( 4 );
+ params.k6( ) = parameterVec.at( 5 );
+ params.p1( ) = parameterVec.at( 6 );
+ params.p2( ) = parameterVec.at( 7 );
+ params.fx( ) = parameterVec.at( 8 );
+ params.fy( ) = parameterVec.at( 9 );
+ params.cx( ) = parameterVec.at( 10 );
+ params.cy( ) = parameterVec.at( 11 );
+
+ setParameters( params );
+}
+
+void
+PinholeFullCamera::writeParameters( std::vector< double >& parameterVec ) const
+{
+ parameterVec.resize( parameterCount( ) );
+ parameterVec.at( 0 ) = mParameters.k1( );
+ parameterVec.at( 1 ) = mParameters.k2( );
+ parameterVec.at( 2 ) = mParameters.k3( );
+ parameterVec.at( 3 ) = mParameters.k4( );
+ parameterVec.at( 4 ) = mParameters.k5( );
+ parameterVec.at( 5 ) = mParameters.k6( );
+ parameterVec.at( 6 ) = mParameters.p1( );
+ parameterVec.at( 7 ) = mParameters.p2( );
+ parameterVec.at( 8 ) = mParameters.fx( );
+ parameterVec.at( 9 ) = mParameters.fy( );
+ parameterVec.at( 10 ) = mParameters.cx( );
+ parameterVec.at( 11 ) = mParameters.cy( );
+}
+
+void
+PinholeFullCamera::writeParametersToYamlFile( const std::string& filename ) const
+{
+ mParameters.writeToYamlFile( filename );
+}
+
+std::string
+PinholeFullCamera::parametersToString( void ) const
+{
+ std::ostringstream oss;
+ oss << mParameters;
+
+ return oss.str( );
+}
+}
diff --git a/camodocal/camera_models/PinholeFullCamera.h b/camodocal/camera_models/PinholeFullCamera.h
new file mode 100644
index 0000000..61a5716
--- /dev/null
+++ b/camodocal/camera_models/PinholeFullCamera.h
@@ -0,0 +1,215 @@
+#ifndef PinholeFullCAMERA_H
+#define PinholeFullCAMERA_H
+
+#include <opencv2/core/core.hpp>
+#include <string>
+
+#include "Camera.h"
+
+namespace camodocal
+{
+
+class PinholeFullCamera : public Camera
+{
+ public:
+ class Parameters : public Camera::Parameters
+ {
+ public:
+ Parameters( );
+ Parameters( const std::string& cameraName,
+ int w,
+ int h,
+ double k1,
+ double k2,
+ double k3,
+ double k4,
+ double k5,
+ double k6,
+ double p1,
+ double p2,
+ double fx,
+ double fy,
+ double cx,
+ double cy );
+
+ double& k1( void );
+ double& k2( void );
+ double& k3( void );
+ double& k4( void );
+ double& k5( void );
+ double& k6( void );
+ double& p1( void );
+ double& p2( void );
+ double& fx( void );
+ double& fy( void );
+ double& cx( void );
+ double& cy( void );
+
+ double xi( void ) const;
+ double k1( void ) const;
+ double k2( void ) const;
+ double k3( void ) const;
+ double k4( void ) const;
+ double k5( void ) const;
+ double k6( void ) const;
+ double p1( void ) const;
+ double p2( void ) const;
+ double fx( void ) const;
+ double fy( void ) const;
+ double cx( void ) const;
+ double cy( void ) const;
+
+ bool readFromYamlFile( const std::string& filename );
+ void writeToYamlFile( const std::string& filename ) const;
+
+ Parameters& operator=( const Parameters& other );
+ friend std::ostream& operator<<( std::ostream& out, const Parameters& params );
+
+ private:
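+ // Full distortion set: radial numerator k1-k3, radial denominator k4-k6,
+ // tangential p1/p2, plus the pinhole intrinsics fx, fy, cx, cy.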
+ double m_k1;
+ double m_k2;
+ double m_p1;
+ double m_p2;
+ double m_fx;
+ double m_fy;
+ double m_cx;
+ double m_cy;
+ double m_k3;
+ double m_k4;
+ double m_k5;
+ double m_k6;
+ };
+
+ PinholeFullCamera( );
+
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ PinholeFullCamera( const std::string& cameraName,
+ int imageWidth,
+ int imageHeight,
+ double k1,
+ double k2,
+ double k3,
+ double k4,
+ double k5,
+ double k6,
+ double p1,
+ double p2,
+ double fx,
+ double fy,
+ double cx,
+ double cy );
+ /**
+ * \brief Constructor from the projection model parameters
+ */
+ PinholeFullCamera( const Parameters& params );
+
+ Camera::ModelType modelType( void ) const;
+ const std::string& cameraName( void ) const;
+ int imageWidth( void ) const;
+ int imageHeight( void ) const;
+ cv::Size imageSize( ) const { return cv::Size( imageWidth( ), imageHeight( ) ); }
+ cv::Point2f getPrinciple( ) const
+ {
+ return cv::Point2f( mParameters.cx( ), mParameters.cy( ) );
+ }
+
+ void estimateIntrinsics( const cv::Size& boardSize,
+ const std::vector< std::vector< cv::Point3f > >& objectPoints,
+ const std::vector< std::vector< cv::Point2f > >& imagePoints );
+
+ void setInitIntrinsics( const std::vector< std::vector< cv::Point3f > >& objectPoints,
+ const std::vector< std::vector< cv::Point2f > >& imagePoints )
+ {
+ Parameters params = getParameters( );
+
+ params.k1( ) = 0.0;
+ params.k2( ) = 0.0;
+ params.k3( ) = 0.0;
+ params.k4( ) = 0.0;
+ params.k5( ) = 0.0;
+ params.k6( ) = 0.0;
+ params.p1( ) = 0.0;
+ params.p2( ) = 0.0;
+
+ double cx = params.imageWidth( ) / 2.0;
+ double cy = params.imageHeight( ) / 2.0;
+ params.cx( ) = cx;
+ params.cy( ) = cy;
+ params.fx( ) = 1200;
+ params.fy( ) = 1200;
+
+ setParameters( params );
+ }
+
+ // Lift points from the image plane to the sphere
+ virtual void liftSphere( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const;
+ //%output P
+
+ // Lift points from the image plane to the projective space
+ void liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const;
+ //%output P
+
+ void liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P, float image_scale ) const;
+
+ // Projects 3D points to the image plane (Pi function)
+ void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p ) const;
+ //%output p
+
+ void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p, float image_scale ) const;
+
+ // Projects 3D points to the image plane (Pi function)
+ // and calculates jacobian
+ void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p, Eigen::Matrix< double, 2, 3 >& J ) const;
+ //%output p
+ //%output J
+
+ void undistToPlane( const Eigen::Vector2d& p_u, Eigen::Vector2d& p ) const;
+ //%output p
+
+ template< typename T >
+ static void spaceToPlane( const T* const params,
+ const T* const q,
+ const T* const t,
+ const Eigen::Matrix< T, 3, 1 >& P,
+ Eigen::Matrix< T, 2, 1 >& p );
+
+ void distortion( const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u ) const;
+ void distortion( const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, Eigen::Matrix2d& J ) const;
+
+ void initUndistortMap( cv::Mat& map1, cv::Mat& map2, double fScale = 1.0 ) const;
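+ // Usage sketch (not part of the original sources): build the rectification maps
+ // once, then remap each incoming frame, e.g.
+ //   cv::Mat map1, map2;
+ //   cv::Mat K_rect = cam->initUndistortRectifyMap( map1, map2 );
+ //   cv::remap( raw, rectified, map1, map2, cv::INTER_LINEAR );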
+ cv::Mat initUndistortRectifyMap( cv::Mat& map1,
+ cv::Mat& map2,
+ float fx = -1.0f,
+ float fy = -1.0f,
+ cv::Size imageSize = cv::Size( 0, 0 ),
+ float cx = -1.0f,
+ float cy = -1.0f,
+ cv::Mat rmat = cv::Mat::eye( 3, 3, CV_32F ) ) const;
+
+ int parameterCount( void ) const;
+
+ const Parameters& getParameters( void ) const;
+ void setParameters( const Parameters& parameters );
+
+ void readParameters( const std::vector< double >& parameterVec );
+ void writeParameters( std::vector< double >& parameterVec ) const;
+
+ void writeParametersToYamlFile( const std::string& filename ) const;
+
+ std::string parametersToString( void ) const;
+
+ private:
+ Parameters mParameters;
+
+ double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23;
+ bool m_noDistortion;
+};
+
+typedef std::shared_ptr< PinholeFullCamera > PinholeFullCameraPtr;
+typedef std::shared_ptr< const PinholeFullCamera > PinholeFullCameraConstPtr;
+
+}
+
+#endif
diff --git a/camodocal/camera_models/ScaramuzzaCamera.cc b/camodocal/camera_models/ScaramuzzaCamera.cc
new file mode 100644
index 0000000..260a940
--- /dev/null
+++ b/camodocal/camera_models/ScaramuzzaCamera.cc
@@ -0,0 +1,833 @@
+#include "../camera_models/ScaramuzzaCamera.h"
+
+#include <cmath>
+#include <cstdio>
+#include <eigen3/Eigen/Dense>
+#include <eigen3/Eigen/SVD>
+#include <iomanip>
+#include <iostream>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/core/eigen.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+//#include <boost/lexical_cast.hpp>
+//#include <ceres/ceres.h>
+
+#include "../gpl/gpl.h"
+
+
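+// Least-squares fit of a degree-`poly_order` polynomial y = c0 + c1*x + ... + cN*x^N:
+// builds the Vandermonde matrix and solves it with a thin SVD.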
+Eigen::VectorXd polyfit(Eigen::VectorXd& xVec, Eigen::VectorXd& yVec, int poly_order) {
+ assert(poly_order > 0);
+ assert(xVec.size() > poly_order);
+ assert(xVec.size() == yVec.size());
+
+ Eigen::MatrixXd A(xVec.size(), poly_order+1);
+ Eigen::VectorXd B(xVec.size());
+
+ for(int i = 0; i < xVec.size(); ++i) {
+ const double x = xVec(i);
+ const double y = yVec(i);
+
+ double x_pow_k = 1.0;
+
+ for(int k=0; k<=poly_order; ++k) {
+ A(i,k) = x_pow_k;
+ x_pow_k *= x;
+ }
+
+ B(i) = y;
+ }
+
+ Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
+ Eigen::VectorXd x = svd.solve(B);
+
+ return x;
+}
+
+namespace camodocal
+{
+
+OCAMCamera::Parameters::Parameters()
+ : Camera::Parameters(SCARAMUZZA)
+ , m_C(0.0)
+ , m_D(0.0)
+ , m_E(0.0)
+ , m_center_x(0.0)
+ , m_center_y(0.0)
+{
+ memset(m_poly, 0, sizeof(double) * SCARAMUZZA_POLY_SIZE);
+ memset(m_inv_poly, 0, sizeof(double) * SCARAMUZZA_INV_POLY_SIZE);
+}
+
+
+
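+// The reader below expects a calibration file shaped roughly like this (sketch;
+// field names taken from the code, values elided):
+//   model_type: scaramuzza
+//   camera_name: camera
+//   image_width: ...
+//   image_height: ...
+//   poly_parameters: {p0: ..., p1: ..., ...}
+//   inv_poly_parameters: {p0: ..., p1: ..., ...}
+//   affine_parameters: {ac: ..., ad: ..., ae: ..., cx: ..., cy: ...}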
+bool
+OCAMCamera::Parameters::readFromYamlFile(const std::string& filename)
+{
+ cv::FileStorage fs(filename, cv::FileStorage::READ);
+
+ if (!fs.isOpened())
+ {
+ return false;
+ }
+
+ if (!fs["model_type"].isNone())
+ {
+ std::string sModelType;
+ fs["model_type"] >> sModelType;
+
+ if (sModelType != "scaramuzza")
+ {
+ return false;
+ }
+ }
+
+ m_modelType = SCARAMUZZA;
+ fs["camera_name"] >> m_cameraName;
+ m_imageWidth = static_cast<int>(fs["image_width"]);
+ m_imageHeight = static_cast<int>(fs["image_height"]);
+
+ cv::FileNode n = fs["poly_parameters"];
+ for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++)
+ m_poly[i] = static_cast<double>(n[std::string("p") + std::to_string(i)]);
+
+ n = fs["inv_poly_parameters"];
+ for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
+ m_inv_poly[i] = static_cast<double>(n[std::string("p") + std::to_string(i)]);
+
+ n = fs["affine_parameters"];
+ m_C = static_cast<double>(n["ac"]);
+ m_D = static_cast<double>(n["ad"]);
+ m_E = static_cast<double>(n["ae"]);
+
+ m_center_x = static_cast<double>(n["cx"]);
+ m_center_y = static_cast<double>(n["cy"]);
+
+ return true;
+}
+
+void
+OCAMCamera::Parameters::writeToYamlFile(const std::string& filename) const
+{
+ cv::FileStorage fs(filename, cv::FileStorage::WRITE);
+
+ fs << "model_type" << "scaramuzza";
+ fs << "camera_name" << m_cameraName;
+ fs << "image_width" << m_imageWidth;
+ fs << "image_height" << m_imageHeight;
+
+ fs << "poly_parameters";
+ fs << "{";
+ for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++)
+ fs << std::string("p") + std::to_string(i) << m_poly[i];
+ fs << "}";
+
+ fs << "inv_poly_parameters";
+ fs << "{";
+ for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
+ fs << std::string("p") + std::to_string(i) << m_inv_poly[i];
+ fs << "}";
+
+ fs << "affine_parameters";
+ fs << "{" << "ac" << m_C
+ << "ad" << m_D
+ << "ae" << m_E
+ << "cx" << m_center_x
+ << "cy" << m_center_y << "}";
+
+ fs.release();
+}
+
+OCAMCamera::Parameters&
+OCAMCamera::Parameters::operator=(const OCAMCamera::Parameters& other)
+{
+ if (this != &other)
+ {
+ m_modelType = other.m_modelType;
+ m_cameraName = other.m_cameraName;
+ m_imageWidth = other.m_imageWidth;
+ m_imageHeight = other.m_imageHeight;
+ m_C = other.m_C;
+ m_D = other.m_D;
+ m_E = other.m_E;
+ m_center_x = other.m_center_x;
+ m_center_y = other.m_center_y;
+
+ memcpy(m_poly, other.m_poly, sizeof(double) * SCARAMUZZA_POLY_SIZE);
+ memcpy(m_inv_poly, other.m_inv_poly, sizeof(double) * SCARAMUZZA_INV_POLY_SIZE);
+ }
+
+ return *this;
+}
+
+std::ostream&
+operator<< (std::ostream& out, const OCAMCamera::Parameters& params)
+{
+ out << "Camera Parameters:" << std::endl;
+ out << " model_type " << "scaramuzza" << std::endl;
+ out << " camera_name " << params.m_cameraName << std::endl;
+ out << " image_width " << params.m_imageWidth << std::endl;
+ out << " image_height " << params.m_imageHeight << std::endl;
+
+ out << std::fixed << std::setprecision(10);
+
+ out << "Poly Parameters" << std::endl;
+ for(int i=0; i < SCARAMUZZA_POLY_SIZE; i++)
+ out << std::string("p") + std::to_string(i) << ": " << params.m_poly[i] << std::endl;
+
+ out << "Inverse Poly Parameters" << std::endl;
+ for(int i=0; i < SCARAMUZZA_INV_POLY_SIZE; i++)
+ out << std::string("p") + std::to_string(i) << ": " << params.m_inv_poly[i] << std::endl;
+
+ out << "Affine Parameters" << std::endl;
+ out << " ac " << params.m_C << std::endl
+ << " ad " << params.m_D << std::endl
+ << " ae " << params.m_E << std::endl;
+ out << " cx " << params.m_center_x << std::endl
+ << " cy " << params.m_center_y << std::endl;
+
+ return out;
+}
+
+OCAMCamera::OCAMCamera()
+ : m_inv_scale(0.0)
+{
+
+}
+
+OCAMCamera::OCAMCamera(const OCAMCamera::Parameters& params)
+ : mParameters(params)
+{
+ m_inv_scale = 1.0 / (params.C() - params.D() * params.E());
+}
+
+Camera::ModelType
+OCAMCamera::modelType(void) const
+{
+ return mParameters.modelType();
+}
+
+const std::string&
+OCAMCamera::cameraName(void) const
+{
+ return mParameters.cameraName();
+}
+
+int
+OCAMCamera::imageWidth(void) const
+{
+ return mParameters.imageWidth();
+}
+
+int
+OCAMCamera::imageHeight(void) const
+{
+ return mParameters.imageHeight();
+}
+
+void
+OCAMCamera::estimateIntrinsics(const cv::Size& boardSize,
+ const std::vector< std::vector< cv::Point3f > >& objectPoints,
+ const std::vector< std::vector< cv::Point2f > >& imagePoints)
+{
+ // std::cout << "OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED" << std::endl;
+ // throw std::string("OCAMCamera::estimateIntrinsics - NOT IMPLEMENTED");
+
+ // Reference: Page 30 of
+ // " Scaramuzza, D. Omnidirectional Vision: from Calibration to Robot Motion Estimation, ETH Zurich. Thesis no. 17635."
+ // http://e-collection.library.ethz.ch/eserv/eth:30301/eth-30301-02.pdf
+ // Matlab code: calibrate.m
+
+ // First, estimate every image's extrinsics parameters
+ std::vector< Eigen::Matrix3d > RList;
+ std::vector< Eigen::Vector3d > TList;
+
+ RList.reserve(imagePoints.size());
+ TList.reserve(imagePoints.size());
+
+ // i-th image
+ for (size_t image_index = 0; image_index < imagePoints.size(); ++image_index)
+ {
+ const std::vector< cv::Point3f >& objPts = objectPoints.at(image_index);
+ const std::vector< cv::Point2f >& imgPts = imagePoints.at(image_index);
+
+ assert(objPts.size() == imgPts.size());
+ assert(objPts.size() == static_cast<size_t>(boardSize.width * boardSize.height));
+
+ Eigen::MatrixXd M(objPts.size(), 6);
+
+ for(size_t corner_index = 0; corner_index < objPts.size(); ++corner_index) {
+ double X = objPts.at(corner_index).x;
+ double Y = objPts.at(corner_index).y;
+ assert(objPts.at(corner_index).z==0.0);
+
+ double u = imgPts.at(corner_index).x;
+ double v = imgPts.at(corner_index).y;
+
+ M(corner_index, 0) = -v * X;
+ M(corner_index, 1) = -v * Y;
+ M(corner_index, 2) = u * X;
+ M(corner_index, 3) = u * Y;
+ M(corner_index, 4) = -v;
+ M(corner_index, 5) = u;
+ }
+
+ Eigen::JacobiSVD<Eigen::MatrixXd> svd(M, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ assert(svd.matrixV().cols() == 6);
+ Eigen::VectorXd h = -svd.matrixV().col(5);
+
+ // scaled version of R and T
+ const double sr11 = h(0);
+ const double sr12 = h(1);
+ const double sr21 = h(2);
+ const double sr22 = h(3);
+ const double st1 = h(4);
+ const double st2 = h(5);
+
+ const double AA = square(sr11*sr12 + sr21*sr22);
+ const double BB = square(sr11) + square(sr21);
+ const double CC = square(sr12) + square(sr22);
+
+ const double sr32_squared_1 = (- (CC-BB) + sqrt(square(CC-BB) + 4.0 * AA)) / 2.0;
+ const double sr32_squared_2 = (- (CC-BB) - sqrt(square(CC-BB) + 4.0 * AA)) / 2.0;
+
+// printf("rst = %.12f\n", sr32_squared_1*sr32_squared_1 + (CC-BB)*sr32_squared_1 - AA);
+
+ std::vector<double> sr32_squared_values;
+ if (sr32_squared_1 > 0) sr32_squared_values.push_back(sr32_squared_1);
+ if (sr32_squared_2 > 0) sr32_squared_values.push_back(sr32_squared_2);
+ assert(!sr32_squared_values.empty());
+
+ std::vector<double> sr32_values;
+ std::vector<double> sr31_values;
+ for (auto sr32_squared : sr32_squared_values) {
+ for(int sign = -1; sign <= 1; sign += 2) {
+ const double sr32 = static_cast<double>(sign) * std::sqrt(sr32_squared);
+ sr32_values.push_back( sr32 );
+ if (sr32_squared == 0.0) {
+ // sr31 can be calculated through norm equality,
+ // but it has positive and negative possibilities
+ // positive one
+ sr31_values.push_back(std::sqrt(CC-BB));
+ // negative one
+ sr32_values.push_back( sr32 );
+ sr31_values.push_back(-std::sqrt(CC-BB));
+
+ break; // skip the same situation
+ } else {
+ // sr31 can be calculated through dot product == 0
+ sr31_values.push_back(- (sr11*sr12 + sr21*sr22) / sr32);
+ }
+ }
+ }
+
+ // std::cout << "h= " << std::setprecision(12) << h.transpose() << std::endl;
+ // std::cout << "length: " << sr32_values.size() << " & " << sr31_values.size() << std::endl;
+
+ assert(!sr31_values.empty());
+ assert(sr31_values.size() == sr32_values.size());
+
+ std::vector