Point Cloud Library Notes: ICP (Iterative Closest Point) and NDT2D (2D Normal Distributions Transform) Registration

References:

https://pointclouds.org/documentation/classpcl_1_1_registration.html#ab1d64f86162b2df716ead8d978579c11

http://epsilonjohn.club/2020/02/29/Autoware.ai/2D-NDT-%E5%8C%B9%E9%85%8D%E7%AE%97%E6%B3%95/

https://pcl.readthedocs.io/projects/tutorials/en/pcl-1.11.0/iterative_closest_point.html#iterative-closest-point

NDT matching takes noticeably longer, and its angular error is relatively large;

The NDT parameters are poorly documented: in my case GridStep generally has to be 0.6 or above (default 1), and GridExtent 2 or above (default 20); see the commented NDT block below for where they are set;

pcl::PointCloud<PointXYZ>::makeShared() returns a shared pointer to a deep copy of an existing point cloud, as sketched below;
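
A minimal sketch (cloud here is a hypothetical name):

pcl::PointCloud<pcl::PointXYZ> cloud;                                 // a cloud held by value
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_ptr = cloud.makeShared ();  // deep copy wrapped in a shared pointer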

The pcl::console::TicToc tic()/toc() pair measures elapsed run time in milliseconds;
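
Typical usage of TicToc (declared in pcl/console/time.h, included below):

pcl::console::TicToc timer;
timer.tic ();                                                    // start the stopwatch
// ... run the registration ...
std::cout << "elapsed: " << timer.toc () << " ms" << std::endl;  // elapsed milliseconds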

icp.hasConverged() reports whether the alignment converged;

Mind the distinction between source and target during registration: the computed transform moves the source cloud into alignment with the target, as sketched below;
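
The convention, sketched with hypothetical cloud pointers source_ptr and target_ptr:

pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
pcl::PointCloud<pcl::PointXYZ> aligned;
icp.setInputSource (source_ptr);  // the cloud that gets moved
icp.setInputTarget (target_ptr);  // the fixed reference cloud
icp.align (aligned);              // 'aligned' receives the source transformed onto the target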

The specific parameter values used below still need tuning.

  
#include <pcl/registration/icp.h>
#include <pcl/registration/ndt_2d.h>
#include <pcl/console/time.h>
#include <pcl/registration/sample_consensus_prerejective.h>
#include <pcl/features/brisk_2d.h>
#include <pcl/common/eigen.h>       // pcl::getTranslationAndEulerAngles
#include <pcl/common/transforms.h>  // pcl::transformPointCloud
#include <iostream>
typedef pcl::PointXYZ PointT;
typedef pcl::PointCloud<PointT> PointCloudT;
typedef pcl::BRISKSignature512 FeatureT;
typedef pcl::BRISK2DEstimation<pcl::PointXYZ> FeatureEstimationT;
typedef pcl::PointCloud<FeatureT> FeatureCloudT;
typedef pcl::PointWithScale KeyPointT; 
typedef pcl::PointCloud<KeyPointT> KeyPointCloudT;
pcl::console::TicToc tt;
tt.tic ();

  // // Initializing the 2D Normal Distributions Transform (NDT).
  // pcl::NormalDistributionsTransform2D<pcl::PointXYZ, pcl::PointXYZ> ndt;

  // // Setting scale-dependent NDT parameters.
  // // Setting the minimum transformation difference for the termination condition.
  // ndt.setTransformationEpsilon (NDT_EPSILON);

  // // Setting the cell size and overall extent of the 2D NDT grid
  // // (the 2D counterpart of the 3D NDT's resolution).
  // ndt.setGridStep (Eigen::Vector2f (NDT_GRID_STEP, NDT_GRID_STEP));
  // ndt.setGridExtent (Eigen::Vector2f (NDT_GRID_EXTEND, NDT_GRID_EXTEND));
  // // Setting the maximum number of registration iterations.
  // //ndt.setMaximumIterations (100);

  // // Setting point cloud to be aligned.
  // ndt.setInputSource (cloud_out.makeShared());
  // // Setting point cloud to be aligned to.
  // ndt.setInputTarget (cloud_in.makeShared());
  // // Set initial alignment estimate found using robot odometry.
  // Eigen::AngleAxisf init_rotation(initial_rad, Eigen::Vector3f::UnitZ ());
  // Eigen::Translation3f init_translation (0, 0, 0);
  // Eigen::Matrix4f init_guess = (init_translation * init_rotation).matrix ();

  // // Calculating required rigid transform to align the input cloud to the target cloud.
  // pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud (new pcl::PointCloud<pcl::PointXYZ>);
  // ndt.align (*output_cloud, init_guess);
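
For reference, the NDT result can be read back through the same pcl::Registration accessors the ICP code below uses; a hedged sketch, kept commented like the block above:

  // std::cout << "NDT converged: " << ndt.hasConverged ()
  //           << " score: " << ndt.getFitnessScore () << std::endl;
  // Eigen::Matrix4f ndt_transform = ndt.getFinalTransformation ();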


  
  //icp
  pcl::IterativeClosestPoint<PointT, PointT> icp;
  PointCloudT::Ptr finalCloud(new PointCloudT);
  icp.setInputSource(curr_keypoints.makeShared());
  icp.setInputTarget(templates[id].pointcloud_data.makeShared());
  icp.setMaximumIterations (ICP_MAX_ITERATIONS);
  icp.setTransformationEpsilon (ICP_TRANSFORM_EPSILON);
  icp.setMaxCorrespondenceDistance (ICP_MAX_CORRESPONDANCE_DISTANCE);
  icp.setEuclideanFitnessEpsilon (ICP_EUCLIDEAN_FITNESS_EPSILON);
  icp.setRANSACOutlierRejectionThreshold (ICP_OUTLIER_REJECTION_THRESHOLD);

  //icp.setMaxCorrespondenceDistance(100);
  //icp.setMaximumIterations(100);
  //icp.setTransformationEpsilon(1e-6);
  //icp.setEuclideanFitnessEpsilon(1e-6);
  //icp.setRANSACIterations(0);
  //Set initial alignment estimate found using robot odometry.
  Eigen::AngleAxisf init_rotation (vt_relative_rad * M_PI / 180.0, Eigen::Vector3f::UnitZ ());  // note: despite the name, vt_relative_rad is converted from degrees to radians here
  Eigen::Translation3f init_translation (0, 0, 0);
  Eigen::Matrix4f init_guess = (init_translation * init_rotation).matrix ();
  icp.align(*finalCloud, init_guess);
  if (!icp.hasConverged ())
  {
    cout << "icp alignment failed score = " <<  icp.getFitnessScore() << " time: " << tt.toc () << "ms" << endl;
    cout.flush();
    return;
  }
  float x, y, z, roll, pitch, yaw;
  Eigen::Affine3f correctionCameraFrame;
  correctionCameraFrame = icp.getFinalTransformation(); // get transformation in camera frame (because points are in camera frame)
  pcl::getTranslationAndEulerAngles(correctionCameraFrame, x, y, z, roll, pitch, yaw); 
  cout << "icp alignment x:" <<  x << " y:" << y << " yaw:" << yaw*180/M_PI << " score:" << icp.getFitnessScore() << " time: " << tt.toc () << "ms"  << endl;
  cout.flush();    
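
To apply the same correction to another cloud expressed in the camera frame, pcl::transformPointCloud (from <pcl/common/transforms.h>, included above) can be used; cloud_to_correct below is a hypothetical cloud pointer:

  PointCloudT::Ptr corrected (new PointCloudT);
  pcl::transformPointCloud (*cloud_to_correct, *corrected, correctionCameraFrame);  // rigidly transforms every point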

Because BRISK2D feature extraction requires an organized point cloud, it is not applicable here!
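
A quick check before attempting BRISK2D: pcl::PointCloud::isOrganized () returns true only for clouds with an image-like row/column layout (height > 1):

  if (!cloud_in.isOrganized ())
    std::cout << "cloud_in is unorganized; BRISK2D cannot be used" << std::endl;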


  //BRISK2DEstimation
  // Keypoint Description 
    // pcl::BRISK2DEstimation<pcl::PointXYZ> brisk_descriptor_estimation; 

    // // Source Cloud 
    // FeatureCloudT::Ptr Source_descriptors (new FeatureCloudT); 
    // brisk_descriptor_estimation.setInputCloud (cloud_out.makeShared()); 
    // brisk_descriptor_estimation.setKeypoints (keypoints_out.makeShared()); 
    // brisk_descriptor_estimation.compute (*Source_descriptors); 

    // // Target Cloud 
    // FeatureCloudT::Ptr Target_descriptors (new FeatureCloudT); 
    // brisk_descriptor_estimation.setInputCloud (cloud_in.makeShared()); 
    // brisk_descriptor_estimation.setKeypoints (keypoints_in.makeShared()); 
    // brisk_descriptor_estimation.compute (*Target_descriptors); 

    // std::cout << "Target descriptor number : " << Target_descriptors->size() << std::endl; 
    // std::cout << "Source descriptor number : " << Source_descriptors->size() << std::endl; 

    // // // Correspondences matching 
    // // pcl::registration::CorrespondenceEstimation<FeatureT, FeatureT> correspondence_estimation; 

    // // pcl::CorrespondencesPtr correspondences(new pcl::Correspondences); 
    // // correspondence_estimation.setInputSource (Source_descriptors); 
    // // correspondence_estimation.setInputTarget (Target_descriptors); 
    // // correspondence_estimation.determineCorrespondences (*correspondences); 
    // pcl::SampleConsensusPrerejective<PointT,PointT,FeatureT> align;
    // align.setInputSource (cloud_out.makeShared());
    // align.setSourceFeatures (Source_descriptors);
    // align.setInputTarget (cloud_in.makeShared());
    // align.setTargetFeatures (Target_descriptors);
    // align.setMaximumIterations (5000); // Number of RANSAC iterations
    // align.setNumberOfSamples (3); // Number of points to sample for generating/prerejecting a pose
    // align.setCorrespondenceRandomness (5); // Number of nearest features to use
    // align.setSimilarityThreshold (0.9f); // Polygonal edge length similarity threshold
    // align.setMaxCorrespondenceDistance (0.5f); // Inlier threshold
    // align.setInlierFraction (0.25f); // Required inlier fraction for accepting a pose hypothesis
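
The commented block above stops just short of running the alignment; for completeness, the remaining calls would look as follows (kept commented to match; object_aligned is a hypothetical name):

    // PointCloudT::Ptr object_aligned (new PointCloudT);
    // align.align (*object_aligned);
    // if (align.hasConverged ())
    //   std::cout << "inlier count: " << align.getInliers ().size () << std::endl;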

 
