42#include <visp3/core/vpIoTools.h>
43#include <visp3/vision/vpKeyPoint.h>
45#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
47#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
48#include <opencv2/calib3d/calib3d.hpp>
56inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
58 if (knnMatches.size() > 0) {
65inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
83 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
84 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
85 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
86 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
87 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
88 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
89 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
90 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
91 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
92 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
93 m_useAffineDetection(false),
94#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
95 m_useBruteForceCrossCheck(true),
97 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
98 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
102 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
103 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
119 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
120 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
121 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
122 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
123 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
124 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
125 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
126 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
127 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
128 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
129 m_useAffineDetection(false),
130#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
131 m_useBruteForceCrossCheck(true),
133 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
134 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
138 m_detectorNames.push_back(detectorName);
139 m_extractorNames.push_back(extractorName);
155 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
156 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
157 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
158 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
159 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
160 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
161 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
162 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(),
163 m_ransacOutliers(), m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
164 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
165#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
166 m_useBruteForceCrossCheck(true),
168 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
169 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// NOTE(review): this file is a partial extraction of ViSP's vpKeyPoint.cpp;
// original line numbers are embedded in the text and several source lines are
// missing, so only explanatory comments are added here — code is untouched.
//
// Applies an ASIFT-style affine simulation to `img`: an in-plane rotation of
// `phi` (presumably degrees — TODO confirm against the missing lines that
// convert phi before computing c/s) followed by a horizontal "tilt"
// (anisotropic scaling). `mask` is set to the valid-pixel region of the warped
// image and `Ai` receives the inverse affine transform, used to map detected
// keypoints back to the original image frame.
183void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
// Start with a full-image validity mask (all 255) and an identity 2x3 affine.
188 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
190 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation step — epsilon test avoids exact floating-point comparison with 0.
193 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
// c and s are cos(phi)/sin(phi), computed on lines missing from this view.
198 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
// Rotate the four image corners to find the bounding box of the rotated image.
200 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
201 cv::Mat tcorners = corners * A.t();
202 cv::Mat tcorners_x, tcorners_y;
203 tcorners.col(0).copyTo(tcorners_x);
204 tcorners.col(1).copyTo(tcorners_y);
205 std::vector<cv::Mat> channels;
206 channels.push_back(tcorners_x);
207 channels.push_back(tcorners_y);
208 cv::merge(channels, tcorners);
210 cv::Rect rect = cv::boundingRect(tcorners);
// Fold the bounding-box offset into the affine so the result starts at (0,0).
211 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
213 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt step: blur along x to respect sampling theory, then shrink the x axis
// by 1/tilt. A tilt of 1.0 (epsilon-compared) means no tilt is applied.
216 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
// 0.8*sqrt(tilt^2-1) is the anti-aliasing sigma used by the ASIFT method.
217 double s = 0.8 * sqrt(tilt * tilt - 1);
218 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
219 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
// Keep A consistent with the resize so that Ai (below) maps back correctly.
220 A.row(0) = A.row(0) / tilt;
// If any warp happened, transform the mask the same way so it marks only
// pixels that originate from the source image.
223 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
224 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
227 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
// Expose the inverse transform to the caller for back-projecting keypoints.
229 cv::invertAffineTransform(A, Ai);
258 unsigned int height,
unsigned int width)
273 unsigned int height,
unsigned int width)
// NOTE(review): fragment of vpKeyPoint::buildReference(I, rectangle) — the
// signature and several interior lines are missing from this extraction;
// comments only, code untouched.
//
// Resets the training database, then detects/extracts reference keypoints and
// descriptors from image I (restricted to `rectangle`), registers the image,
// and feeds the descriptors to the matcher. Returns the number of reference
// (train) keypoints.
289 m_trainPoints.clear();
290 m_mapOfImageId.clear();
291 m_mapOfImages.clear();
// Image ids start at 1 in this overload (the descriptor-based overload uses 0).
292 m_currentImageId = 1;
// Affine-simulation path: detect on multiple affine-warped views (ASIFT-like)
// and merge all per-view keypoints/descriptors into the single train set.
294 if (m_useAffineDetection) {
295 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
296 std::vector<cv::Mat> listOfTrainDescriptors;
// Flatten the per-view keypoint lists into m_trainKeyPoints.
302 m_trainKeyPoints.clear();
303 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
304 it != listOfTrainKeyPoints.end(); ++it) {
305 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
// Concatenate descriptors: first matrix initializes m_trainDescriptors
// (branch condition lost in extraction — presumably "first iteration"),
// subsequent ones are appended row-wise.
309 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {
312 it->copyTo(m_trainDescriptors);
314 m_trainDescriptors.push_back(*it);
// Standard path: one detect + extract pass on the region of interest.
318 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
319 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
// Record, for every train keypoint, which image it came from (class_id is
// used as the key), then store the image itself for later display.
324 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
325 m_mapOfImageId[it->class_id] = m_currentImageId;
329 m_mapOfImages[m_currentImageId] = I;
// Hand the (single) descriptor matrix to the descriptor matcher.
338 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
340 return static_cast<unsigned int>(m_trainKeyPoints.size());
// NOTE(review): fragment of the grayscale buildReference overload taking
// user-supplied keypoints plus their 3D coordinates; signature head and some
// lines are missing — comments only, code untouched.
//
// Extracts descriptors for `trainKeyPoints` on image I. Extraction may drop
// keypoints; in that case the 3D points are re-aligned to the surviving
// keypoints before delegating to the core buildReference overload.
368 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
370 cv::Mat trainDescriptors;
// Keep a pre-extraction copy so dropped keypoints can be identified afterwards.
372 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
374 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
376 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Hash each original keypoint -> its index, so surviving keypoints can be
// mapped back to the corresponding entry of points3f.
380 std::map<size_t, size_t> mapOfKeypointHashes;
382 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
384 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
// Rebuild the 3D point list in the order of the surviving keypoints.
387 std::vector<cv::Point3f> trainPoints_tmp;
388 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
389 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
390 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
395 points3f = trainPoints_tmp;
// Delegate to the overload that takes precomputed descriptors.
398 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// NOTE(review): color-image twin of the previous buildReference overload
// (fragmented extraction; signature head and some lines missing) —
// comments only, code untouched. Same logic: extract descriptors, realign
// points3f to the keypoints that survived extraction, then delegate.
413 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
415 cv::Mat trainDescriptors;
// Pre-extraction copy used to detect which keypoints were dropped.
417 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
419 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
421 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Hash original keypoints -> index to recover each survivor's 3D point.
425 std::map<size_t, size_t> mapOfKeypointHashes;
427 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
429 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
432 std::vector<cv::Point3f> trainPoints_tmp;
433 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
434 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
435 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
440 points3f = trainPoints_tmp;
// Delegate to the descriptor-based overload.
443 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// NOTE(review): fragment of the core buildReference overload (keypoints +
// precomputed descriptors + 3D points); signature head and several guard
// lines (e.g. the `append` branch conditions) are missing from this
// extraction — comments only, code untouched.
//
// Registers a reference set: optionally clears the database (when not
// appending — guard lines missing here), tags keypoints with `class_id`,
// stores image/keypoints/descriptors/3D points, and updates the matcher.
460 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
461 bool append,
int class_id)
// Reset path (presumably executed only when append == false — confirm
// against the missing guard): wipe all previously learned data.
464 m_trainPoints.clear();
465 m_mapOfImageId.clear();
466 m_mapOfImages.clear();
467 m_currentImageId = 0;
468 m_trainKeyPoints.clear();
// Work on a copy so the caller's keypoints keep their original class_id.
473 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
// -1 means "keep existing class ids"; otherwise stamp every keypoint.
475 if (class_id != -1) {
476 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
477 it->class_id = class_id;
// Map each keypoint's class_id to the current image id, for display lookups.
483 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
484 m_mapOfImageId[it->class_id] = m_currentImageId;
488 m_mapOfImages[m_currentImageId] = I;
// Accumulate keypoints; descriptors are either copied (fresh DB) or
// appended row-wise (append mode) — the branch condition is lost here.
491 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
493 trainDescriptors.copyTo(m_trainDescriptors);
495 m_trainDescriptors.push_back(trainDescriptors);
// Store the 3D coordinates aligned with the stored keypoints.
497 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
// Refresh the matcher with the full descriptor matrix.
505 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
509 return static_cast<unsigned int>(m_trainKeyPoints.size());
525 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
526 bool append,
int class_id)
529 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
551 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
558 vpPlane Po(pts[0], pts[1], pts[2]);
559 double xc = 0.0, yc = 0.0;
570 point_obj = cMo.
inverse() * point_cam;
571 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
593 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
600 vpPlane Po(pts[0], pts[1], pts[2]);
601 double xc = 0.0, yc = 0.0;
612 point_obj = cMo.
inverse() * point_cam;
633 std::vector<cv::KeyPoint> &candidates,
634 const std::vector<vpPolygon> &polygons,
635 const std::vector<std::vector<vpPoint> > &roisPt,
636 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
638 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
645 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
646 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
647 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
651 std::vector<vpPolygon> polygons_tmp = polygons;
652 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
653 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
655 while (it2 != pairOfCandidatesToCheck.end()) {
656 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
657 if (it1->isInside(imPt)) {
658 candidates.push_back(it2->first);
660 points.push_back(pt);
662 if (descriptors != NULL) {
663 desc.push_back(descriptors->row((
int)it2->second));
667 it2 = pairOfCandidatesToCheck.erase(it2);
674 if (descriptors != NULL) {
675 desc.copyTo(*descriptors);
696 std::vector<vpImagePoint> &candidates,
697 const std::vector<vpPolygon> &polygons,
698 const std::vector<std::vector<vpPoint> > &roisPt,
699 std::vector<vpPoint> &points, cv::Mat *descriptors)
701 std::vector<vpImagePoint> candidatesToCheck = candidates;
707 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
708 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
709 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
713 std::vector<vpPolygon> polygons_tmp = polygons;
714 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
715 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
717 while (it2 != pairOfCandidatesToCheck.end()) {
718 if (it1->isInside(it2->first)) {
719 candidates.push_back(it2->first);
721 points.push_back(pt);
723 if (descriptors != NULL) {
724 desc.push_back(descriptors->row((
int)it2->second));
728 it2 = pairOfCandidatesToCheck.erase(it2);
753 const std::vector<vpCylinder> &cylinders,
754 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
755 cv::Mat *descriptors)
757 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
763 size_t cpt_keypoint = 0;
764 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
765 ++it1, cpt_keypoint++) {
766 size_t cpt_cylinder = 0;
769 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
770 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
773 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
775 candidates.push_back(*it1);
779 double xm = 0.0, ym = 0.0;
781 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
783 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
785 point_cam[0] = xm * Z;
786 point_cam[1] = ym * Z;
790 point_obj = cMo.
inverse() * point_cam;
793 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
795 if (descriptors != NULL) {
796 desc.push_back(descriptors->row((
int)cpt_keypoint));
806 if (descriptors != NULL) {
807 desc.copyTo(*descriptors);
828 const std::vector<vpCylinder> &cylinders,
829 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
830 cv::Mat *descriptors)
832 std::vector<vpImagePoint> candidatesToCheck = candidates;
838 size_t cpt_keypoint = 0;
839 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
840 ++it1, cpt_keypoint++) {
841 size_t cpt_cylinder = 0;
844 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
845 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
848 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
850 candidates.push_back(*it1);
854 double xm = 0.0, ym = 0.0;
856 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
858 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
860 point_cam[0] = xm * Z;
861 point_cam[1] = ym * Z;
865 point_obj = cMo.
inverse() * point_cam;
868 points.push_back(pt);
870 if (descriptors != NULL) {
871 desc.push_back(descriptors->row((
int)cpt_keypoint));
881 if (descriptors != NULL) {
882 desc.copyTo(*descriptors);
// NOTE(review): fragment of vpKeyPoint::computePose using OpenCV's RANSAC
// PnP; the signature and several lines (camera-matrix fill, rvec/tvec
// declarations, try block opening, return statements) are missing from this
// extraction — comments only, code untouched.
//
// Estimates the pose from 2D/3D correspondences via cv::solvePnPRansac and
// converts the resulting rvec/tvec into ViSP types.
// PnP needs at least 4 correspondences and equally sized point lists.
905 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
907 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
// Intrinsic camera matrix (fill lines missing here) and zero distortion:
// input points are presumably already undistorted — TODO confirm.
914 cv::Mat cameraMatrix =
924 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
// OpenCV >= 3.x API: confidence-based RANSAC with explicit PnP method flag.
927#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
929 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
930 (
float)m_ransacReprojectionError,
933 inlierIndex, cv::SOLVEPNP_ITERATIVE);
// OpenCV 2.x API branch: RANSAC stops once `nbInlierToReachConsensus`
// inliers are found; optionally derived as a percentage of the query set.
953 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
954 if (m_useConsensusPercentage) {
955 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
958 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
959 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
// OpenCV throws cv::Exception on failure; report and (presumably) bail out —
// the return statement is lost in this extraction.
961 }
catch (cv::Exception &e) {
962 std::cerr << e.what() << std::endl;
// Convert OpenCV's translation vector and Rodrigues rotation (theta-u axis/
// angle) into the ViSP pose representation.
966 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
967 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
996 std::vector<vpPoint> &inliers,
double &elapsedTime,
bool (*func)(
const vpHomogeneousMatrix &))
998 std::vector<unsigned int> inlierIndex;
999 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
1016 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
1021 if (objectVpPoints.size() < 4) {
1031 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
1035 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
1036 if (m_useConsensusPercentage) {
1037 nbInlierToReachConsensus =
1038 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
1048 bool isRansacPoseEstimationOk =
false;
1055 if (m_computeCovariance) {
1059 std::cerr <<
"e=" << e.
what() << std::endl;
1077 return isRansacPoseEstimationOk;
1094double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1097 if (matchKeyPoints.size() == 0) {
1103 std::vector<double> errors(matchKeyPoints.size());
1106 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1107 it != matchKeyPoints.end(); ++it, cpt++) {
1112 double u = 0.0, v = 0.0;
1114 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
1117 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1169 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1171 if (m_mapOfImages.empty()) {
1172 std::cerr <<
"There is no training image loaded !" << std::endl;
1182 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1185 unsigned int nbWidth = nbImgSqrt;
1187 unsigned int nbHeight = nbImgSqrt;
1190 if (nbImgSqrt * nbImgSqrt < nbImg) {
1194 unsigned int maxW = ICurrent.
getWidth();
1195 unsigned int maxH = ICurrent.
getHeight();
1196 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1198 if (maxW < it->second.getWidth()) {
1199 maxW = it->second.getWidth();
1202 if (maxH < it->second.getHeight()) {
1203 maxH = it->second.getHeight();
1224 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1226 if (m_mapOfImages.empty()) {
1227 std::cerr <<
"There is no training image loaded !" << std::endl;
1237 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1240 unsigned int nbWidth = nbImgSqrt;
1242 unsigned int nbHeight = nbImgSqrt;
1245 if (nbImgSqrt * nbImgSqrt < nbImg) {
1249 unsigned int maxW = ICurrent.
getWidth();
1250 unsigned int maxH = ICurrent.
getHeight();
1251 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1253 if (maxW < it->second.getWidth()) {
1254 maxW = it->second.getWidth();
1257 if (maxH < it->second.getHeight()) {
1258 maxH = it->second.getHeight();
1276 detect(I, keyPoints, elapsedTime, rectangle);
1289 detect(I_color, keyPoints, elapsedTime, rectangle);
1303 detect(matImg, keyPoints, elapsedTime, mask);
1319 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1322 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1324 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1326 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1329 detect(matImg, keyPoints, elapsedTime, mask);
1345 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1348 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1350 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1352 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1355 detect(matImg, keyPoints, elapsedTime, mask);
1368 const cv::Mat &mask)
1373 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1374 it != m_detectors.end(); ++it) {
1375 std::vector<cv::KeyPoint> kp;
1377 it->second->detect(matImg, kp, mask);
1378 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1393 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1395 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1398 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1413 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1415 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1418 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1433 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1436 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1450 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1453 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1470 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1473 srand((
unsigned int)time(NULL));
1476 std::vector<vpImagePoint> queryImageKeyPoints;
1478 std::vector<vpImagePoint> trainImageKeyPoints;
1482 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1484 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1487 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1488 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1489 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1508 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1511 srand((
unsigned int)time(NULL));
1514 std::vector<vpImagePoint> queryImageKeyPoints;
1516 std::vector<vpImagePoint> trainImageKeyPoints;
1520 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1522 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1525 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1526 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1527 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1546 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1549 srand((
unsigned int)time(NULL));
1552 std::vector<vpImagePoint> queryImageKeyPoints;
1554 std::vector<vpImagePoint> trainImageKeyPoints;
1558 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1560 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1563 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1564 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1565 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1584 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1585 unsigned int lineThickness)
1587 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1589 std::cerr <<
"There is no training image loaded !" << std::endl;
1595 int nbImg = (int)(m_mapOfImages.size() + 1);
1603 int nbWidth = nbImgSqrt;
1604 int nbHeight = nbImgSqrt;
1606 if (nbImgSqrt * nbImgSqrt < nbImg) {
1610 std::map<int, int> mapOfImageIdIndex;
1613 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1615 mapOfImageIdIndex[it->first] = cpt;
1617 if (maxW < it->second.getWidth()) {
1618 maxW = it->second.getWidth();
1621 if (maxH < it->second.getHeight()) {
1622 maxH = it->second.getHeight();
1628 int medianI = nbHeight / 2;
1629 int medianJ = nbWidth / 2;
1630 int medianIndex = medianI * nbWidth + medianJ;
1631 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1633 int current_class_id_index = 0;
1634 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1635 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1639 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1642 int indexI = current_class_id_index / nbWidth;
1643 int indexJ = current_class_id_index - (indexI * nbWidth);
1644 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1651 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1652 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1657 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1662 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1668 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1669 int current_class_id = 0;
1670 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1671 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1675 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1678 int indexI = current_class_id / nbWidth;
1679 int indexJ = current_class_id - (indexI * nbWidth);
1681 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1682 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1683 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1684 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1705 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1706 unsigned int lineThickness)
1708 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1710 std::cerr <<
"There is no training image loaded !" << std::endl;
1716 int nbImg = (int)(m_mapOfImages.size() + 1);
1724 int nbWidth = nbImgSqrt;
1725 int nbHeight = nbImgSqrt;
1727 if (nbImgSqrt * nbImgSqrt < nbImg) {
1731 std::map<int, int> mapOfImageIdIndex;
1734 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1736 mapOfImageIdIndex[it->first] = cpt;
1738 if (maxW < it->second.getWidth()) {
1739 maxW = it->second.getWidth();
1742 if (maxH < it->second.getHeight()) {
1743 maxH = it->second.getHeight();
1749 int medianI = nbHeight / 2;
1750 int medianJ = nbWidth / 2;
1751 int medianIndex = medianI * nbWidth + medianJ;
1752 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1754 int current_class_id_index = 0;
1755 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1756 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1760 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1763 int indexI = current_class_id_index / nbWidth;
1764 int indexJ = current_class_id_index - (indexI * nbWidth);
1765 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1772 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1773 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1778 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1783 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1789 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1790 int current_class_id = 0;
1791 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1792 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1796 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1799 int indexI = current_class_id / nbWidth;
1800 int indexJ = current_class_id - (indexI * nbWidth);
1802 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1803 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1804 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1805 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1825 std::vector<cv::Point3f> *trainPoints)
1828 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1842 std::vector<cv::Point3f> *trainPoints)
1845 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1859 std::vector<cv::Point3f> *trainPoints)
1862 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1877 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1881 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1896 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1900 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1915 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1920 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1921 itd != m_extractors.end(); ++itd) {
1925 if (trainPoints != NULL && !trainPoints->empty()) {
1928 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1931 itd->second->compute(matImg, keyPoints, descriptors);
1933 if (keyPoints.size() != keyPoints_tmp.size()) {
1937 std::map<size_t, size_t> mapOfKeypointHashes;
1939 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1941 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1944 std::vector<cv::Point3f> trainPoints_tmp;
1945 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1946 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1947 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1952 *trainPoints = trainPoints_tmp;
1956 itd->second->compute(matImg, keyPoints, descriptors);
1961 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1965 itd->second->compute(matImg, keyPoints, desc);
1967 if (keyPoints.size() != keyPoints_tmp.size()) {
1971 std::map<size_t, size_t> mapOfKeypointHashes;
1973 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1975 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1978 std::vector<cv::Point3f> trainPoints_tmp;
1979 cv::Mat descriptors_tmp;
1980 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1981 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1982 if (trainPoints != NULL && !trainPoints->empty()) {
1983 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1986 if (!descriptors.empty()) {
1987 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1992 if (trainPoints != NULL) {
1994 *trainPoints = trainPoints_tmp;
1997 descriptors_tmp.copyTo(descriptors);
2001 if (descriptors.empty()) {
2002 desc.copyTo(descriptors);
2004 cv::hconcat(descriptors, desc, descriptors);
2009 if (keyPoints.size() != (
size_t)descriptors.rows) {
2010 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// NOTE(review): this listing is an incomplete extraction of the source -- the
// embedded original line numbers jump (e.g. 2022 -> 2028), so statements and
// closing braces are missing in between. Code lines below are kept
// byte-identical; only annotations are added.
//
// Filters the raw matches produced by the descriptor matcher and stores the
// survivors in the member containers:
//   - m_filteredMatches        : kept matches (queryIdx re-indexed)
//   - m_objectFilteredPoints   : 3D training points of the kept matches
//   - m_queryFilteredKeyPoints : query keypoints of the kept matches
2018void vpKeyPoint::filterMatches()
// Accumulators for the matches that pass the filters below.
2020 std::vector<cv::KeyPoint> queryKpts;
2021 std::vector<cv::Point3f> trainPts;
2022 std::vector<cv::DMatch> m;
// k-NN branch (presumably guarded by m_useKnn / the filter type -- the
// condition itself is not visible in this extraction; TODO confirm).
// First pass: collect best-match distances to derive statistics.
2028 double min_dist = DBL_MAX;
2030 std::vector<double> distance_vec(m_knnMatches.size());
2033 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2034 double dist = m_knnMatches[i][0].distance;
2036 distance_vec[i] = dist;
2038 if (dist < min_dist) {
// Mean of the best-match distances (the accumulation statements are among
// the lines missing from this extraction).
2045 mean /= m_queryDescriptors.rows;
// Standard deviation of the distances; threshold = min distance + one stdev.
2048 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2049 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2050 double threshold = min_dist + stdev;
// Second pass: ratio test on the two nearest neighbours (Lowe-style); the
// actual keep/reject comparison using `ratio`/`dist` is in missing lines.
2052 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2053 if (m_knnMatches[i].size() >= 2) {
2056 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2061 double dist = m_knnMatches[i][0].distance;
// Kept match is re-indexed: queryIdx becomes the position in the filtered
// list, trainIdx is preserved from the original match.
2064 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2066 if (!m_trainPoints.empty()) {
2067 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
2069 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// Non-kNN branch: same min/mean/stdev statistics over m_matches, then keep
// matches whose distance is below the derived threshold.
2077 double min_dist = DBL_MAX;
2079 std::vector<double> distance_vec(m_matches.size());
2080 for (
size_t i = 0; i < m_matches.size(); i++) {
2081 double dist = m_matches[i].distance;
2083 distance_vec[i] = dist;
2085 if (dist < min_dist) {
2092 mean /= m_queryDescriptors.rows;
2094 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2095 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// `threshold` used below is computed in lines missing from this extraction.
2105 for (
size_t i = 0; i < m_matches.size(); i++) {
2106 if (m_matches[i].distance <= threshold) {
2107 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2109 if (!m_trainPoints.empty()) {
2110 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
2112 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// Optional single-match filter: keep only training keypoints that were
// matched exactly once (counted via mapOfTrainIdx).
2117 if (m_useSingleMatchFilter) {
2120 std::vector<cv::DMatch> mTmp;
2121 std::vector<cv::Point3f> trainPtsTmp;
2122 std::vector<cv::KeyPoint> queryKptsTmp;
// Histogram of how many times each train index was matched.
2124 std::map<int, int> mapOfTrainIdx;
2126 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2127 mapOfTrainIdx[it->trainIdx]++;
// Keep only matches whose train index occurs exactly once.
2131 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2132 if (mapOfTrainIdx[it->trainIdx] == 1) {
2133 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
2135 if (!m_trainPoints.empty()) {
2136 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
2138 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
2142 m_filteredMatches = mTmp;
2143 m_objectFilteredPoints = trainPtsTmp;
2144 m_queryFilteredKeyPoints = queryKptsTmp;
// Else branch (single-match filter disabled): publish the unfiltered lists.
2146 m_filteredMatches = m;
2147 m_objectFilteredPoints = trainPts;
2148 m_queryFilteredKeyPoints = queryKpts;
2161 objectPoints = m_objectFilteredPoints;
2187 keyPoints = m_queryFilteredKeyPoints;
2190 keyPoints = m_queryKeyPoints;
// NOTE(review): incomplete extraction -- interior lines are missing; code is
// kept byte-identical, only comments added.
//
// Initializes the keypoint machinery: creates the detector and extractor
// objects from the configured name lists.
2246void vpKeyPoint::init()
// OpenCV 2.4.x only: SIFT/SURF live in the nonfree module which must be
// explicitly initialized before use.
2249#if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2251 if (!cv::initModule_nonfree()) {
2252 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
// Instantiate all configured detectors and extractors by name.
2262 initDetectors(m_detectorNames);
2263 initExtractors(m_extractorNames);
// NOTE(review): incomplete extraction -- #else/#endif lines, closing braces
// and several statements are missing (embedded line numbers jump); code is
// kept byte-identical, only comments added.
//
// Creates the OpenCV feature detector named `detectorName` and stores it in
// m_detectors. A "Pyramid" prefix in the name requests wrapping the detector
// in a PyramidAdaptedFeatureDetector.
2272void vpKeyPoint::initDetector(
const std::string &detectorName)
// OpenCV < 3.0: detectors are created through the string-based factory.
2274#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2275 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2277 if (m_detectors[detectorName] == NULL) {
2278 std::stringstream ss_msg;
2279 ss_msg <<
"Fail to initialize the detector: " << detectorName
2280 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3.0 path: strip an optional "Pyramid" prefix and dispatch on the
// remaining detector name.
2284 std::string detectorNameTmp = detectorName;
2285 std::string pyramid =
"Pyramid";
2286 std::size_t pos = detectorName.find(pyramid);
2287 bool usePyramid =
false;
2288 if (pos != std::string::npos) {
2289 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// SIFT: in core cv:: from OpenCV >= 4.5 / >= 3.4.11, otherwise in
// xfeatures2d; honours m_maxFeatures when positive.
2293 if (detectorNameTmp ==
"SIFT") {
2294#if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
2295 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
2297 m_detectors[detectorNameTmp] = siftDetector;
2299 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2302#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2303 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2305 cv::Ptr<cv::FeatureDetector> siftDetector;
2306 if (m_maxFeatures > 0) {
2307#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2308 siftDetector = cv::SIFT::create(m_maxFeatures);
2310 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2313#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2314 siftDetector = cv::SIFT::create();
2316 siftDetector = cv::xfeatures2d::SIFT::create();
2320 m_detectors[detectorNameTmp] = siftDetector;
// Pyramid wrapping is discouraged for SIFT (it is already multi-scale) but
// still honoured.
2322 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
2323 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2326 std::stringstream ss_msg;
2327 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2328 <<
" was not build with xFeatures2d module.";
// SURF: only available through the xfeatures2d (opencv_contrib) module.
2332 }
else if (detectorNameTmp ==
"SURF") {
2333#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2334 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2336 m_detectors[detectorNameTmp] = surfDetector;
2338 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
2339 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2342 std::stringstream ss_msg;
2343 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2344 <<
" was not build with xFeatures2d module.";
// Core OpenCV detectors follow; each is stored either under the bare name or
// wrapped for the "Pyramid"-prefixed name.
2347 }
else if (detectorNameTmp ==
"FAST") {
2348 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2350 m_detectors[detectorNameTmp] = fastDetector;
2352 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2354 }
else if (detectorNameTmp ==
"MSER") {
2355 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2357 m_detectors[detectorNameTmp] = fastDetector;
2359 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2361 }
else if (detectorNameTmp ==
"ORB") {
// ORB honours m_maxFeatures when positive (its nfeatures parameter).
2362 cv::Ptr<cv::FeatureDetector> orbDetector;
2363 if (m_maxFeatures > 0) {
2364 orbDetector = cv::ORB::create(m_maxFeatures);
2367 orbDetector = cv::ORB::create();
2370 m_detectors[detectorNameTmp] = orbDetector;
2372 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
2373 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2375 }
else if (detectorNameTmp ==
"BRISK") {
2376 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2378 m_detectors[detectorNameTmp] = briskDetector;
2380 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
2381 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2383 }
else if (detectorNameTmp ==
"KAZE") {
2384 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2386 m_detectors[detectorNameTmp] = kazeDetector;
2388 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
2389 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2391 }
else if (detectorNameTmp ==
"AKAZE") {
2392 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2394 m_detectors[detectorNameTmp] = akazeDetector;
2396 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
2397 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2399 }
else if (detectorNameTmp ==
"GFTT") {
2400 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2402 m_detectors[detectorNameTmp] = gfttDetector;
2404 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2406 }
else if (detectorNameTmp ==
"SimpleBlob") {
2407 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2409 m_detectors[detectorNameTmp] = simpleBlobDetector;
2411 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
// STAR (CenSurE) requires xfeatures2d.
2413 }
else if (detectorNameTmp ==
"STAR") {
2414#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2415 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2417 m_detectors[detectorNameTmp] = starDetector;
2419 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2422 std::stringstream ss_msg;
2423 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2424 <<
" was not build with xFeatures2d module.";
2427 }
else if (detectorNameTmp ==
"AGAST") {
2428 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2430 m_detectors[detectorNameTmp] = agastDetector;
2432 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
// MSD requires OpenCV >= 3.1 with xfeatures2d.
2434 }
else if (detectorNameTmp ==
"MSD") {
2435#if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2436#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2437 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2439 m_detectors[detectorNameTmp] = msdDetector;
2441 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2442 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2445 std::stringstream ss_msg;
2446 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2447 <<
" was not build with xFeatures2d module.";
2451 std::stringstream ss_msg;
2452 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
2453 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Fallback branch: unknown detector name.
2456 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Final sanity check: verify the map entry (bare or Pyramid-wrapped name)
// was actually populated, otherwise raise an error.
2459 bool detectorInitialized =
false;
2462 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2465 detectorInitialized = !m_detectors[detectorName].empty();
2468 if (!detectorInitialized) {
2469 std::stringstream ss_msg;
2470 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2471 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initializes one detector per name in `detectorNames` (the per-name work is
// delegated to initDetector; the call itself is in lines missing from this
// extraction).
2483void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2485 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// NOTE(review): incomplete extraction -- #else/#endif lines, closing braces
// and several statements are missing (embedded line numbers jump); code is
// kept byte-identical, only comments added.
//
// Creates the OpenCV descriptor extractor named `extractorName` and stores
// it in m_extractors.
2495void vpKeyPoint::initExtractor(
const std::string &extractorName)
// OpenCV < 3.0: extractors are created through the string-based factory.
2497#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2498 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
// OpenCV >= 3.0 path: dispatch on the extractor name.
// SIFT: core cv:: from OpenCV >= 4.5 / >= 3.4.11, else xfeatures2d.
2500 if (extractorName ==
"SIFT") {
2501#if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
2502 m_extractors[extractorName] = cv::SIFT::create();
2504#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2505 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2507#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2508 m_extractors[extractorName] = cv::SIFT::create();
2510 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2513 std::stringstream ss_msg;
2514 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2515 <<
" was not build with xFeatures2d module.";
// SURF: xfeatures2d only; created with extended=true (last argument), i.e.
// 128-element descriptors instead of 64.
2519 }
else if (extractorName ==
"SURF") {
2520#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2522 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2524 std::stringstream ss_msg;
2525 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2526 <<
" was not build with xFeatures2d module.";
// Core OpenCV extractors.
2529 }
else if (extractorName ==
"ORB") {
2530 m_extractors[extractorName] = cv::ORB::create();
2531 }
else if (extractorName ==
"BRISK") {
2532 m_extractors[extractorName] = cv::BRISK::create();
// xfeatures2d-only binary descriptors below; each branch raises a build
// error message when the contrib module is absent.
2533 }
else if (extractorName ==
"FREAK") {
2534#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2535 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2537 std::stringstream ss_msg;
2538 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2539 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2542 }
else if (extractorName ==
"BRIEF") {
2543#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2544 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2546 std::stringstream ss_msg;
2547 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2548 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2551 }
else if (extractorName ==
"KAZE") {
2552 m_extractors[extractorName] = cv::KAZE::create();
2553 }
else if (extractorName ==
"AKAZE") {
2554 m_extractors[extractorName] = cv::AKAZE::create();
2555 }
else if (extractorName ==
"DAISY") {
2556#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2557 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2559 std::stringstream ss_msg;
2560 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2561 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2564 }
else if (extractorName ==
"LATCH") {
2565#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2566 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2568 std::stringstream ss_msg;
2569 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2570 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// LUCID branch: the creation call is among the lines missing from this
// extraction; only the error path is visible.
2573 }
else if (extractorName ==
"LUCID") {
2574#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2579 std::stringstream ss_msg;
2580 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2581 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// VGG and BoostDesc additionally require OpenCV >= 3.2.
2584 }
else if (extractorName ==
"VGG") {
2585#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2586#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2587 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2589 std::stringstream ss_msg;
2590 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2591 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2595 std::stringstream ss_msg;
2596 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2597 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2600 }
else if (extractorName ==
"BoostDesc") {
2601#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2602#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2603 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2605 std::stringstream ss_msg;
2606 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2607 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2611 std::stringstream ss_msg;
2612 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2613 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Fallback branch: unknown extractor name.
2617 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final sanity check: the map entry must have been populated.
2621 if (!m_extractors[extractorName]) {
2622 std::stringstream ss_msg;
2623 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2624 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x only: switch SURF to extended (128-element) descriptors via
// the string-parameter interface.
2628#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2629 if (extractorName ==
"SURF") {
2631 m_extractors[extractorName]->set(
"extended", 1);
// Initializes one extractor per name in `extractorNames`, then verifies that
// all configured extractors produce the same descriptor type (the mismatch
// handling after the visible comparison is in lines missing from this
// extraction).
2642void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2644 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Descriptor-type consistency check across all created extractors; the type
// of the first extractor is taken as the reference.
2648 int descriptorType = CV_32F;
2649 bool firstIteration =
true;
2650 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2651 it != m_extractors.end(); ++it) {
2652 if (firstIteration) {
2653 firstIteration =
false;
2654 descriptorType = it->second->descriptorType();
2656 if (descriptorType != it->second->descriptorType()) {
// NOTE(review): incomplete extraction -- most map-assignment lines and the
// #endif lines are missing; code is kept byte-identical, only comments added.
//
// Populates the enum -> OpenCV-name lookup tables (m_mapOfDetectorNames and
// m_mapOfDescriptorNames), guarded by the OpenCV version and the optional
// nonfree / xfeatures2d modules available at build time.
2663void vpKeyPoint::initFeatureNames()
// Detector names.
2666#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2673#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2674 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2676#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2677 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2680#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2683#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2688#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2689 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// Descriptor (extractor) names.
2693#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2696#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2697 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2698 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2700#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2701 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2704#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2707#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2710#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2711 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2712 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
// VGG and BoostDesc require OpenCV >= 3.2 with xfeatures2d.
2715#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2716 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2717 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
2729 int descriptorType = CV_32F;
2730 bool firstIteration =
true;
2731 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2732 it != m_extractors.end(); ++it) {
2733 if (firstIteration) {
2734 firstIteration =
false;
2735 descriptorType = it->second->descriptorType();
2737 if (descriptorType != it->second->descriptorType()) {
2743 if (matcherName ==
"FlannBased") {
2744 if (m_extractors.empty()) {
2745 std::cout <<
"Warning: No extractor initialized, by default use "
2746 "floating values (CV_32F) "
2747 "for descriptor type !"
2751 if (descriptorType == CV_8U) {
2752#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2753 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2755 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2758#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2759 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2761 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2765 m_matcher = cv::DescriptorMatcher::create(matcherName);
2768#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2769 if (m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2770 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2775 std::stringstream ss_msg;
2776 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2777 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
2794 IMatching.
insert(IRef, topLeftCorner);
2796 IMatching.
insert(ICurrent, topLeftCorner);
2811 IMatching.
insert(IRef, topLeftCorner);
2813 IMatching.
insert(ICurrent, topLeftCorner);
2827 int nbImg = (int)(m_mapOfImages.size() + 1);
2829 if (m_mapOfImages.empty()) {
2830 std::cerr <<
"There is no training image loaded !" << std::endl;
2840 int nbWidth = nbImgSqrt;
2841 int nbHeight = nbImgSqrt;
2843 if (nbImgSqrt * nbImgSqrt < nbImg) {
2848 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2850 if (maxW < it->second.getWidth()) {
2851 maxW = it->second.getWidth();
2854 if (maxH < it->second.getHeight()) {
2855 maxH = it->second.getHeight();
2861 int medianI = nbHeight / 2;
2862 int medianJ = nbWidth / 2;
2863 int medianIndex = medianI * nbWidth + medianJ;
2866 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2868 int local_cpt = cpt;
2869 if (cpt >= medianIndex) {
2874 int indexI = local_cpt / nbWidth;
2875 int indexJ = local_cpt - (indexI * nbWidth);
2876 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2878 IMatching.
insert(it->second, topLeftCorner);
2881 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2882 IMatching.
insert(ICurrent, topLeftCorner);
2897 int nbImg = (int)(m_mapOfImages.size() + 1);
2899 if (m_mapOfImages.empty()) {
2900 std::cerr <<
"There is no training image loaded !" << std::endl;
2912 int nbWidth = nbImgSqrt;
2913 int nbHeight = nbImgSqrt;
2915 if (nbImgSqrt * nbImgSqrt < nbImg) {
2920 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2922 if (maxW < it->second.getWidth()) {
2923 maxW = it->second.getWidth();
2926 if (maxH < it->second.getHeight()) {
2927 maxH = it->second.getHeight();
2933 int medianI = nbHeight / 2;
2934 int medianJ = nbWidth / 2;
2935 int medianIndex = medianI * nbWidth + medianJ;
2938 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2940 int local_cpt = cpt;
2941 if (cpt >= medianIndex) {
2946 int indexI = local_cpt / nbWidth;
2947 int indexJ = local_cpt - (indexI * nbWidth);
2948 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2952 IMatching.
insert(IRef, topLeftCorner);
2955 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2956 IMatching.
insert(ICurrent, topLeftCorner);
2971 m_detectorNames.clear();
2972 m_extractorNames.clear();
2973 m_detectors.clear();
2974 m_extractors.clear();
2976 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
2979 xmlp.
parse(configFile);
3043 int startClassId = 0;
3044 int startImageId = 0;
3046 m_trainKeyPoints.clear();
3047 m_trainPoints.clear();
3048 m_mapOfImageId.clear();
3049 m_mapOfImages.clear();
3052 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3053 if (startClassId < it->first) {
3054 startClassId = it->first;
3059 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3061 if (startImageId < it->first) {
3062 startImageId = it->first;
3069 if (!parent.empty()) {
3074 std::ifstream file(filename.c_str(), std::ifstream::binary);
3075 if (!file.is_open()) {
3083#if !defined(VISP_HAVE_MODULE_IO)
3085 std::cout <<
"Warning: The learning file contains image data that will "
3086 "not be loaded as visp_io module "
3087 "is not available !"
3092 for (
int i = 0; i < nbImgs; i++) {
3100 char *path =
new char[length + 1];
3102 for (
int cpt = 0; cpt < length; cpt++) {
3104 file.read((
char *)(&c),
sizeof(c));
3107 path[length] =
'\0';
3110#ifdef VISP_HAVE_MODULE_IO
3118 m_mapOfImages[
id + startImageId] = I;
3126 int have3DInfoInt = 0;
3128 bool have3DInfo = have3DInfoInt != 0;
3139 int descriptorType = 5;
3142 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3143 for (
int i = 0; i < nRows; i++) {
3145 float u, v, size, angle, response;
3146 int octave, class_id, image_id;
3155 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3156 m_trainKeyPoints.push_back(keyPoint);
3158 if (image_id != -1) {
3159#ifdef VISP_HAVE_MODULE_IO
3161 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3171 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3174 for (
int j = 0; j < nCols; j++) {
3176 switch (descriptorType) {
3178 unsigned char value;
3179 file.read((
char *)(&value),
sizeof(value));
3180 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
3185 file.read((
char *)(&value),
sizeof(value));
3186 trainDescriptorsTmp.at<
char>(i, j) = value;
3190 unsigned short int value;
3192 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
3198 trainDescriptorsTmp.at<
short int>(i, j) = value;
3204 trainDescriptorsTmp.at<
int>(i, j) = value;
3210 trainDescriptorsTmp.at<
float>(i, j) = value;
3216 trainDescriptorsTmp.at<
double>(i, j) = value;
3222 trainDescriptorsTmp.at<
float>(i, j) = value;
3228 if (!append || m_trainDescriptors.empty()) {
3229 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3231 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3236 pugi::xml_document doc;
3239 if (!doc.load_file(filename.c_str())) {
3243 pugi::xml_node root_element = doc.document_element();
3245 int descriptorType = CV_32F;
3246 int nRows = 0, nCols = 0;
3249 cv::Mat trainDescriptorsTmp;
3251 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node; first_level_node = first_level_node.next_sibling()) {
3253 std::string name(first_level_node.name());
3254 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
3256 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node; image_info_node = image_info_node.next_sibling()) {
3257 name = std::string(image_info_node.name());
3259 if (name ==
"trainImg") {
3261 int id = image_info_node.attribute(
"image_id").as_int();
3264#ifdef VISP_HAVE_MODULE_IO
3265 std::string path(image_info_node.text().as_string());
3274 m_mapOfImages[
id + startImageId] = I;
3278 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
3279 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3280 descriptors_info_node = descriptors_info_node.next_sibling()) {
3281 if (descriptors_info_node.type() == pugi::node_element) {
3282 name = std::string(descriptors_info_node.name());
3284 if (name ==
"nrows") {
3285 nRows = descriptors_info_node.text().as_int();
3286 }
else if (name ==
"ncols") {
3287 nCols = descriptors_info_node.text().as_int();
3288 }
else if (name ==
"type") {
3289 descriptorType = descriptors_info_node.text().as_int();
3294 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3295 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
3296 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3297 int octave = 0, class_id = 0, image_id = 0;
3298 double oX = 0.0, oY = 0.0, oZ = 0.0;
3300 std::stringstream ss;
3302 for (pugi::xml_node point_node = first_level_node.first_child(); point_node; point_node = point_node.next_sibling()) {
3303 if (point_node.type() == pugi::node_element) {
3304 name = std::string(point_node.name());
3308 u = point_node.text().as_double();
3309 }
else if (name ==
"v") {
3310 v = point_node.text().as_double();
3311 }
else if (name ==
"size") {
3312 size = point_node.text().as_double();
3313 }
else if (name ==
"angle") {
3314 angle = point_node.text().as_double();
3315 }
else if (name ==
"response") {
3316 response = point_node.text().as_double();
3317 }
else if (name ==
"octave") {
3318 octave = point_node.text().as_int();
3319 }
else if (name ==
"class_id") {
3320 class_id = point_node.text().as_int();
3321 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
3322 (class_id + startClassId));
3323 m_trainKeyPoints.push_back(keyPoint);
3324 }
else if (name ==
"image_id") {
3325 image_id = point_node.text().as_int();
3326 if (image_id != -1) {
3327#ifdef VISP_HAVE_MODULE_IO
3329 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3332 }
else if (name ==
"oX") {
3333 oX = point_node.text().as_double();
3334 }
else if (name ==
"oY") {
3335 oY = point_node.text().as_double();
3336 }
else if (name ==
"oZ") {
3337 oZ = point_node.text().as_double();
3338 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
3339 }
else if (name ==
"desc") {
3342 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3343 descriptor_value_node = descriptor_value_node.next_sibling()) {
3345 if (descriptor_value_node.type() == pugi::node_element) {
3347 std::string parseStr(descriptor_value_node.text().as_string());
3352 switch (descriptorType) {
3357 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
3364 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
3368 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
3372 ss >> trainDescriptorsTmp.at<
short int>(i, j);
3376 ss >> trainDescriptorsTmp.at<
int>(i, j);
3380 ss >> trainDescriptorsTmp.at<
float>(i, j);
3384 ss >> trainDescriptorsTmp.at<
double>(i, j);
3388 ss >> trainDescriptorsTmp.at<
float>(i, j);
3392 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
3405 if (!append || m_trainDescriptors.empty()) {
3406 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3408 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3418 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3424 m_currentImageId = (int)m_mapOfImages.size();
// Fragment of vpKeyPoint::match(trainDescriptors, queryDescriptors, matches, elapsedTime).
// NOTE(review): extracted/garbled text — the signature start, braces and some lines are
// missing here; verify against upstream ViSP vpKeyPoint.cpp before editing.
3436 std::vector<cv::DMatch> &matches,
double &elapsedTime)
3441 m_knnMatches.clear();
// k-NN branch (m_useKnn): 2-NN matching, then collapsed to best matches via knnToDMatch.
3443 if (m_useMatchTrainToQuery) {
3444 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// Match train -> query on a cloned (empty-state) matcher, then swap query/train
// indices so the stored matches keep the usual query->train orientation.
3447 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3448 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3450 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3451 it1 != knnMatchesTmp.end(); ++it1) {
3452 std::vector<cv::DMatch> tmp;
3453 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
// Swap queryIdx/trainIdx (cv::DMatch ctor is (queryIdx, trainIdx, distance)).
3454 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3456 m_knnMatches.push_back(tmp);
3459 matches.resize(m_knnMatches.size());
3460 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Default k-NN path: query -> train directly on m_matcher.
3463 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3464 matches.resize(m_knnMatches.size());
3465 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Non-kNN branch: plain 1-NN matching, same train-to-query index swap option.
3470 if (m_useMatchTrainToQuery) {
3471 std::vector<cv::DMatch> matchesTmp;
3473 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3474 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3476 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3477 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3481 m_matcher->match(queryDescriptors, matches);
// Fragment of vpKeyPoint::matchPoint(I, rectangle): detect+extract on the query image
// and delegate to matchPoint(queryKeyPoints, queryDescriptors).
// NOTE(review): garbled fragment — header and some braces missing; compare to upstream.
3547 if (m_trainDescriptors.empty()) {
// Guard: matching requires a previously-built reference (train) descriptor set.
3548 std::cerr <<
"Reference is empty." << std::endl;
3550 std::cerr <<
"Reference is not computed." << std::endl;
3552 std::cerr <<
"Matching is not possible." << std::endl;
// ASIFT-style path: gather keypoints/descriptors from all simulated affine views.
3557 if (m_useAffineDetection) {
3558 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3559 std::vector<cv::Mat> listOfQueryDescriptors;
3565 m_queryKeyPoints.clear();
3566 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3567 it != listOfQueryKeyPoints.end(); ++it) {
3568 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
// Concatenate per-view descriptor matrices row-wise into m_queryDescriptors.
3572 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3576 it->copyTo(m_queryDescriptors);
3578 m_queryDescriptors.push_back(*it);
// Regular path: single detection/extraction restricted to the optional rectangle.
3582 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3583 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3586 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
// Fragment of vpKeyPoint::matchPoint(queryKeyPoints, queryDescriptors): match against the
// train set, apply filtering, and return the number of filtered matches.
// NOTE(review): garbled fragment — header/braces missing; verify against upstream.
3599 m_queryKeyPoints = queryKeyPoints;
3600 m_queryDescriptors = queryDescriptors;
3602 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3605 m_queryFilteredKeyPoints.clear();
3606 m_objectFilteredPoints.clear();
3607 m_filteredMatches.clear();
// Without the ratio/factor filter: when matching was train->query, rebuild the
// filtered lists so query indices are sequential; otherwise keep matches as-is.
3611 if (m_useMatchTrainToQuery) {
3613 m_queryFilteredKeyPoints.clear();
3614 m_filteredMatches.clear();
3615 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3616 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3617 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3620 m_queryFilteredKeyPoints = m_queryKeyPoints;
3621 m_filteredMatches = m_matches;
// If 3D training points exist, keep the 3D point matched to each kept keypoint.
3624 if (!m_trainPoints.empty()) {
3625 m_objectFilteredPoints.clear();
3629 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3631 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3640 return static_cast<unsigned int>(m_filteredMatches.size());
// Convenience overload (grayscale): discards the pose error and elapsed time outputs.
3672 double error, elapsedTime;
3673 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Convenience overload (color): discards the pose error and elapsed time outputs.
3691 double error, elapsedTime;
3692 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
// Fragment of vpKeyPoint::matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle):
// detect/extract/match, then estimate the object pose cMo by RANSAC, either through the
// ViSP virtual visual servoing path (m_useRansacVVS) or through OpenCV's PnP path.
// NOTE(review): extracted/garbled text — headers, braces and several statements
// (e.g. the pixel-to-meter conversion around line 3812) are missing; verify upstream.
3715 if (m_trainDescriptors.empty()) {
3716 std::cerr <<
"Reference is empty." << std::endl;
3718 std::cerr <<
"Reference is not computed." << std::endl;
3720 std::cerr <<
"Matching is not possible." << std::endl;
// Optional ASIFT-style multi-view detection (same scheme as the plain matchPoint).
3725 if (m_useAffineDetection) {
3726 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3727 std::vector<cv::Mat> listOfQueryDescriptors;
3733 m_queryKeyPoints.clear();
3734 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3735 it != listOfQueryKeyPoints.end(); ++it) {
3736 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3740 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3744 it->copyTo(m_queryDescriptors);
3746 m_queryDescriptors.push_back(*it);
3750 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3751 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3754 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3756 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3759 m_queryFilteredKeyPoints.clear();
3760 m_objectFilteredPoints.clear();
3761 m_filteredMatches.clear();
// Same filtered-list bookkeeping as matchPoint(queryKeyPoints, queryDescriptors).
3765 if (m_useMatchTrainToQuery) {
3767 m_queryFilteredKeyPoints.clear();
3768 m_filteredMatches.clear();
3769 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3770 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3771 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3774 m_queryFilteredKeyPoints = m_queryKeyPoints;
3775 m_filteredMatches = m_matches;
3778 if (!m_trainPoints.empty()) {
3779 m_objectFilteredPoints.clear();
3783 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3785 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3797 m_ransacInliers.clear();
3798 m_ransacOutliers.clear();
// Pose path 1: ViSP RANSAC + virtual visual servoing on vpPoint correspondences.
3800 if (m_useRansacVVS) {
3801 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3805 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3806 it != m_objectFilteredPoints.end(); ++it, cpt++) {
// cv::KeyPoint stores (x, y) = (u, v); vpImagePoint ctor takes (i, j) = (v, u).
3810 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3812 double x = 0.0, y = 0.0;
3817 objectVpPoints[cpt] = pt;
3820 std::vector<vpPoint> inliers;
3821 std::vector<unsigned int> inlierIndex;
3823 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3825 std::map<unsigned int, bool> mapOfInlierIndex;
3826 m_matchRansacKeyPointsToPoints.clear();
// Record the (keypoint, 3D point) pair of every RANSAC inlier.
3828 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3829 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3830 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3831 mapOfInlierIndex[*it] =
true;
// Any filtered keypoint not flagged as an inlier is a RANSAC outlier.
3834 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3835 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3836 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3840 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3842 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3843 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3844 m_ransacInliers.begin(), matchRansacToVpImage);
3846 elapsedTime += m_poseTime;
// Pose path 2: OpenCV-based pose (pixel points + 3D points), same bookkeeping.
3850 std::vector<cv::Point2f> imageFilteredPoints;
3851 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3852 std::vector<int> inlierIndex;
3853 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3855 std::map<int, bool> mapOfInlierIndex;
3856 m_matchRansacKeyPointsToPoints.clear();
3858 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3859 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3860 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3861 mapOfInlierIndex[*it] =
true;
3864 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3865 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3866 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3870 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3872 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3873 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3874 m_ransacInliers.begin(), matchRansacToVpImage);
3876 elapsedTime += m_poseTime;
// Tail of the color overload: delegates to the grayscale pose-estimating matchPoint
// using m_I (presumably the gray conversion of I_color done just above — TODO confirm).
3902 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
// Fragment of vpKeyPoint::matchPointAndDetect(...): match keypoints, then decide whether
// the learned object is present using either a RANSAC homography (planar object) or a
// RANSAC fundamental matrix, and report the inliers' center of gravity.
// NOTE(review): garbled fragment — signature start, braces and the bounding-box
// computation are missing; verify against upstream ViSP vpKeyPoint.cpp.
3927 vpImagePoint &centerOfGravity,
const bool isPlanarObject,
3928 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3929 double *meanDescriptorDistance,
double *detection_score,
const vpRect &rectangle)
3931 if (imPts1 != NULL && imPts2 != NULL) {
// Detection score: nb of matches divided by the mean descriptor distance —
// more matches and smaller distances mean a higher score.
3938 double meanDescriptorDistanceTmp = 0.0;
3939 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3940 meanDescriptorDistanceTmp += (double)it->distance;
3943 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3944 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3946 if (meanDescriptorDistance != NULL) {
3947 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3949 if (detection_score != NULL) {
3950 *detection_score = score;
// Geometric verification needs at least 4 correspondences (homography DoF).
3953 if (m_filteredMatches.size() >= 4) {
3955 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3957 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3959 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3960 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3961 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3964 std::vector<vpImagePoint> inliers;
3965 if (isPlanarObject) {
3966#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3967 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3969 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
// Keep matches whose homography reprojection error is below 6 pixels.
3972 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3974 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3975 realPoint.at<
double>(0, 0) = points1[i].x;
3976 realPoint.at<
double>(1, 0) = points1[i].y;
3977 realPoint.at<
double>(2, 0) = 1.f;
3979 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3980 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3981 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3982 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3984 if (reprojectionError < 6.0) {
3985 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3986 if (imPts1 != NULL) {
3987 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3990 if (imPts2 != NULL) {
3991 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
// Non-planar object: fundamental matrix RANSAC (needs >= 8 correspondences).
3995 }
else if (m_filteredMatches.size() >= 8) {
3996 cv::Mat fundamentalInliers;
3997 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3999 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
4000 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
4001 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
4003 if (imPts1 != NULL) {
4004 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
4007 if (imPts2 != NULL) {
4008 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
// Inliers' centroid gives the detected object's center of gravity.
4014 if (!inliers.empty()) {
4021 double meanU = 0.0, meanV = 0.0;
4022 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4023 meanU += it->get_u();
4024 meanV += it->get_v();
4027 meanU /= (double)inliers.size();
4028 meanV /= (double)inliers.size();
4030 centerOfGravity.
set_u(meanU);
4031 centerOfGravity.
set_v(meanV);
// Detection criterion depends on m_detectionMethod: either a distance threshold
// or a score threshold (see m_detectionThreshold / m_detectionScore defaults).
4039 return meanDescriptorDistanceTmp < m_detectionThreshold;
4041 return score > m_detectionScore;
// Fragment of the pose-based matchPointAndDetect overload: run pose-estimating
// matchPoint, project the model points (presumably for the detection bounding box —
// TODO confirm, projection lines missing), and take the RANSAC inliers' centroid
// as the object's center of gravity.
4069 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4074 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4076 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4080 modelImagePoints[cpt] = imPt;
// Center of gravity of the RANSAC inliers (in pixel coordinates).
4089 double meanU = 0.0, meanV = 0.0;
4090 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4091 meanU += it->get_u();
4092 meanV += it->get_v();
4095 meanU /= (double)m_ransacInliers.size();
4096 meanV /= (double)m_ransacInliers.size();
4098 centerOfGravity.
set_u(meanU);
4099 centerOfGravity.
set_v(meanV);
// Fragment of vpKeyPoint::detectExtractAffine(...): ASIFT-style detection — simulate a
// set of affine views (tilt t = 2^(0.5*tl), rotation phi), detect/extract on each warped
// image, and map keypoints back to the original frame with the inverse transform Ai.
// Two variants are visible: a serial one and an OpenMP-parallel one (the #if/#else
// preprocessor lines separating them are mostly missing from this extracted text).
4120 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4121 std::vector<cv::Mat> &listOfDescriptors,
4127 listOfKeypoints.clear();
4128 listOfDescriptors.clear();
// Serial variant: iterate over tilt levels and rotation angles.
4130 for (
int tl = 1; tl < 6; tl++) {
4131 double t = pow(2, 0.5 * tl);
4132 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4133 std::vector<cv::KeyPoint> keypoints;
4134 cv::Mat descriptors;
4136 cv::Mat timg, mask, Ai;
// affineSkew warps I into timg and returns the valid-pixel mask and inverse Ai.
4139 affineSkew(t, phi, timg, mask, Ai);
4142 if(listOfAffineI != NULL) {
4144 bitwise_and(mask, timg, img_disp);
4147 listOfAffineI->push_back(tI);
4151 cv::bitwise_and(mask, timg, img_disp);
4152 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4153 cv::imshow(
"Skew", img_disp );
// Run every configured detector on the warped image, restricted by the mask.
4157 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4158 it != m_detectors.end(); ++it) {
4159 std::vector<cv::KeyPoint> kp;
4160 it->second->detect(timg, kp, mask);
4161 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4165 extract(timg, keypoints, descriptors, elapsedTime);
// Back-project each keypoint to original image coordinates: p = Ai * (x, y, 1).
4167 for(
unsigned int i = 0; i < keypoints.size(); i++) {
4168 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4169 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4170 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4171 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4174 listOfKeypoints.push_back(keypoints);
4175 listOfDescriptors.push_back(descriptors);
// Parallel variant: precompute the (t, phi) parameter list so iterations are
// independent and can be distributed with OpenMP.
4184 std::vector<std::pair<double, int> > listOfAffineParams;
4185 for (
int tl = 1; tl < 6; tl++) {
4186 double t = pow(2, 0.5 * tl);
4187 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4188 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4192 listOfKeypoints.resize(listOfAffineParams.size());
4193 listOfDescriptors.resize(listOfAffineParams.size());
4195 if (listOfAffineI != NULL) {
4196 listOfAffineI->resize(listOfAffineParams.size());
4199#ifdef VISP_HAVE_OPENMP
4200#pragma omp parallel for
4202 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4203 std::vector<cv::KeyPoint> keypoints;
4204 cv::Mat descriptors;
4206 cv::Mat timg, mask, Ai;
4209 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
4211 if (listOfAffineI != NULL) {
4213 bitwise_and(mask, timg, img_disp);
// Indexed write (not push_back): safe under the parallel for.
4216 (*listOfAffineI)[(size_t)cpt] = tI;
4221 cv::bitwise_and(mask, timg, img_disp);
4222 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4223 cv::imshow(
"Skew", img_disp );
4227 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4228 it != m_detectors.end(); ++it) {
4229 std::vector<cv::KeyPoint> kp;
4230 it->second->detect(timg, kp, mask);
4231 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4235 extract(timg, keypoints, descriptors, elapsedTime);
4237 for (
size_t i = 0; i < keypoints.size(); i++) {
4238 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4239 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4240 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4241 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4244 listOfKeypoints[(size_t)cpt] = keypoints;
4245 listOfDescriptors[(size_t)cpt] = descriptors;
// Fragment of vpKeyPoint::reset(): restore every member to its default value and
// re-register the default ORB detector/extractor names.
// NOTE(review): m_ransacParallel is reset to true here, while the constructor visible
// in this file initializes it to false — confirm which default is intended upstream.
4261 m_computeCovariance =
false;
4263 m_currentImageId = 0;
4265 m_detectionScore = 0.15;
4266 m_detectionThreshold = 100.0;
4267 m_detectionTime = 0.0;
4268 m_detectorNames.clear();
4269 m_detectors.clear();
4270 m_extractionTime = 0.0;
4271 m_extractorNames.clear();
4272 m_extractors.clear();
4273 m_filteredMatches.clear();
4276 m_knnMatches.clear();
4277 m_mapOfImageId.clear();
4278 m_mapOfImages.clear();
// Drop the matcher instance; it is rebuilt on demand from m_matcherName.
4279 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4280 m_matcherName =
"BruteForce-Hamming";
4282 m_matchingFactorThreshold = 2.0;
4283 m_matchingRatioThreshold = 0.85;
4284 m_matchingTime = 0.0;
4285 m_matchRansacKeyPointsToPoints.clear();
4286 m_nbRansacIterations = 200;
4287 m_nbRansacMinInlierCount = 100;
4288 m_objectFilteredPoints.clear();
4290 m_queryDescriptors = cv::Mat();
4291 m_queryFilteredKeyPoints.clear();
4292 m_queryKeyPoints.clear();
4293 m_ransacConsensusPercentage = 20.0;
4295 m_ransacInliers.clear();
4296 m_ransacOutliers.clear();
4297 m_ransacParallel =
true;
4298 m_ransacParallelNbThreads = 0;
4299 m_ransacReprojectionError = 6.0;
4300 m_ransacThreshold = 0.01;
4301 m_trainDescriptors = cv::Mat();
4302 m_trainKeyPoints.clear();
4303 m_trainPoints.clear();
4304 m_trainVpPoints.clear();
4305 m_useAffineDetection =
false;
// Cross-check option only exists for the OpenCV 2.4.x BruteForce matcher.
4306#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4307 m_useBruteForceCrossCheck =
true;
4309 m_useConsensusPercentage =
false;
4311 m_useMatchTrainToQuery =
false;
4312 m_useRansacVVS =
true;
4313 m_useSingleMatchFilter =
true;
// Default feature pipeline: ORB detector + ORB descriptor.
4315 m_detectorNames.push_back(
"ORB");
4316 m_extractorNames.push_back(
"ORB");
// Fragment of vpKeyPoint::saveLearningData(filename, binaryMode, saveTrainingImages):
// optionally writes the training images to disk, then serializes keypoints, optional 3D
// points and descriptors either to a binary file or to an XML file (pugixml).
// NOTE(review): extracted/garbled text — many file.write calls, switch cases and braces
// are missing from this fragment; verify against upstream ViSP vpKeyPoint.cpp.
4332 if (!parent.empty()) {
4336 std::map<int, std::string> mapOfImgPath;
// Save each training image as "train_image_NNN.<ext>" next to the learning file
// (requires the visp_io module); remember the path per image id.
4337 if (saveTrainingImages) {
4338#ifdef VISP_HAVE_MODULE_IO
4340 unsigned int cpt = 0;
4342 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4348 std::stringstream ss;
4349 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
4351 switch (m_imageFormat) {
4373 std::string imgFilename = ss.str();
4374 mapOfImgPath[it->first] = imgFilename;
4375 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
4378 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
4379 "are not saved because "
4380 "visp_io module is not available !"
// 3D info is only consistent when there is one 3D point per training keypoint.
4385 bool have3DInfo = m_trainPoints.size() > 0;
4386 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
// Binary serialization path.
4392 std::ofstream file(filename.c_str(), std::ofstream::binary);
4393 if (!file.is_open()) {
4398 int nbImgs = (int)mapOfImgPath.size();
4401#ifdef VISP_HAVE_MODULE_IO
// For each image: id, path length, then the path characters one by one.
4402 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4408 std::string path = it->second;
4409 int length = (int)path.length();
4412 for (
int cpt = 0; cpt < length; cpt++) {
4413 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
4419 int have3DInfoInt = have3DInfo ? 1 : 0;
4422 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4423 int descriptorType = m_trainDescriptors.type();
// Per keypoint: u, v, size, angle, response, octave, class_id, image_id,
// optional (oX, oY, oZ), then the descriptor row (type-dependent element size).
4434 for (
int i = 0; i < nRows; i++) {
4435 unsigned int i_ = (
unsigned int)i;
4437 float u = m_trainKeyPoints[i_].pt.x;
4441 float v = m_trainKeyPoints[i_].pt.y;
4445 float size = m_trainKeyPoints[i_].size;
4449 float angle = m_trainKeyPoints[i_].angle;
4453 float response = m_trainKeyPoints[i_].response;
4457 int octave = m_trainKeyPoints[i_].octave;
4461 int class_id = m_trainKeyPoints[i_].class_id;
4465#ifdef VISP_HAVE_MODULE_IO
4466 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4467 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4476 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4487 for (
int j = 0; j < nCols; j++) {
4489 switch (descriptorType) {
4491 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
4492 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
4496 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
// XML serialization path (pugixml), mirroring the structure read by
// the XML branch of loadLearningData.
4528 pugi::xml_document doc;
4529 pugi::xml_node node = doc.append_child(pugi::node_declaration);
4530 node.append_attribute(
"version") =
"1.0";
4531 node.append_attribute(
"encoding") =
"UTF-8";
4537 pugi::xml_node root_node = doc.append_child(
"LearningData");
4540 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
4542#ifdef VISP_HAVE_MODULE_IO
4543 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4544 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
4545 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4546 std::stringstream ss;
4548 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
4553 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
4555 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4556 int descriptorType = m_trainDescriptors.type();
4559 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
4562 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
4565 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
// One <DescriptorInfo> element per training keypoint.
4567 for (
int i = 0; i < nRows; i++) {
4568 unsigned int i_ = (
unsigned int)i;
4569 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
4571 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4572 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4573 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4574 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4575 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4576 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4577 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4579#ifdef VISP_HAVE_MODULE_IO
4580 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4581 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
4582 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4584 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
4588 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4589 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4590 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4593 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
// Descriptor values are written as <val> children, typed by descriptorType.
4595 for (
int j = 0; j < nCols; j++) {
4596 switch (descriptorType) {
// 8-bit types go through an int temporary so they print as numbers, not chars.
4602 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4603 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4611 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4612 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4616 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4617 m_trainDescriptors.at<
unsigned short int>(i, j);
4621 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4622 m_trainDescriptors.at<
short int>(i, j);
4626 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4627 m_trainDescriptors.at<
int>(i, j);
4631 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4632 m_trainDescriptors.at<
float>(i, j);
4636 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4637 m_trainDescriptors.at<
double>(i, j);
4647 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
4651#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4652#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Predicate: keeps keypoints whose response is >= a fixed threshold.
// NOTE(review): the 'float value;' member declaration and closing '};' are missing
// from this extracted fragment.
4654struct KeypointResponseGreaterThanThreshold {
4655 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) {}
4656 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
// Comparator: orders keypoints by decreasing response (strongest first).
4660struct KeypointResponseGreater {
4661 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
// Keep only the n_points keypoints with the strongest response.
// NOTE(review): fragment — the n_points == 0 body and closing braces are missing.
4665void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4669 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
4670 if (n_points == 0) {
// Partial sort so the n_points strongest responses come first (O(n) average).
4676 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
// Response of the weakest retained keypoint; ties at this value are ambiguous.
4678 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
// Also keep any keypoint past the cut whose response ties the ambiguous value.
4681 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4682 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4685 keypoints.resize((
size_t)(new_end - keypoints.begin()));
// Predicate: true for keypoints lying OUTSIDE the given rectangle (so remove_if
// erases them). NOTE(review): the 'cv::Rect r;' member and '};' are missing here.
4689struct RoiPredicate {
4690 RoiPredicate(
const cv::Rect &_r) : r(_r) {}
4692 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
// Remove keypoints closer than borderSize pixels to the image border.
// NOTE(review): fragment — the degenerate-size branch body (presumably clears the
// vector) and the trailing 'keypoints.end());' are missing from this extracted text.
4697void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4700 if (borderSize > 0) {
// Image too small to have any interior: no keypoint can survive.
4701 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
// Erase-remove idiom with a shrunken ROI rectangle.
4704 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4705 RoiPredicate(cv::Rect(
4706 cv::Point(borderSize, borderSize),
4707 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
// Predicate: true for keypoints whose size is outside [minSize, maxSize]
// (used with remove_if to drop them). NOTE(review): closing '};' missing here.
4712struct SizePredicate {
4713 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4715 bool operator()(
const cv::KeyPoint &keyPt)
const
4717 float size = keyPt.size;
4718 return (size < minSize) || (size > maxSize);
4721 float minSize, maxSize;
// Remove keypoints whose size falls outside [minSize, maxSize].
4724void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
// Preconditions: both bounds non-negative and consistent.
4726 CV_Assert(minSize >= 0);
4727 CV_Assert(maxSize >= 0);
4728 CV_Assert(minSize <= maxSize);
4730 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
// Predicate: true when the mask pixel under the (rounded) keypoint location is 0,
// i.e. the keypoint is masked out. NOTE(review): the 'struct MaskPredicate {' opening
// line and the 'cv::Mat mask;' member are missing from this extracted fragment.
4736 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) {}
4737 bool operator()(
const cv::KeyPoint &key_pt)
const
// +0.5f rounds the float keypoint coordinates to the nearest pixel.
4739 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
// Remove keypoints located on zero pixels of the mask.
4746void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4751 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4754struct KeyPoint_LessThan {
4755 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4756 bool operator()(
size_t i,
size_t j)
const
4758 const cv::KeyPoint &kp1 = (*kp)[ i];
4759 const cv::KeyPoint &kp2 = (*kp)[ j];
4761 std::numeric_limits<float>::epsilon())) {
4763 return kp1.pt.x < kp2.pt.x;
4767 std::numeric_limits<float>::epsilon())) {
4769 return kp1.pt.y < kp2.pt.y;
4773 std::numeric_limits<float>::epsilon())) {
4775 return kp1.size > kp2.size;
4779 std::numeric_limits<float>::epsilon())) {
4781 return kp1.angle < kp2.angle;
4785 std::numeric_limits<float>::epsilon())) {
4787 return kp1.response > kp2.response;
4790 if (kp1.octave != kp2.octave) {
4791 return kp1.octave > kp2.octave;
4794 if (kp1.class_id != kp2.class_id) {
4795 return kp1.class_id > kp2.class_id;
4800 const std::vector<cv::KeyPoint> *kp;
4803void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4805 size_t i, j, n = keypoints.size();
4806 std::vector<size_t> kpidx(n);
4807 std::vector<uchar> mask(n, (uchar)1);
4809 for (i = 0; i < n; i++) {
4812 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4813 for (i = 1, j = 0; i < n; i++) {
4814 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4815 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4818 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4819 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4820 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4821 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4828 for (i = j = 0; i < n; i++) {
4831 keypoints[j] = keypoints[i];
4836 keypoints.resize(j);
4842vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &_detector,
4844 : detector(_detector), maxLevel(_maxLevel)
4848bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4850 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4853void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4854 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4856 detectImpl(image.getMat(), keypoints, mask.getMat());
4859void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4860 const cv::Mat &mask)
const
4862 cv::Mat src = image;
4863 cv::Mat src_mask = mask;
4865 cv::Mat dilated_mask;
4866 if (!mask.empty()) {
4867 cv::dilate(mask, dilated_mask, cv::Mat());
4868 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4869 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4870 dilated_mask = mask255;
4873 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4875 std::vector<cv::KeyPoint> new_pts;
4876 detector->detect(src, new_pts, src_mask);
4877 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4878 for (; it != end; ++it) {
4879 it->pt.x *= multiplier;
4880 it->pt.y *= multiplier;
4881 it->size *= multiplier;
4884 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4893 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4898 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4903#elif !defined(VISP_BUILD_SHARED_LIBS)
// Work-around so the translation unit exports at least one symbol when the
// OpenCV-dependent code above is compiled out (avoids "has no symbols"
// archiver warnings). The stray ';' after the body (an empty declaration)
// is removed.
void dummy_vpKeyPoint() { }
bool _reference_computed
flag to indicate if the reference has been built.
std::vector< vpImagePoint > currentImagePointsList
std::vector< unsigned int > matchedReferencePoints
std::vector< vpImagePoint > referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
void initMatcher(const std::string &matcherName)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double s=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
std::vector< unsigned int > getRansacInlierIndex() const
void setCovarianceComputation(const bool &flag)
std::vector< vpPoint > getRansacInliers() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void setUseParallelRansac(bool use)
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()