Visual Servoing Platform version 3.5.0
tutorial-mb-generic-tracker-rgbd-realsense.cpp
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_OPENCV)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/vision/vpKeyPoint.h>
int main(int argc, char *argv[])
{
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i+1 < argc) {
      config_color = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--config_depth" && i+1 < argc) {
      config_depth = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--model_color" && i+1 < argc) {
      model_color = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--model_depth" && i+1 < argc) {
      model_depth = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--init_file" && i+1 < argc) {
      init_file = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--proj_error_threshold" && i+1 < argc) {
      proj_error_threshold = std::atof(argv[i+1]);
    } else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    } else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    } else if (std::string(argv[i]) == "--use_edges" && i+1 < argc) {
      use_edges = (std::atoi(argv[i+1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_klt" && i+1 < argc) {
      use_klt = (std::atoi(argv[i+1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_depth" && i+1 < argc) {
      use_depth = (std::atoi(argv[i+1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--learn") {
      learn = true;
    } else if (std::string(argv[i]) == "--learning_data" && i+1 < argc) {
      learning_data = argv[i+1];
    } else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    } else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n" << argv[0]
                << " [--model_color <object.cao>] [--model_depth <object.cao>]"
                   " [--config_color <object.xml>] [--config_depth <object.xml>]"
                   " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
                   " [--proj_error_threshold <threshold between 0 and 90> (default: " << proj_error_threshold << ")]"
                   " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
                   " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
                   " [--display_proj_error]" << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
                << argv[0]
                << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1"
                << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n" << argv[0]
                << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
                << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n" << argv[0]
                << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
                << std::endl;

      return 0;
    }
  }
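
  // If no depth model is given, reuse the color model, and derive default
  // .xml config and .init file names from the model location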
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: " << "\"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: " << "\"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : " << "\"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : " << "\"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : " << "\"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn       : " << learn << std::endl;
  std::cout << "  Auto init   : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()" << std::endl;
    return EXIT_FAILURE;
  }

  vpRealSense2 realsense;
  int width = 640, height = 480;
  int fps = 30;
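
  // Enable color (RGBA) and depth (Z16) streams at the same resolution and frame rate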
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);

  try {
    realsense.open(config);
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
    std::cout << "Check if the Realsense camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

  vpCameraParameters cam_color = realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth = realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  vpImage<vpRGBa> I_color(height, width);
  vpImage<unsigned char> I_gray(height, width);
  vpImage<unsigned char> I_depth(height, width);
  vpImage<uint16_t> I_depth_raw(height, width);

  unsigned int _posx = 100, _posy = 50;
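
  // Create the windows with the first display backend available: X11, GDI (Windows) or OpenCV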
#ifdef VISP_HAVE_X11
  vpDisplayX d1, d2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d1, d2;
#elif defined(VISP_HAVE_OPENCV)
  vpDisplayOpenCV d1, d2;
#endif
  if (use_edges || use_klt)
    d1.init(I_gray, _posx, _posy, "Color stream");
  if (use_depth)
    d2.init(I_depth, _posx + I_gray.getWidth()+10, _posy, "Depth stream");
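
  // Grab and display frames until the user clicks in one of the windows to start the initialization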
  while (true) {
    realsense.acquire((unsigned char *) I_color.bitmap, (unsigned char *) I_depth_raw.bitmap, NULL, NULL);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }
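
  // Select the features used by the model-based tracker: moving edges and/or KLT points
  // on the color image, dense depth on the depth map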
  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
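
  // Transformation from the color frame to the depth frame (depth_M_color), read from the sensor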
  vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;
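
  // Configure the tracker; with both color and depth features the color view is named
  // "Camera1" and the depth view "Camera2"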
  vpMbGenericTracker tracker(trackerTypes);

  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);
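
  // Keypoint pipeline used for learning and auto-initialization: SIFT when available,
  // FAST/ORB with Hamming matching otherwise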
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#else
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#  if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint.setDetectorParameter("ORB", "nLevels", 1);
#  else
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#  endif
#endif
  }
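
  // Initialize the pose either from the learned keypoint database (--auto_init)
  // or from user clicks on the points listed in the .init file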
  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  } else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;
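
  // Tracking loop: acquire, (re-)initialize if requested, track, check the projection error, display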
  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      realsense.acquire((unsigned char *) I_color.bitmap, (unsigned char *) I_depth_raw.bitmap, &pointcloud, NULL, NULL);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      } else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      } else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          } else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          } else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        } else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn off feature display just after auto init, to avoid displaying wrong moving edges if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        } else if (use_edges || use_klt) {
          tracker.track(I_gray);
        } else if (use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      } catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      // Get object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        // Already computed during tracking when edge features are used
        proj_error = tracker.getProjectionError();
      }
      else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }

      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color*cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color*cMo, cam_depth, 0.05, vpColor::none, 3);
        } else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        } else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }

        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge()
             << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          } else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          } else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  } catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
  }

  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec) << " ms ; Median: " << vpMath::getMedian(times_vec)
              << " ms ; Std: " << vpMath::getStdev(times_vec) << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_REALSENSE2)
int main() {
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return 0;
}
#else
int main() {
  std::cout << "Install librealsense2 3rd party, configure and build ViSP again to use this example" << std::endl;
  return 0;
}
#endif