See the record for this article from the source Library of Science
@article{IJAMCS_2016_26_1_a4,
  author    = {Kraft, M. and Nowicki, M. and Penne, R. and Schmidt, A. and Skrzypczy\'nski, P.},
  title     = {Efficient {RGB-D} data processing for feature-based self-localization of mobile robots},
  journal   = {International Journal of Applied Mathematics and Computer Science},
  pages     = {63--79},
  publisher = {mathdoc},
  volume    = {26},
  number    = {1},
  year      = {2016},
  language  = {en},
  url       = {http://geodesic.mathdoc.fr/item/IJAMCS_2016_26_1_a4/}
}
TY - JOUR
AU - Kraft, M.
AU - Nowicki, M.
AU - Penne, R.
AU - Schmidt, A.
AU - Skrzypczyński, P.
TI - Efficient RGB-D data processing for feature-based self-localization of mobile robots
JO - International Journal of Applied Mathematics and Computer Science
PY - 2016
SP - 63
EP - 79
VL - 26
IS - 1
PB - mathdoc
UR - http://geodesic.mathdoc.fr/item/IJAMCS_2016_26_1_a4/
LA - en
ID - IJAMCS_2016_26_1_a4
ER -
%0 Journal Article
%A Kraft, M.
%A Nowicki, M.
%A Penne, R.
%A Schmidt, A.
%A Skrzypczyński, P.
%T Efficient RGB-D data processing for feature-based self-localization of mobile robots
%J International Journal of Applied Mathematics and Computer Science
%D 2016
%P 63-79
%V 26
%N 1
%I mathdoc
%U http://geodesic.mathdoc.fr/item/IJAMCS_2016_26_1_a4/
%G en
%F IJAMCS_2016_26_1_a4
Kraft, M.; Nowicki, M.; Penne, R.; Schmidt, A.; Skrzypczyński, P. Efficient RGB-D data processing for feature-based self-localization of mobile robots. International Journal of Applied Mathematics and Computer Science, Volume 26 (2016) no. 1, pp. 63-79. http://geodesic.mathdoc.fr/item/IJAMCS_2016_26_1_a4/
[1] Bachrach, A., Prentice, S., He, R., Henry, P., Huang, A.S., Krainin, M., Maturana, D., Fox, D. and Roy, N. (2012). Estimation, planning, and mapping for autonomous flight using an RGB-D camera in GPS-denied environments, International Journal of Robotics Research 31(11): 1320–1343.
[2] Bączyk, R. and Kasiński, A. (2010). Visual simultaneous localisation and map-building supported by structured landmarks, International Journal of Applied Mathematics and Computer Science 20(2): 281–293, DOI: 10.2478/v10006-010-0021-7.
[3] Bailey, T. and Durrant-Whyte, H. (2006). Simultaneous localization and mapping: Part II, IEEE Robotics and Automation Magazine 13(3): 108–117.
[4] Baker, S. and Matthews, I. (2004). Lucas–Kanade 20 years on: A unifying framework, International Journal of Computer Vision 56(3): 221–255.
[5] Bay, H., Ess, A., Tuytelaars, T. and Van Gool, L. (2008). Speeded-up robust features (SURF), Computer Vision and Image Understanding 110(3): 346–359.
[6] Belter, D., Nowicki, M. and Skrzypczyński, P. (2015). On the performance of pose-based RGB-D visual navigation systems, in D. Cremers et al. (Eds.), Computer Vision, ACCV 2014, Part II, Lecture Notes in Computer Science, Vol. 9004, Springer, Zurich, pp. 1–17.
[7] Bouguet, J.Y. (2000). Pyramidal implementation of the Lucas–Kanade feature tracker, description of the algorithm, Technical report, Intel Corp., Microprocessor Research Labs., Pittsburgh, PA.
[8] Choi, S., Kim, T. and Yu, W. (2009). Performance evaluation of RANSAC family, British Machine Vision Conference, London, UK.
[9] Cummins, M. and Newman, P. (2010). Accelerating FAB-MAP with concentration inequalities, IEEE Transactions on Robotics 26(6): 1042–1050.
[10] Davison, A.J., Reid, I.D., Molton, N.D. and Stasse, O. (2007). MonoSLAM: Real-time single camera SLAM, IEEE Transactions on Pattern Analysis and Machine Intelligence 29(6): 1052–1067.
[11] Durrant-Whyte, H. and Bailey, T. (2006). Simultaneous localization and mapping: Part I, IEEE Robotics and Automation Magazine 13(2): 99–110.
[12] Eggert, D.W., Lorusso, A. and Fisher, R.B. (1997). Estimating 3-D rigid body transformations: A comparison of four major algorithms, Machine Vision and Applications 9(5–6): 272–290.
[13] Endres, F., Hess, J., Engelhard, N., Sturm, J., Cremers, D. and Burgard, W. (2012). An evaluation of the RGB-D SLAM system, IEEE International Conference on Robotics and Automation, St. Paul, MN, USA, pp. 1691–1696.
[14] Endres, F., Hess, J., Sturm, J., Cremers, D. and Burgard, W. (2014). 3-D mapping with an RGB-D camera, IEEE Transactions on Robotics 30(1): 177–187.
[15] Engel, J., Sturm, J. and Cremers, D. (2012). Camera-based navigation of a low-cost quadrocopter, IEEE/RSJ International Conference on Intelligent Robots and Systems, Vilamoura, Portugal, pp. 2815–2821.
[16] Ester, M., Kriegel, H.-P., Sander, J. and Xu, X. (1996). A density-based algorithm for discovering clusters in large spatial databases with noise, International Conference on Knowledge Discovery and Data Mining, Portland, OR, USA, pp. 226–231.
[17] Hansard, M., Lee, S., Choi, O. and Horaud, R. (2012). Time-of-Flight Cameras: Principles, Methods and Applications, Springer, Berlin.
[18] Hartley, R.I. and Zisserman, A. (2004). Multiple View Geometry in Computer Vision, 2nd Edn., Cambridge University Press, Cambridge.
[19] Izadi, S., Kim, D., Hilliges, O., Molyneaux, D., Newcombe, R., Kohli, P., Shotton, J., Hodges, S., Freeman, D., Davison, A. and Fitzgibbon, A. (2011). KinectFusion: Real-time 3D reconstruction and interaction using a moving depth camera, ACM Symposium on User Interface Software and Technology, New York, NY, pp. 559–568.
[20] Kerl, C., Sturm, J. and Cremers, D. (2013). Robust odometry estimation for RGB-D cameras, IEEE International Conference on Robotics and Automation, Karlsruhe, Germany, pp. 3748–3754.
[21] Khoshelham, K. and Elberink, S.O. (2012). Accuracy and resolution of Kinect depth data for indoor mapping applications, Sensors 12(2): 1437–1454.
[22] Kraft, M., Nowicki, M., Schmidt, A. and Skrzypczyński, P. (2014). Efficient RGB-D data processing for point-feature-based self-localization, in C. Zieliński and K. Tchoń (Eds.), Postępy robotyki, PW, Warsaw, pp. 245–256, (in Polish).
[23] Kuemmerle, R., Grisetti, G., Strasdat, H., Konolige, K. and Burgard, W. (2011). g2o: A general framework for graph optimization, IEEE International Conference on Robotics and Automation, Shanghai, China, pp. 3607–3613.
[24] Lowe, D. (2004). Distinctive image features from scale-invariant keypoints, International Journal of Computer Vision 60(2): 91–110.
[25] Mertens, L., Penne, R. and Ribbens, B. (2013). Time of flight cameras (3D vision), in J. Buytaert (Ed.), Recent Advances in Topography, Engineering Tools, Techniques and Tables, Nova Science, Hauppauge, NY, pp. 353–417.
[26] Nascimento, E., Oliveira, G., Campos, M.F.M., Vieira, A. and Schwartz, W. (2012). BRAND: A robust appearance and depth descriptor for RGB-D images, IEEE/RSJ International Conference on Intelligent Robots and Systems, Vilamoura, Portugal, pp. 1720–1726.
[27] Nowicki, M. and Skrzypczyński, P. (2013a). Combining photometric and depth data for lightweight and robust visual odometry, European Conference on Mobile Robots (ECMR), Barcelona, Spain, pp. 125–130.
[28] Nowicki, M. and Skrzypczyński, P. (2013b). Experimental verification of a walking robot self-localization system with the Kinect sensor, Journal of Automation, Mobile Robotics and Intelligent Systems 7(4): 42–51.
[29] Nüchter, A., Lingemann, K., Hertzberg, J. and Surmann, H. (2007). 6D SLAM—3D mapping outdoor environments, Journal of Field Robotics 24(8–9): 699–722.
[30] Penne, R., Mertens, L. and Ribbens, B. (2013). Planar segmentation by time-of-flight cameras, in J. Blanc-Talon et al. (Eds.), Advanced Concepts for Intelligent Vision Systems, Lecture Notes in Computer Science, Vol. 8192, Springer, Berlin, pp. 286–297.
[31] Penne, R., Raposo, C., Mertens, L., Ribbens, B. and Araujo, H. (2015). Investigating new calibration methods without feature detection for ToF cameras, Image and Vision Computing 43: 50–62.
[32] Raguram, R., Chum, O., Pollefeys, M., Matas, J. and Frahm, J. (2013). USAC: A universal framework for random sample consensus, IEEE Transactions on Pattern Analysis and Machine Intelligence 35(8): 2022–2038.
[33] Rosten, E. and Drummond, T. (2006). Machine learning for high-speed corner detection, 9th European Conference on Computer Vision (ECCV’06), Graz, Austria, pp. 430–443.
[34] Rublee, E., Rabaud, V., Konolige, K. and Bradski, G. (2011). ORB: An efficient alternative to SIFT or SURF, IEEE International Conference on Computer Vision (ICCV), Barcelona, Spain, pp. 2564–2571.
[35] Rusu, R., Blodow, N., Marton, Z. and Beetz, M. (2008). Aligning point cloud views using persistent feature histograms, IEEE/RSJ International Conference on Intelligent Robots and Systems, Nice, France, pp. 3384–3391.
[36] Scaramuzza, D. and Fraundorfer, F. (2011). Visual odometry, Part I: The first 30 years and fundamentals, IEEE Robotics and Automation Magazine 18(4): 80–92.
[37] Schmidt, A., Fularz, M., Kraft, M., Kasiński, A. and Nowicki, M. (2013a). An indoor RGB-D dataset for the evaluation of robot navigation algorithms, in J. Blanc-Talon et al. (Eds.), Advanced Concepts for Intelligent Vision Systems, Lecture Notes in Computer Science, Vol. 8192, Springer, Berlin, pp. 321–329.
[38] Schmidt, A., Kraft, M., Fularz, M. and Domagala, Z. (2013b). The comparison of point feature detectors and descriptors in the context of robot navigation, Journal of Automation, Mobile Robotics and Intelligent Systems 7(1): 11–20.
[39] Segal, A., Haehnel, D. and Thrun, S. (2009). Generalized-ICP, Robotics: Science and Systems, Seattle, WA, USA.
[40] Shi, J. and Tomasi, C. (1994). Good features to track, IEEE Conference on Computer Vision and Pattern Recognition (CVPR’94), Seattle, WA, USA, pp. 593–600.
[41] Skrzypczyński, P. (2009). Simultaneous localization and mapping: A feature-based probabilistic approach, International Journal of Applied Mathematics and Computer Science 19(4): 575–588, DOI: 10.2478/v10006-009-0045-z.
[42] Steder, B., Rusu, R.B., Konolige, K. and Burgard, W. (2011). Point feature extraction on 3D range scans taking into account object boundaries, IEEE International Conference on Robotics and Automation, Shanghai, China, pp. 2601–2608.
[43] Steinbrücker, F., Sturm, J. and Cremers, D. (2011). Real-time visual odometry from dense RGB-D images, Workshop on Live Dense Reconstruction with Moving Cameras / International Conference on Computer Vision, Barcelona, Spain, pp. 719–722.
[44] Stewénius, H., Engels, C. and Nistér, D. (2006). Recent developments on direct relative orientation, ISPRS Journal of Photogrammetry and Remote Sensing 60(4): 284–294.
[45] Stoyanov, T., Louloudi, A., Andreasson, H. and Lilienthal, A. (2011). Comparative evaluation of range sensor accuracy in indoor environments, 5th European Conference on Mobile Robots, Örebro, Sweden, pp. 19–24.
[46] Strasdat, H. (2012). Local Accuracy and Global Consistency for Efficient Visual SLAM, Ph.D. thesis, Imperial College, London.
[47] Sturm, J., Engelhard, N., Endres, F., Burgard, W. and Cremers, D. (2012). A benchmark for the evaluation of RGB-D SLAM systems, IEEE/RSJ International Conference on Intelligent Robots and Systems, Vilamoura, Portugal, pp. 573–580.
[48] Umeyama, S. (1991). Least-squares estimation of transformation parameters between two point patterns, IEEE Transactions on Pattern Analysis and Machine Intelligence 13(4): 376–380.
[49] Whelan, T., McDonald, J., Kaess, M., Fallon, M., Johannsson, H. and Leonard, J. (2012). Kintinuous: Spatially extended KinectFusion, Robotics: Science and Systems, Sydney, Australia.
[50] Whelan, T., Johannsson, H., Kaess, M., Leonard, J. and McDonald, J. (2013). Robust real-time visual odometry for dense RGB-D mapping, IEEE International Conference on Robotics and Automation, Karlsruhe, Germany, pp. 5724–5731.