Voir la notice de l'article provenant de la source Library of Science
@article{IJAMCS_2021_31_1_a12,
  author    = {Peng, Gang and Zheng, Yuezhi and Li, Jianfeng and Yang, Jin},
  title     = {A single upper limb pose estimation method based on the improved stacked hourglass network},
  journal   = {International Journal of Applied Mathematics and Computer Science},
  volume    = {31},
  number    = {1},
  pages     = {123--133},
  year      = {2021},
  publisher = {mathdoc},
  language  = {en},
  url       = {http://geodesic.mathdoc.fr/item/IJAMCS_2021_31_1_a12/},
}
TY - JOUR AU - Peng, Gang AU - Zheng, Yuezhi AU - Li, Jianfeng AU - Yang, Jin TI - A single upper limb pose estimation method based on the improved stacked hourglass network JO - International Journal of Applied Mathematics and Computer Science PY - 2021 SP - 123 EP - 133 VL - 31 IS - 1 PB - mathdoc UR - http://geodesic.mathdoc.fr/item/IJAMCS_2021_31_1_a12/ LA - en ID - IJAMCS_2021_31_1_a12 ER -
%0 Journal Article %A Peng, Gang %A Zheng, Yuezhi %A Li, Jianfeng %A Yang, Jin %T A single upper limb pose estimation method based on the improved stacked hourglass network %J International Journal of Applied Mathematics and Computer Science %D 2021 %P 123-133 %V 31 %N 1 %I mathdoc %U http://geodesic.mathdoc.fr/item/IJAMCS_2021_31_1_a12/ %G en %F IJAMCS_2021_31_1_a12
Peng, Gang; Zheng, Yuezhi; Li, Jianfeng; Yang, Jin. A single upper limb pose estimation method based on the improved stacked hourglass network. International Journal of Applied Mathematics and Computer Science, Tome 31 (2021) no. 1, pp. 123-133. http://geodesic.mathdoc.fr/item/IJAMCS_2021_31_1_a12/
[1] [1] Andriluka, M., Pishchulin, L., Gehler, P.V. and Schiele, B. (2014). 2D human pose estimation: New benchmark and state of the art analysis, Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Columbus, USA, pp. 3686–3693.
[2] [2] Artacho, B. and Savakis, A. (2020). UniPose: Unified human pose estimation in single images and videos, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR Virtual), pp. 7035–7044, (online).
[3] [3] Chu, X., Yang, W., Ouyang, W., Ma, C., Yuille, A.L. and Wang, X. (2017). Multi-context attention for human pose estimation, 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, USA, pp. 5669–5678.
[4] [4] Fan, X., Zheng, K., Lin, Y. and Wang, S. (2015). Combining local appearance and holistic view: Dual-source deep neural networks for human pose estimation, 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston, USA, pp. 1347–1355.
[5] [5] Hu, H., Liao, Z. and Xiao, X.C. (2019). Action recognition using multiple pooling strategies of CNN features, Neural Processing Letters 50(1): 379–396.
[6] [6] Hu, P. and Ramanan, D. (2015). Bottom-up and top-down reasoning with convolutional latent-variable models, arXiv: 1507.05699.
[7] [7] Li, C., Yung, N.H.C., Sun, X. and Lam, E.Y. (2017). Human arm pose modeling with learned features using joint convolutional neural network, Machine Vision and Applications 28(1–2): 1–14.
[8] [8] Lifshitz, I., Fetaya, E. and Ullman, S. (2016). Human pose estimation using deep consensus voting, European Conference on Computer Vision (ECCV), Amsterdam, The Netherlands, pp. 246–260.
[9] [9] Long, J., Shelhamer, E. and Darrell, T. (2015). Fully convolutional networks for semantic segmentation, Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Boston, USA, pp. 3431–3440.
[10] [10] Newell, A., Yang, K. and Deng, J. (2016). Stacked hourglass networks for human pose estimation, European Conference on Computer Vision (ECCV), Amsterdam, The Netherlands, pp. 483–499.
[11] [11] Ning, F., Shi, Y., Cai, M. and Xu, W. (2020). Various realization methods of machine-part classification based on deep learning, Journal of Intelligent Manufacturing 31(8): 2019–2032.
[12] [12] Pfister, T., Charles, J. and Zisserman, A. (2015). Flowing ConvNets for human pose estimation in videos, 2015 IEEE International Conference on Computer Vision (ICCV), Santiago, Chile, pp. 1913–1921.
[13] [13] Redmon, J. and Farhadi, A. (2018). YOLOv3: An incremental improvement, arXiv: 1804.02767.
[14] [14] Sun, K., Xiao, B., Liu, D. and Wang, J. (2019). Deep high-resolution representation learning for human pose estimation, Computer Vision and Pattern Recognition (CVPR), Los Angeles, USA, pp. 5693–5703.
[15] [15] Sun, X., Xiao, B., Wei, F., Liang, S. and Wei, Y. (2018). Integral human pose regression, European Conference on Computer Vision (ECCV), Munich, Germany, pp. 529–545.
[16] [16] Tompson, J., Goroshin, R., Jain, A., LeCun, Y. and Bregler, C. (2015). Efficient object localization using convolutional networks, 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston, USA, pp. 648–656.
[17] [17] Toshev, A. and Szegedy, C. (2014). DeepPose: Human pose estimation via deep neural networks, 2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Columbus, USA, pp. 1653–1660.
[18] [18] Wei, S.-E., Ramakrishna, V., Kanade, T. and Sheikh, Y. (2016). Convolutional pose machines, 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, USA, pp. 4724–4732.
[19] [19] Xiao, B., Wu, H. and Wei, Y. (2018). Simple baselines for human pose estimation and tracking, European Conference on Computer Vision (ECCV), Munich, Germany, pp. 466–481.
[20] [20] Yang, W., Li, S., Ouyang, W., Li, H. and Wang, X. (2017). Learning feature pyramids for human pose estimation, 2017 IEEE International Conference on Computer Vision (ICCV), Venice, Italy, pp. 1281–1290.
[21] [21] Yang, W., Ouyang, W., Li, H. and Wang, X. (2016). End-to-end learning of deformable mixture of parts and deep convolutional neural networks for human pose estimation, 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, USA, pp. 3073–3082.
[22] [22] Zhang, F., Zhu, X. and Ye, M. (2019). Fast human pose estimation, Computer Vision and Pattern Recognition (CVPR), Los Angeles, USA, pp. 3517–3526.
[23] [23] Zhou, J., Liu, J. and Zhang, M. (2020). Curve skeleton extraction via k-nearest-neighbors based contraction, International Journal of Applied Mathematics and Computer Science 30(1): 123–132, DOI: 10.34768/amcs-2020-0010.
[24] [24] Zlatanski, M., Sommer, P., Zurfluh, F., Zadeh, S.G., Faraone, A. and Perera, N. (2019). Machine perception platform for safe human-robot collaboration, 2019 IEEE SENSORS, Montreal, Canada, pp. 1–4.