@article{ZNSL_2024_540_a5,
author = {V. Leonenkova and E. Shumitskaya and A. Antsiferova and D. Vatolin},
title = {Tiled physical adversarial patch for no-reference video quality metrics},
journal = {Zapiski Nauchnykh Seminarov POMI},
pages = {113--131},
year = {2024},
volume = {540},
language = {en},
url = {http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a5/}
}
TY  - JOUR
AU  - V. Leonenkova
AU  - E. Shumitskaya
AU  - A. Antsiferova
AU  - D. Vatolin
TI  - Tiled physical adversarial patch for no-reference video quality metrics
JO  - Zapiski Nauchnykh Seminarov POMI
PY  - 2024
SP  - 113
EP  - 131
VL  - 540
UR  - http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a5/
LA  - en
ID  - ZNSL_2024_540_a5
ER  - 
%0 Journal Article
%A V. Leonenkova
%A E. Shumitskaya
%A A. Antsiferova
%A D. Vatolin
%T Tiled physical adversarial patch for no-reference video quality metrics
%J Zapiski Nauchnykh Seminarov POMI
%D 2024
%P 113-131
%V 540
%U http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a5/
%G en
%F ZNSL_2024_540_a5
V. Leonenkova; E. Shumitskaya; A. Antsiferova; D. Vatolin. Tiled physical adversarial patch for no-reference video quality metrics. Zapiski Nauchnykh Seminarov POMI, Investigations on applied mathematics and informatics. Part IV, Vol. 540 (2024), pp. 113–131. http://geodesic.mathdoc.fr/item/ZNSL_2024_540_a5/
[1] A. Antsiferova, K. Abud, A. Gushchin, S. Lavrushkin, E. Shumitskaya, M. Velikanov, and D. Vatolin, Comparing the robustness of modern no-reference image- and video-quality metrics to adversarial attacks, 2023, arXiv: 2310.06958
[2] A. Antsiferova, S. Lavrushkin, M. Smirnov, A. Gushchin, D. Vatolin, and D. Kulikov, “Video compression dataset and benchmark of learning-based video-quality metrics”, Adv. Neural Inf. Process. Syst., 35 (2022), 13814–13825
[3] A. Antsiferova, S. Lavrushkin, M. Smirnov, A. Gushchin, D. Vatolin, and D. Kulikov, “Video compression dataset and benchmark of learning-based video-quality metrics”, Advances in Neural Information Processing Systems, 35, eds. S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh, 2022, 13814–13825
[4] T.B. Brown, D. Mané, A. Roy, M. Abadi, and J. Gilmer, Adversarial patch, 2017, arXiv: 1712.09665
[5] A. Chindaudom, P. Siritanawan, K. Sumongkayothin, and K. Kotani, “AdversarialQR: An adversarial patch in QR code format”, 2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR), IEEE, 2020, 1–6
[6] B.G. Doan, M. Xue, S. Ma, E. Abbasnejad, and D.C. Ranasinghe, “TNT attacks! Universal naturalistic adversarial patches against deep neural network systems”, IEEE Trans. Inf. Forensics Secur., 17 (2022), 3816–3830 | DOI
[7] I.J. Goodfellow, J. Shlens, and C. Szegedy, Explaining and harnessing adversarial examples, 2014, arXiv: 1412.6572
[8] A. Kalyakulina, I. Yusipov, A. Moskalev, C. Franceschi, and M. Ivanchenko, “Explainable artificial intelligence (XAI) in aging clock models”, Ageing Res. Rev., 2023, 102144
[9] D. Karmon, D. Zoran, and Y. Goldberg, “LaVAN: Localized and visible adversarial noise”, International Conference on Machine Learning, PMLR, 2018, 2507–2515
[10] M. Kettunen, E. Härkönen, and J. Lehtinen, E-LPIPS: Robust perceptual image similarity via random transformation ensembles, 2019, arXiv: 1906.03973
[11] S. Komkov and A. Petiushko, “AdvHat: Real-world adversarial attack on ArcFace face ID system”, 2020 25th International Conference on Pattern Recognition (ICPR), IEEE, 2021, 819–826 | DOI
[12] J. Korhonen and J. You, “Adversarial attacks against blind image quality assessment models”, Proceedings of the 2nd Workshop on Quality of Experience in Visual Multimedia Applications, 2022, 3–11 | DOI
[13] A. Kurakin, I. J. Goodfellow, and S. Bengio, “Adversarial examples in the physical world”, Artificial Intelligence Safety and Security, Chapman and Hall/CRC, 2018, 99–112 | DOI
[14] M. Lee and Z. Kolter, On physical adversarial patches for object detection, 2019, arXiv: 1906.11897
[15] L. Li, T. Xie, and B. Li, “SoK: Certified robustness for deep neural networks”, 2023 IEEE Symposium on Security and Privacy (SP), IEEE, 2023, 1289–1310 | MR
[16] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollár, and C.L. Zitnick, “Microsoft COCO: Common objects in context”, Computer Vision–ECCV 2014: 13th European Conference, Proceedings (Zurich, Switzerland, September 6-12, 2014), v. V, Springer, 2014, 740–755 | DOI
[17] X. Liu, H. Yang, Z. Liu, L. Song, H. Li, and Y. Chen, DPatch: An adversarial patch attack on object detectors, 2018, arXiv: 1806.02299
[18] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, and A. Vladu, Towards deep learning models resistant to adversarial attacks, 2017, arXiv: 1706.06083
[19] A. Mahendran and A. Vedaldi, “Understanding deep image representations by inverting them”, Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015, 5188–5196
[20] H.F.B. Meftah, S.A. Fezza, W. Hamidouche, and O. Déforges, “Evaluating the vulnerability of deep learning-based image quality assessment methods to adversarial attacks”, 2023 11th European Workshop on Visual Information Processing (EUVIP), IEEE, 2023, 1–6
[21] A.S. Panfilova and D.Y. Turdakov, “Applying explainable artificial intelligence methods to models for diagnosing personal traits and cognitive abilities by social network data”, Sci. Rep., 14:1 (2024), 5369 | DOI
[22] M. Pautov, G. Melnikov, E. Kaziakhmedov, K. Kireev, and A. Petiushko, “On adversarial patches: Real-world attack on ArcFace-100 face recognition system”, 2019 International Multi-Conference on Engineering, Computer and Information Sciences (SIBIRCON), IEEE, 2019, 0391–0396
[23] Y. Ran, A.X. Zhang, M. Li, W. Tang, and Y.G. Wang, Black-box adversarial attacks against image quality assessment models, 2024, arXiv: 2402.17533
[24] Q. Sang, H. Zhang, L. Liu, X. Wu, and A.C. Bovik, “On the generation of adversarial examples for image quality assessment”, Vis. Comput., 2023, 1–16
[25] M. Sharif, S. Bhagavatula, L. Bauer, and M.K. Reiter, “Accessorize to a crime: Real and stealthy attacks on state-of-the-art face recognition”, Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security, 2016, 1528–1540 | DOI
[26] E. Shumitskaya, A. Antsiferova, and D. Vatolin, “Towards adversarial robustness verification of no-reference image- and video-quality metrics”, Comput. Vis. Image Underst., 240 (2024), 103913 | DOI
[27] E. Shumitskaya, A. Antsiferova, and D.S. Vatolin, “Universal perturbation attack on differentiable no-reference image- and video-quality metrics”, 33rd British Machine Vision Conference 2022 (BMVC 2022) (London, UK, November 21-24, 2022), BMVA Press, 2022
[28] E. Shumitskaya, A. Antsiferova, and D.S. Vatolin, “Fast adversarial CNN-based perturbation attack on no-reference image- and video-quality metrics”, The First Tiny Papers Track at ICLR 2023, Tiny Papers @ ICLR 2023 (Kigali, Rwanda, May 5, 2023), OpenReview.net, 2023
[29] J. Su, D.V. Vargas, and K. Sakurai, “One pixel attack for fooling deep neural networks”, IEEE Trans. Evol. Comput., 23:5 (2019), 828–841 | DOI | MR
[30] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Goodfellow, and R. Fergus, Intriguing properties of neural networks, 2013, arXiv: 1312.6199
[31] X. Wang, X. He, J. Wang, and K. He, “Admix: Enhancing the transferability of adversarial attacks”, Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021, 16158–16167
[32] W. Wu, Y. Su, M.R. Lyu, and I. King, “Improving the transferability of adversarial samples with adversarial transformations”, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2021, 9024–9033
[33] C. Xie, Z. Zhang, Y. Zhou, S. Bai, J. Wang, Z. Ren, and A.L. Yuille, “Improving transferability of adversarial examples with input diversity”, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019
[34] K. Xu, G. Zhang, S. Liu, Q. Fan, M. Sun, H. Chen, P.Y. Chen, Y. Wang, and X. Lin, “Adversarial T-shirt! Evading person detectors in a physical world”, Computer Vision–ECCV 2020: 16th European Conference, Proceedings (Glasgow, UK, August 23-28, 2020), v. V, Springer, 2020, 665–681
[35] C. Yang, Y. Liu, D. Li, et al., Exploring vulnerabilities of no-reference image quality assessment models: A query-based black-box method, 2024, arXiv: 2401.05217
[36] B. Yin, W. Wang, T. Yao, J. Guo, Z. Kong, S. Ding, J. Li, and C. Liu, Adv-Makeup: A new imperceptible and transferable attack on face recognition, 2021, arXiv: 2105.03162
[37] Z. Ying, H. Niu, P. Gupta, D. Mahajan, D. Ghadiyaram, and A. Bovik, “From patches to pictures (PaQ-2-PiQ): Mapping the perceptual space of picture quality”, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, 3575–3585
[38] A. Zhang, Y. Ran, W. Tang, and Y.G. Wang, “Vulnerabilities in video quality assessment models: The challenge of adversarial attacks”, Adv. Neural Inf. Process. Syst., 36 (2024)
[39] W. Zhang, D. Li, X. Min, G. Zhai, G. Guo, X. Yang, and K. Ma, “Perceptual attacks of no-reference image quality models with human-in-the-loop”, Advances in Neural Information Processing Systems, 35, 2022, 2916–2929