View the article record from the Library of Science source
@article{IJAMCS_2024_34_3_a6,
    author = {Qian, Yaguan and Zhang, Liangjian and Wang, Yuqi and Ji, Boyuan and Yao, Tengteng and Wang, Bin},
    title = {Developing {Hessian-free} second-order adversarial examples for adversarial training},
    journal = {International Journal of Applied Mathematics and Computer Science},
    pages = {425--438},
    publisher = {mathdoc},
    volume = {34},
    number = {3},
    year = {2024},
    language = {en},
    url = {http://geodesic.mathdoc.fr/item/IJAMCS_2024_34_3_a6/}
}
TY  - JOUR
AU  - Qian, Yaguan
AU  - Zhang, Liangjian
AU  - Wang, Yuqi
AU  - Ji, Boyuan
AU  - Yao, Tengteng
AU  - Wang, Bin
TI  - Developing Hessian-free second-order adversarial examples for adversarial training
JO  - International Journal of Applied Mathematics and Computer Science
PY  - 2024
SP  - 425
EP  - 438
VL  - 34
IS  - 3
PB  - mathdoc
UR  - http://geodesic.mathdoc.fr/item/IJAMCS_2024_34_3_a6/
LA  - en
ID  - IJAMCS_2024_34_3_a6
ER  -
%0 Journal Article
%A Qian, Yaguan
%A Zhang, Liangjian
%A Wang, Yuqi
%A Ji, Boyuan
%A Yao, Tengteng
%A Wang, Bin
%T Developing Hessian-free second-order adversarial examples for adversarial training
%J International Journal of Applied Mathematics and Computer Science
%D 2024
%P 425-438
%V 34
%N 3
%I mathdoc
%U http://geodesic.mathdoc.fr/item/IJAMCS_2024_34_3_a6/
%G en
%F IJAMCS_2024_34_3_a6
Qian, Yaguan; Zhang, Liangjian; Wang, Yuqi; Ji, Boyuan; Yao, Tengteng; Wang, Bin. Developing Hessian-free second-order adversarial examples for adversarial training. International Journal of Applied Mathematics and Computer Science, Volume 34 (2024) no. 3, pp. 425-438. http://geodesic.mathdoc.fr/item/IJAMCS_2024_34_3_a6/
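The BibTeX, RIS and Refer/EndNote exports above are plain-text, tag-based records and can be consumed programmatically. The following is a minimal Python sketch (not part of the repository's tooling; the RIS_RECORD constant and the parse_ris helper are illustrative names introduced here) that reads the RIS export into a dictionary keyed by tag:

RIS_RECORD = """\
TY  - JOUR
AU  - Qian, Yaguan
AU  - Zhang, Liangjian
AU  - Wang, Yuqi
AU  - Ji, Boyuan
AU  - Yao, Tengteng
AU  - Wang, Bin
TI  - Developing Hessian-free second-order adversarial examples for adversarial training
JO  - International Journal of Applied Mathematics and Computer Science
PY  - 2024
SP  - 425
EP  - 438
VL  - 34
IS  - 3
PB  - mathdoc
UR  - http://geodesic.mathdoc.fr/item/IJAMCS_2024_34_3_a6/
LA  - en
ID  - IJAMCS_2024_34_3_a6
ER  -
"""

def parse_ris(text):
    """Collect each RIS tag into a list of values (tags such as AU may repeat)."""
    fields = {}
    for line in text.splitlines():
        # A well-formed RIS line is "XX  - value": two-letter tag, two spaces, a dash.
        if len(line) < 5 or line[2:5] != "  -":
            continue  # skip blank or malformed lines
        tag, value = line[:2], line[5:].strip()
        if tag == "ER":  # end-of-record marker
            break
        fields.setdefault(tag, []).append(value)
    return fields

record = parse_ris(RIS_RECORD)
print(record["TI"][0])           # article title
print("; ".join(record["AU"]))   # authors, as in the formatted citation

Running the sketch prints the article title and the semicolon-separated author list, matching the formatted citation above.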
[1] Andriushchenko, M., Croce, F., Flammarion, N. and Hein, M. (2020). Square attack: A query-efficient black-box adversarial attack via random search, European Conference on Computer Vision, Glasgow, UK, pp. 484-501.
[2] Anil, R., Gupta, V., Koren, T., Regan, K. and Singer, Y. (2020). Second-order optimization made practical, arXiv: 2002.09018.
[3] Athalye, A., Carlini, N. and Wagner, D. (2018). Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples, International Conference on Machine Learning, Stockholm, Sweden, pp. 274-283.
[4] Badjie, B., Cecílio, J. and Casimiro, A. (2023). Denoising autoencoder-based defensive distillation as an adversarial robustness algorithm, CoRR: abs/2303.15901.
[5] Bertolace, A., Gatsis, K. and Margellos, K. (2024). Robust optimization for adversarial learning with finite sample complexity guarantees, CoRR: abs/2403.15207.
[6] Croce, F. and Hein, M. (2020a). Minimally distorted adversarial examples with a fast adaptive boundary attack, International Conference on Machine Learning (virtual event), pp. 2196-2205.
[7] Croce, F. and Hein, M. (2020b). Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks, International Conference on Machine Learning (virtual event), pp. 2206-2216.
[8] Ding, X., Zhang, X., Zhou, Y., Han, J., Ding, G. and Sun, J. (2022). Scaling up your kernels to 31x31: Revisiting large kernel design in CNNs, CoRR: abs/2203.06717.
[9] Faragallah, O.S., El-Hoseny, H., El-Shafai, W., El-Rahman, W.A., El-Sayed, H.S., El-Rabaie, E.-S.M., El-Samie, F.E.A. and Geweid, G.G.N. (2021). A comprehensive survey analysis for present solutions of medical image fusion and future directions, IEEE Access 9: 11358-11371.
[10] Fawzi, A., Moosavi-Dezfooli, S., Frossard, P. and Soatto, S. (2018). Empirical study of the topology and geometry of deep networks, 2018 IEEE Conference on Computer Vision and Pattern Recognition, Salt Lake City, USA, pp. 3762-3770.
[11] Ge, Z., Wang, X., Liu, H., Shang, F. and Liu, Y. (2023). Boosting adversarial transferability by achieving flat local maxima, Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, USA.
[12] Goodfellow, I., Shlens, J. and Szegedy, C. (2015). Explaining and harnessing adversarial examples, International Conference on Learning Representations, San Diego, USA.
[13] Guo, S., Li, X., Zhu, P. and Mu, Z. (2023). Ads-detector: An attention-based dual stream adversarial example detection method, Knowledge-Based Systems 265: 110388.
[14] Heaven, D. (2019). Why deep-learning AIs are so easy to fool, Nature 574(7777): 163-166.
[15] Huang, B., Chen, M., Wang, Y., Lu, J., Cheng, M. and Wang, W. (2023). Boosting accuracy and robustness of student models via adaptive adversarial distillation, IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, Canada, pp. 24668-24677.
[16] Jea, K.C. and Young, D.M. (1980). Generalized conjugate gradient acceleration of non-symmetrizable iterative methods, Linear Algebra and Its Applications 34: 159-194.
[17] Jetley, S., Lord, N. and Torr, P. (2018). With friends like these, who needs adversaries?, 2018 Conference on Neural Information Processing Systems, Montréal, Canada, pp. 10772-10782.
[18] Jin, G., Yi, X., Huang, W., Schewe, S. and Huang, X. (2022). Enhancing adversarial training with second-order statistics of weights, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, New Orleans, USA, pp. 15273-15283.
[19] Jin, G., Yi, X., Wu, D., Mu, R. and Huang, X. (2023). Randomized adversarial training via Taylor expansion, IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, Canada, pp. 16447-16457.
[20] Li, B., Chen, C., Wang, W. and Carin, L. (2018). Second-order adversarial attack and certifiable robustness, arXiv: 1809.03113.
[21] Li, L. and Spratling, M.W. (2023). Understanding and combating robust overfitting via input loss landscape analysis and regularization, Pattern Recognition 136: 109229.
[22] Li, T., Wu, Y., Chen, S., Fang, K. and Huang, X. (2022). Subspace adversarial training, IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, USA, pp. 13399-13408.
[23] Liu, X., Kuang, H., Lin, X., Wu, Y. and Ji, R. (2023). CAT: Collaborative adversarial training, CoRR: abs/2303.14922.
[24] Long, Y., Wen, Y., Han, J., Xu, H., Ren, P., Zhang, W., Zhao, S. and Liang, X. (2023). CAPDET: Unifying dense captioning and open-world detection pretraining, IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, Canada, pp. 15233-15243.
[25] Lu, Y., Ren, H., Chai, W., Velipasalar, S. and Li, Y. (2024). Time-aware and task-transferable adversarial attack for perception of autonomous vehicles, Pattern Recognition Letters 178: 145-152.
[26] Lyu, C., Huang, K. and Liang, H. (2015). A unified gradient regularization family for adversarial examples, IEEE International Conference on Data Mining, Atlantic City, USA, pp. 301-309.
[27] Ma, A., Faghri, F., Papernot, N. and Massoud Farahmand, A. (2020). SOAR: Second-order adversarial regularization, arXiv: 2004.01832.
[28] Madry, A., Makelov, A., Schmidt, L., Tsipras, D. and Vladu, A. (2017). Towards deep learning models resistant to adversarial attacks, arXiv: 1706.06083.
[29] Moosavi-Dezfooli, S.-M., Fawzi, A., Uesato, J. and Frossard, P. (2019). Robustness via curvature regularization, and vice versa, 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, USA, pp. 9070-9078.
[30] Nilsson, J. and Akenine-Möller, T. (2020). Understanding SSIM, arXiv: 2006.13846.
[31] Pozdnyakov, V., Kovalenko, A., Makarov, I., Drobyshevskiy, M. and Lukyanov, K. (2024). Adversarial attacks and defenses in automated control systems: A comprehensive benchmark, arXiv: 2403.13502.
[32] Ross, A.S. and Doshi-Velez, F. (2018). Improving the adversarial robustness and interpretability of deep neural networks by regularizing their input gradients, AAAI Conference on Artificial Intelligence, New Orleans, USA, pp. 1660-1669.
[33] Shimonishi, H., Maki, I., Murase, T. and Murata, M. (2002). Dynamic fair bandwidth allocation for DiffServ classes, IEEE International Conference on Communications, ICC 2002, New York, USA, pp. 2348-2352.
[34] Song, M., Choi, J. and Han, B. (2024). A training-free defense framework for robust learned image compression, CoRR: abs/2401.11902.
[35] Szegedy, C., Zaremba, W., Sutskever, I., Bruna, J., Erhan, D., Goodfellow, I. and Fergus, R. (2013). Intriguing properties of neural networks, arXiv: 1312.6199.
[36] Tejankar, A., Sanjabi, M., Wang, Q., Wang, S., Firooz, H., Pirsiavash, H. and Tan, L. (2023). Defending against patch-based backdoor attacks on self-supervised learning, IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, Canada, pp. 12239-12249.
[37] Thornton, C. and Bierman, G. (1977). Givens transformation techniques for Kalman filtering, Acta Astronautica 4(7): 847-863.
[38] Tsiligkaridis, T. and Roberts, J. (2020). Second-order optimization for adversarial robustness and interpretability, arXiv: 2009.04923.
[39] Wang, H. and Wang, Y. (2022). Self-ensemble adversarial training for improved robustness, 10th International Conference on Learning Representations, ICLR 2022 (virtual event).
[40] Wu, T., Luo, T. and Wunsch II, D.C. (2024). LRS: Enhancing adversarial transferability through Lipschitz regularized surrogate, Proceedings of the AAAI Conference on Artificial Intelligence, Vancouver, Canada, pp. 6135-6143.
[41] Yang, X., Liu, C., Xu, L., Wang, Y., Dong, Y., Chen, N., Su, H. and Zhu, J. (2023). Towards effective adversarial textured 3D meshes on physical face recognition, IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, BC, Canada, pp. 4119-4128.
[42] Yeom, I. and Reddy, A. (2001). Modeling TCP behavior in a differentiated services network, IEEE/ACM Transactions on Networking 9(1): 31-46.
[43] Yin, Z., Liu, M., Li, X., Yang, H., Xiao, L. and Zuo, W. (2023). MetaF2N: Blind image super-resolution by learning efficient model adaptation from faces, IEEE/CVF International Conference on Computer Vision, ICCV 2023, Paris, France, pp. 12987-12998.
[44] Zhang, H., Yu, Y., Jiao, J., Xing, E., Ghaoui, L.E. and Jordan, M. (2019). Theoretically principled trade-off between robustness and accuracy, Proceedings of the 36th International Conference on Machine Learning, Long Beach, USA, pp. 7472-7482.
[45] Zhang, J., Qian, W., Nie, R., Cao, J. and Xu, D. (2023). Generate adversarial examples by adaptive moment iterative fast gradient sign method, Applied Intelligence 53(1): 1101-1114.
[46] Zhang, X. (2016). Empirical risk minimization, in C. Sammut and G.I. Webb (Eds), Encyclopedia of Machine Learning and Data Mining, Springer, Berlin/Heidelberg, pp. 392-393.
[47] Zhao, K., Chen, X., Huang, W., Ding, L., Kong, X. and Zhang, F. (2024). Ensemble adversarial defense via integration of multiple dispersed low curvature models, CoRR: abs/2403.16405.