See the article record from the Math-Net.Ru source
@article{BGUMI_2020_3_a5,
     author = {D. M. Voynov and V. A. Kovalev},
     title = {The stability of neural networks under condition of adversarial attacks to biomedical image classification},
     journal = {Journal of the Belarusian State University. Mathematics and Informatics},
     pages = {60--72},
     publisher = {mathdoc},
     volume = {3},
     year = {2020},
     language = {ru},
     url = {http://geodesic.mathdoc.fr/item/BGUMI_2020_3_a5/}
}
TY - JOUR
AU - D. M. Voynov
AU - V. A. Kovalev
TI - The stability of neural networks under condition of adversarial attacks to biomedical image classification
JO - Journal of the Belarusian State University. Mathematics and Informatics
PY - 2020
SP - 60
EP - 72
VL - 3
PB - mathdoc
UR - http://geodesic.mathdoc.fr/item/BGUMI_2020_3_a5/
LA - ru
ID - BGUMI_2020_3_a5
ER -
%0 Journal Article
%A D. M. Voynov
%A V. A. Kovalev
%T The stability of neural networks under condition of adversarial attacks to biomedical image classification
%J Journal of the Belarusian State University. Mathematics and Informatics
%D 2020
%P 60-72
%V 3
%I mathdoc
%U http://geodesic.mathdoc.fr/item/BGUMI_2020_3_a5/
%G ru
%F BGUMI_2020_3_a5
D. M. Voynov; V. A. Kovalev. The stability of neural networks under condition of adversarial attacks to biomedical image classification. Journal of the Belarusian State University. Mathematics and Informatics, Volume 3 (2020), pp. 60-72. http://geodesic.mathdoc.fr/item/BGUMI_2020_3_a5/
[1] B. Recht, R. Roelofs, L. Schmidt, V. Shankar, “Do CIFAR-10 classifiers generalize to CIFAR-10?”, 2018, 25, arXiv: https://arxiv.org/abs/1806.00451 | Zbl
[2] N. Akhtar, A. S. Mian, “Threat of adversarial attacks on deep learning in computer vision: a survey”, IEEE Access, 6 (2018), 14410–14430 | DOI
[3] G. Litjens, T. Kooi, B. E. Bejnordi, A. A. A. Setio, F. Ciompi, M. Ghafoorian, “A survey on deep learning in medical image analysis”, Medical Image Analysis, 42 (2017), 60–88 | DOI
[4] J. Ker, L. Wang, J. Rao, T. Lim, “Deep learning applications in medical image analysis”, IEEE Access, 6 (2018), 9375–9389 | DOI | MR
[5] A. Madry, A. Makelov, L. Schmidt, D. Tsipras, A. Vladu, “Towards deep learning models resistant to adversarial attacks”, 2017, 28, arXiv: https://arxiv.org/abs/1706.06083
[6] M. Ozdag, “Adversarial attacks and defenses against deep neural networks: a survey”, Procedia Computer Science, 140 (2018), 152–161 | DOI
[7] H. Wang, C. N. Yu, “A direct approach to robust deep learning using adversarial networks”, 2019, 15, arXiv: https://arxiv.org/abs/1905.09591
[8] W. Xu, D. Evans, Y. Qi, “Feature squeezing: detecting adversarial examples in deep neural networks”, 2017, 15, arXiv: https://arxiv.org/abs/1704.01155
[9] S. M. Moosavi-Dezfooli, A. Fawzi, P. Frossard, “DeepFool: a simple and accurate method to fool deep neural networks”, 2015, 9, arXiv: https://arxiv.org/abs/1511.04599
[10] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Goodfellow, “Intriguing properties of neural networks”, 2nd International conference on learning representations (Banff, Canada), 2014, 1–10, Springer
[11] N. Carlini, D. Wagner, “Towards evaluating the robustness of neural networks”, 2017 IEEE symposium on security and privacy (San Jose, CA, USA), 2017, 39–57 | DOI
[12] I. J. Goodfellow, J. Shlens, C. Szegedy, “Explaining and harnessing adversarial examples”, 2015, 11, arXiv: https://arxiv.org/abs/1412.6572v3