Mots-clés : Markov chains
@article{VSPUI_2023_19_1_a3,
  author   = {Saakyan, R. R. and Shpekht, I. A. and Petrosyan, G. A.},
  title    = {Finding the presence of borrowings in scientific works based on {Markov} chains},
  journal  = {Vestnik Sankt-Peterburgskogo universiteta. Prikladna{\^a} matematika, informatika, processy upravleni{\^a}},
  year     = {2023},
  volume   = {19},
  number   = {1},
  pages    = {43--50},
  language = {ru},
  url      = {http://geodesic.mathdoc.fr/item/VSPUI_2023_19_1_a3/},
}
TY - JOUR AU - R. R. Saakyan AU - I. A. Shpekht AU - G. A. Petrosyan TI - Finding the presence of borrowings in scientific works based on Markov chains JO - Vestnik Sankt-Peterburgskogo universiteta. Prikladnaâ matematika, informatika, processy upravleniâ PY - 2023 SP - 43 EP - 50 VL - 19 IS - 1 UR - http://geodesic.mathdoc.fr/item/VSPUI_2023_19_1_a3/ LA - ru ID - VSPUI_2023_19_1_a3 ER -
%0 Journal Article %A R. R. Saakyan %A I. A. Shpekht %A G. A. Petrosyan %T Finding the presence of borrowings in scientific works based on Markov chains %J Vestnik Sankt-Peterburgskogo universiteta. Prikladnaâ matematika, informatika, processy upravleniâ %D 2023 %P 43-50 %V 19 %N 1 %U http://geodesic.mathdoc.fr/item/VSPUI_2023_19_1_a3/ %G ru %F VSPUI_2023_19_1_a3
R. R. Saakyan; I. A. Shpekht; G. A. Petrosyan. Finding the presence of borrowings in scientific works based on Markov chains. Vestnik Sankt-Peterburgskogo universiteta. Prikladnaâ matematika, informatika, processy upravleniâ, Tome 19 (2023) no. 1, pp. 43-50. http://geodesic.mathdoc.fr/item/VSPUI_2023_19_1_a3/
[1] Borrowings in scientific publications and recommendations for citations, Plekhanov Russian University of Economics Press, M., 2022 (accessed: September 1, 2022)
[2] Agrawal R., Must known techniques for text preprocessing in NLP, Analytics Vidhya, 2022 (accessed: September 1, 2022) https://www.analyticsvidhya.com/blog/2021/06/must-known-techniques-for-text-preprocessing-in-nlp/
[3] Camacho-Collados J., Pilehvar M. T., On the role of text preprocessing in neural network architectures: An evaluation study on text categorization and sentiment analysis, 2018, arXiv: 1707.01780 (accessed: September 1, 2022) | Zbl
[4] Minaee Sh., Kalchbrenner N., Cambria E., Nikzad N., Chenaghlu M., Gao J., Deep learning based text classification: a comprehensive review, Cornell University, Cornell, 2020, arXiv: 2004.03705 (accessed: September 1, 2022)
[5] Mikolov T., Chen K., Corrado G., Dean J., Efficient estimation of word representations in vector space, Cornell University, Cornell, 2013, arXiv: 1301.3781 (accessed: September 1, 2022)
[6] Le V., Mikolov T., Distributed representations of sentences and documents, Cornell University, Cornell, 2014, arXiv: 1405.4053 (accessed: September 1, 2022)
[7] Yang Zh., Jin Sh., Huang Y., Zhang Y., Li H., Automatically generate steganographic text based on Markov model and Huffman coding, Cornell University, Cornell, 2018, arXiv: 1811.04720 (accessed: September 1, 2022)
[8] Thelin R., Build a deep learning text generator project with Markov chains, Educative, 2022 (accessed: September 1, 2022) https://www.educative.io/blog/deep-learning-text-generation-markov-chains | Zbl
[9] Papadopoulos A., Roy P., Pachet F., “Avoiding plagiarism in Markov sequence generation”, Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence (July 27–31, 2014), 2731–2737 (accessed: September 1, 2022) https://www.francoispachet.fr/wp-content/uploads/2021/01/papadopoulos-14a.pdf | MR