See the article record at its source, Math-Net.Ru
@article{MM_2020_32_9_a2,
     author = {Y. S. Popkov and A. Y. Popkov and Y. A. Dubnov},
     title = {Cross-entropy reduction of data matrix with restriction on information capacity of projectors and their norms},
     journal = {Matemati\v{c}eskoe modelirovanie},
     pages = {35--52},
     publisher = {mathdoc},
     volume = {32},
     number = {9},
     year = {2020},
     language = {ru},
     url = {http://geodesic.mathdoc.fr/item/MM_2020_32_9_a2/}
}
TY  - JOUR
AU  - Y. S. Popkov
AU  - A. Y. Popkov
AU  - Y. A. Dubnov
TI  - Cross-entropy reduction of data matrix with restriction on information capacity of projectors and their norms
JO  - Matematičeskoe modelirovanie
PY  - 2020
SP  - 35
EP  - 52
VL  - 32
IS  - 9
PB  - mathdoc
UR  - http://geodesic.mathdoc.fr/item/MM_2020_32_9_a2/
LA  - ru
ID  - MM_2020_32_9_a2
ER  -
%0 Journal Article
%A Y. S. Popkov
%A A. Y. Popkov
%A Y. A. Dubnov
%T Cross-entropy reduction of data matrix with restriction on information capacity of projectors and their norms
%J Matematičeskoe modelirovanie
%D 2020
%P 35-52
%V 32
%N 9
%I mathdoc
%U http://geodesic.mathdoc.fr/item/MM_2020_32_9_a2/
%G ru
%F MM_2020_32_9_a2
Y. S. Popkov; A. Y. Popkov; Y. A. Dubnov. Cross-entropy reduction of data matrix with restriction on information capacity of projectors and their norms. Matematičeskoe modelirovanie, Vol. 32 (2020) no. 9, pp. 35-52. http://geodesic.mathdoc.fr/item/MM_2020_32_9_a2/
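The BibTeX record above is plain key-value data, so it can be consumed programmatically. Below is a minimal Python sketch of pulling the entry type, citation key, and fields out of a single record of the brace-delimited shape shown here; parse_bibtex is a helper name introduced for illustration, not a library API, and its regular expressions only tolerate one level of inner braces (enough for the \v{c} in the journal field).

import re

def parse_bibtex(record):
    # Entry head: "@article{MM_2020_32_9_a2," -> type and citation key.
    head = re.match(r"@(\w+)\s*\{\s*([^,\s]+)\s*,", record)
    entry = {"type": head.group(1), "key": head.group(2)}
    # "name = {value}" pairs; the value may contain one level of
    # inner braces, e.g. the TeX accent \v{c}.
    field = r"(\w+)\s*=\s*\{((?:[^{}]|\{[^{}]*\})*)\}"
    for name, value in re.findall(field, record):
        entry[name.lower()] = value
    return entry

record = r"""@article{MM_2020_32_9_a2,
  author  = {Y. S. Popkov and A. Y. Popkov and Y. A. Dubnov},
  journal = {Matemati\v{c}eskoe modelirovanie},
  year    = {2020}
}"""

parsed = parse_bibtex(record)
print(parsed["key"])     # MM_2020_32_9_a2
print(parsed["author"])  # Y. S. Popkov and A. Y. Popkov and Y. A. Dubnov

Real-world BibTeX (quoted values, string concatenation, multiple entries) needs a dedicated parser; the sketch only covers the single-entry export shown on this page.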
[1] T. Hastie, R. Tibshirani, J. Friedman, The Elements of Statistical Learning: Data Mining, Inference, and Prediction, Springer, New York, 2009 | MR | Zbl
[2] K. V. Vorontsov, Matematicheskie metody obucheniya po pretsedentam [Mathematical Methods of Learning by Precedent], lecture course, MIPT, 2013
[3] L. van der Maaten, E. Postma, J. van den Herik, “Dimensionality Reduction: A Comparative Review”, Tilburg University Technical Report, TiCC TR 2009-005, 2009, 1–35
[4] I. K. Fodor, A Survey of Dimension Reduction Techniques, Technical Report No. 1, 2002, 18 pp. http://www.osti.gov/servlets/purl/15002155-mumfPN/native/
[5] A. M. Bruckstein, D. L. Donoho, M. Elad, “From sparse solutions of systems of equations to sparse modeling of signals and images”, SIAM Review, 51:1 (2009), 34–81 | MR | Zbl
[6] M. Kendall, A. Stuart, Statisticheskie vyvody i svyazi [Statistical Inference and Relationships], Russian transl. by L. I. Galchuk, A. T. Terekhin, Nauka, Moscow, 1973
[7] I. T. Jolliffe, Principal component analysis, Springer-Verlag, New York, 1986 | MR
[8] P. Comon, C. Jutten, Handbook of Blind Source Separation. Independent Component Analysis and Applications, Academic Press, Oxford, 2010
[9] M. W. Berry, M. Browne, “Algorithms and Applications for Approximate Nonnegative Matrix Factorization”, Computational Statistics & Data Analysis, 52 (2007), 155–173 | MR | Zbl
[10] B. T. Polyak, M. V. Khlebnikov, “Principal component analysis: Robust versions”, Automation and Remote Control, 78 (2017), 490–506 | DOI | MR | Zbl
[11] E. Bingham, H. Mannila, “Random projection in dimensionality reduction: applications to image and text data”, Proc. of the Seventh ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, ACM, 2001, 245–250
[12] S. S. Vempala, The Random Projection Method, DIMACS Series in Discrete Mathematics and Theoretical Computer Science, 65, American Math. Soc., 2005 | MR
[13] W. B. Johnson, J. Lindenstrauss, “Extensions of Lipschitz mappings into a Hilbert space”, Conference in Modern Analysis and Probability, Contemporary Mathematics, 26, Amer. Math. Soc., 1984, 189–206 | MR
[14] D. Achlioptas, “Database-friendly random projections”, PODS '01, ACM, 2001, 274–281 | MR
[15] H. C. Peng, F. Long, C. Ding, “Feature selection based on mutual information: criteria of max-dependency, max-relevance, and min-redundancy”, IEEE Trans. on Pattern Analysis and Machine Intelligence, 27:8 (2005), 1226–1238
[16] Y. Zhang, S. Li, T. Wang, Z. Zhang, “Divergence-based feature selection for separate classes”, Neurocomputing, 101 (2013), 32–42
[17] Y. S. Popkov, Y. A. Dubnov, A. Y. Popkov, “Dimension Reduction Method for Randomized Machine Learning Problems”, Automation and Remote Control, 79:11 (2018), 2038–2051 | DOI | MR | Zbl
[18] J. R. Magnus, H. Neudecker, Matrix differential calculus with applications in statistics and econometrics, Wiley, 1988 | MR
[19] B. T. Polyak, Vvedenie v optimizatsiyu [Introduction to Optimization], Nauka, Moscow, 1983
[20] A. S. Strekalovskiy, Elementy nevypukloy optimizatsii [Elements of Nonconvex Optimization], Nauka, Novosibirsk, 2003
[21] Y. S. Popkov, Teoriya makrosistem. Ravnovesnye modeli [Theory of Macrosystems: Equilibrium Models], URSS, Moscow, 2012
[22] C. Bishop, Pattern Recognition and Machine Learning, Information Science and Statistics, 1st ed. 2006, corrected 2nd printing, Springer, New York, 2007 | MR
[23] J. Friedman, T. Hastie, R. Tibshirani, The Elements of Statistical Learning, Springer Series in Statistics, 1, Springer, Berlin, 2001 | MR
[24] K. Q. Weinberger, L. K. Saul, “Unsupervised learning of image manifolds by semidefinite programming”, International J. of Comp. Vision, 70:1 (2006), 77–90 | DOI
[25] L. K. Saul, S. T. Roweis, “Think globally, fit locally: unsupervised learning of low dimensional manifolds”, Journal of Machine Learning Research, 4 (2003), 119–155 | MR
[26] F. Pedregosa, G. Varoquaux, A. Gramfort et al., “Scikit-learn: Machine Learning in Python”, Journal of Machine Learning Research, 12 (2011), 2825–2830 | MR | Zbl
[27] L. Buitinck, G. Louppe, M. Blondel et al., “API design for machine learning software: experiences from the scikit-learn project”, ECML PKDD Workshop: Languages for Data Mining and Machine Learning, 2013, 108–122 | MR
[28] KEEL Dataset Repository, https://sci2s.ugr.es/keel/datasets.php (accessed 2019-07-03)
[29] D. Kraft, A Software Package for Sequential Quadratic Programming, Rep. DFVLR-FB 88-28, DLR German Aerospace Center, Institute for Flight Mechanics, Köln, Germany, 1988