@article{IJAMCS_2019_29_3_a12,
    author = {Jankowski, Norbert and Linowiecki, Rafa{\l}},
    title = {A fast neural network learning algorithm with approximate singular value decomposition},
    journal = {International Journal of Applied Mathematics and Computer Science},
    pages = {581--594},
    publisher = {mathdoc},
    volume = {29},
    number = {3},
    year = {2019},
    language = {en},
    url = {http://geodesic.mathdoc.fr/item/IJAMCS_2019_29_3_a12/}
}
TY - JOUR
AU - Jankowski, Norbert
AU - Linowiecki, Rafał
TI - A fast neural network learning algorithm with approximate singular value decomposition
JO - International Journal of Applied Mathematics and Computer Science
PY - 2019
SP - 581
EP - 594
VL - 29
IS - 3
PB - mathdoc
UR - http://geodesic.mathdoc.fr/item/IJAMCS_2019_29_3_a12/
LA - en
ID - IJAMCS_2019_29_3_a12
ER -
%0 Journal Article
%A Jankowski, Norbert
%A Linowiecki, Rafał
%T A fast neural network learning algorithm with approximate singular value decomposition
%J International Journal of Applied Mathematics and Computer Science
%D 2019
%P 581-594
%V 29
%N 3
%I mathdoc
%U http://geodesic.mathdoc.fr/item/IJAMCS_2019_29_3_a12/
%G en
%F IJAMCS_2019_29_3_a12
Jankowski, Norbert; Linowiecki, Rafał. A fast neural network learning algorithm with approximate singular value decomposition. International Journal of Applied Mathematics and Computer Science, Volume 29 (2019) no. 3, pp. 581-594. http://geodesic.mathdoc.fr/item/IJAMCS_2019_29_3_a12/
[1] Bishop, C.M. (1995). Training with noise is equivalent to Tikhonov regularization, Neural Computation 7(1): 108–116.
[2] Boser, B.E., Guyon, I.M. and Vapnik, V. (1992). A training algorithm for optimal margin classifiers, in D. Haussler (Ed.), Proceedings of the 5th Annual ACM Workshop on Computational Learning Theory, Pittsburgh, PA, USA, pp. 144–152.
[3] Broomhead, D.S. and Lowe, D. (1988). Multivariable functional interpolation and adaptive networks, Complex Systems 2(3): 321–355.
[4] Dumais, S.T. (2005). Latent semantic analysis, Annual Review of Information Science and Technology 38(1): 188–230.
[5] Eirola, E., Lendasse, A., Vandewalle, V. and Biernacki, C. (2014). Mixture of Gaussians for distance estimation with missing data, Neurocomputing 131: 32–42.
[6] Goodfellow, I., Bengio, Y. and Courville, A. (2016). Deep Learning, MIT Press, Cambridge, MA, http://www.deeplearningbook.org.
[7] Górecki, T. and Łuczak, M. (2013). Linear discriminant analysis with a generalization of the Moore–Penrose pseudoinverse, International Journal of Applied Mathematics and Computer Science 23(2): 463–471, DOI: 10.2478/amcs-2013-0035.
[8] Halko, N., Martinsson, P.G. and Tropp, J.A. (2011). Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions, SIAM Review 53(2): 217–288.
[9] Heseltine, T., Pears, N., Austin, J. and Chen, Z. (2003). Face recognition: A comparison of appearance-based approaches, 7th International Conference on Digital Image Computing: Techniques and Applications, Sydney, Australia, Vol. 1, pp. 59–68.
[10] Huang, G.-B., Bai, Z., Kasun, L.L.C. and Vong, C.M. (2015). Local receptive fields based extreme learning machine, IEEE Computational Intelligence Magazine 10(2): 18–29.
[11] Huang, G.-B., Zhu, Q.-Y. and Siew, C.-K. (2004). Extreme learning machine: A new learning scheme of feedforward neural networks, International Joint Conference on Neural Networks, Budapest, Hungary, pp. 985–990.
[12] Huang, G.-B., Zhu, Q.-Y. and Siew, C.-K. (2006). Extreme learning machine: Theory and applications, Neurocomputing 70(1–3): 489–501.
[13] Jankowski, N. (2013). Meta-learning and new ways in model construction for classification problems, Journal of Network Information Security 4(4): 275–284.
[14] Jankowski, N. (2018). Comparison of prototype selection algorithms used in construction of neural networks learned by SVD, International Journal of Applied Mathematics and Computer Science 28(4): 719–733, DOI: 10.2478/amcs-2018-0055.
[15] Merz, C.J. and Murphy, P.M. (1998). UCI Repository of Machine Learning Databases, https://archive.ics.uci.edu/ml/index.php.
[16] Mitchell, T. (1997). Machine Learning, McGraw Hill, New York, NY.
[17] Rumelhart, D.E., Hinton, G.E. and Williams, R.J. (1986). Learning internal representations by error propagation, in D.E. Rumelhart and J.L. McClelland (Eds), Parallel Distributed Processing: Explorations in the Microstructure of Cognition, Vol. 1: Foundations, MIT Press, Cambridge, MA, pp. 318–362.
[18] Sovilj, D., Eirola, E., Miche, Y., Bjork, K.-M., Nian, R., Akusok, A. and Lendasse, A. (2016). Extreme learning machine for missing data using multiple imputations, Neurocomputing 174(Part A): 220–231.
[19] Tang, J., Deng, C. and Huang, G.-B. (2016). Extreme learning machine for multilayer perceptron, IEEE Transactions on Neural Networks and Learning Systems 27(4): 809–821.
[20] Tikhonov, A.N. and Arsenin, V.Y. (1977). Solutions of Ill-Posed Problems, W.H. Winston, Washington, DC.
[21] Vapnik, V. (1995). The Nature of Statistical Learning Theory, Springer-Verlag, New York, NY.