See the article's record from its source, Math-Net.Ru
@article{DA_2022_29_3_a2,
     author = {V. N. Krutikov and P. S. Stanimirovi\'c and O. N. Indenko and E. M. Tovbis and L. A. Kazakovtsev},
     title = {Optimization of subgradient method parameters on the base of rank-two correction of~metric~matrices},
     journal = {Diskretnyj analiz i issledovanie operacij},
     pages = {24--44},
     publisher = {mathdoc},
     volume = {29},
     number = {3},
     year = {2022},
     language = {ru},
     url = {http://geodesic.mathdoc.fr/item/DA_2022_29_3_a2/}
}
TY - JOUR
AU - V. N. Krutikov
AU - P. S. Stanimirović
AU - O. N. Indenko
AU - E. M. Tovbis
AU - L. A. Kazakovtsev
TI - Optimization of subgradient method parameters on the base of rank-two correction of~metric~matrices
JO - Diskretnyj analiz i issledovanie operacij
PY - 2022
SP - 24
EP - 44
VL - 29
IS - 3
PB - mathdoc
UR - http://geodesic.mathdoc.fr/item/DA_2022_29_3_a2/
LA - ru
ID - DA_2022_29_3_a2
ER -
%0 Journal Article
%A V. N. Krutikov
%A P. S. Stanimirović
%A O. N. Indenko
%A E. M. Tovbis
%A L. A. Kazakovtsev
%T Optimization of subgradient method parameters on the base of rank-two correction of~metric~matrices
%J Diskretnyj analiz i issledovanie operacij
%D 2022
%P 24-44
%V 29
%N 3
%I mathdoc
%U http://geodesic.mathdoc.fr/item/DA_2022_29_3_a2/
%G ru
%F DA_2022_29_3_a2
V. N. Krutikov; P. S. Stanimirović; O. N. Indenko; E. M. Tovbis; L. A. Kazakovtsev. Optimization of subgradient method parameters on the base of rank-two correction of metric matrices. Diskretnyj analiz i issledovanie operacij, Vol. 29 (2022) no. 3, pp. 24-44. http://geodesic.mathdoc.fr/item/DA_2022_29_3_a2/